repo_name
stringlengths
5
114
repo_url
stringlengths
24
133
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
directory_id
stringlengths
40
40
branch_name
stringclasses
209 values
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
9.83k
683M
star_events_count
int64
0
22.6k
fork_events_count
int64
0
4.15k
gha_license_id
stringclasses
17 values
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_language
stringclasses
115 values
files
listlengths
1
13.2k
num_files
int64
1
13.2k
anderson-dan-w/cartography
https://github.com/anderson-dan-w/cartography
684efce67d84355521f28f9adbd2c52e7db71cf2
b79695b706c21854795ba41e0a3f981025a3952d
e67310d80a43640ee31c0140b6f7f18c340c1698
refs/heads/master
2020-05-05T00:37:48.333572
2019-04-03T22:32:25
2019-04-03T22:32:25
179,581,173
0
0
Apache-2.0
2019-04-04T21:39:34
2019-04-04T21:27:07
2019-04-04T12:49:26
null
[ { "alpha_fraction": 0.6197751760482788, "alphanum_fraction": 0.6230641007423401, "avg_line_length": 38.5065803527832, "blob_id": "1eb5018e354bee2e790a56d2c4ac880a4252ca33", "content_id": "94ca71473f0462bb6cb0853465139c9899c09770", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-generic-cla" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24020, "license_type": "permissive", "max_line_length": 120, "num_lines": 608, "path": "/cartography/intel/aws/ec2.py", "repo_name": "anderson-dan-w/cartography", "src_encoding": "UTF-8", "text": "import botocore.config\nimport logging\nimport time\n\nfrom cartography.util import run_cleanup_job\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO memoize this\ndef _get_botocore_config():\n return botocore.config.Config(\n read_timeout=360,\n retries={\n 'max_attempts': 10,\n }\n )\n\n\ndef get_ec2_regions(session):\n client = session.client('ec2')\n result = client.describe_regions()\n return [r['RegionName'] for r in result['Regions']]\n\n\ndef get_ec2_security_group_data(session, region):\n client = session.client('ec2', region_name=region, config=_get_botocore_config())\n paginator = client.get_paginator('describe_security_groups')\n security_groups = []\n for page in paginator.paginate():\n security_groups.extend(page['SecurityGroups'])\n return {'SecurityGroups': security_groups}\n\n\ndef get_ec2_instances(session, region):\n client = session.client('ec2', region_name=region, config=_get_botocore_config())\n paginator = client.get_paginator('describe_instances')\n reservations = []\n for page in paginator.paginate():\n reservations.extend(page['Reservations'])\n return {'Reservations': reservations}\n\n\ndef get_ec2_auto_scaling_groups(session, region):\n client = session.client('autoscaling', region_name=region, config=_get_botocore_config())\n paginator = client.get_paginator('describe_auto_scaling_groups')\n asgs = []\n for page in paginator.paginate():\n 
asgs.extend(page['AutoScalingGroups'])\n return {'AutoScalingGroups': asgs}\n\n\ndef get_loadbalancer_data(session, region):\n client = session.client('elb', region_name=region, config=_get_botocore_config())\n paginator = client.get_paginator('describe_load_balancers')\n elbs = []\n for page in paginator.paginate():\n elbs.extend(page['LoadBalancerDescriptions'])\n return {'LoadBalancerDescriptions': elbs}\n\n\ndef load_ec2_instances(session, data, region, current_aws_account_id, aws_update_tag):\n ingest_reservation = \"\"\"\n MERGE (reservation:EC2Reservation{reservationid: {ReservationId}})\n ON CREATE SET reservation.firstseen = timestamp()\n SET reservation.ownerid = {OwnerId}, reservation.requesterid = {RequesterId}, reservation.region = {Region},\n reservation.lastupdated = {aws_update_tag}\n WITH reservation\n MATCH (awsAccount:AWSAccount{id: {AWS_ACCOUNT_ID}})\n MERGE (awsAccount)-[r:RESOURCE]->(reservation)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n ingest_instance = \"\"\"\n MERGE (instance:EC2Instance{instanceid: {InstanceId}})\n ON CREATE SET instance.firstseen = timestamp()\n SET instance.publicdnsname = {PublicDnsName}, instance.privateipaddress = {PrivateIpAddress},\n instance.imageid = {ImageId}, instance.instancetype = {InstanceType}, instance.monitoringstate = {MonitoringState},\n instance.state = {State}, instance.launchtime = {LaunchTime}, instance.launchtimeunix = {LaunchTimeUnix},\n instance.region = {Region}, instance.lastupdated = {aws_update_tag}\n WITH instance\n MERGE (subnet:EC2Subnet{subnetid: {SubnetId}})\n ON CREATE SET subnet.firstseen = timestamp()\n SET subnet.region = {Region}, subnet.lastupdated = {aws_update_tag}\n MERGE (instance)-[r:PART_OF_SUBNET]->(subnet)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n WITH instance\n MATCH (rez:EC2Reservation{reservationid: {ReservationId}})\n MERGE (instance)-[r:MEMBER_OF_EC2_RESERVATION]->(rez)\n ON 
CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n WITH instance\n MATCH (aa:AWSAccount{id: {AWS_ACCOUNT_ID}})\n MERGE (aa)-[r:RESOURCE]->(instance)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n ingest_security_groups = \"\"\"\n MERGE (group:EC2SecurityGroup{id: {GroupId}})\n ON CREATE SET group.firstseen = timestamp(), group.groupid = {GroupId}\n SET group.name = {GroupName}, group.region = {Region}, group.lastupdated = {aws_update_tag}\n WITH group\n MATCH (aa:AWSAccount{id: {AWS_ACCOUNT_ID}})\n MERGE (aa)-[r:RESOURCE]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n WITH group\n MATCH (instance:EC2Instance{instanceid: {InstanceId}})\n MERGE (instance)-[r:MEMBER_OF_EC2_SECURITY_GROUP]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n for reservation in data['Reservations']:\n reservation_id = reservation[\"ReservationId\"]\n\n session.run(\n ingest_reservation,\n ReservationId=reservation_id,\n OwnerId=reservation.get(\"OwnerId\", \"\"),\n RequesterId=reservation.get(\"RequesterId\", \"\"),\n AWS_ACCOUNT_ID=current_aws_account_id,\n Region=region,\n aws_update_tag=aws_update_tag\n )\n\n for instance in reservation[\"Instances\"]:\n instanceid = instance[\"InstanceId\"]\n\n monitoring_state = instance.get(\"Monitoring\", {}).get(\"State\", \"\")\n\n instance_state = instance.get(\"State\", {}).get(\"Name\", \"\")\n\n # NOTE this is a hack because we're using a version of Neo4j that doesn't support temporal data types\n launch_time = instance.get(\"LaunchTime\", \"\")\n if launch_time:\n launch_time_unix = time.mktime(launch_time.timetuple())\n else:\n launch_time_unix = \"\"\n\n session.run(\n ingest_instance,\n InstanceId=instanceid,\n PublicDnsName=instance.get(\"PublicDnsName\", \"\"),\n PublicIpAddress=instance.get(\"PublicIpAddress\", \"\"),\n 
PrivateIpAddress=instance.get(\"PrivateIpAddress\", \"\"),\n ImageId=instance.get(\"ImageId\", \"\"),\n SubnetId=instance.get(\"SubnetId\", \"\"),\n InstanceType=instance.get(\"InstanceType\", \"\"),\n ReservationId=reservation_id,\n MonitoringState=monitoring_state,\n LaunchTime=str(launch_time),\n LaunchTimeUnix=launch_time_unix,\n State=instance_state,\n AWS_ACCOUNT_ID=current_aws_account_id,\n Region=region,\n aws_update_tag=aws_update_tag\n )\n\n if instance.get(\"SecurityGroups\"):\n for group in instance[\"SecurityGroups\"]:\n session.run(\n ingest_security_groups,\n GroupId=group[\"GroupId\"],\n GroupName=group.get(\"GroupName\", \"\"),\n InstanceId=instanceid,\n Region=region,\n AWS_ACCOUNT_ID=current_aws_account_id,\n aws_update_tag=aws_update_tag\n )\n\n load_ec2_instance_network_interfaces(session, instance, aws_update_tag)\n\n\ndef load_ec2_instance_network_interfaces(session, instance_data, aws_update_tag):\n ingest_network_interface = \"\"\"\n MATCH (instance:EC2Instance{instanceid: {InstanceId}})\n MERGE (interface:NetworkInterface{id: {NetworkId}})\n ON CREATE SET interface.firstseen = timestamp()\n SET interface.status = {Status}, interface.mac_address = {MacAddress}, interface.description = {Description},\n interface.private_dns_name = {PrivateDnsName}, interface.private_ip_address = {PrivateIpAddress},\n interface.lastupdated = {aws_update_tag}\n MERGE (instance)-[r:NETWORK_INTERFACE]->(interface)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n WITH interface\n MERGE (subnet:EC2Subnet{subnetid: {SubnetId}})\n ON CREATE SET subnet.firstseen = timestamp()\n SET subnet.lastupdated = {aws_update_tag}\n MERGE (interface)-[r:PART_OF_SUBNET]->(subnet)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n ingest_network_group = \"\"\"\n MATCH (interface:NetworkInterface{id: {NetworkId}}),\n (group:EC2SecurityGroup{groupid: {GroupId}})\n MERGE 
(interface)-[r:MEMBER_OF_EC2_SECURITY_GROUP]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n instance_id = instance_data[\"InstanceId\"]\n\n for interface in instance_data[\"NetworkInterfaces\"]:\n session.run(\n ingest_network_interface,\n InstanceId=instance_id,\n NetworkId=interface[\"NetworkInterfaceId\"],\n Status=interface[\"Status\"],\n MacAddress=interface.get(\"MacAddress\", \"\"),\n Description=interface.get(\"Description\", \"\"),\n PrivateDnsName=interface.get(\"PrivateDnsName\", \"\"),\n PrivateIpAddress=interface.get(\"PrivateIpAddress\", \"\"),\n SubnetId=interface.get(\"SubnetId\", \"\"),\n aws_update_tag=aws_update_tag\n )\n\n for group in interface.get(\"Groups\", []):\n session.run(\n ingest_network_group,\n NetworkId=interface[\"NetworkInterfaceId\"],\n GroupId=group[\"GroupId\"],\n aws_update_tag=aws_update_tag\n )\n\n\ndef load_ec2_security_groupinfo(session, data, region, current_aws_account_id, aws_update_tag):\n ingest_security_group = \"\"\"\n MERGE (group:EC2SecurityGroup{id: {GroupId}})\n ON CREATE SET group.firstseen = timestamp(), group.groupid = {GroupId}\n SET group.name = {GroupName}, group.description = {Description}, group.region = {Region},\n group.lastupdated = {aws_update_tag}\n WITH group\n MATCH (aa:AWSAccount{id: {AWS_ACCOUNT_ID}})\n MERGE (aa)-[r:RESOURCE]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n for group in data[\"SecurityGroups\"]:\n group_id = group[\"GroupId\"]\n\n session.run(\n ingest_security_group,\n GroupId=group_id,\n GroupName=group.get(\"GroupName\", \"\"),\n Description=group.get(\"Description\", \"\"),\n Region=region,\n AWS_ACCOUNT_ID=current_aws_account_id,\n aws_update_tag=aws_update_tag\n )\n\n load_ec2_security_group_rule(session, group, \"IpPermissions\", aws_update_tag)\n load_ec2_security_group_rule(session, group, \"IpPermissionEgress\", aws_update_tag)\n\n\ndef 
load_ec2_security_group_rule(session, group, rule_type, aws_update_tag):\n ingest_rule = \"\"\"\n MERGE (rule:#RULE_TYPE#{ruleid: {RuleId}})\n ON CREATE SET rule :IpRule, rule.firstseen = timestamp(), rule.fromport = {FromPort}, rule.toport = {ToPort},\n rule.protocol = {Protocol}\n SET rule.lastupdated = {aws_update_tag}\n WITH rule\n MATCH (group:EC2SecurityGroup{groupid: {GroupId}})\n MERGE (group)<-[r:MEMBER_OF_EC2_SECURITY_GROUP]-(rule)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag};\n \"\"\"\n\n ingest_rule_group_pair = \"\"\"\n MERGE (group:EC2SecurityGroup{id: {GroupId}})\n ON CREATE SET group.firstseen = timestamp(), group.groupid = {GroupId}\n SET group.lastupdated = {aws_update_tag}\n WITH group\n MATCH (inbound:IpRule{ruleid: {RuleId}})\n MERGE (inbound)-[r:MEMBER_OF_EC2_SECURITY_GROUP]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n ingest_range = \"\"\"\n MERGE (range:IpRange{id: {RangeId}})\n ON CREATE SET range.firstseen = timestamp(), range.range = {RangeId}\n SET range.lastupdated = {aws_update_tag}\n WITH range\n MATCH (rule:IpRule{ruleid: {RuleId}})\n MERGE (rule)<-[r:MEMBER_OF_IP_RULE]-(range)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n group_id = group[\"GroupId\"]\n rule_type_map = {\"IpPermissions\": \"IpPermissionInbound\", \"IpPermissionEgress\": \"IpPermissionEgress\"}\n\n if group.get(rule_type):\n for rule in group[rule_type]:\n protocol = rule.get(\"IpProtocol\", \"all\")\n from_port = rule.get(\"FromPort\", \"\")\n to_port = rule.get(\"ToPort\", \"\")\n\n ruleid = \"{0}/{1}/{2}{3}{4}\".format(group_id, rule_type, from_port, to_port, protocol)\n # NOTE Cypher query syntax is incompatible with Python string formatting, so we have to do this awkward\n # NOTE manual formatting instead.\n session.run(\n ingest_rule.replace(\"#RULE_TYPE#\", rule_type_map[rule_type]),\n RuleId=ruleid,\n 
FromPort=from_port,\n ToPort=to_port,\n Protocol=protocol,\n GroupId=group_id,\n aws_update_tag=aws_update_tag\n )\n\n session.run(\n ingest_rule_group_pair,\n GroupId=group_id,\n RuleId=ruleid,\n aws_update_tag=aws_update_tag\n )\n\n for ip_range in rule[\"IpRanges\"]:\n range_id = ip_range[\"CidrIp\"]\n session.run(\n ingest_range,\n RangeId=range_id,\n RuleId=ruleid,\n aws_update_tag=aws_update_tag\n )\n\n\ndef load_ec2_auto_scaling_groups(session, data, region, current_aws_account_id, aws_update_tag):\n ingest_group = \"\"\"\n MERGE (group:AutoScalingGroup{arn: {ARN}})\n ON CREATE SET group.firstseen = timestamp(), group.name = {Name}, group.createdtime = {CreatedTime}\n SET group.lastupdated = {aws_update_tag}, group.launchconfigurationname = {LaunchConfigurationName},\n group.maxsize = {MaxSize}, group.region={Region}\n WITH group\n MATCH (aa:AWSAccount{id: {AWS_ACCOUNT_ID}})\n MERGE (aa)-[r:RESOURCE]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n ingest_vpc = \"\"\"\n MERGE (subnet:EC2Subnet{subnetid: {SubnetId}})\n ON CREATE SET subnet.firstseen = timestamp()\n SET subnet.lastupdated = {aws_update_tag}\n WITH subnet\n MATCH (group:AutoScalingGroup{arn: {GROUPARN}})\n MERGE (subnet)<-[r:VPC_IDENTIFIER]-(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n ingest_instance = \"\"\"\n MERGE (instance:EC2Instance{instanceid: {InstanceId}})\n ON CREATE SET instance.firstseen = timestamp()\n SET instance.lastupdated = {aws_update_tag}, instance.region={Region}\n WITH instance\n MATCH (group:AutoScalingGroup{arn: {GROUPARN}})\n MERGE (instance)-[r:MEMBER_AUTO_SCALE_GROUP]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n WITH instance\n MATCH (aa:AWSAccount{id: {AWS_ACCOUNT_ID}})\n MERGE (aa)-[r:RESOURCE]->(instance)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n for group in 
data[\"AutoScalingGroups\"]:\n name = group[\"AutoScalingGroupName\"]\n createtime = group.get(\"CreatedTime\", \"\")\n lauchconfig_name = group.get(\"LaunchConfigurationName\", \"\")\n group_arn = group[\"AutoScalingGroupARN\"]\n max_size = group[\"MaxSize\"]\n\n session.run(\n ingest_group,\n ARN=group_arn,\n Name=name,\n CreatedTime=str(createtime),\n LaunchConfigurationName=lauchconfig_name,\n MaxSize=max_size,\n AWS_ACCOUNT_ID=current_aws_account_id,\n Region=region,\n aws_update_tag=aws_update_tag\n )\n\n if group.get('VPCZoneIdentifier'):\n vpclist = group[\"VPCZoneIdentifier\"]\n for vpc in str(vpclist).split(','):\n session.run(\n ingest_vpc,\n SubnetId=vpc,\n GROUPARN=group_arn,\n aws_update_tag=aws_update_tag\n )\n\n if group.get(\"Instances\"):\n for instance in group[\"Instances\"]:\n instanceid = instance[\"InstanceId\"]\n session.run(\n ingest_instance,\n InstanceId=instanceid,\n GROUPARN=group_arn,\n AWS_ACCOUNT_ID=current_aws_account_id,\n Region=region,\n aws_update_tag=aws_update_tag\n )\n\n\ndef load_load_balancers(session, data, region, current_aws_account_id, aws_update_tag):\n ingest_load_balancer = \"\"\"\n MERGE (elb:LoadBalancer{id: {ID}})\n ON CREATE SET elb.firstseen = timestamp(), elb.createdtime = {CREATED_TIME}\n SET elb.lastupdated = {aws_update_tag}, elb.name = {NAME}, elb.dnsname = {DNS_NAME},\n elb.canonicalhostedzonename = {HOSTED_ZONE_NAME}, elb.canonicalhostedzonenameid = {HOSTED_ZONE_NAME_ID},\n elb.scheme = {SCHEME}, elb.region = {Region}\n WITH elb\n MATCH (aa:AWSAccount{id: {AWS_ACCOUNT_ID}})\n MERGE (aa)-[r:RESOURCE]->(elb)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n ingest_load_balancersource_security_group = \"\"\"\n MATCH (elb:LoadBalancer{id: {ID}}),\n (group:EC2SecurityGroup{name: {GROUP_NAME}})\n MERGE (elb)-[r:SOURCE_SECURITY_GROUP]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n 
ingest_load_balancer_security_group = \"\"\"\n MATCH (elb:LoadBalancer{id: {ID}}),\n (group:EC2SecurityGroup{groupid: {GROUP_ID}})\n MERGE (elb)-[r:MEMBER_OF_EC2_SECURITY_GROUP]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n ingest_instances = \"\"\"\n MATCH (elb:LoadBalancer{id: {ID}}), (instance:EC2Instance{instanceid: {INSTANCE_ID}})\n MERGE (elb)-[r:EXPOSE]->(instance)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n WITH instance\n MATCH (aa:AWSAccount{id: {AWS_ACCOUNT_ID}})\n MERGE (aa)-[r:RESOURCE]->(instance)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n for lb in data['LoadBalancerDescriptions']:\n load_balancer_id = lb[\"DNSName\"]\n\n session.run(\n ingest_load_balancer,\n ID=load_balancer_id,\n CREATED_TIME=str(lb[\"CreatedTime\"]),\n NAME=lb[\"LoadBalancerName\"],\n DNS_NAME=load_balancer_id,\n HOSTED_ZONE_NAME=lb.get(\"CanonicalHostedZoneName\", \"\"),\n HOSTED_ZONE_NAME_ID=lb.get(\"CanonicalHostedZoneNameID\", \"\"),\n SCHEME=lb.get(\"Scheme\", \"\"),\n AWS_ACCOUNT_ID=current_aws_account_id,\n Region=region,\n aws_update_tag=aws_update_tag\n )\n\n if lb[\"Subnets\"]:\n load_load_balancer_subnets(session, load_balancer_id, lb[\"Subnets\"], aws_update_tag)\n\n if lb[\"SecurityGroups\"]:\n for group in lb[\"SecurityGroups\"]:\n session.run(\n ingest_load_balancer_security_group,\n ID=load_balancer_id,\n GROUP_ID=str(group),\n aws_update_tag=aws_update_tag\n )\n\n if lb[\"SourceSecurityGroup\"]:\n source_group = lb[\"SourceSecurityGroup\"]\n session.run(\n ingest_load_balancersource_security_group,\n ID=load_balancer_id,\n GROUP_NAME=source_group[\"GroupName\"],\n aws_update_tag=aws_update_tag\n )\n\n if lb[\"Instances\"]:\n for instance in lb[\"Instances\"]:\n session.run(\n ingest_instances,\n ID=load_balancer_id,\n INSTANCE_ID=instance[\"InstanceId\"],\n AWS_ACCOUNT_ID=current_aws_account_id,\n 
aws_update_tag=aws_update_tag\n )\n\n if lb[\"ListenerDescriptions\"]:\n load_load_balancer_listeners(session, load_balancer_id, lb[\"ListenerDescriptions\"], aws_update_tag)\n\n\ndef load_load_balancer_subnets(session, load_balancer_id, subnets_data, aws_update_tag):\n ingest_load_balancer_subnet = \"\"\"\n MATCH (elb:LoadBalancer{id: {ID}}), (subnet:EC2Subnet{subnetid: {SUBNET_ID}})\n MERGE (elb)-[r:SUBNET]->(subnet)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n for subnet_id in subnets_data:\n session.run(\n ingest_load_balancer_subnet,\n ID=load_balancer_id,\n SUBNET_ID=subnet_id,\n aws_update_tag=aws_update_tag\n )\n\n\ndef load_load_balancer_listeners(session, load_balancer_id, listener_data, aws_update_tag):\n ingest_listener = \"\"\"\n MATCH (elb:LoadBalancer{id: {LoadBalancerId}})\n WITH elb\n UNWIND {Listeners} as data\n MERGE (l:Endpoint:ELBListener{id: elb.id + toString(data.Listener.LoadBalancerPort) +\n toString(data.Listener.Protocol)})\n ON CREATE SET l.port = data.Listener.LoadBalancerPort, l.protocol = data.Listener.Protocol,\n l.firstseen = timestamp()\n SET l.instance_port = data.Listener.InstancePort, l.instance_protocol = data.Listener.InstanceProtocol,\n l.lastupdated = {aws_update_tag}\n WITH l, elb\n MERGE (elb)-[r:ELB_LISTENER]->(l)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = {aws_update_tag}\n \"\"\"\n\n session.run(\n ingest_listener,\n LoadBalancerId=load_balancer_id,\n Listeners=listener_data,\n aws_update_tag=aws_update_tag\n )\n\n\ndef cleanup_ec2_security_groupinfo(session, common_job_parameters):\n run_cleanup_job(\n 'aws_import_ec2_security_groupinfo_cleanup.json',\n session,\n common_job_parameters\n )\n\n\ndef cleanup_ec2_instances(session, common_job_parameters):\n run_cleanup_job('aws_import_ec2_instances_cleanup.json', session, common_job_parameters)\n\n\ndef cleanup_ec2_auto_scaling_groups(session, common_job_parameters):\n run_cleanup_job(\n 
'aws_ingest_ec2_auto_scaling_groups_cleanup.json',\n session,\n common_job_parameters\n )\n\n\ndef cleanup_load_balancers(session, common_job_parameters):\n run_cleanup_job('aws_ingest_load_balancers_cleanup.json', session, common_job_parameters)\n\n\ndef sync_ec2_security_groupinfo(session, boto3_session, regions, current_aws_account_id, aws_update_tag,\n common_job_parameters):\n for region in regions:\n logger.debug(\"Syncing EC2 security groups for region '%s' in account '%s'.\", region, current_aws_account_id)\n data = get_ec2_security_group_data(boto3_session, region)\n load_ec2_security_groupinfo(session, data, region, current_aws_account_id, aws_update_tag)\n cleanup_ec2_security_groupinfo(session, common_job_parameters)\n\n\ndef sync_ec2_instances(session, boto3_session, regions, current_aws_account_id, aws_update_tag, common_job_parameters):\n for region in regions:\n logger.debug(\"Syncing EC2 instances for region '%s' in account '%s'.\", region, current_aws_account_id)\n data = get_ec2_instances(boto3_session, region)\n load_ec2_instances(session, data, region, current_aws_account_id, aws_update_tag)\n cleanup_ec2_instances(session, common_job_parameters)\n\n\ndef sync_ec2_auto_scaling_groups(session, boto3_session, regions, current_aws_account_id, aws_update_tag,\n common_job_parameters):\n for region in regions:\n logger.debug(\"Syncing auto scaling groups for region '%s' in account '%s'.\", region, current_aws_account_id)\n data = get_ec2_auto_scaling_groups(boto3_session, region)\n load_ec2_auto_scaling_groups(session, data, region, current_aws_account_id, aws_update_tag)\n cleanup_ec2_auto_scaling_groups(session, common_job_parameters)\n\n\ndef sync_load_balancers(session, boto3_session, regions, current_aws_account_id, aws_update_tag, common_job_parameters):\n for region in regions:\n logger.debug(\"Syncing EC2 load balancers for region '%s' in account '%s'.\", region, current_aws_account_id)\n data = get_loadbalancer_data(boto3_session, region)\n 
load_load_balancers(session, data, region, current_aws_account_id, aws_update_tag)\n cleanup_load_balancers(session, common_job_parameters)\n" } ]
1
stellandyt/cwservbot
https://github.com/stellandyt/cwservbot
76d36a46707f9ada9cc7ac6a836a9bfad988b46e
a73d25442a515aa17743e8e50a0f2f54f8fb7634
65f3a4735eef3d2de000f2db39cf32e60aecaff2
refs/heads/master
2020-05-09T13:00:28.018814
2019-08-18T08:25:48
2019-08-18T08:25:48
181,132,134
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6216080188751221, "alphanum_fraction": 0.6238693594932556, "avg_line_length": 36.54716873168945, "blob_id": "b7c3501aa09e8fe85f6c49e60db9490f599e7c19", "content_id": "f4614e080b9e106a41c32987612829a2c9d04844", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3980, "license_type": "no_license", "max_line_length": 78, "num_lines": 106, "path": "/venv/Lib/site-packages/oops_amqp/publisher.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Publish OOPS reports over amqp.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\n__metaclass__ = type\n\nfrom hashlib import md5\nfrom threading import local\n\nimport amqp\n\nfrom oops_amqp.anybson import dumps\nfrom oops_amqp.utils import (\n amqplib_error_types,\n is_amqplib_connection_error,\n )\n\n__all__ = [\n 'Publisher',\n ]\n\nclass Publisher:\n \"\"\"Publish OOPS reports over AMQP.\n \n Messages are published as bson dicts via durable messages sent to a\n supplied exchange + routing key.\n \"\"\"\n\n def __init__(self, connection_factory, exchange_name, routing_key,\n inherit_id=False):\n \"\"\"Create a publisher.\n\n :param connection_factory: A callable which creates an amqplib\n Connection when called. This is used to create connections - one\n per thread which OOPS publishing happens in. This is because\n amqplib is not threadsafe and recommends not sharing connections\n across threads.\n :param exchange_name: The name of the exchange to publish to.\n :param routing_key: The routing key for messages.\n :param inherit_id: If True any 'True' 'id' in an OOPS report is\n preserved. 
Handy if an id that has already been shown to a user is\n being published (but uniqueness cannot be guaranteed).\n \"\"\"\n self.connection_factory = connection_factory\n self.exchange_name = exchange_name\n self.routing_key = routing_key\n self.channels = local()\n self.inherit_id = inherit_id\n\n def get_channel(self):\n if getattr(self.channels, 'channel', None) is None:\n try:\n connection = self.connection_factory()\n connection.connect()\n self.channels.channel = connection.channel()\n except amqplib_error_types as e:\n if is_amqplib_connection_error(e):\n # Could not connect\n return None\n # Unknown error mode : don't hide it.\n raise\n return self.channels.channel\n\n def __call__(self, report):\n # Don't mess with the passed in report.\n report = dict(report)\n if not self.inherit_id or not report.get('id'):\n # Discard any existing id.\n original_id = report.pop('id', None)\n # Hash it, to make an ID\n oops_id = \"OOPS-%s\" % md5(dumps(report)).hexdigest()\n # Store the id in what we send on the wire, so that the recipient\n # has it.\n report['id'] = oops_id\n message = amqp.Message(dumps(report))\n # We don't want to drop OOPS on the floor if rabbit is restarted.\n message.properties[\"delivery_mode\"] = 2\n channel = self.get_channel()\n if channel is None:\n return []\n try:\n channel.basic_publish(\n message, self.exchange_name, routing_key=self.routing_key)\n except amqplib_error_types as e:\n self.channels.channel = None\n if is_amqplib_connection_error(e):\n # Could not connect / interrupted connection\n return []\n # Unknown error mode : don't hide it.\n raise\n return [report['id']]\n" }, { "alpha_fraction": 0.7056030631065369, "alphanum_fraction": 0.7165242433547974, "avg_line_length": 30.909090042114258, "blob_id": "56ae1b032ab2d511eb9e66106c643874328aaa49", "content_id": "0fdecad7a0770a22c2b0877584fe2b6c697fa6c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2106, "license_type": 
"no_license", "max_line_length": 78, "num_lines": 66, "path": "/venv/Lib/site-packages/oops_datedir_repo/serializer.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Read from any known serializer.\n\nWhere possible using the specific known serializer is better as it is more\nefficient and won't suffer false positives if two serializations happen to pun\nwith each other (unlikely though that is).\n\nTypical usage:\n >>> fp = file('an-oops', 'rb')\n >>> report = serializer.read(fp)\n\nSee the serializer_rfc822 and serializer_bson modules for information about\nserializing OOPS reports by hand. Generally just using the DateDirRepo.publish\nmethod is all that is needed.\n\"\"\"\n\n\nfrom __future__ import absolute_import, print_function\n\n__all__ = [\n 'read',\n ]\n\nimport bz2\nfrom io import BytesIO\n\nfrom oops_datedir_repo import (\n anybson as bson,\n serializer_bson,\n serializer_rfc822,\n )\n\n\ndef read(fp):\n \"\"\"Deserialize an OOPS from a bson or rfc822 message.\n\n The whole file is read regardless of the OOPS format. 
It should be\n opened in binary mode.\n\n :raises IOError: If the file has no content.\n \"\"\"\n # Deal with no-rewindable file pointers.\n content = fp.read()\n if len(content) == 0:\n # This OOPS has no content\n raise IOError(\"Empty OOPS Report\")\n if content[0:3] == b\"BZh\":\n content = bz2.decompress(content)\n try:\n return serializer_bson.read(BytesIO(content))\n except (KeyError, ValueError, IndexError, bson.InvalidBSON):\n return serializer_rfc822.read(BytesIO(content))\n" }, { "alpha_fraction": 0.8813559412956238, "alphanum_fraction": 0.8983050584793091, "avg_line_length": 8.833333015441895, "blob_id": "a430ea84c71e3466da34562340eab18d305d15f2", "content_id": "e533cb599dbff266b267d286d961ecfe97a4696a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 59, "license_type": "no_license", "max_line_length": 16, "num_lines": 6, "path": "/requirements.txt", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "Telebot\nrequests\npyTelegramBotApi\nPySocks\ngunicorn\nurllib3\n" }, { "alpha_fraction": 0.6519562005996704, "alphanum_fraction": 0.6560250520706177, "avg_line_length": 32.6315803527832, "blob_id": "f521474561c82fe712cb9693f0149d28f92d5427", "content_id": "2a69d69538de27991e821c13b978ced7cd73dc95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3195, "license_type": "no_license", "max_line_length": 78, "num_lines": 95, "path": "/venv/Lib/site-packages/oops_wsgi/hooks.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2010, 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"oops creation and filtering hooks for working with WSGI.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport re\n\n__all__ = [\n 'copy_environ',\n 'hide_cookie',\n 'install_hooks',\n 'update_report',\n ]\n\n_wsgi_standard_env_keys = set([\n 'REQUEST_METHOD',\n 'SCRIPT_NAME',\n 'PATH_INFO',\n 'QUERY_STRING',\n 'CONTENT_TYPE',\n 'CONTENT_LENGTH',\n 'SERVER_NAME',\n 'SERVER_PORT',\n 'SERVER_PROTOCOL',\n 'wsgi.version',\n 'wsgi.url_scheme',\n ])\n\n\ndef copy_environ(report, context):\n \"\"\"Copy useful variables from the wsgi environment if it is present.\n\n This should be in the context as 'wsgi_environ'.\n\n e.g. \n report = config.create(context=dict(wsgi_environ=environ))\n \"\"\"\n environ = context.get('wsgi_environ', {})\n if 'req_vars' not in report:\n report['req_vars'] = {}\n req_vars = report['req_vars']\n for key, value in sorted(environ.items()):\n if (key in _wsgi_standard_env_keys or\n key.startswith('HTTP_')):\n req_vars[key] = value\n\n\ndef hide_cookie(report, context):\n \"\"\"If there is an HTTP_COOKIE entry in the report, hide its value.\n\n The entry is looked for either as a top level key or in the req_vars dict.\n\n The COOKIE header is often used to carry session tokens and thus permits\n folk analyzing crash reports to log in as an arbitrary user (e.g. 
your\n sysadmin users).\n\n The same goes for the AUTHORIZATION header, although in that case we\n permit the authorization scheme to remain visible.\n \"\"\"\n if 'HTTP_COOKIE' in report:\n report['HTTP_COOKIE'] = '<hidden>'\n if 'HTTP_COOKIE' in report.get('req_vars', {}):\n report['req_vars']['HTTP_COOKIE'] = '<hidden>'\n if 'HTTP_AUTHORIZATION' in report:\n report['HTTP_AUTHORIZATION'] = re.sub(\n r'(.*?)\\s+.*', r'\\1 <hidden>', report['HTTP_AUTHORIZATION'])\n if 'HTTP_AUTHORIZATION' in report.get('req_vars', {}):\n report['req_vars']['HTTP_AUTHORIZATION'] = re.sub(\n r'(.*?)\\s+.*', r'\\1 <hidden>',\n report['req_vars']['HTTP_AUTHORIZATION'])\n\n\ndef install_hooks(config):\n \"\"\"Install the default wsgi hooks into config.\"\"\"\n config.on_create.extend([copy_environ, hide_cookie])\n config.on_create.insert(0, update_report)\n\n\ndef update_report(report, context):\n \"\"\"Copy the oops.report contents from the wsgi environment to report.\"\"\"\n report.update(context.get('wsgi_environ', {}).get('oops.report', {}))\n" }, { "alpha_fraction": 0.5990676283836365, "alphanum_fraction": 0.631701648235321, "avg_line_length": 34.75, "blob_id": "a5f146675a586b8650fa539dfd5ec50f095e5358", "content_id": "2bf740560f7d5a827cb05ed449ff0c05b3ac0257", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 83, "num_lines": 12, "path": "/venv/Scripts/prune-script.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "#!D:\\TeleBot\\venv\\Scripts\\python.exe\n# EASY-INSTALL-ENTRY-SCRIPT: 'oops-datedir-repo==0.0.24','console_scripts','prune'\n__requires__ = 'oops-datedir-repo==0.0.24'\nimport re\nimport sys\nfrom pkg_resources import load_entry_point\n\nif __name__ == '__main__':\n sys.argv[0] = re.sub(r'(-script\\.pyw?|\\.exe)?$', '', sys.argv[0])\n sys.exit(\n load_entry_point('oops-datedir-repo==0.0.24', 'console_scripts', 'prune')()\n )\n" }, { 
"alpha_fraction": 0.7168079018592834, "alphanum_fraction": 0.7288135886192322, "avg_line_length": 32.71428680419922, "blob_id": "ddbdaad2e76f2e56471bcfd3630969ebbfa8b813", "content_id": "eb4ec9b58f95eafe8234ad6c766c91cf32700d5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1416, "license_type": "no_license", "max_line_length": 79, "num_lines": 42, "path": "/venv/Lib/site-packages/oops_datedir_repo/bsondump.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "#\n# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Print a BSON document for easier human inspection.\n\nThis can be used for oopses, which are commonly (though not necessarily)\nstored as BSON.\n\nusage: bsondump FILE\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nfrom pprint import pprint\nimport sys\n\nfrom oops_datedir_repo import anybson as bson\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n if len(argv) != 2:\n print __doc__\n sys.exit(1)\n # I'd like to use json here, but not everything serializable in bson is\n # easily representable in json - even before getting in to the weird parts,\n # oopses commonly have datetime objects. 
-- mbp 2011-12-20\n pprint(bson.loads(file(argv[1]).read()))\n" }, { "alpha_fraction": 0.5733246803283691, "alphanum_fraction": 0.5755907893180847, "avg_line_length": 38.60256576538086, "blob_id": "ccaa2925ba9608a38157eaf5e99e114564d4aeac", "content_id": "da2cad02626000d1518dda104b5797fd2a2a7839", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12356, "license_type": "no_license", "max_line_length": 81, "num_lines": 312, "path": "/venv/Lib/site-packages/oops_datedir_repo/repository.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "#\n# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"The primary interface to oopses stored on disk - the DateDirRepo.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\n__metaclass__ = type\n\n__all__ = [\n 'DateDirRepo',\n ]\n\nimport datetime\nimport errno\nfrom functools import partial\nfrom hashlib import md5\nimport os.path\nimport stat\n\nfrom pytz import utc\n\nfrom oops_datedir_repo import (\n anybson as bson,\n serializer,\n serializer_bson,\n )\n\n\nclass DateDirRepo:\n \"\"\"Publish oopses to a date-dir repository.\n\n A date-dir repository is a directory containing:\n\n * Zero or one directories called 'metadata'. 
If it exists this directory\n contains any housekeeping material needed (such as a metadata.conf ini\n file).\n\n * Zero or more directories named like YYYY-MM-DD, which contain zero or\n more OOPS reports. OOPS file names can take various forms, but must not\n end in .tmp - those are considered to be OOPS reports that are currently\n being written.\n\n * The behaviour of this class is to assign OOPS file names by hashing the\n serialized OOPS to get a unique file name. Other naming schemes are\n valid - the code doesn't assume anything other than the .tmp limitation\n above.\n \"\"\"\n\n def __init__(self, error_dir, serializer=None, inherit_id=False,\n stash_path=False):\n \"\"\"Create a DateDirRepo.\n\n :param error_dir: The base directory to write OOPSes into. OOPSes are\n written into a subdirectory this named after the date (e.g.\n 2011-12-30).\n :param serializer: If supplied should be the module (e.g.\n oops_datedir_repo.serializer_rfc822) to use to serialize OOPSes.\n Defaults to using serializer_bson.\n :param inherit_id: If True, use the oops ID (if present) supplied in\n the report, rather than always assigning a new one.\n :param stash_path: If True, the filename that the OOPS was written to\n is stored in the OOPS report under the key 'datedir_repo_filepath'.\n It is not stored in the OOPS written to disk, only the in-memory\n model.\n \"\"\"\n self.root = error_dir\n if serializer is None:\n serializer = serializer_bson\n self.serializer = serializer\n self.inherit_id = inherit_id\n self.stash_path = stash_path\n self.metadatadir = os.path.join(self.root, 'metadata')\n self.config_path = os.path.join(self.metadatadir, 'config.bson')\n\n def publish(self, report, now=None):\n \"\"\"Write the report to disk.\n\n The report is written to a temporary file, and then renamed to its\n final location. Programs concurrently reading from a DateDirRepo\n should ignore files ending in .tmp.\n\n :param now: The datetime to use as the current time. 
Will be\n determined if not supplied. Useful for testing.\n \"\"\"\n # We set file permission to: rw-r--r-- (so that reports from\n # umask-restricted services can be gathered by a tool running as\n # another user).\n wanted_file_permission = (\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)\n if now is not None:\n now = now.astimezone(utc)\n else:\n now = datetime.datetime.now(utc)\n # Don't mess with the original report when changing ids etc.\n original_report = report\n report = dict(report)\n md5hash = md5(serializer_bson.dumps(report)).hexdigest()\n oopsid = 'OOPS-%s' % md5hash\n prefix = os.path.join(self.root, now.strftime('%Y-%m-%d'))\n if not os.path.isdir(prefix):\n try:\n os.makedirs(prefix)\n except OSError as err:\n # EEXIST - dir created by another, concurrent process\n if err.errno != errno.EEXIST:\n raise\n # For directories we need to set the x bits too.\n os.chmod(\n prefix, wanted_file_permission | stat.S_IXUSR | stat.S_IXGRP |\n stat.S_IXOTH)\n filename = os.path.join(prefix, oopsid)\n if self.inherit_id:\n oopsid = report.get('id') or oopsid\n report['id'] = oopsid\n with open(filename + '.tmp', 'wb') as f:\n self.serializer.write(report, f)\n os.rename(filename + '.tmp', filename)\n if self.stash_path:\n original_report['datedir_repo_filepath'] = filename\n os.chmod(filename, wanted_file_permission)\n return [report['id']]\n\n def republish(self, publisher):\n \"\"\"Republish the contents of the DateDirRepo to another publisher.\n\n This makes it easy to treat a DateDirRepo as a backing store in message\n queue environments: if the message queue is down, flush to the\n DateDirRepo, then later pick the OOPSes up and send them to the message\n queue environment.\n\n For instance:\n\n >>> repo = DateDirRepo('.')\n >>> repo.publish({'some':'report'})\n >>> queue = []\n >>> def queue_publisher(report):\n ... queue.append(report)\n ... 
return report['id']\n >>> repo.republish(queue_publisher)\n\n Will scan the disk and send the single found report to queue_publisher,\n deleting the report afterwards.\n\n Empty datedir directories are automatically cleaned up, as are stale\n .tmp files.\n\n If the publisher returns None, signalling that it did not publish the\n report, then the report is not deleted from disk.\n \"\"\"\n two_days = datetime.timedelta(2)\n now = datetime.date.today()\n old = now - two_days\n for dirname, (y,m,d) in self._datedirs():\n date = datetime.date(y, m, d)\n prune = date < old\n dirpath = os.path.join(self.root, dirname)\n files = os.listdir(dirpath)\n if not files and prune:\n # Cleanup no longer needed directory.\n os.rmdir(dirpath)\n for candidate in map(partial(os.path.join, dirpath), files):\n if candidate.endswith('.tmp'):\n if prune:\n os.unlink(candidate)\n continue\n with open(candidate, 'rb') as report_file:\n try:\n report = serializer.read(report_file)\n except IOError as e:\n if e.args[0] == 'Empty OOPS Report':\n report = None\n else:\n raise\n if report is not None:\n oopsid = publisher(report)\n if (report is None and prune) or (report is not None and oopsid):\n os.unlink(candidate)\n\n def _datedirs(self):\n \"\"\"Yield each subdir which looks like a datedir.\"\"\"\n for dirname in os.listdir(self.root):\n try:\n y, m, d = dirname.split('-')\n y = int(y)\n m = int(m)\n d = int(d)\n except ValueError:\n # Not a datedir\n continue\n yield dirname, (y, m, d)\n\n def _read_config(self):\n \"\"\"Return the current config document from disk.\"\"\"\n try:\n with open(self.config_path, 'rb') as config_file:\n return bson.loads(config_file.read())\n except IOError as e:\n if e.errno != errno.ENOENT:\n raise\n return {}\n\n def get_config(self, key):\n \"\"\"Return a key from the repository config.\n\n :param key: A key to read from the config.\n \"\"\"\n return self._read_config()[key]\n\n def set_config(self, key, value):\n \"\"\"Set config option key to value.\n\n 
This is written to the bson document root/metadata/config.bson\n\n :param key: The key to set - anything that can be a key in a bson\n document.\n :param value: The value to set - anything that can be a value in a\n bson document.\n \"\"\"\n config = self._read_config()\n config[key] = value\n try:\n with open(self.config_path + '.tmp', 'wb') as config_file:\n config_file.write(bson.dumps(config))\n except IOError as e:\n if e.errno != errno.ENOENT:\n raise\n os.mkdir(self.metadatadir)\n with open(self.config_path + '.tmp', 'wb') as config_file:\n config_file.write(bson.dumps(config))\n os.rename(self.config_path + '.tmp', self.config_path)\n\n def oldest_date(self):\n \"\"\"Return the date of the oldest datedir in the repository.\n\n If pruning / resubmission is working this should also be the date of\n the oldest oops in the repository.\n \"\"\"\n dirs = list(self._datedirs())\n if not dirs:\n raise ValueError(\"No OOPSes in repository.\")\n return datetime.date(*sorted(dirs)[0][1])\n\n def prune_unreferenced(self, start_time, stop_time, references):\n \"\"\"Delete OOPS reports filed between start_time and stop_time.\n\n A report is deleted if all of the following are true:\n\n * it is in a datedir covered by [start_time, stop_time] inclusive of\n the end points.\n\n * It is not in the set references.\n\n * Its timestamp falls between start_time and stop_time inclusively or\n it's timestamp is outside the datedir it is in or there is no\n timestamp on the report.\n\n :param start_time: The lower bound to prune within.\n :param stop_time: The upper bound to prune within.\n :param references: An iterable of OOPS ids to keep.\n \"\"\"\n start_date = start_time.date()\n stop_date = stop_time.date()\n midnight = datetime.time(tzinfo=utc)\n for dirname, (y,m,d) in self._datedirs():\n dirdate = datetime.date(y, m, d)\n if dirdate < start_date or dirdate > stop_date:\n continue\n dirpath = os.path.join(self.root, dirname)\n files = os.listdir(dirpath)\n deleted = 0\n for 
candidate in map(partial(os.path.join, dirpath), files):\n if candidate.endswith('.tmp'):\n # Old half-written oops: just remove.\n os.unlink(candidate)\n deleted += 1\n continue\n with open(candidate, 'rb') as report_file:\n report = serializer.read(report_file)\n report_time = report.get('time', None)\n if (report_time is None or\n getattr(report_time, 'date', None) is None or\n report_time.date() < dirdate or\n report_time.date() > dirdate):\n # The report is oddly filed or missing a precise\n # datestamp. Treat it like midnight on the day of the\n # directory it was placed in - this is a lower bound on\n # when it was actually created.\n report_time = datetime.datetime.combine(\n dirdate, midnight)\n if (report_time >= start_time and\n report_time <= stop_time and\n report['id'] not in references):\n # Unreferenced and prunable\n os.unlink(candidate)\n deleted += 1\n if deleted == len(files):\n # Everything in the directory was deleted.\n os.rmdir(dirpath)\n" }, { "alpha_fraction": 0.7476491928100586, "alphanum_fraction": 0.7541738748550415, "avg_line_length": 41.71311569213867, "blob_id": "91814e5f8d24f361416e36ddf27e07b4276482ab", "content_id": "3a0af2e4adbc91bc5421c1fa3d881d9d5cc97beb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5211, "license_type": "no_license", "max_line_length": 80, "num_lines": 122, "path": "/venv/Lib/site-packages/oops_wsgi/__init__.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "#\n# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"oops <-> wsgi integration.\n\noops_wsgi provides integration with an oops.Config, permitting errors in your\nweb application to be gathered centrally, with tracebacks and other diagnostic\ninformation.\n\nTypically, something like this:\n\n* Setup your configuration::\n\n >>> from oops import Config\n >>> config = Config()\n\nNote that you will probably want at least one publisher, or your reports will\nbe discarded.\n\n* Add in wsgi specific hooks to the config::\n\n >>> oops_wsgi.install_hooks(config)\n\nThis is a convenience function - you are welcome to pick and choose the creation\nor filter hooks you want from oops_wsgi.hooks.\n\n* Create your wsgi app as normal, and then wrap it::\n\n >>> app = oops_wsgi.make_app(app, config)\n\nIf any exception bubbles up through this middleware, an oops will be logged. If\nthe body of the request had not started, then a custom page is shown that\nshows the OOPS id, and the exception is swallowed. Exceptions that indicate\nnormal situations like end-of-file on a socket do not trigger OOPSes. If the\nOOPS is filtered, or no publishers are configured, then the exception will\npropogate up the stack - the oops middleware cannot do anything useful in these\ncases. 
(For instance, if you have a custom 404 middleware above the oops\nmiddleware in the wsgi stack, and filter 404 exceptions so they do not create\nreports, then if the oops middleware did anything other than propogate the\nexception, your custom 404 middleware would not work.\n\nIf the body had started, then there is no way to communicate the OOPS id to the\nclient and the exception will propogate up the wsgi app stack.\n\nYou can customise the error page if you supply a helper that accepts (environ,\nreport) and returns HTML to be sent to the client.\n\n >>> def myerror_html(environ, report):\n ... return '<html><body><h1>OOPS! %s</h1></body></html>' % report['id']\n >>> app = oops_wsgi.make_app(app, config, error_render=myerror_html)\n\nOr you can supply a string template to be formatted with the report.\n\n >>> json_template='{\"oopsid\" : \"%(id)s\"}'\n >>> app = oops_wsgi.make_app(app, config, error_template=json_template)\n\nIf the wrapped app errors by sending exc_info to start_response, that will be\nused to create an OOPS report, and the id added to the headers under the\nX-Oops-Id header. This is also present when an OOPS is triggered by catching an\nexception in the wrapped app (as long as the body hasn't started).\n\nYou can request that reports be created when a given status code is used (e.g.\nto gather stats on the number of 404's occuring without doing log processing).\n\n >>> app = oops_wsgi.make_app(app, config, oops_on_status=['404'])\n\nThe oops middleware injects two variables into the WSGI environ to make it easy\nfor cooperating code to report additional data.\n\nThe `oops.report` variable is a dict which is copied into the report. See the\n`oops` package documentation for documentation on what should be present in an\noops report. 
This requires the update_report hook to be installed (which\n`install_hooks` will do for you).\n\nThe `oops.context` variable is a dict used for generating the report - keys and\nvalues added to that can be used in the `config.on_create` hooks to populate\ncustom data without needing to resort to global variables.\n\nIf a timeline is present in the WSGI environ (as 'timeline.timeline') it is\nautomatically captured to the oops context when generating an OOPS. See the\noops-timeline module for hooks to use this.\n\n`pydoc oops_wsgi.make_app` describes the entire capabilities of the\nmiddleware.\n\"\"\"\n\n\nfrom __future__ import absolute_import, print_function\n\n# same format as sys.version_info: \"A tuple containing the five components of\n# the version number: major, minor, micro, releaselevel, and serial. All\n# values except releaselevel are integers; the release level is 'alpha',\n# 'beta', 'candidate', or 'final'. The version_info value corresponding to the\n# Python version 2.0 is (2, 0, 0, 'final', 0).\" Additionally we use a\n# releaselevel of 'dev' for unreleased under-development code.\n#\n# If the releaselevel is 'alpha' then the major/minor/micro components are not\n# established at this point, and setup.py will use a version of next-$(revno).\n# If the releaselevel is 'final', then the tarball will be major.minor.micro.\n# Otherwise it is major.minor.micro~$(revno).\n__version__ = (0, 0, 10, 'beta', 0)\n\n__all__ = [\n 'install_hooks',\n 'make_app'\n ]\n\nfrom oops_wsgi.middleware import make_app\nfrom oops_wsgi.hooks import install_hooks\n" }, { "alpha_fraction": 0.6874316930770874, "alphanum_fraction": 0.6918032765388489, "avg_line_length": 36.0945930480957, "blob_id": "ab9549eaaafda9de24bde3a9068cece897e7085f", "content_id": "746c3c2c514982a11466ad44ccdaa39457592ae1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2745, "license_type": "no_license", "max_line_length": 81, "num_lines": 74, 
"path": "/venv/Lib/site-packages/oops_amqp/trace.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2012, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Trace OOPS reports coming from an AMQP queue.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nfrom functools import partial\nimport sys\nimport optparse\nfrom textwrap import dedent\n\nimport amqp\nimport oops\nimport oops_amqp\n\n\ndef main(argv=None):\n if argv is None:\n argv=sys.argv\n usage = dedent(\"\"\"\\\n %prog [options]\n\n The following options must be supplied:\n --host\n\n e.g.\n oops-amqp-trace --host \"localhost:3472\"\n\n If you do not have a persistent queue, you should run this script\n before generating oopses, as AMQP will discard messages with no\n consumers.\n \"\"\")\n description = \"Trace OOPS reports coming from an AMQP queue.\"\n parser = optparse.OptionParser(\n description=description, usage=usage)\n parser.add_option('--host', help=\"AQMP host / host:port.\")\n parser.add_option('--username', help=\"AQMP username.\", default=\"guest\")\n parser.add_option('--password', help=\"AQMP password.\", default=\"guest\")\n parser.add_option('--vhost', help=\"AMQP vhost.\", default=\"/\")\n parser.add_option('--exchange', help=\"AMQP exchange name.\", default=\"oopses\")\n options, args = 
parser.parse_args(argv[1:])\n def needed(optname):\n if getattr(options, optname, None) is None:\n raise ValueError('option \"%s\" must be supplied' % optname)\n needed('host')\n factory = partial(\n amqp.Connection, host=options.host, userid=options.username,\n password=options.password, virtual_host=options.vhost)\n connection = factory()\n channel = connection.channel()\n channel.exchange_declare(options.exchange, type=\"fanout\", durable=False,\n auto_delete=True)\n queue = channel.queue_declare(durable=False, auto_delete=True)[0]\n channel.queue_bind(queue, options.exchange)\n config = oops.Config()\n config.publisher = oops.pprint_to_stream(sys.stdout)\n receiver = oops_amqp.Receiver(config, factory, queue)\n try:\n receiver.run_forever()\n except KeyboardInterrupt:\n pass\n" }, { "alpha_fraction": 0.6095865964889526, "alphanum_fraction": 0.6152186989784241, "avg_line_length": 34.063026428222656, "blob_id": "5d414d3cf27b13f6c304723bba7481c48741aede", "content_id": "537a95e647840e7c77480f9e035df5d038b1869f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8345, "license_type": "no_license", "max_line_length": 79, "num_lines": 238, "path": "/venv/Lib/site-packages/oops_datedir_repo/serializer_rfc822.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2010, 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Read / Write an OOPS dict as an rfc822 formatted message.\n\nThis style of OOPS format is very web server specific, not extensible - it\nshould be considered deprecated.\n\nThe reports this serializer handles always have the following variables (See\nthe python-oops api docs for more information about these variables):\n\n* id: The name of this error report.\n* type: The type of the exception that occurred.\n* value: The value of the exception that occurred.\n* time: The time at which the exception occurred.\n* reporter: The reporting program.\n* topic: The identifier for the template/script that oopsed.\n [this is written as Page-Id for compatibility with as yet unported tools.]\n* branch_nick: The branch nickname.\n* revno: The revision number of the branch.\n* tb_text: A text version of the traceback.\n* username: The user associated with the request.\n* url: The URL for the failed request.\n* req_vars: The request variables. Either a list of 2-tuples or a dict.\n* branch_nick: A name for the branch of code that was running when the report\n was triggered.\n* revno: The revision that the branch was at.\n* Informational: A flag, True if the error wasn't fatal- if it was\n 'informational'.\n [Deprecated - this is no longer part of the oops report conventions. 
Existing\n reports with it set are still read, but the key is only present if it was\n truely in the report.]\n\"\"\"\n\n\nfrom __future__ import absolute_import, print_function\n\n__all__ = [\n 'read',\n 'write',\n ]\n\n__metaclass__ = type\n\ntry:\n from email.parser import BytesParser\nexcept ImportError:\n # On Python 2, email.parser.Parser will do well enough, since\n # bytes == str.\n from email.parser import Parser as BytesParser\nimport logging\nimport re\nimport urllib\n\nimport iso8601\nimport six\nfrom six.moves import intern\nfrom six.moves.urllib_parse import (\n quote,\n unquote,\n )\n\n\ndef read(fp):\n \"\"\"Deserialize an OOPS from an RFC822 format message.\"\"\"\n msg = BytesParser().parse(fp, headersonly=True)\n id = msg.get('oops-id')\n exc_type = msg.get('exception-type')\n exc_value = msg.get('exception-value')\n datestr = msg.get('date')\n if datestr is not None:\n date = iso8601.parse_date(msg.get('date'))\n else:\n date = None\n topic = msg.get('topic')\n if topic is None:\n topic = msg.get('page-id')\n username = msg.get('user')\n url = msg.get('url')\n try:\n duration = float(msg.get('duration', '-1'))\n except ValueError:\n duration = float(-1)\n informational = msg.get('informational')\n branch_nick = msg.get('branch')\n revno = msg.get('revision')\n reporter = msg.get('oops-reporter')\n\n # Explicitly use an iterator so we can process the file sequentially.\n lines = iter(msg.get_payload().splitlines(True))\n\n statement_pat = re.compile(r'^(\\d+)-(\\d+)(?:@([\\w-]+))?\\s+(.*)')\n\n def is_req_var(line):\n return \"=\" in line and not statement_pat.match(line)\n\n def is_traceback(line):\n return line.lower().startswith('traceback') or line.startswith(\n '== EXTRA DATA ==')\n\n req_vars = []\n statements = []\n first_tb_line = ''\n for line in lines:\n first_tb_line = line\n line = line.strip()\n if line == '':\n continue\n else:\n match = statement_pat.match(line)\n if match is not None:\n start, end, db_id, statement = 
match.groups()\n if db_id is not None:\n db_id = intern(db_id) # This string is repeated lots.\n statements.append(\n [int(start), int(end), db_id, statement])\n elif is_req_var(line):\n key, value = line.split('=', 1)\n req_vars.append([unquote(key), unquote(value)])\n elif is_traceback(line):\n break\n req_vars = dict(req_vars)\n\n # The rest is traceback.\n tb_text = ''.join([first_tb_line] + list(lines))\n\n result = dict(id=id, type=exc_type, value=exc_value, time=date,\n topic=topic, tb_text=tb_text, username=username, url=url,\n duration=duration, req_vars=req_vars, timeline=statements,\n branch_nick=branch_nick, revno=revno)\n if informational is not None:\n result['informational'] = informational\n if reporter is not None:\n result['reporter'] = reporter\n return result\n\n\ndef _normalise_whitespace(s):\n \"\"\"Normalise the whitespace in a bytestring to spaces.\"\"\"\n if s is None:\n return None # (used by the cast to %s to get 'None')\n return b' '.join(s.split())\n\n\ndef _safestr(obj):\n if isinstance(obj, six.text_type):\n return obj.replace('\\\\', '\\\\\\\\').encode('ASCII',\n 'backslashreplace')\n # A call to str(obj) could raise anything at all.\n # We'll ignore these errors, and print something\n # useful instead, but also log the error.\n # We disable the pylint warning for the blank except.\n if isinstance(obj, six.binary_type):\n value = obj\n else:\n try:\n value = str(obj)\n except:\n logging.getLogger('oops_datedir_repo.serializer_rfc822').exception(\n 'Error while getting a str '\n 'representation of an object')\n value = '<unprintable %s object>' % (\n str(type(obj).__name__))\n # Some str() calls return unicode objects.\n if isinstance(value, six.text_type):\n return _safestr(value)\n # encode non-ASCII characters\n value = value.replace(b'\\\\', b'\\\\\\\\')\n value = re.sub(\n br'[\\x80-\\xff]',\n lambda match: ('\\\\x%02x' % ord(match.group(0))).encode('UTF-8'), value)\n return value\n\n\ndef to_chunks(report):\n \"\"\"Returns a 
list of bytestrings making up the serialized oops.\"\"\"\n chunks = []\n def header(label, key, optional=True):\n if optional and key not in report:\n return\n value = _safestr(report[key])\n value = _normalise_whitespace(value)\n chunks.append(label.encode('UTF-8') + b': ' + value + b'\\n')\n header('Oops-Id', 'id', optional=False)\n header('Exception-Type', 'type')\n header('Exception-Value', 'value')\n if 'time' in report:\n chunks.append(\n ('Date: %s\\n' % report['time'].isoformat()).encode('UTF-8'))\n header('Page-Id', 'topic')\n header('Branch', 'branch_nick')\n header('Revision', 'revno')\n header('User', 'username')\n header('URL', 'url')\n header('Duration', 'duration')\n header('Informational', 'informational')\n header('Oops-Reporter', 'reporter')\n chunks.append(b'\\n')\n safe_chars = ';/\\\\?:@&+$, ()*!'\n if 'req_vars' in report:\n try:\n items = sorted(report['req_vars'].items())\n except AttributeError:\n items = report['req_vars']\n for key, value in items:\n chunk = '%s=%s\\n' % (\n quote(_safestr(key), safe_chars),\n quote(_safestr(value), safe_chars))\n chunks.append(chunk.encode('UTF-8'))\n chunks.append(b'\\n')\n if 'timeline' in report:\n for row in report['timeline']:\n (start, end, category, statement) = row[:4]\n chunks.append(\n ('%05d-%05d@' % (start, end)).encode('UTF-8') +\n _safestr(category) + b' ' +\n _normalise_whitespace(_safestr(statement)) + b'\\n')\n chunks.append(b'\\n')\n if 'tb_text' in report:\n chunks.append(_safestr(report['tb_text']))\n return chunks\n\n\ndef write(report, output):\n \"\"\"Write a report to a file.\"\"\"\n output.writelines(to_chunks(report))\n" }, { "alpha_fraction": 0.7108986377716064, "alphanum_fraction": 0.7151051759719849, "avg_line_length": 32.52564239501953, "blob_id": "c8d6939795ebeb3dc9b1f97fd9e1fd1006935c8f", "content_id": "3b69fdd94a9185b1014b34cb0ec68e7d9c523e24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2615, "license_type": 
"no_license", "max_line_length": 77, "num_lines": 78, "path": "/venv/Lib/site-packages/oops_datedir_repo/serializer_bson.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Read / Write an OOPS dict as a bson dict.\n\nThis style of OOPS format is very extensible and maintains compatability with\nolder rfc822 oops code: the previously mandatory keys are populated on read.\n\nUse of bson serializing is recommended.\n\nThe reports this serializer handles always have the following variables (See\nthe python-oops api docs for more information about these variables):\n\n* id: The name of this error report.\n* type: The type of the exception that occurred.\n* value: The value of the exception that occurred.\n* time: The time at which the exception occurred.\n* reporter: The reporting program.\n* topic: The identifier for the template/script that oopsed.\n* branch_nick: The branch nickname.\n* revno: The revision number of the branch.\n* tb_text: A text version of the traceback.\n* username: The user associated with the request.\n* url: The URL for the failed request.\n* req_vars: The request variables. 
Either a list of 2-tuples or a dict.\n* branch_nick: A name for the branch of code that was running when the report\n was triggered.\n* revno: The revision that the branch was at.\n\"\"\"\n\n\nfrom __future__ import absolute_import, print_function\n\n__all__ = [\n 'dumps',\n 'read',\n 'write',\n ]\n\n__metaclass__ = type\n\nfrom oops_datedir_repo import anybson as bson\n\n\ndef read(fp):\n \"\"\"Deserialize an OOPS from a bson message.\"\"\"\n report = bson.loads(fp.read())\n for key in (\n 'branch_nick', 'revno', 'type', 'value', 'time', 'topic',\n 'username', 'url'):\n report.setdefault(key, None)\n report.setdefault('duration', -1)\n report.setdefault('req_vars', {})\n report.setdefault('tb_text', '')\n report.setdefault('timeline', [])\n return report\n\n\ndef dumps(report):\n \"\"\"Return a binary string representing report.\"\"\"\n return bson.dumps(report)\n\n\ndef write(report, fp):\n \"\"\"Write report to fp.\"\"\"\n return fp.write(dumps(report))\n" }, { "alpha_fraction": 0.738095223903656, "alphanum_fraction": 0.7432432174682617, "avg_line_length": 41.38181686401367, "blob_id": "81c15bebe0cc6d4710c606f51a4389b5240063d3", "content_id": "7ab7d7f711c1ea680e085b83677a7c13d181aa6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4662, "license_type": "no_license", "max_line_length": 79, "num_lines": 110, "path": "/venv/Lib/site-packages/oops_amqp/__init__.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "#\n# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Publish OOPS reports over AMQP.\n\nThe oops_amqp package provides an AMQP OOPS http://pypi.python.org/pypi/oops)\npublisher, and a small daemon that listens on amqp for OOPS reports and\nrepublishes them (into a supplied publisher). The OOPS framework permits\nfalling back to additional publishers if AMQP is down.\n\nUsage\n=====\n\nPublishing to AMQP\n++++++++++++++++++\n\nWhere you are creating OOPS reports, configure oops_amqp.Publisher. This takes\na connection factory - a simple callable that creates an amqp\nconnection - and the exchange name and routing key to submit to.\n\n >>> factory = partial(amqp.Connection, host=\"localhost:5672\",\n ... userid=\"guest\", password=\"guest\", virtual_host=\"/\")\n >>> publisher = oops_amqp.Publisher(factory, \"oopses\", \"\")\n\nProvide the publisher to your OOPS config::\n\n >>> config = oops.Config()\n >>> config.publisher = publisher\n\nAny oops published via that config will now be sent via amqp.\n\nOOPS ids are generating by hashing the oops message (without the id field) -\nthis ensures unique ids.\n\nThe reason a factory is used is because amqp is not threadsafe - the\npublisher maintains a thread locals object to hold the factories and creates\nconnections when new threads are created(when they first generate an OOPS).\n\nDealing with downtime\n---------------------\n\nFrom time to time your AMQP server may be unavailable. If that happens then\nthe Publisher will not assign an oops id - it will return None to signal that\nthe publication failed. 
To prevent losing the OOPS its a good idea to have a\nfallback publisher - either another AMQP publisher (to a different server) or\none that spools locally (where you can pick up the OOPSes via rsync or some\nother mechanism. Using the oops standard helper publish_with_fallback will let\nyou wrap the fallback publisher so that it only gets invoked if the primary\nmethod failed::\n\n >>> fallback_factory = partial(amqp.Connection, host=\"otherserver:5672\",\n ... userid=\"guest\", password=\"guest\", virtual_host=\"/\")\n >>> fallback_publisher = oops_amqp.Publisher(fallback_factory, \"oopses\", \"\")\n >>> config.publisher = publish_with_fallback(publisher, fallback_publisher)\n\nReceiving from AMQP\n+++++++++++++++++++\n\nThere is a simple method that will run an infinite loop processing reports from\nAMQP. To use it you need to configure a local config to publish the received\nreports. A full config is used because that includes support for filtering\n(which can be useful if you need to throttle volume, for instance).\nAdditionally you need an amqp connection factory (to handle the amqp server\nbeing restarted) and a queue name to receive from.\n\nThis example uses the DateDirRepo publisher, telling it to accept whatever\nid was assigned by the process publishing to AMQP::\n\n >>> publisher = oops_datedir_repo.DateDirRepo('.', inherit_id=True)\n >>> config = oops.Config()\n >>> config.publisher = publisher.publish\n >>> receiver = oops_amqp.Receiver(config, factory, \"my queue\")\n >>> receiver.run_forever()\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\n# same format as sys.version_info: \"A tuple containing the five components of\n# the version number: major, minor, micro, releaselevel, and serial. All\n# values except releaselevel are integers; the release level is 'alpha',\n# 'beta', 'candidate', or 'final'. 
The version_info value corresponding to the\n# Python version 2.0 is (2, 0, 0, 'final', 0).\" Additionally we use a\n# releaselevel of 'dev' for unreleased under-development code.\n#\n# If the releaselevel is 'alpha' then the major/minor/micro components are not\n# established at this point, and setup.py will use a version of next-$(revno).\n# If the releaselevel is 'final', then the tarball will be major.minor.micro.\n# Otherwise it is major.minor.micro~$(revno).\n__version__ = (0, 1, 0, 'final', 0)\n\n__all__ = [\n 'Publisher',\n 'Receiver',\n ]\n\nfrom oops_amqp.publisher import Publisher\nfrom oops_amqp.receiver import Receiver\n" }, { "alpha_fraction": 0.640649139881134, "alphanum_fraction": 0.6426584124565125, "avg_line_length": 37.05882263183594, "blob_id": "cd7c9e970018bb17e0137b86d56acf146670ec60", "content_id": "3f3835f0c6f869803a91ba7b4580708cf71ed9b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6470, "license_type": "no_license", "max_line_length": 79, "num_lines": 170, "path": "/venv/Lib/site-packages/oops_datedir_repo/prune.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "#\n# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Delete OOPSes that are not referenced in the bugtracker.\n\nCurrently only has support for the Launchpad bug tracker.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\n__metaclass__ = type\n\nimport datetime\nimport logging\nimport optparse\nfrom textwrap import dedent\nimport sys\n\nfrom launchpadlib.launchpad import Launchpad\nfrom launchpadlib.uris import lookup_service_root\nfrom pytz import utc\n\nimport oops_datedir_repo\n\n__all__ = [\n 'main',\n ]\n\n\nclass LaunchpadTracker:\n \"\"\"Abstracted bug tracker/forums etc - permits testing of main().\"\"\"\n\n def __init__(self, options):\n self.lp = Launchpad.login_anonymously(\n 'oops-prune', options.lpinstance, version='devel')\n\n def find_oops_references(self, start_time, end_time, project=None,\n projectgroup=None):\n \"\"\"Find oops references from start_time to end_time.\n\n :param project: Either None or a project name, or a list of projects.\n :param projectgroup: Either None or a project group name or a list\n of project group names.\n \"\"\"\n projects = set([])\n if project is not None:\n if type(project) is not list:\n project = [project]\n projects.update(project)\n if projectgroup is not None:\n if type(projectgroup) is not list:\n projectgroup = [projectgroup]\n for group in projectgroup:\n [projects.add(lp_proj.name)\n for lp_proj in self.lp.project_groups[group].projects]\n result = set()\n lp_projects = self.lp.projects\n one_week = datetime.timedelta(weeks=1)\n for project in projects:\n lp_project = lp_projects[project]\n current_start = start_time\n while current_start < end_time:\n current_end = current_start + one_week\n if current_end > end_time:\n current_end = end_time\n logging.info(\n \"Querying OOPS references on %s from %s to %s\", \n project, current_start, current_end)\n result.update(lp_project.findReferencedOOPS(\n 
start_date=current_start, end_date=current_end))\n current_start = current_end\n return result\n\n\ndef main(argv=None, tracker=LaunchpadTracker, logging=logging):\n \"\"\"Console script entry point.\"\"\"\n if argv is None:\n argv = sys.argv\n usage = dedent(\"\"\"\\\n %prog [options]\n\n The following options must be supplied:\n --repo\n\n And at least one of either\n --project\n or\n --projectgroup\n\n e.g.\n %prog --repo . --projectgroup launchpad-project\n\n Will process every member project of launchpad-project.\n\n --project and --projectgroup can be supplied multiple times.\n\n When run this program will ask Launchpad for OOPS references made since\n the last date it pruned up to, with an upper limit of one week from\n today. It then looks in the repository for all oopses created during\n that date range, and if they are not in the set returned by Launchpad,\n deletes them. If the repository has never been pruned before, it will\n pick the earliest datedir present in the repository as the start date.\n \"\"\")\n description = \\\n \"Delete OOPS reports that are not referenced in a bug tracker.\"\n parser = optparse.OptionParser(\n description=description, usage=usage)\n parser.add_option('--project', action=\"append\",\n help=\"Launchpad project to find references in.\")\n parser.add_option('--projectgroup', action=\"append\",\n help=\"Launchpad project group to find references in.\")\n parser.add_option('--repo', help=\"Path to the repository to read from.\")\n parser.add_option(\n '--lpinstance', help=\"Launchpad instance to use\", default=\"production\")\n options, args = parser.parse_args(argv[1:])\n def needed(*optnames):\n present = set()\n for optname in optnames:\n if getattr(options, optname, None) is not None:\n present.add(optname)\n if not present:\n if len(optnames) == 1:\n raise ValueError('Option \"%s\" must be supplied' % optname)\n else:\n raise ValueError(\n 'One of options %s must be supplied' % (optnames,))\n needed('repo')\n 
needed('project', 'projectgroup')\n logging.basicConfig(\n filename='prune.log', filemode='w', level=logging.DEBUG)\n repo = oops_datedir_repo.DateDirRepo(options.repo)\n one_week = datetime.timedelta(weeks=1)\n one_day = datetime.timedelta(days=1)\n # Only prune OOPS reports more than one week old.\n prune_until = datetime.datetime.now(utc) - one_week\n # Ignore OOPS reports we already found references for - older than the last\n # prune date.\n try:\n prune_from = repo.get_config('pruned-until')\n except KeyError:\n try:\n oldest_oops = repo.oldest_date()\n except ValueError:\n logging.info(\"No OOPSes in repo, nothing to do.\")\n return 0\n midnight_utc = datetime.time(tzinfo=utc)\n prune_from = datetime.datetime.combine(oldest_oops, midnight_utc)\n # The tracker finds all the references for the selected dates.\n finder = tracker(options)\n references = finder.find_oops_references(\n prune_from, datetime.datetime.now(utc), options.project,\n options.projectgroup)\n # Then we can delete the unreferenced oopses.\n repo.prune_unreferenced(prune_from, prune_until, references)\n # And finally save the fact we have scanned up to the selected date.\n repo.set_config('pruned-until', prune_until)\n return 0\n" }, { "alpha_fraction": 0.7085399627685547, "alphanum_fraction": 0.7179063558578491, "avg_line_length": 39.33333206176758, "blob_id": "54c350f0e9280823fbd2bf40e73f40e633fe1047", "content_id": "debe068e803c3dedc1749165491ff1d0a04e63fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1815, "license_type": "no_license", "max_line_length": 79, "num_lines": 45, "path": "/venv/Lib/site-packages/oops_wsgi/django.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2011 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 
only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Django glue for OOPS integration.\n\nTo use:\n* Use OOPSWSGIHandler rather than than WSGIHandler.\n* Create an oops wrapper with oops_wsgi.make_app(..., oops_on_status=['500'])\n\nThis is not needed if you have https://code.djangoproject.com/ticket/16674\nfixed in your Django.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nfrom django.core.handlers import wsgi\n\n__all__ = [\n 'OOPSWSGIHandler',\n ]\n\nclass OOPSWSGIHandler(wsgi.WSGIHandler):\n\n def handle_uncaught_exception(self, request, resolver, exc_info):\n if 'oops.context' in request.environ:\n # We are running under python-oops-wsgi - inject the exception into\n # its context. 
This will provide the exception to the handler, and\n # if you use oops_on_status=['500'] OOPS reports will be created\n # when Django has suffered a failure.\n request.environ['oops.context']['exc_info'] = exc_info\n # Now perform the default django uncaught exception behaviour.\n return super(OOPSWSGIHandler, self).handle_uncaught_exception(\n request, resolver, exc_info)\n" }, { "alpha_fraction": 0.6640020608901978, "alphanum_fraction": 0.6683857440948486, "avg_line_length": 28.830768585205078, "blob_id": "652fcb8d45ac82b440b30ade3ad96fa95d573813", "content_id": "5a7d53e9e4a24f46a03c7609c42d77fb593b2c20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3878, "license_type": "no_license", "max_line_length": 78, "num_lines": 130, "path": "/venv/Lib/site-packages/oops/createhooks.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2010, 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Various hooks that can be used to populate OOPS reports.\n\nThe default_hooks list contains some innocuous hooks which most reporters will\nwant.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\n__all__ = [\n 'attach_exc_info',\n 'attach_date',\n 'attach_hostname',\n 'copy_reporter',\n 'copy_topic',\n 'copy_url',\n 'default_hooks',\n 'safe_unicode',\n ]\n\n__metaclass__ = type\n\nimport datetime\nimport socket\nimport traceback\n\nfrom pytz import utc\nimport six\n\n# Used to detect missing keys.\n_sentinel = object()\n\n\ndef _simple_copy(key):\n \"\"\"Curry a simple hook that copies a key from context to report.\"\"\"\n def copy_key(report, context):\n value = context.get(key, _sentinel)\n if value is not _sentinel:\n report[key] = value\n copy_key.__doc__ = (\n \"Copy the %s field from context to report, if present.\" % key)\n return copy_key\n\ncopy_reporter = _simple_copy('reporter')\ncopy_topic = _simple_copy('topic')\ncopy_url = _simple_copy('url')\n \n\ndef safe_unicode(obj):\n \"\"\"Used to reliably get *a* string for an object.\n\n This is called on objects like exceptions, where bson won't be able to\n serialize it, but a representation is needed for the report. 
It is\n exposed a convenience for other on_create hook authors.\n \"\"\"\n if isinstance(obj, six.text_type):\n return obj\n # A call to str(obj) could raise anything at all.\n # We'll ignore these errors, and print something\n # useful instead, but also log the error.\n # We disable the pylint warning for the blank except.\n try:\n value = six.text_type(obj)\n except:\n value = u'<unprintable %s object>' % (\n six.text_type(type(obj).__name__))\n # Some objects give back bytestrings to __unicode__...\n if isinstance(value, six.binary_type):\n value = value.decode('latin-1')\n return value\n\n\ndef attach_date(report, context):\n \"\"\"Set the time key in report to a datetime of now.\"\"\"\n report['time'] = datetime.datetime.now(utc)\n\n\ndef attach_exc_info(report, context):\n \"\"\"Attach exception info to the report.\n\n This reads the 'exc_info' key from the context and sets the:\n * type\n * value\n * tb_text \n keys in the report.\n\n exc_info must be a tuple, but it can contain either live exception\n information or simple strings (allowing exceptions that have been\n serialised and received over the network to be reported).\n \"\"\"\n info = context.get('exc_info')\n if info is None:\n return\n report['type'] = getattr(info[0], '__name__', info[0])\n report['value'] = safe_unicode(info[1])\n if isinstance(info[2], six.string_types):\n tb_text = info[2]\n else:\n tb_text = u''.join(map(safe_unicode, traceback.format_tb(info[2])))\n report['tb_text'] = tb_text\n\n\ndef attach_hostname(report, context):\n \"\"\"Add the machine's hostname to report in the 'hostname' key.\"\"\"\n report['hostname'] = socket.gethostname()\n\n\n# hooks that are installed into Config objects by default.\ndefault_hooks = [\n attach_exc_info,\n attach_date,\n copy_reporter,\n copy_topic,\n copy_url,\n attach_hostname,\n ]\n" }, { "alpha_fraction": 0.5984503030776978, "alphanum_fraction": 0.6027953028678894, "avg_line_length": 40.9726448059082, "blob_id": 
"9db2203e5bb31cfd41c4e261fac018eca9a21d71", "content_id": "75627c40457254b1a9ddfbdb877dc585e672865f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13809, "license_type": "no_license", "max_line_length": 84, "num_lines": 329, "path": "/venv/Lib/site-packages/oops_wsgi/middleware.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2010, 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"WSGI middleware to integrate with an oops.Config.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\n__metaclass__ = type\n\nimport socket\nimport sys\nimport time\n\nfrom six.moves.urllib_parse import quote\n\n__all__ = [\n 'default_map_environ',\n 'generator_tracker',\n 'make_app',\n ]\n\n\ndefault_error_template='''<html>\n<head><title>Oops! - %(id)s</title></head>\n<body>\n<h1>Oops!</h1>\n<p>Something broke while generating the page.\nPlease try again in a few minutes, and if the problem persists file\na bug or contact customer support. 
Please quote OOPS-ID\n<strong>%(id)s</strong>\n</p></body></html>'''\n\n\ndefault_map_environ = {\n # Map timeline objects into the oops context as 'timeline'\n 'timeline.timeline': 'timeline',\n }\n\n\nclass SoftRequestTimeout(Exception):\n \"\"\"Soft request timeout expired\"\"\"\n\n\ndef make_app(app, config, template=default_error_template,\n content_type='text/html', error_render=None, oops_on_status=None,\n map_environ=None, tracker=None, soft_start_timeout=None):\n \"\"\"Construct a middleware around app that will forward errors via config.\n\n Any errors encountered by the app will be forwarded to config and an error\n page shown.\n\n If the body of a reply has already started the error will be forwarded to\n config and also re-raised.\n\n If there are no publishers, or an error is filtered, the error will be\n re-raised rather than an error page shown. This permits containing\n middleware to show custom errors (for 404's, for instance), perhaps even\n for just some occurences of the issue.\n\n :param app: A WSGI app.\n :param config: An oops.Config.\n :param template: Optional string template to use when reporting the oops to\n the client. If not supplied a default template is used (unless an\n error_render function has been supplied).\n :param content_type: The content type for error pages. Defaults to\n text/html.\n :param error_render: Optional custom renderer for presenting error reports\n to clients. Should be a callable taking the report as its only\n parameter.\n :param oops_on_status: Optional list of HTTP status codes that should\n generate OOPSes. OOPSes triggered by sniffing these codes will not\n interfere with the response being sent. For instance, if you do\n not expect any 404's from your application, you might set\n oops_on_status=['404'].\n :param map_environ: A dictionary of environment keys to look for, and if\n present map into the OOPS context when generating an OOPS. 
The value of\n the key determines the name given in the OOPS context. If None is passed\n the default_map_environ is used. Pass {} in to entirely disable mapping.\n :param tracker: A factory function to create a tracker. Trackers are used\n to allow variations on the WSGI environment to still use oops_wsgi.\n See generator_tracker for the reference tracker used in regular WSGI\n environments. generator_tracker is used by default or when\n tracker=None.\n :param soft_start_timeout: A duration in milliseconds for the creation of\n reports on slow requests. If this is set and the duration between\n calling into the app and start_response being called is greater than\n the timeout value, then an OOPS will be created and the OOPS id added\n to the response HTTP headers as normal. A backtrace leading into the\n middleware is generated (this can be informative as start_response is\n a callback) and the exception type is set to SoftRequestTimeout.\n :return: A WSGI app.\n \"\"\"\n def oops_middleware(environ, start_response):\n \"\"\"OOPS inserting middleware.\n\n This has the following WSGI properties:\n * start_response is buffered until either write() is called, or the\n wrapped app starts yielding content.\n * Exceptions that are ignored by the oops config get re-raised.\n * socket errors and GeneratorExit errors are passed through without\n * being forward to the oops system.\n \"\"\"\n environ['oops.report'] = {}\n environ['oops.context'] = {}\n if soft_start_timeout:\n start_time = time.time()\n state = {}\n def make_context(exc_info=None):\n context = dict(url=construct_url(environ), wsgi_environ=environ)\n context.update(environ.get('oops.context', {}))\n mapper = map_environ\n if mapper is None:\n mapper = default_map_environ\n for environ_key, context_key in mapper.items():\n if environ_key in environ:\n context[context_key] = environ[environ_key]\n if exc_info is not None:\n context['exc_info'] = exc_info\n return context\n def oops_write(bytes):\n write = 
state.get('write')\n if write is None:\n status, headers = state.pop('response')\n # Signal that we have called start_response\n state['write'] = start_response(status, headers)\n write = state['write']\n write(bytes)\n def oops_start_response(status, headers, exc_info=None):\n if exc_info is not None:\n # The app is explicitly signalling an error (rather than\n # returning a page describing the error). Capture that and then\n # forward to the containing element untouched except for the\n # addition of the X-Oops-Id header. We don't touch the body\n # because the application is handling the error and generating\n # the body itself. We may in future provide an option to\n # replace the body in this situation.\n report = config.create(make_context(exc_info=exc_info))\n ids = config.publish(report)\n try:\n if ids:\n headers = list(headers)\n headers.append(('X-Oops-Id', str(report['id'])))\n state['write'] = start_response(status, headers, exc_info)\n return state['write']\n finally:\n del exc_info\n else:\n do_oops = False\n if oops_on_status:\n for sniff_status in oops_on_status:\n if status.startswith(sniff_status):\n do_oops = True\n if (soft_start_timeout and\n (time.time()-start_time)*1000 > soft_start_timeout):\n try:\n raise SoftRequestTimeout(\n \"Start_response over timeout %s.\"\n % soft_start_timeout)\n except SoftRequestTimeout:\n exc_info = sys.exc_info()\n do_oops = True\n if do_oops:\n report = config.create(make_context(exc_info=exc_info))\n report['HTTP_STATUS'] = status.split(' ')[0]\n config.publish(report)\n state['response'] = (status, headers)\n return oops_write\n try:\n def ensure_start_response():\n if 'write' not in state:\n status, headers = state.pop('response')\n # Signal that we have called start_response\n state['write'] = start_response(status, headers)\n def on_exception(exc_info):\n report = config.create(make_context(exc_info=exc_info))\n ids = config.publish(report)\n if not ids or 'write' in state:\n # No OOPS generated, no oops 
publisher, or we have already\n # transmitted the wrapped apps headers - either way we can't\n # replace the content with a clean error, so let the wsgi\n # server figure it out.\n raise\n headers = [('Content-Type', content_type)]\n headers.append(('X-Oops-Id', str(report['id'])))\n start_response(\n '500 Internal Server Error', headers, exc_info)\n del exc_info\n if error_render is not None:\n return error_render(report)\n else:\n return template % report\n if tracker is None:\n tracker_factory = generator_tracker\n else:\n tracker_factory = tracker\n return tracker_factory(\n ensure_start_response, ensure_start_response, on_exception,\n app(environ, oops_start_response))\n except socket.error:\n raise\n except Exception:\n exc_info = sys.exc_info()\n return [on_exception(exc_info)]\n\n return oops_middleware\n\n\ndef generator_tracker(on_first_bytes, on_finish, on_error, app_body):\n \"\"\"A wrapper for generators that calls the OOPS hooks as needed.\n\n :param on_first_bytes: Called as on_first_bytes() when the first bytes from\n the app body are available but before they are yielded.\n :param on_finish: Called as on_finish() when the app body is fully\n consumed.\n :param on_error: Called as on_error(sys.exc_info()) if a handleable error\n has occured while consuming the generator. Errors like GeneratorExit\n are not handleable.\n :param app_body: The iterable body for the WSGI app. This may be a simple\n list or a generator - it is merely known to meet the iterator protocol.\n \"\"\"\n try:\n called_first = False\n for bytes in app_body:\n if not called_first:\n called_first = True\n on_first_bytes()\n yield bytes\n on_finish()\n except socket.error:\n # start_response, which iteration can trigger a call into, may raise\n # socket.error when writing if the client has disconnected: thats not\n # an OOPS condition. 
This does potentially mask socket.error issues in\n # the appserver code, so we may want to change this to callback to\n # determine if start_response has been called upstream, and if so, to\n # still generate an OOPS.\n raise\n except GeneratorExit:\n # Python 2.4\n raise\n except Exception:\n exc_info = sys.exc_info()\n yield on_error(exc_info)\n finally:\n if hasattr(app_body, 'close'):\n app_body.close()\n\n\ndef maybe_encode(value):\n \"\"\" It is against the WSGI spec for an environ value to be a unicode type.\n However, django will convert some values (PATH_INFO particularly).\n In this case, and on python 2, we want to encode them back, otherwise\n we will get KeyErrors in the urllib.quote() method as that explicitly\n cannot handle unicode/UTF-8.\n \"\"\"\n if sys.version_info[0] >= 3:\n return value\n if not isinstance(value, unicode):\n return value\n return value.encode('UTF-8')\n\n\n# construct_url is taken from paste.request at 646047a, which is licensed:\n# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)\n# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php\n#\n# It is modifed to use str() around environ['SERVER_PORT'] due to\n# https://github.com/benoitc/gunicorn/issues/271\ndef construct_url(environ, with_query_string=True, with_path_info=True,\n script_name=None, path_info=None, querystring=None):\n \"\"\"Reconstructs the URL from the WSGI environment.\n\n You may override SCRIPT_NAME, PATH_INFO, and QUERYSTRING with\n the keyword arguments.\n\n \"\"\"\n url = environ['wsgi.url_scheme']+'://'\n\n if environ.get('HTTP_HOST'):\n host = environ['HTTP_HOST']\n port = None\n if ':' in host:\n host, port = host.split(':', 1)\n if environ['wsgi.url_scheme'] == 'https':\n if port == '443':\n port = None\n elif environ['wsgi.url_scheme'] == 'http':\n if port == '80':\n port = None\n url += host\n if port:\n url += ':%s' % port\n else:\n url += environ['SERVER_NAME']\n server_port = 
str(environ['SERVER_PORT'])\n if environ['wsgi.url_scheme'] == 'https':\n if server_port != '443':\n url += ':' + server_port\n else:\n if server_port != '80':\n url += ':' + server_port\n\n if script_name is None:\n url += quote(environ.get('SCRIPT_NAME',''))\n else:\n url += quote(script_name)\n if with_path_info:\n if path_info is None:\n url += quote(maybe_encode(environ.get('PATH_INFO','')))\n else:\n url += quote(maybe_encode(path_info))\n if with_query_string:\n if querystring is None:\n if environ.get('QUERY_STRING'):\n url += '?' + environ['QUERY_STRING']\n elif querystring:\n url += '?' + querystring\n return url\n" }, { "alpha_fraction": 0.7360360622406006, "alphanum_fraction": 0.7387387156486511, "avg_line_length": 36.62711715698242, "blob_id": "2b3507086052983478541187ca34e78fd704185c", "content_id": "6bcc9b1a835a1421dde479dcb7af05e1fa9028ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2220, "license_type": "no_license", "max_line_length": 79, "num_lines": 59, "path": "/venv/Lib/site-packages/oops_amqp/utils.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Utility functions for oops_amqp.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport socket\n\nfrom amqp.exceptions import ConnectionError\n\n__all__ = [\n 'amqplib_error_types',\n 'close_ignoring_connection_errors',\n 'is_amqplib_connection_error',\n 'is_amqplib_ioerror',\n ]\n\n# These exception types always indicate an AMQP connection error/closure.\n# However you should catch amqplib_error_types and post-filter with\n# is_amqplib_connection_error.\namqplib_connection_errors = (socket.error, ConnectionError)\n# A tuple to reduce duplication in different code paths. Lists the types of\n# exceptions legitimately raised by amqplib when the AMQP server goes down.\n# Not all exceptions *will* be such errors - use is_amqplib_connection_error to\n# do a second-stage filter after catching the exception.\namqplib_error_types = amqplib_connection_errors + (IOError,)\n\n\ndef close_ignoring_connection_errors(closable):\n try:\n return closable.close()\n except amqplib_error_types as e:\n if is_amqplib_connection_error(e):\n return\n raise\n\n\ndef is_amqplib_ioerror(e):\n \"\"\"Returns True if e is an amqplib internal exception.\"\"\"\n # Raised by amqplib rather than socket.error on ssl issues and short reads.\n return type(e) is IOError and e.args == ('Socket error',)\n\n\ndef is_amqplib_connection_error(e):\n \"\"\"Return True if e was (probably) raised due to a connection issue.\"\"\"\n return isinstance(e, amqplib_connection_errors) or is_amqplib_ioerror(e)\n" }, { "alpha_fraction": 0.6893899440765381, "alphanum_fraction": 0.6928669214248657, "avg_line_length": 41.945701599121094, "blob_id": "273c4da81d8ccd2a06d088e3d24f03f86eeb7f1c", "content_id": "a292544b7f1ef8bffc710a6d43559573bf3db365", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9491, "license_type": 
"no_license", "max_line_length": 81, "num_lines": 221, "path": "/venv/Lib/site-packages/oops/config.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "#\n# Copyright (c) 2010, 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"The primary interface for clients creating OOPS reports.\n\nTypical usage:\n\n* Configure the library::\n\n >>> from oops import Config\n >>> config = Config()\n >>> def demo_publish(report):\n ... return 'id 1'\n >>> config.publisher = demo_publish\n\n This allows aggregation of oops reports from different programs into one\n oops-tools install.\n\n >>> config.template['reporter'] = 'myprogram'\n\n* Create a report::\n\n >>> report = config.create()\n\n* And then send it off for storage::\n\n >>> config.publish(report)\n ['id 1']\n >>> report\n {'id': 'id 1', 'template': 'myprogram'}\n\n* See the Config object pydoc for more information.\n\nThe OOPS report is a dictionary, and must be bson serializable. 
This permits\nthe inclusion of binary data in the report, and provides cross-language\ncompatibility.\n\nA minimal report can be empty, but this is fairly useless and may even be\nrejected by some repositories.\n\nSome well known keys used by Launchpad in its OOPS reports::\n\n* id: The name of this error report.\n* type: The type of the exception that occurred.\n* value: The value of the exception that occurred.\n* time: The time at which the exception occurred.\n* hostname: The hostname of the machine the oops was created on. (Set by default)\n* branch_nick: The branch nickname.\n* revno: The revision number of the branch.\n* tb_text: A text version of the traceback.\n* username: The user associated with the request.\n* url: The URL for the failed request.\n* req_vars: The request variables. This should be a dict of simple string ->\n string mappings. The strings and their values should be either unicode or\n url escaped ascii bytestrings. Older versions of the oops toolchain emitted\n this variable as a list of two-tuples e.g. (key, value). Code expecting to\n receive or process old reports should accept both dicts and tuples. Modern\n or new code can just expect a dict.\n* branch_nick: A name for the branch of code that was running when the report\n was triggered.\n* revno: The revision that the branch was at.\n* reporter: Describes the program creating the report. For instance you might\n put the name of the program, or its website - as long as its distinct from\n other reporters being sent to a single analysis server. For dynamically\n scaled services with multiple instances, the reporter will usually be the\n same for a single set of identical instances. e.g. all the instances in one\n Amazon EC2 availability zone might be given the same reporter. Differentiated\n backend services for the same front end site would usually get different\n reporters as well. (e.g. auth, cache, render, ...)\n* topic: The subject or context for the report. 
With a command line tool you\n might put the subcommand here, with a web site you might put the template (as\n opposed to the url). This is used as a weak correlation hint: reports from the\n same topic are more likely to have the same cause than reports from different\n topics.\n* timeline: A sequence of (start, stop, category, detail) tuples describing the\n events leading up to the OOPS. One way to populate this is the oops-timeline\n package. Consumers should not assume the length of the tuple to be fixed -\n additional fields may be added in future to the right hand side (e.g.\n backtraces).\n\"\"\"\n\n\nfrom __future__ import absolute_import, print_function\n\n__all__ = [\n 'Config',\n ]\n\n__metaclass__ = type\n\nfrom copy import deepcopy\nimport warnings\n\nfrom oops.createhooks import default_hooks\nfrom oops.publishers import (\n convert_result_to_list,\n publish_to_many,\n )\n\n\nclass Config:\n \"\"\"The configuration for the OOPS system.\n\n :ivar on_create: A list of callables to call when making a new report. Each\n will be called in series with the new report and a creation context\n dict. The return value of the callbacks is ignored.\n :ivar filters: A list of callables to call when filtering a report. Each\n will be called in series with a report that is about to be published.\n If the filter returns true (that is not None, 0, '' or False), then\n the report will not be published, and the call to publish will return\n None to the user.\n :ivar publisher: A callable to call when publishing a report.\n It will be called in series with the report to publish. It is expected\n to return a list of ids. See the publish() method for more\n information.\n :ivar publishers: A list of callables to call when publishing a report.\n Each will be called in series with the report to publish. 
Their return\n value will be assigned to the reports 'id' key : if a publisher\n allocates a different id than a prior publisher, only the last\n publisher in the list will have its id present in the report at the\n end. See the publish() method for more information. This attribute\n is deprecated, Use the `publisher` attribute instead, and see\n `oops.publishers.publish_to_many` if you want to publish to multiple\n publishers.\n \"\"\"\n\n def __init__(self):\n self.filters = []\n self.on_create = list(default_hooks)\n self.template = {}\n self.publisher = None\n self.publishers = []\n\n def create(self, context=None):\n \"\"\"Create an OOPS.\n\n The current template is copied to make the new report, and the new\n report is then passed to all the on_create callbacks for population.\n\n If a callback raises an exception, that will propgate to the caller.\n\n :param context: A dict of information that the on_create callbacks can\n use in populating the report. For instance, the attach_exception \n callback looks for an exc_info key in the context and uses that\n to add information to the report. If context is None, an empty dict\n is created and used with the callbacks.\n :return: A fresh OOPS.\n \"\"\"\n if context is None:\n context = {}\n result = deepcopy(self.template)\n [callback(result, context) for callback in self.on_create]\n return result\n\n def publish(self, report):\n \"\"\"Publish a report.\n\n The report will be passed through any filters, and then handed\n off to the callable assigned to the `publisher` instance variable,\n if any, and then passed through any publishers in the `publishers`\n list instance variable. The return value will be the list returned\n by `publisher` method, with the ids returned by the `publishers`\n appended. 
The `publishers` list is deprecated, and the `publisher`\n attribute should be used instead, with\n `oops.publishers.publish_to_many` used if needed.\n\n The `publisher` should return a list of ids that were\n allocated-or-used for the report. The `publishers` should each return\n any id that was allocated-or-used for the report. The return value of\n the callables in the `publishers` list will be assigned to the `id`\n key of the report. If a publisher in the `publishers` list returns\n anything non-True (that is None, 0, False, ''), it indicates that the\n publisher did not publish the report.\n\n The last entry in the list of ids, if any, will be assigned to the 'id'\n key of the report before returning, so that clients that only care to\n deal with one id don't have to pick one themselves.\n\n The whole list of ids will be returned to the caller to allow them\n to handle the case where multiple ids were used for a report.\n\n If any publisher raises an exception, that will propagate to the caller.\n\n :return: A list of the allocated ids.\n \"\"\"\n for report_filter in self.filters:\n if report_filter(report):\n return None\n # XXX: james_w 2012-06-19 bug=1015293: Deprecated code path,\n # this should be removed once users have had a chance\n # to migrate. The constructor and docstrings should\n # also be cleaned up at the same time.\n if self.publishers:\n warnings.warn(\n \"Using the oops.Config.publishers attribute is \"\n \"deprecated. 
Use the oops.Config.publisher attribute \"\n \"instead, with an oops.publishers.publish_to_many object \"\n \"if multiple publishers are needed\",\n DeprecationWarning, stacklevel=2)\n old_publishers = map(convert_result_to_list, self.publishers)\n if self.publisher:\n publisher = publish_to_many(self.publisher, *old_publishers)\n else:\n publisher = publish_to_many(*old_publishers)\n ret = publisher(report)\n if ret:\n report['id'] = ret[-1]\n return ret\n" }, { "alpha_fraction": 0.6025754809379578, "alphanum_fraction": 0.6061278581619263, "avg_line_length": 35.918033599853516, "blob_id": "5887ace37ca9d6cb6472e6554df8e78806c89d89", "content_id": "bf800d952c101df81ff93491eefdd83bc8bfd83b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4504, "license_type": "no_license", "max_line_length": 79, "num_lines": 122, "path": "/venv/Lib/site-packages/oops_amqp/receiver.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Receive OOPS reports over amqp and republish locally.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\n__metaclass__ = type\n\nimport time\n\nfrom oops_amqp import anybson as bson\nfrom oops_amqp.utils import (\n amqplib_error_types,\n close_ignoring_connection_errors,\n is_amqplib_connection_error,\n )\n\n__all__ = [\n 'Receiver',\n ]\n\nclass Receiver:\n \"\"\"Republish OOPS reports from AMQP to a local oops.Config.\n \n :ivar stopping: When True will cause Receiver to break out of run_forever.\n Calls to run_forever reset this to False.\n :ivar sentinel: If a message identical to the sentinel is received,\n handle_report will set stopping to True.\n \"\"\"\n\n def __init__(self, config, connection_factory, queue_name):\n \"\"\"Create a Receiver.\n\n :param config: An oops.Config to republish the OOPS reports.\n :param connection_factory: An amqplib connection factory, used to make\n the initial connection and to reconnect if that connection is\n interrupted.\n :param queue_name: The queue to listen for reports on.\n \"\"\"\n self.config = config\n self.connection = None\n self.channel = None\n self.connection_factory = connection_factory\n self.queue_name = queue_name\n self.sentinel = None\n\n def handle_report(self, message):\n # bson requires bytes.\n body = message.body\n if not isinstance(body, bytes):\n body = body.encode(message.content_encoding or 'UTF-8')\n if body == self.sentinel:\n self.stopping = True\n self.channel.basic_ack(message.delivery_tag)\n return\n try:\n report = bson.loads(body)\n except KeyError:\n # Garbage in the queue. 
Possibly this should raise an OOPS itself\n # (through a different config) or log an info level message.\n pass\n self.config.publish(report)\n # ACK last so errors here don't eat the message.\n self.channel.basic_ack(message.delivery_tag)\n \n def run_forever(self):\n \"\"\"Run in a loop handling messages.\n\n If the amqp server is down or uncontactable for > 120 seconds, error\n out.\n \"\"\"\n self.stopping = False\n self.went_bad = None\n while (not self.stopping and\n (not self.went_bad or time.time() < self.went_bad + 120)):\n try:\n self._run_forever()\n except amqplib_error_types as e:\n if not is_amqplib_connection_error(e):\n # Something unknown went wrong.\n raise\n if not self.went_bad:\n self.went_bad = time.time()\n # Don't probe immediately, give the network/process time to\n # come back.\n time.sleep(0.1)\n\n def _run_forever(self):\n self.connection = self.connection_factory()\n self.connection.connect()\n # A successful connection: record this so run_forever won't bail early.\n self.went_bad = None\n try:\n self.channel = self.connection.channel()\n try:\n self.consume_tag = self.channel.basic_consume(\n self.queue_name, callback=self.handle_report)\n try:\n while True:\n self.connection.drain_events(timeout=1)\n if self.stopping:\n break\n finally:\n if self.channel.is_open:\n self.channel.basic_cancel(self.consume_tag)\n finally:\n close_ignoring_connection_errors(self.channel)\n finally:\n close_ignoring_connection_errors(self.connection)\n" }, { "alpha_fraction": 0.5417746305465698, "alphanum_fraction": 0.553027868270874, "avg_line_length": 39.90066146850586, "blob_id": "f774aa6d235a0ada85be97157193cab54642e726", "content_id": "692d7d2d805bc55e8382c6ec3d926cd709da73db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13371, "license_type": "no_license", "max_line_length": 161, "num_lines": 302, "path": "/botCS.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": 
"import telebot\nfrom telebot import types\nimport sqlite3\n# import schedule\nimport time\nimport requests\n\ntoken = '918573896:AAEM0r2hDXAJoCwy5WtTFZJ92iEmhxmoVeM'\nbot = telebot.TeleBot(token)\n\nconn = sqlite3.connect('game.db', check_same_thread=False)\ncursor = conn.cursor()\n\ntry:\n cursor.execute('''CREATE TABLE game \n (User_ID integer, Score integer, business text, pin integer)\n ''')\nexcept sqlite3.OperationalError:\n pass\n\n\nscore = 1000\ncheck = False\ngg = False\ngg_2 = True\ngame = False\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n bot.send_message(message.chat.id, 'Привет! Ты зашёл в нашу игру!', reply_markup=keyboard())\n\n\[email protected]_handler(commands=[\"Login\"])\ndef auto_log(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=True, resize_keyboard=True)\n btn_1 = types.KeyboardButton(text=\"Регистрация/Авторизация\", request_contact=True)\n btn_2 = types.KeyboardButton(text='/home')\n keyboard.add(btn_1)\n keyboard.add(btn_2)\n bot.send_message(message.chat.id, \"Привет! Нажми на кнопку, чтобы пройти Регистрацию/Авторизацию.\", reply_markup=keyboard)\n\n\[email protected]_handler(content_types=[\"contact\"])\ndef contact(message):\n global check, score\n if message.contact is not None:\n contact_one = message.contact\n us_id = \"%s\" % contact_one.user_id\n\n args = int(us_id)\n cursor.execute(\"\"\" SELECT User_ID FROM game WHERE User_ID = ? 
\"\"\", [args])\n row = cursor.fetchone()\n\n if row == None:\n cursor.execute('insert into game (User_id, Score, pin) values (?, ?, 0)', (us_id, score))\n print(cursor.execute('select * from game').fetchall())\n conn.commit()\n bot.send_message(message.chat.id, \"Вы успешно Зарегистрировались/Авторизовались!\", reply_markup=keyboard())\n check = True\n return check\n else:\n bot.send_message(message.chat.id, 'Вы уже Зарегистрированы!', reply_markup=keyboard())\n check = True\n return check\n\n\[email protected]_handler(commands=['home'])\ndef home(message):\n bot.send_message(message.chat.id, 'Вы находитесь на домашней странице!', reply_markup=keyboard())\n print(check)\n\n\ndef get_info(message):\n uid_new = message.text\n us_id = str(message.from_user.id)\n print(uid_new)\n\n cursor.execute(\"SELECT Score from game WHERE User_ID=?\", [us_id])\n row = cursor.fetchone()\n us_score = row[0]\n\n if uid_new == '1' and us_score >= 1000:\n cursor.execute('''UPDATE game SET business = 'Магнит' WHERE User_ID = ?''', [us_id])\n cursor.execute(''' update game set Score=Score-1000 where User_ID=?''', [us_id])\n conn.commit()\n bot.send_message(message.chat.id, 'Вы преобрели: Магнит', reply_markup=keyboard())\n elif uid_new == '2' and us_score >=2500:\n cursor.execute('''UPDATE game SET business = 'Пятёрочка' WHERE User_ID = ?''', [us_id])\n cursor.execute(''' update game set Score=Score-2500 where User_ID=?''', [us_id])\n conn.commit()\n bot.send_message(message.chat.id, 'Вы преобрели: Пятёрочку', reply_markup=keyboard())\n elif uid_new == '3' and us_score >= 5000:\n cursor.execute('''UPDATE game SET business = 'DNS' WHERE User_ID = ?''', [us_id])\n cursor.execute(''' update game set Score=Score-5000 where User_ID=?''', [us_id])\n conn.commit()\n bot.send_message(message.chat.id, 'Вы преобрели: DNS', reply_markup=keyboard())\n else:\n bot.send_message(message.chat.id, 'Недостаточно монет!', reply_markup=keyboard())\n\n\ndef get_id(message):\n text = message.text\n serv = 
'http://api.warface.ru/user/stat/?name=%s&server=1' % text\n print(serv)\n r = requests.get(serv)\n t = str(r)\n data = r.json()\n if t == '<Response [400]>':\n serv = 'http://api.warface.ru/user/stat/?name=%s&server=2' % text\n print(serv)\n r = requests.get(serv)\n t = str(r)\n if t == '<Response [400]>':\n serv = 'http://api.warface.ru/user/stat/?name=%s&server=3' % text\n print(serv)\n r = requests.get(serv)\n t = str(r)\n if t == '<Response [200]>':\n data = r.json()\n print(data)\n nick = data['nickname']\n rank = data['rank_id']\n exp = data['experience']\n try:\n clan = data['clan_name']\n bot.send_message(message.chat.id,\n 'Ник: ' + str(nick) + '\\n' + 'Сервер: Чарли' + '\\n' + 'Ранг: ' + str(\n rank) + '\\n' + 'Exp: ' + str(exp) + '\\n' + 'Клан: ' + str(clan),\n reply_markup=keyboard())\n except KeyError:\n bot.send_message(message.chat.id,\n 'Ник: ' + str(nick) + '\\n' + 'Сервер: Чарли' + '\\n' + 'Ранг: ' + str(\n rank) + '\\n' + 'Exp: ' + str(exp) + '\\n' + 'Клан: No',\n reply_markup=keyboard())\n else:\n bot.send_message(message.chat.id, 'Игрок скрыл свою статистику или данный Ник не найден.', reply_markup=keyboard())\n print('error')\n\n elif t == '<Response [200]>':\n data = r.json()\n print(data)\n nick = data['nickname']\n rank = data['rank_id']\n exp = data['experience']\n try:\n clan = data['clan_name']\n bot.send_message(message.chat.id,\n 'Ник: ' + str(nick) + '\\n' + 'Сервер: Браво' + '\\n' + 'Ранг: ' + str(\n rank) + '\\n' + 'Exp: ' + str(exp) + '\\n' + 'Клан: ' + str(clan),\n reply_markup=keyboard())\n except KeyError:\n bot.send_message(message.chat.id,\n 'Ник: ' + str(nick) + '\\n' + 'Сервер: Браво' + '\\n' + 'Ранг: ' + str(\n rank) + '\\n' + 'Exp: ' + str(exp) + '\\n' + 'Клан: No',\n reply_markup=keyboard())\n\n elif t == '<Response [200]>':\n data = r.json()\n print(data)\n nick = data['nickname']\n rank = data['rank_id']\n exp = data['experience']\n try:\n time.sleep(5)\n clan = data['clan_name']\n bot.send_message(message.chat.id,\n 
'Ник: ' + str(nick) + '\\n' + 'Сервер: Альфа' + '\\n' + 'Ранг: ' + str(rank) + '\\n' + 'Exp: ' + str(exp) + '\\n' + 'Клан: ' + str(clan),\n reply_markup=keyboard())\n except KeyError:\n bot.send_message(message.chat.id,\n 'Ник: ' + str(nick) + '\\n' + 'Сервер: Альфа' + '\\n' + 'Ранг: ' + str(rank) + '\\n' + 'Exp: ' + str(exp) + '\\n' + 'Клан: No',\n reply_markup=keyboard())\n\n # bot.send_message(message.chat.id,\n # 'Ник: ' + str(nick) + '\\n' + 'Сервер: Альфа' + '\\n' + 'Ранг: ' + str(rank) + '\\n' + 'Exp: ' + str(exp) + '\\n' + 'Клан: ' + str(clan),\n # reply_markup=keyboard())\n\n\ndef get_money(message):\n text = message.text\n us_id = str(message.from_user.id)\n cursor.execute(\"SELECT pin from game WHERE User_ID=?\", [us_id])\n row = cursor.fetchone()\n us_bus = str(row[0])\n print(us_bus)\n\n if text == 'stell' and us_bus == '0':\n cursor.execute(''' update game set Score=Score+100 where User_ID=?''', [us_id])\n cursor.execute(''' update game set pin=1 where User_ID=?''', [us_id])\n conn.commit()\n bot.send_message(message.chat.id, 'Пин-код успешно активирован!', reply_markup=keyboard())\n elif us_bus == '1':\n bot.send_message(message.chat.id, 'Вы же активировали пин-код!', reply_markup=keyboard())\n else:\n bot.send_message(message.chat.id, 'Вы ввели некорректный пин-код!', reply_markup=keyboard())\n\n\[email protected]_handler(commands=['job'])\ndef my_funk(message):\n us_id = str(message.from_user.id)\n cursor.execute(\"SELECT business from game WHERE User_ID=?\", [us_id])\n row = cursor.fetchone()\n us_bus = str(row[0])\n print(us_bus)\n if us_bus == 'Магнит':\n bot.send_message(message.chat.id, 'Вы начали работу! Приходите через 24часа!', reply_markup=keyboard())\n time.sleep(60) #86400 - 24 часа\n cursor.execute(''' update game set Score=Score+100 where User_ID=?''', [us_id])\n conn.commit()\n elif us_bus == 'Пятёрочка':\n bot.send_message(message.chat.id, 'Вы начали работу! 
Приходите через 24часа!', reply_markup=keyboard())\n time.sleep(60) #86400 - 24 часа\n cursor.execute(''' update game set Score=Score+500 where User_ID=?''', [us_id])\n conn.commit()\n elif us_bus == 'DNS':\n bot.send_message(message.chat.id, 'Вы начали работу! Приходите через 24часа!', reply_markup=keyboard())\n time.sleep(60) #86400 - 24 часа\n cursor.execute(''' update game set Score=Score+1000 where User_ID=?''', [us_id])\n conn.commit()\n else:\n bot.send_message(message.chat.id, 'Купите бизнес, чтобы начать получать прибыль!', reply_markup=keyboard())\n\n\[email protected]_handler(content_types=['text'])\ndef get_text_message(message):\n global gg, check\n text = message.text\n if text == 'Привет' or text == 'привет':\n bot.send_message(message.chat.id, 'Хааааай', reply_markup=keyboard())\n elif text == 'Ваш баланс' or text == 'Баланс':\n try:\n us_id = str(message.from_user.id)\n cursor.execute(\"SELECT Score from game WHERE User_ID=?\", [us_id])\n row = cursor.fetchone()\n us_score = row[0]\n bot.send_message(message.chat.id, 'Ваш баланс: ' + str(us_score) + ' монет.')\n print(row[0])\n except TypeError:\n bot.send_message(message.chat.id, 'Зарегистрируйтесь/Войдите')\n elif text == 'Купить':\n sent = bot.send_message(message.chat.id, 'Выберите бизнес:\\n1- Магнит\\n2- Пятёрочка\\n3- DNS')\n bot.register_next_step_handler(sent, get_info)\n # us_id = str(message.from_user.id)\n # cursor.execute(\"SELECT business from game WHERE User_ID=?\", [us_id])\n # row = cursor.fetchone()\n # us_buy = str(row[0])\n # print(us_buy)\n #\n # if us_buy != 'None':\n # pass\n # elif gg == False:\n # bot.send_message(message.from_user.id, \"Выберите бизнес:\\n1 Магнит\\n2 Пятёрочка\\n3 DNS\")\n #\n # gg = True\n # print(gg)\n # if gg == True:\n # bot.send_message(message.from_user.id, \"Вы приобрели бизнес!\")\n\n elif text == 'Мои Бизнесы' or text == 'Бизнесы':\n us_id = str(message.from_user.id)\n cursor.execute(\"SELECT business from game WHERE User_ID=?\", [us_id])\n row = 
cursor.fetchone()\n us_bus = str(row[0])\n if us_bus == 'None':\n bot.send_message(message.chat.id, 'У Вас нет бизнесов.')\n else:\n bot.send_message(message.chat.id, 'Ваши бизнесы: ' + str(us_bus))\n print(row[0])\n elif text == 'Получить монеты':\n sent = bot.send_message(message.chat.id, 'Введите пин-код:')\n bot.register_next_step_handler(sent, get_money)\n elif text == 'Help' or text == 'Помощь':\n bot.send_message(message.chat.id, 'Помощь во всём!!!', reply_markup=keyboard())\n elif text == 'Данные игрока':\n sent = bot.send_message(message.chat.id, 'Введите ник игрока.')\n bot.register_next_step_handler(sent, get_id)\n else:\n bot.send_message(message.chat.id, 'Я тебя не понимаю!', reply_markup=keyboard())\n\n\ndef keyboard():\n markup = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)\n btn_1 = types.KeyboardButton('Help')\n btn_2 = types.KeyboardButton('/Login')\n btn_3 = types.KeyboardButton('Баланс')\n btn_4 = types.KeyboardButton('Купить')\n btn_5 = types.KeyboardButton('Мои Бизнесы')\n btn_6 = types.KeyboardButton('Получить монеты')\n btn_7 = types.KeyboardButton('/job')\n btn_8 = types.KeyboardButton('Данные игрока')\n markup.add(btn_1, btn_3)\n markup.add(btn_4, btn_5)\n markup.add(btn_6, btn_7)\n markup.add(btn_2, btn_8)\n return markup\n\n\n\n\n\nbot.polling(none_stop=True, interval=0)\n" }, { "alpha_fraction": 0.6707960367202759, "alphanum_fraction": 0.6730901598930359, "avg_line_length": 31.288888931274414, "blob_id": "d8429d56aca48fba45bd13a7d4a7a1cab1822899", "content_id": "707ed30d7f95f7020858134894cd14a9df7aca54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4359, "license_type": "no_license", "max_line_length": 77, "num_lines": 135, "path": "/venv/Lib/site-packages/oops/publishers.py", "repo_name": "stellandyt/cwservbot", "src_encoding": "UTF-8", "text": "# Copyright (c) 2011, Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or 
modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, version 3 only.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n# GNU Lesser General Public License version 3 (see the file LICENSE).\n\n\"\"\"Generic publisher support and utility code.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\n__metaclass__ = type\n\n__all__ = [\n 'pprint_to_stream',\n 'publish_with_fallback',\n 'publish_to_many',\n ]\n\nfrom hashlib import md5\nfrom pprint import pformat\n\n\ndef pprint_to_stream(stream):\n \"\"\"Pretty print reports to text stream.\n \n Reports will be given an id by hashing the report if none is present.\n \"\"\"\n def pprinter(report):\n report = dict(report)\n output = pformat(report)\n if not report.get('id'):\n report['id'] = md5(output.encode('UTF-8')).hexdigest()\n output = pformat(report)\n stream.write(output)\n stream.write('\\n')\n stream.flush()\n return [report['id']]\n return pprinter\n\n\ndef publish_new_only(publisher):\n \"\"\"Wraps a publisher with a check that the report has not had an id set.\n\n This permits having fallback publishers that only publish if the earlier\n one failed.\n\n For instance:\n\n >>> config.publishers.append(amqp_publisher)\n >>> config.publishers.append(publish_new_only(datedir_repo.publish))\n\n This function is deprecated. 
Instead please use publish_with_fallback.\n \"\"\"\n def result(report):\n if report.get('id'):\n return None\n return publisher(report)\n return result\n\n\ndef publish_with_fallback(*publishers):\n \"\"\"A publisher to fallback publishing through a list of publishers\n\n This is a publisher, see Config.publish for the calling and return\n conventions. This publisher delegates to the supplied publishers\n by calling them all until one reports that it has published the\n report, and aggregates the results.\n\n :param *publishers: a list of callables to publish oopses to.\n :return: a callable that will publish a report to each\n of the publishers when called.\n \"\"\"\n def result(report):\n ret = []\n for publisher in publishers:\n ret.extend(publisher(report))\n if ret:\n break\n return ret\n return result\n\n\ndef publish_to_many(*publishers):\n \"\"\"A fan-out publisher of oops reports.\n\n This is a publisher, see Config.publish for the calling and return\n conventions. This publisher delegates to the supplied publishers\n by calling them all, and aggregates the results.\n\n If a publisher returns a non-emtpy list (indicating that the report was\n published) then the last item of this list will be set as the 'id' key\n in the report before the report is passed to the next publisher. This\n makes it possible for publishers later in the chain to re-use the id.\n\n :param *publishers: a list of callables to publish oopses to.\n :return: a callable that will publish a report to each\n of the publishers when called.\n \"\"\"\n def result(report):\n ret = []\n for publisher in publishers:\n if ret:\n report['id'] = ret[-1]\n ret.extend(publisher(report))\n return ret\n return result\n\n\ndef convert_result_to_list(publisher):\n \"\"\"Ensure that a publisher returns a list.\n\n The old protocol for publisher callables was to return an id, or\n a False value if the report was not published. 
The new protocol\n is to return a list, which is empty if the report was not\n published.\n\n This function coverts a publisher using the old protocol in to one that\n uses the new protocol, translating values as needed.\n \"\"\"\n def publish(report):\n ret = publisher(report)\n if ret:\n return [ret]\n else:\n return []\n return publish\n" } ]
21
Jiboxiake/gfe_notebooks
https://github.com/Jiboxiake/gfe_notebooks
6e1c05f55c0b9ec23687adbc8d426de421c25c23
adeaa689cd1d264c8e30d2ffda6aba020e3974c9
0a750dbe1beb2770f290c44c66d9d52ad1b95bad
refs/heads/master
2023-06-27T20:30:50.423817
2021-02-11T14:09:37
2021-02-11T14:16:56
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6093780398368835, "alphanum_fraction": 0.6336079835891724, "avg_line_length": 40.10887145996094, "blob_id": "58f0a890704562e3e75064c0a47a79fa8f44b9dc", "content_id": "1787150c59b76c7b126d21cca984fb506bbc2305", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10194, "license_type": "no_license", "max_line_length": 159, "num_lines": 248, "path": "/common.py", "repo_name": "Jiboxiake/gfe_notebooks", "src_encoding": "UTF-8", "text": "import functools\nimport glob\nimport math\nimport os.path\nimport pandas as pd\nimport re\nimport sqlalchemy as sqla\n\n\ndef import_gfe_dbms(sql_or_table, datasource):\n \"\"\"\n Load the content of a view or table from the database `datasource' into\n a newly created pandas dataframe\n :param sql_or_table: either a table/view to load or a full SQL command\n :return: a pandas dataframe with the content of the loaded table\n \"\"\"\n # Parse the datasource file\n if not datasource.endswith('.sqlite3'): datasource += \".sqlite3\"\n if not os.path.exists(datasource) and datasource.find('/') == -1:\n datasource = 'data/' + datasource\n if not os.path.exists(datasource):\n raise FileNotFoundError(\"The file `\" + datasource + \"' does not exist\")\n\n db = sqla.create_engine(\"sqlite:///\" + datasource)\n\n # connection = db.connect()\n sql_string = sql_or_table.lower() # convert to lower case\n if(not sql_string.startswith(\"select\") and not sql_string.startswith(\"with\")):\n sql_string = \"SELECT * FROM \" + sql_string # the argument is simply a table or a view\n df = pd.read_sql(sql_string, db)\n\n # replace the column client_graph (full path to the graph) with simply the graph name\n if \"client_graph\" in df.columns:\n def get_graph_name(absolute_path):\n filename = os.path.basename(absolute_path)\n # remove the extension\n if filename.endswith('properties'): filename = os.path.splitext(filename)[0]\n return filename\n df[\"graph\"] = 
df[\"client_graph\"].apply(get_graph_name)\n df[\"graph\"] = df[\"graph\"].apply(lambda x: re.sub(r'-dense$', '', x)) # remove the suffix -dense\n # replace client_graph with graph\n indexOf = df.columns.to_list().index(\"client_graph\")\n df.drop([\"client_graph\"], axis=1, inplace=True)\n df = df[ df.columns[:indexOf].to_list() + [\"graph\"] + df.columns[indexOf: -1].to_list() ]\n\n # replace the measured times with a TimeDelta (depending on the column suffix)\n units = {\n \"seconds\": [\"sec\", \"secs\", \"second\", \"seconds\"],\n \"milliseconds\": [\"millisec\", \"millisecs\", \"millisecond\", \"milliseconds\"],\n \"microseconds\": [\"usec\", \"usecs\", \"microsec\", \"microsecs\", \"microsecond\", \"microseconds\"]\n }\n for column in df.columns:\n column = str(column)\n indexOf = column.rfind(\"_\")\n if indexOf == -1: continue\n suffix = column[indexOf +1:].lower()\n unit = None\n for u in units:\n for c in units[u]:\n if suffix == c:\n unit = u\n break\n if unit is not None:\n df[column] = df[column].apply(lambda x: pd.to_timedelta(x, unit=unit) if x != 0 else pd.NaT)\n df.rename(columns={column: column[:indexOf]}, inplace=True)\n\n # add a convenience column in plain secs, to ease aggregation\n columns = df.columns; # original columns\n for index, column in reversed(list(enumerate( df.columns ))):\n if pd.api.types.is_timedelta64_dtype( df[column] ):\n df[column + \"_secs\"] = df[column].apply(lambda x: x.total_seconds()) # create the new column\n columns = columns.insert(index +1, column + \"_secs\") # reposition the new column just after the previous one\n df = df[ columns ] # reorder the columns \n \n # connection.close()\n return df\n\n\ndef import_gfe(sql_or_table):\n \"\"\"\n From 15/Oct/2021, we use again only a single database to store all results\n\n :param sql_or_table: the SQL query to execute over the databases\n :return: a new dataframe, representing the result of the query\n \"\"\"\n return import_gfe_dbms(sql_or_table, 
\"data/data21.sqlite3\")\n\n\n# def import_gfe(sql_or_table):\n# \"\"\"\n# Execute the query against all databases in the directory data/data* and report a new\n# data frame with the concatenation of all result sets obtained\n#\n# :param sql_or_table: the SQL query to execute over the databases\n# :return: a new dataframe, representing the result of the query\n# \"\"\"\n# list_data_frames = []\n# for database in glob.glob('data/data*.sqlite3'):\n# df = import_gfe_dbms(sql_or_table, database)\n# database = os.path.basename(database).replace('.sqlite3', '')\n# df.insert(0, \"database\", database) # add an attribute `database' with the name of the database\n# list_data_frames.append(df)\n#\n# result = pd.concat(list_data_frames, ignore_index=True)\n# return result\n\n\ndef import_graphmat(path_csv = 'data/graphmat/results.csv'):\n \"\"\"\n Retrieve a dataframe with the results from graphmat, as stored in results_graphmat.csv\n :return: a DataFrame with the results from graphmat\n \"\"\"\n graphmat_csv = pd.read_csv(path_csv)\n graphmat = pd.concat( [\n graphmat_csv.iloc[:, :-3],\n graphmat_csv.iloc[:, -3:].applymap(lambda x: pd.to_timedelta(x, 'seconds')) ],\n axis=\"columns\")\n\n # t_startup_pec is the percentage of t_makespan - t_processing\n graphmat[\"t_startup_perc\"] = ( graphmat[\"t_makespan\"] - graphmat[\"t_processing\"] ) / graphmat[\"t_makespan\"] * 100.0\n graphmat.sort_values([\"algorithm\", \"graph\"], inplace=True)\n\n return graphmat\n\n\ndef prepare_barchart(df, col_x_axis, col_group, col_y_axis):\n \"\"\"\n\n Example: prepare_barchart(\"graph\", \"library\", \"median\")\n\n :param df:\n :param col_x_axis: the column for the x axis (e.g. \"graph\")\n :param col_group: the attribute with the groups (e.g. \"library\")\n :param col_y_axis: the column fo the y axis is the actual measurement (e.g. 
completion_time)\n :return: a pandas DataFrame with the above specified format\n \"\"\"\n df = df.copy() # silent the warning SettingWithCopy\n\n convert_to_timedelta = False\n df[\"_ct\"] = df.loc[:, col_y_axis]\n try:\n df[\"_ct\"] = df[\"_ct\"].apply(lambda x: x.total_seconds())\n convert_to_timedelta = True\n except AttributeError:\n pass\n\n agg = df.groupby([col_x_axis, col_group]).agg(\n time = pd.NamedAgg(\n column=\"_ct\", aggfunc=\"median\"\n )\n )\n df.drop(\"_ct\", axis=1, inplace=True)\n agg.reset_index(inplace=True)\n\n tbl_final = None\n for group_name in agg[col_group].unique():\n # select the relevant data\n tbl = agg[agg[col_group] == group_name][[col_x_axis, \"time\"]]\n tbl.set_index(col_x_axis, inplace=True)\n if convert_to_timedelta:\n tbl = tbl.apply(lambda x: pd.to_timedelta(x, unit=\"seconds\"))\n tbl.rename({\"time\": group_name}, axis=1, inplace=True)\n tbl_final = pd.DataFrame(tbl) if tbl_final is None else pd.concat([tbl_final, tbl], axis=1, sort=True)\n\n # sort the attributes / libraries\n tbl_final = tbl_final[ sorted(tbl_final.columns.to_list()) ]\n\n return tbl_final\n\n\ndef aging_medians(df = None):\n '''\n Return for each library, graph and parallelism degree, the execution (exec_id) that accomplished the median throughput. 
The\n exec_id can be further used to pick the execution to portray in the plot for the throughput over time.\n \n :param df: an instance of view_updates_throughput, properly filtered\n :return: a table with the median throughput of each execution\n '''\n if df is None:\n df = import_gfe(\"view_updates\")\n\n # compute the median of each group\n def compute_median(group):\n num_samples = len(group)\n df = group.sort_values(\"throughput\")\n df = df.reset_index(drop=True)\n df = df.loc[ math.floor( num_samples / 2 ) ];\n df[\"count\"] = num_samples\n df[\"mem_gb\"] = round( df[\"memory_footprint_bytes\"] / 1024 / 1024 / 1024, 2);\n df = df[[\"exec_id\", \"throughput\", \"mem_gb\", \"completion_time\", \"count\", \"timeout_hit\"]]\n return df\n\n return df.groupby([\"aging\", \"library\", \"graph\", \"num_threads\"]).apply(compute_median)\n\n\ndef aging_execid_progress(df):\n '''\n Return for each library, graph and parallelism degree, the execution (exec_id) that accomplished the average execution time\n in view_updates_progress\n\n :param df: an instance of view_updates_progress, properly filtered\n :return: a matrix where the rows are pair <library, graph>, the columns the parallelism degree, and the component is exec_id with the median execution time\n '''\n\n df = df.copy() # silent the warning SettingWithCopy\n m = df[\"progress\"].max() # max aging coefficient\n medians = df[(df[\"aging\"] == m) & (df[\"progress\"] == m)].\\\n groupby([\"library\", \"graph\", \"num_threads\"]).\\\n aggregate(completion_time=pd.NamedAgg(\n column='completion_time',\n aggfunc=functools.partial(pd.Series.quantile, interpolation='nearest')\n ))\n\n join = pd.merge(medians, df) # library, graph, num_threads and completion_time are the only columns in common\n\n # in case there are multiple exec_id with the same execution time, select only one, the one with the min exec_id\n join = join.groupby([\"library\", \"graph\", \"num_threads\"]).agg(exec_id=('exec_id', 
'min')).reset_index()\n\n # Use fill_value=\"NaN\" to avoid converting everything to fload and obtaining decimal IDs such as 1234.0\n return join.pivot_table(index=(\"library\", \"graph\"), columns=\"num_threads\", values=\"exec_id\", fill_value=\"NaN\")\n\n\ndef edges_per_graph():\n '''\n Retrieve the number of vertices and edges in each graph evaluated\n '''\n \n data = pd.DataFrame({\n \"graph\": [\"dota-league\", \"graph500-22\", \"uniform-22\", \"graph500-24\", \"uniform-24\", \"graph500-25\", \"uniform-25\", \"graph500-26\", \"uniform-26\"],\n \"num_vertices\": [61170, 2396657, 2396657, 8870942, 8870942, 17062472, 17062472, 32804978, 32804978],\n \"num_edges\": [50870313, 64155735, 64155735, 260379520, 260379520, 523602831, 523602831, 1051922853, 1051922853]\n })\n data = data.set_index(\"graph\")\n return data\n\n\ndef fmtlabel(value):\n '''\n The numeric label to be shown at the top of a bar chart\n '''\n if(value >= 10 ** 9):\n return \"{:.2f} G\".format(value / 10**9)\n elif(value >= 10 ** 6):\n return \"{:.2f} M\".format(value / 10**6)\n elif(value >= 10 ** 3):\n return \"{:.2f} k\".format(value / 10**3)\n else:\n return \"{:.2f}\".format(value)" }, { "alpha_fraction": 0.7698224782943726, "alphanum_fraction": 0.7857987880706787, "avg_line_length": 92.77777862548828, "blob_id": "565446ca31877d82461683665d4e664733f1207f", "content_id": "284650b61a3d99bf4d2c44accfd7782f6ba103cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1690, "license_type": "no_license", "max_line_length": 235, "num_lines": 18, "path": "/README.md", "repo_name": "Jiboxiake/gfe_notebooks", "src_encoding": "UTF-8", "text": "# Notebooks for the GFE experiments\n\nThis repository contains the notebooks used to analyse and to generate the plots featured in the [GFE Driver](https://github.com/cwida/gfe_driver). 
Some notebooks are in the Jupyter format and some in Mathematica v12.1 (sorry!).\n\nAfter downloading repository, fetch the database with the results from [Zenodo](https://zenodo.org/record/4534418) and place it into data/data21.sqlite3. It is a 600 MB database and it was a bit too much to store it in this repository.\n\nThe content of this repository: \n\n* automerge.pl: a script to load the results of new executions of the [GFE Driver](https://github.com/cwida/gfe_driver) into the database data/data21.sqlite3.\n* bm.nb: the notebook (Mathematica) to generate the plot of Figure 9 in the paper.\n* example.ipynb: a sample notebook to analyse the results for the experiments with insertions in Jupyter.\n* gapbs_speedup.ipynb: to generate the plot of Figure 8 in the paper. This is the difference in completion time of the native algorithms shipped by Stinger, LLAMA and GraphOne versus those provided by the GAP BS. \n* graphalytics_data.ipynb: sample notebook to visualize the results of Graphalytics.\n* graphalytics_gen_table.ipynb: the notebook used to generate Table 3 in the paper, that is, the results from Graphalytics.\n* insertions.nb: the notebook (Mathematica) to generate the plot of Figure 6 in the paper.\n* pip_freeze.txt: dependendencies for the Python environment and Jupyter.\n* updates.nb: the notebook (Mathematica) to generate the plot of Figure 7 in the paper.\n* views.sql: list of supplementary SQL views (already loaded in data/data21.sqlite3) to query the results of the experiments. 
\n\n" }, { "alpha_fraction": 0.6594749689102173, "alphanum_fraction": 0.6733554601669312, "avg_line_length": 40.51356887817383, "blob_id": "b4c2c56b8df92116f33a45dc0282ab8e53c74422", "content_id": "a585b5e7cd06e6d4c9619d63e626e43cba1fc53d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 19884, "license_type": "no_license", "max_line_length": 199, "num_lines": 479, "path": "/views.sql", "repo_name": "Jiboxiake/gfe_notebooks", "src_encoding": "UTF-8", "text": "/**\n * Clean up\n * We're going to recreate all views in the following...\n */\nDROP VIEW IF EXISTS view_latency_inserts;\nDROP VIEW IF EXISTS view_latency_updates;\nDROP VIEW IF EXISTS view_graphalytics_inserts; /* dependency on view_insert_only */\nDROP VIEW IF EXISTS view_inserts;\nDROP VIEW IF EXISTS view_graphalytics_updates; /* dependency on view_aging */\nDROP VIEW IF EXISTS view_updates_progress;\nDROP VIEW IF EXISTS view_updates;\nDROP VIEW IF EXISTS view_updates0;\nDROP VIEW IF EXISTS view_graphalytics_load;\nDROP VIEW IF EXISTS view_executions; /* Keep at the end due to dependencies */\n\n/**\n * The first view to create!\n * view_executions reports all parameters of the experiment, the type, the start and end time,\n * the machines where the experiment was executed, and so on\n */\nCREATE VIEW view_executions AS\nSELECT\n e.id AS exec_id,\n CASE\n WHEN(is_insert_only.id IS NOT NULL) THEN 'insert_only'\n WHEN(is_aging.id IS NOT NULL) THEN CASE WHEN(aging_impl IS NULL) THEN 'aging1' ELSE 'aging2' END\n WHEN(e.load IS NOT NULL AND CAST(e.load AS INT)) THEN 'load'\n ELSE NULL\n END AS 'experiment',\n e.library,\n COALESCE(CAST(e.aging AS REAL), 0.0) AS 'aging',\n COALESCE(CAST(e.aging_cooloff AS INT), 0) AS 'aging_cooloff_secs',\n COALESCE(CAST(e.aging_release_memory AS INT), /* default = true */ 1) AS 'aging_release_memory',\n COALESCE(CAST(e.aging_step_size AS REAL), 1.0) AS 'aging_step',\n COALESCE(CAST(e.aging_timeout AS INT), 0) AS 
'aging_timeout',\n COALESCE(CAST(e.batch AS INT), 0) AS 'batch_sz',\n CASE\n WHEN(e.compiler IS NULL) THEN NULL\n WHEN(e.compiler = 'clang') THEN ('Clang ' || e.compiler_major || '.' || e.compiler_minor || (CASE (e.compiler_patch) WHEN NULL THEN '' WHEN '0' THEN '' ELSE ('.' || e.compiler_patch) END) )\n WHEN(e.compiler = 'gcc') THEN ('GCC ' || e.compiler_major || '.' || e.compiler_minor || (CASE (e.compiler_patch) WHEN NULL THEN '' WHEN '0' THEN '' ELSE ('.' || e.compiler_patch) END) )\n WHEN(e.compiler = 'icc') THEN ('Intel ICC ' || e.compiler_major || '.' || e.compiler_minor || (CASE (e.compiler_patch) WHEN NULL THEN '' WHEN '0' THEN '' ELSE ('.' || e.compiler_patch) END) )\n ELSE e.compiler || ' ' || e.compiler_major || '.' || e.compiler_minor || (CASE (e.compiler_patch) WHEN NULL THEN '' WHEN '0' THEN '' ELSE ('.' || e.compiler_patch) END)\n END AS 'compiler',\n e.compiler AS 'compiler_family',\n CAST(e.compiler_major AS INTEGER) AS 'compiler_major',\n CAST(e.compiler_minor AS INTEGER) AS 'compiler_minor',\n CAST(e.compiler_patch AS INTEGER) AS 'compiler_patch_level',\n COALESCE((SELECT 1 FROM aging_intermediate_memory_usage_v2 mem WHERE mem.exec_id = e.id), /* false */ 0) AS memfp,\n COALESCE(CAST(e.aging_memfp_report AS INT), 0) AS memfp_report,\n COALESCE(CAST(e.aging_memfp_physical AS INT), /* by default, yes */ 1) AS memfp_rss,\n COALESCE(CAST(e.aging_memfp_threshold AS INT), 0) AS memfp_threshold_bytes,\n CAST(e.num_repetitions AS INT) AS 'num_repetitions',\n CAST(e.num_threads_read AS INT) AS 'num_threads_read',\n CAST(e.num_threads_write AS INT) AS 'num_threads_write',\n -- Also transform num_threads_omp = 0 => NULL for compatibility with the notebook already created with Stinger\n CASE WHEN(e.num_threads_omp IS NULL OR e.num_threads_omp = '0') THEN NULL ELSE CAST(e.num_threads_omp AS INT) END AS 'num_threads_omp',\n COALESCE(CAST(e.measure_latency AS INT), 0) AS 'has_latency',\n COALESCE(CAST(e.num_validation_errors AS INT), -1) AS 
'num_validation_errors',\n COALESCE(CAST(e.build_frequency AS INT), 0) AS 'build_frequency_millisecs',\n COALESCE(CAST(e.timeout AS INT), 0) AS 'timeout',\n COALESCE(CAST(e.directed AS INT), /* assume directed */ 1) AS 'is_directed',\n e.graph AS 'client_graph',\n COALESCE(role, 'client-server') AS 'mode', /* obsolete property */\n e.client_host AS 'client_host',\n e.server_host AS 'server_host',\n COALESCE(CAST(e.server_port AS INT), -1) AS 'server_port',\n COALESCE(e.hostname, e.server_host) AS 'hostname',\n CASE WHEN(e.server_host IS NULL) THEN\n CASE WHEN(INSTR(e.hostname, \"stones2\") > 0) THEN 'stones2'\n WHEN(INSTR(e.hostname, \"rocks2\") > 0) THEN 'rocks2'\n WHEN(INSTR(e.hostname, \"diamonds\") > 0) THEN 'diamonds'\n WHEN(INSTR(e.hostname, \"bricks\") > 0) THEN 'bricks'\n ELSE 'unknown' END\n ELSE\n CASE WHEN(INSTR(e.server_host, \"stones2\") > 0) THEN 'stones2'\n WHEN(INSTR(e.server_host, \"rocks2\") > 0) THEN 'rocks2'\n WHEN(INSTR(e.server_host, \"diamonds\") > 0) THEN 'diamonds'\n WHEN(INSTR(e.server_host, \"bricks\") > 0) THEN 'bricks'\n ELSE 'unknown' END\n END AS 'cluster',\n e.git_commit AS 'git_commit',\n COALESCE(aging_impl, 'version_1') AS 'aging_impl',\n e.time_start,\n e.time_end\nFROM executions e\n LEFT JOIN insert_only is_insert_only ON is_insert_only.exec_id = e.id\n LEFT JOIN aging is_aging ON is_aging.exec_id = e.id\n/*\n 17/Mar/2019 - Remove all the executions of GraphOne with S.F. 26 that were executed before this date. There was an\n issue with the vertex dictionary. 
It is statically allocated in the GraphOne library (not in the driver) and it did\n not have enough space to store all vertices, causing a memory overflow.\n Bug fixed in commit 0143b4ec on 17/Mar/2019, graphone library (not gfe driver), branch feature/gfe\n */\nWHERE\n NOT (graph LIKE '%-26.properties' AND library LIKE 'g1-%' AND time_start < '2020-03-17')\n;\n\n/**\n * Retrieve the results from the experiment 'insert_only'\n * That is how long it took for a library to load the graph using `num_threads' and\n * only relying on calls to insert a vertex and an edge at the time.\n */\nCREATE VIEW view_inserts AS\nSELECT\n e.exec_id,\n e.cluster,\n e.hostname,\n e.mode,\n e.library, /* the implementation tested */\n e.client_graph, /* the graph used, full absolute path relative to the client_host */\n e.is_directed, /* is the graph directed ? */\n -- e.batch_sz, /* if the insertions were sent from the client to the server in batches, report the size of each batch in terms of number of edges */\n e.compiler_family,\n e.compiler,\n e.num_threads_write AS num_threads, /* num threads used in the client/server */\n e.num_threads_omp AS omp_threads, /* num threads configured to be used in OMP */\n e.num_validation_errors, /* -1 => no validations performed; otherwise it's the number of edges missing from the original graph */\n e.has_latency, /* whether the experiment measured also the latency of the operations */\n io.scheduler, /* implementation detail, the internal algorithm used in the experiment to schedule the insertions among the used threads */\n CASE /* before 25/11/2019, the parameter --build_frequency was ignore and only one snapshot was created at the end of the experiment */\n WHEN(CAST(io.revision AS INTEGER) < 20191125) THEN 0\n ELSE e.build_frequency_millisecs\n END AS 'build_frequency_millisecs', /* how often manually create a new level/snapshot/delta, invoking the method #build() */\n io.num_build_invocations, /* the total number of levels/snapshots/deltas 
created, by manually invoking the method #build() */\n io.num_snapshots_created, /* total number of levels/snapshots/deltas created, either by explicitly invoking the method #build() or implicitly by the library */\n io.insertion_time AS insertion_time_usecs, /* total time to insert the elements, excl. building the final snapshot */\n 100.0 * (io.insertion_time) / (io.insertion_time + io.build_time) AS insertion_time_perc,\n io.build_time AS build_time_usecs, /* total time to build the *last* snapshot in LLAMA */\n 100.0 * (io.build_time) / (io.insertion_time + io.build_time) AS build_time_perc,\n io.insertion_time + io.build_time AS completion_time_usecs /* microsecs */\nFROM view_executions e JOIN insert_only io ON(e.exec_id = io.exec_id)\nWHERE e.mode = 'standalone'\n;\n\n/**\n * Retrieve the results from the experiment `graphalytics', after the graph\n * has been constructed with only inserts (no updates, no aging)\n */\nCREATE VIEW view_graphalytics_inserts AS\nSELECT\n i.library,\n i.cluster,\n i.client_graph,\n i.is_directed,\n i.compiler_family,\n i.compiler,\n i.build_frequency_millisecs, /* this needs to be accounted for llama */\n i.num_snapshots_created, /* as above */\n e.num_threads_read AS num_threads_read,\n e.num_threads_write AS num_threads_write,\n e.num_threads_omp AS omp_threads,\n s.type AS algorithm,\n s.mean AS mean_usecs,\n s.median AS median_usecs,\n s.min AS min_usecs,\n s.max AS max_usecs,\n s.p90 AS p90_usecs,\n s.p95 AS p95_usecs,\n s.p97 AS p97_usecs,\n s.p99 AS p99_usecs,\n s.num_trials,\n s.num_timeouts,\n s.num_trials = s.num_timeouts AS is_all_timeout\nFROM view_inserts i\n JOIN statistics s ON( i.exec_id = s.exec_id )\n JOIN view_executions e ON ( i.exec_id = e.exec_id )\n;\n\n/**\n * Final results from the `aging' experiment. 
Report the amount to insert/delete (updates) the given graph\n * by performing `aging'x times updates\n */\nCREATE VIEW view_updates AS\nWITH tp3 AS (\n SELECT exec_id, MAX(num_operations) / MAX(second) AS throughput\n FROM aging_intermediate_throughput3\n GROUP BY (exec_id)\n),\n /*\n Get the final (last recorded) memory usage for the execution\n */\n mem3 AS (\n SELECT mem1.exec_id, (memfp_process - memfp_driver) AS memory_footprint\n FROM\n ( SELECT exec_id, MAX(tick) AS tick FROM aging_intermediate_memory_usage_v2 WHERE cooloff = 0 GROUP BY (exec_id) ) AS mem0\n JOIN aging_intermediate_memory_usage_v2 mem1\n WHERE mem0.exec_id = mem1.exec_id AND mem0.tick = mem1.tick\n )\nSELECT\n e.exec_id,\n e.cluster,\n e.library,\n e.aging,\n e.client_graph,\n e.is_directed,\n e.compiler_family,\n e.compiler,\n a.num_threads,\n e.num_threads_omp AS omp_threads,\n e.has_latency,\n CASE WHEN (a.has_terminated_for_timeout ) THEN 'timeout'\n WHEN (a.has_terminated_for_memfp) THEN 'memory_overflow'\n ELSE 'completed'\n END AS outcome,\n a.num_updates AS num_edge_updates,\n e.build_frequency_millisecs,\n a.num_build_invocations,\n a.num_snapshots_created,\n a.completion_time AS completion_time_usecs,\n CASE WHEN ( a.has_terminated_for_timeout OR a.has_terminated_for_memfp ) THEN\n tp3.throughput\n ELSE\n a.num_updates / (a.completion_time /* microsecs, convert in secs */ / 1000 / 1000)\n END AS throughput,\n /*\n If driver_release_memory = 1 (true), then the driver released the log buffers while running the experiment.\n The execution times may be different when this is enabled or disabled, because of the additional memory\n usage in the process.\n The readings on the memory_footprint will be definitely different however.\n */\n e.aging_release_memory AS driver_release_memory,\n e.memfp AS memfp, -- whether the memory footprint was measured\n e.memfp_rss AS memfp_rss,\n mem3.memory_footprint AS memfp_bytes\nFROM view_executions e\n JOIN aging a ON (e.exec_id = a.exec_id)\n JOIN 
tp3 ON (e.exec_id = tp3.exec_id)\n LEFT JOIN mem3 ON (e.exec_id = mem3.exec_id)\nWHERE e.mode = 'standalone'\n AND aging_impl IN ('version_2', 'version_3') AND batch_sz = 0\n AND ((NOT a.has_terminated_for_timeout AND NOT a.has_terminated_for_memfp) OR (tp3.throughput IS NOT NULL))\n;\n\n/**\n * Show how long it took to perform 1x, 2x, 3x, ... updates w.r.t. the number of edges in the final graph\n */\nCREATE VIEW view_updates_progress AS\nWITH\n /*\n For some reason, sometimes it does not save in aging_intermediate_throughput the progress for the last chunk (typically 10),\n but we can infer it from the total execution time in the table `aging' (or view_updates). It is not the same as if it was\n reported in the table aging_intermediate_throughput though, especially in delta stores, because in this other case it also\n includes the time to build a new snapshot and terminate the worker threads in the experiment.\n */\n complete_intermediates AS (\n SELECT u.exec_id, ROUND(u.aging / e.aging_step, 2) AS aging_coeff, u.completion_time_usecs AS completion_time\n FROM view_updates u JOIN view_executions e ON (u.exec_id = e.exec_id)\n WHERE NOT EXISTS ( SELECT 1 FROM aging_intermediate_throughput i WHERE u.exec_id = i.exec_id AND ROUND(u.aging / e.aging_step, 2) = i.aging_coeff)\n UNION ALL\n SELECT exec_id, aging_coeff, completion_time\n FROM aging_intermediate_throughput\n ),\n deltas(exec_id, aging_coeff, completion_time, delta) AS (\n SELECT t.exec_id, t.aging_coeff, t.completion_time, t.completion_time AS delta\n FROM complete_intermediates t WHERE t.aging_coeff = 1\n UNION ALL\n SELECT t.exec_id, t.aging_coeff, t.completion_time, t.completion_time - d.completion_time AS delta\n FROM complete_intermediates t, deltas d\n WHERE t.exec_id = d.exec_id AND t.aging_coeff = d.aging_coeff + 1\n )\nSELECT\n e.exec_id,\n e.library,\n e.cluster,\n e.aging,\n e.client_graph,\n e.is_directed,\n e.num_threads_write AS num_threads,\n e.num_threads_omp AS omp_threads,\n 
e.has_latency,\n ROUND(d.aging_coeff * e.aging_step, 2) AS progress, -- normalise the progress\n d.completion_time AS completion_time_usecs,\n d.delta AS delta_usecs\nFROM view_executions e, deltas d\nWHERE e.exec_id = d.exec_id AND e.aging_impl IN ('version_2', 'version_3') AND e.mode = 'standalone'\n;\n\n/**\n * Report the throughput as edges/sec recorded for the aging (updates) experiment.\n */\nCREATE VIEW view_updates_throughput AS\nSELECT\n e.exec_id,\n e.library,\n e.cluster,\n e.aging,\n e.client_graph,\n e.is_directed,\n e.num_threads_write AS num_threads,\n e.num_threads_omp AS omp_threads,\n e.has_latency,\n a.second,\n a.num_operations,\n COALESCE(a.num_operations - (LAG(a.num_operations) OVER (PARTITION BY a.exec_id ORDER BY a.second)), a.num_operations) as throughput\nFROM aging_intermediate_throughput3 a JOIN view_executions e on a.exec_id = e.exec_id;\n\n/**\n * Report the memory footprint, in bytes, during the updates, every 10 secs.\n */\nCREATE VIEW view_updates_memory_footprint AS\nSELECT\n e.exec_id,\n e.library,\n e.cluster,\n e.aging,\n e.client_graph,\n e.is_directed,\n e.aging_release_memory AS driver_release_memory,\n e.num_threads_write AS num_threads,\n e.num_threads_omp AS omp_threads,\n e.has_latency,\n mem.tick AS second,\n (CAST( ait3.num_operations AS REAL ) / (SELECT u.num_updates FROM aging u WHERE u.exec_id = ait3.exec_id)) AS progress, /* in [0, 1] */\n (mem.memfp_process - mem.memfp_driver) AS memory_usage_bytes\nFROM aging_intermediate_memory_usage_v2 mem\n JOIN view_executions e on mem.exec_id = e.exec_id\n JOIN aging_intermediate_throughput3 ait3 ON (ait3.exec_id = mem.exec_id AND ait3.second = mem.tick)\nWHERE mem.cooloff = 0\n;\n\n/**\n * Retrieve the results from the experiment `graphalytics', after the graph\n * has been constructed with a mix of insertions/updates/deletions\n */\nCREATE VIEW view_graphalytics_updates AS\nSELECT\n u.exec_id,\n u.library,\n u.cluster,\n u.aging,\n u.client_graph,\n u.is_directed,\n 
u.compiler_family,\n u.compiler,\n e.num_threads_read AS num_threads,\n e.num_threads_omp AS omp_threads,\n s.type AS algorithm,\n s.mean AS mean_usecs,\n s.median AS median_usecs,\n s.min AS min_usecs,\n s.max AS max_usecs,\n s.p90 AS p90_usecs,\n s.p95 AS p95_usecs,\n s.p97 AS p97_usecs,\n s.p99 AS p99_usecs,\n s.num_trials,\n s.num_timeouts,\n s.num_trials = s.num_timeouts AS is_all_timeout\nFROM\n view_updates u\n JOIN statistics s ON( u.exec_id = s.exec_id )\n JOIN view_executions e ON ( s.exec_id = e.exec_id )\n;\n\nCREATE VIEW view_latency_inserts AS\nSELECT\n i.cluster,\n i.library,\n i.client_graph,\n i.is_directed,\n i.num_threads,\n i.omp_threads,\n l.num_operations,\n l.mean AS mean_nanosecs,\n l.median AS median_nanosecs,\n l.min AS min_nanosecs,\n l.max AS max_nanosecs,\n l.p90 AS p90_nanosecs,\n l.p95 AS p95_nanosecs,\n l.p97 AS p97_nanosecs,\n l.p99 AS p99_nanosecs\nFROM view_inserts i JOIN latencies l ON (i.exec_id = l.exec_id)\n;\n\nCREATE VIEW view_latency_updates AS\nSELECT\n u.cluster,\n u.library,\n u.aging,\n u.client_graph,\n u.is_directed,\n u.num_threads,\n u.omp_threads,\n u.num_build_invocations,\n u.num_snapshots_created,\n lIns.num_operations AS num_insertions,\n lIns.mean AS inserts_mean_nanosecs,\n lIns.median AS inserts_median_nanosecs,\n lIns.min AS inserts_min_nanosecs,\n lIns.max AS inserts_max_nanosecs,\n lIns.p90 AS inserts_p90_nanosecs,\n lIns.p95 AS inserts_p95_nanosecs,\n lIns.p97 AS inserts_p97_nanosecs,\n lIns.p99 AS inserts_p99_nanosecs,\n lDel.num_operations AS num_deletions,\n lDel.mean AS deletes_mean_nanosecs,\n lDel.median AS deletes_median_nanosecs,\n lDel.min AS deletes_min_nanosecs,\n lDel.max AS deletes_max_nanosecs,\n lDel.p90 AS deletes_p90_nanosecs,\n lDel.p95 AS deletes_p95_nanosecs,\n lDel.p97 AS deletes_p97_nanosecs,\n lDel.p99 AS deletes_p99_nanosecs,\n lUpd.num_operations AS num_updates,\n lUpd.mean AS updates_mean_nanosecs,\n lUpd.median AS updates_median_nanosecs,\n lUpd.min AS updates_min_nanosecs,\n 
lUpd.max AS updates_max_nanosecs,\n lUpd.p90 AS updates_p90_nanosecs,\n lUpd.p95 AS updates_p95_nanosecs,\n lUpd.p97 AS updates_p97_nanosecs,\n lUpd.p99 AS updates_p99_nanosecs\nFROM view_updates u\n JOIN latencies lIns ON (u.exec_id = lIns.exec_id AND lIns.type = 'inserts')\n JOIN latencies lDel ON (u.exec_id = lDel.exec_id AND lDel.type = 'deletes')\n JOIN latencies lUpd ON (u.exec_id = lUpd.exec_id AND lUpd.type = 'updates')\n;\n\n/**\n * Profiling of graphalytics (brittle implementation due to the usage of Percent_Rank)\n */\nCREATE VIEW view_graphalytics_profiler_inserts AS\nWITH\n total AS (\n SELECT\n e.library,\n e.cluster,\n e.client_graph,\n e.is_directed,\n e.build_frequency_millisecs, /* this needs to be accounted for llama */\n vi.num_snapshots_created,\n e.num_threads_read AS num_threads_read,\n e.num_threads_write AS num_threads_write,\n e.num_threads_omp AS omp_threads,\n gp.*,\n gp.cache_l1_misses + gp.cache_llc_misses + gp.cache_tlb_misses AS total_misses\n FROM graphalytics_profiler gp\n JOIN view_executions e ON e.exec_id = gp.exec_id\n JOIN view_inserts vi ON vi.exec_id = gp.exec_id\n ),\n ranks AS (\n SELECT *, PERCENT_RANK() OVER(PARTITION BY library, cluster, client_graph, algorithm ORDER BY total_misses) AS rank\n FROM total\n )\nSELECT library, cluster, client_graph, algorithm, cache_l1_misses, cache_llc_misses, cache_tlb_misses\nFROM ranks\nWHERE rank = 0.5\n;\n\n/**\n * Retrieve the results from the experiment `graphalytics', after the graph\n * has been directly loaded from the file\n */\nCREATE VIEW view_graphalytics_load AS\nSELECT\n e.library,\n e.cluster,\n e.client_graph,\n e.is_directed,\n e.compiler_family,\n e.compiler,\n e.num_threads_read AS num_threads_read,\n e.num_threads_omp AS omp_threads,\n s.type AS algorithm,\n s.mean AS mean_usecs,\n s.median AS median_usecs,\n s.min AS min_usecs,\n s.max AS max_usecs,\n s.p90 AS p90_usecs,\n s.p95 AS p95_usecs,\n s.p97 AS p97_usecs,\n s.p99 AS p99_usecs,\n s.num_trials,\n 
s.num_timeouts,\n s.num_trials = s.num_timeouts AS is_all_timeout\nFROM statistics s\n JOIN view_executions e ON ( s.exec_id = e.exec_id )\nWHERE e.experiment = 'load'\n;" } ]
3
ka233-stack/Diamond-doc-django
https://github.com/ka233-stack/Diamond-doc-django
9111b95579d42087653e7098b727217008a40ac9
72b3a114c4f7d2c18c6b527f1aebdda757232353
98ad908b32595370a35f635aa2ff6fe697b80793
refs/heads/master
2022-12-17T00:46:34.530321
2020-08-16T14:46:00
2020-08-16T14:46:00
286,484,257
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.576106071472168, "alphanum_fraction": 0.5780226588249207, "avg_line_length": 37.36897277832031, "blob_id": "32d8c02b021bf584a9296d0df218614e377b11ba", "content_id": "fab2285c58fce67a5da377b0792841d1161a88f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19581, "license_type": "no_license", "max_line_length": 128, "num_lines": 477, "path": "/JGS/JGS/views.py", "repo_name": "ka233-stack/Diamond-doc-django", "src_encoding": "UTF-8", "text": "from JGS.serializer import DocSerializer, DocListSerializer, UserInfoSerializer, UserRegSerializer, GroupCreateSerializer,\\\r\n GroupSerializer, CommentSerializer, CommentCreateSerializer, FavoriteSerializer, BrowseSerializer, ModuleSerializer,\\\r\n GroupLessSerializer, MessageSerializer, MessageCreateSerializer, DocPageSerializer\r\nfrom web_models import models\r\nfrom django.http import HttpResponse\r\nfrom rest_framework.views import APIView\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.pagination import PageNumberPagination\r\nimport uuid\r\n\r\n# Create your views here.\r\n\r\nclass UserAuthentication:\r\n def authenticate(self, request):\r\n\r\n token = request.query_params.get('token')\r\n user = models.Users.objects.filter(token=token).first()\r\n if user:\r\n return (user,token)\r\n else:\r\n return (None,None)\r\n\r\nclass DocView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n def get(self,request,*args,**kwargs):\r\n if not request.user:\r\n return Response(\"请先登录\")\r\n\r\n id = kwargs.get('pk')\r\n doc = models.Doc.objects.get(id=id)\r\n ser = GroupLessSerializer(instance=doc.group)\r\n dict = ser.data['member']\r\n if (doc.group and ((request.user.id in dict) or request.user == doc.group.leader)) or doc.auth >= 1: # 团队文件且在团队内\r\n result = models.Doc.objects.filter(id=id).first()\r\n ser = DocListSerializer(instance=result, many=False)\r\n return Response(ser.data)\r\n if doc.group is None 
and doc.author == request.user: # 个人文件\r\n result = models.Doc.objects.filter(id=id).first()\r\n ser = DocListSerializer(instance=result, many=False)\r\n return Response(ser.data)\r\n return Response(\"无权限\")\r\n\r\n def post(self,request,*args,**kwargs):\r\n if not request.user:\r\n return Response(\"请先登录\")\r\n ser = DocSerializer(data=request.data)\r\n\r\n if ser.is_valid():\r\n ser.save(author_id=request.user.id)\r\n return Response(ser.data)\r\n return Response(\"失败\")\r\n\r\n def patch(self, request, *args, **kwargs):\r\n if not request.user:\r\n return Response(\"请先登录\")\r\n id = kwargs.get('pk')\r\n doc = models.Doc.objects.get(id=id)\r\n ser = GroupLessSerializer(instance=doc.group)\r\n dict = ser.data['member']\r\n updates = doc.updates\r\n if (doc.group and ((request.user.id in dict) or request.user == doc.group.leader)) or doc.auth >= 3:\r\n if doc.status == 1:\r\n return Response('该文档正在被编辑')\r\n else:\r\n result = models.Doc.objects.filter(id=id).first()\r\n ser = DocListSerializer(instance=result, data=request.data, many=False, partial=True)\r\n if ser.is_valid():\r\n updates = updates+1\r\n ser.save(updates=updates)\r\n return Response(ser.data)\r\n if doc.author == request.user and doc.group is None:\r\n if doc.status == 1:\r\n return Response('该文档正在被编辑')\r\n else:\r\n result = models.Doc.objects.filter(id=id).first()\r\n ser = DocListSerializer(instance=result, data=request.data, many=False, partial=True)\r\n if ser.is_valid():\r\n updates = updates+1\r\n ser.save(updates=updates)\r\n return Response(ser.data)\r\n else:\r\n return Response('无权限')\r\n\r\n\r\n# 用户文件\r\nclass DocUserView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n def get(self, request, *args, **kwargs):\r\n result = models.Doc.objects.filter(author=request.user.id, delete=0).all()\r\n total = len(result)\r\n page_object = PageNumberPagination()\r\n result_ = page_object.paginate_queryset(result, request, self)\r\n ser = DocPageSerializer(instance=result_, 
many=True)\r\n dict = ser.data\r\n for row in dict:\r\n row['total'] = total\r\n return Response(ser.data)\r\n\r\n# 团队文件\r\nclass DocGroupView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n def get(self, request, *args, **kwargs):\r\n id = kwargs.get('pk')\r\n result = models.Doc.objects.filter(group=id, delete=0).all()\r\n total = len(result)\r\n page_object = PageNumberPagination()\r\n result_ = page_object.paginate_queryset(result, request, self)\r\n ser = DocPageSerializer(instance=result_, many=True)\r\n dict = ser.data\r\n for row in dict:\r\n row['total'] = total\r\n return Response(ser.data)\r\n\r\n# 回收站中的文件\r\nclass DocBinView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n def get(self, request, *args, **kwargs):\r\n result = models.Doc.objects.filter(author=request.user.id, delete=1).all()\r\n total = len(result)\r\n page_object = PageNumberPagination()\r\n result_ = page_object.paginate_queryset(result, request, self)\r\n ser = DocPageSerializer(instance=result_, many=True)\r\n dict = ser.data\r\n for row in dict:\r\n row['total'] = total\r\n return Response(ser.data)\r\n\r\nclass LoginView(APIView):\r\n def post(self,request,*args,**kwargs):\r\n user = models.Users.objects.filter(**request.data).first()\r\n if not user:\r\n return Response('登录失败')\r\n\r\n user_token = str(uuid.uuid4())\r\n user.token = user_token\r\n user.save()\r\n return Response(user_token)\r\n\r\nclass RegisterView(APIView):\r\n def post(self,request,*args,**kwargs):\r\n user1 = models.Users.objects.filter(username=request.data['username']).first()\r\n if user1:\r\n return Response('用户名已存在')\r\n user2 = models.Users.objects.filter(email=request.data['email']).first()\r\n if user2:\r\n return Response('邮箱已存在')\r\n ser = UserRegSerializer(data=request.data)\r\n if ser.is_valid():\r\n ser.save(nickname=request.data['username'])\r\n user = models.Users.objects.filter(username=request.data['username']).first()\r\n user_token = str(uuid.uuid4())\r\n 
user.token = user_token\r\n user.save()\r\n message = models.Message(touser=user, category=1, content=\"欢迎您使用 文档,该网站提供在线文档协作编辑等功能,祝您使用愉快!\")\r\n message.save()\r\n return Response(user_token)\r\n return Response('error')\r\n\r\nclass UserView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n\r\n def get(self,request,*args,**kwargs):\r\n ser = UserInfoSerializer(instance=request.user)\r\n return Response(ser.data)\r\n\r\n def patch(self, request, *args, **kwargs):\r\n\r\n ser = UserInfoSerializer(instance=request.user, data=request.data, partial=True)\r\n if ser.is_valid():\r\n ser.save()\r\n return Response(ser.data)\r\n return Response('修改失败')\r\n\r\nclass UserGroupView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n def get(self,request,*args,**kwargs):\r\n\r\n result = models.Groups.objects.filter().all()\r\n ser = GroupLessSerializer(instance=result, many=True)\r\n dict = ser.data\r\n queryset = None\r\n for row in dict:\r\n if request.user.id == row['leader'] or (request.user.id in row['member']):\r\n if queryset is None:\r\n queryset = models.Groups.objects.filter(id=row['id']).all()\r\n else:\r\n queryset = queryset | models.Groups.objects.filter(id=row['id']).all()\r\n ser_ = GroupSerializer(instance=queryset, many=True)\r\n return Response(ser_.data)\r\n\r\n\r\nclass FavoriteView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n\r\n def get(self,request,*args,**kwargs):\r\n ser = FavoriteSerializer(instance=request.user, many=False)\r\n page_object = PageNumberPagination()\r\n queryset = None\r\n total = 0\r\n for row in ser.data['favorite']:\r\n if queryset is None:\r\n queryset = models.Doc.objects.filter(id=row).all()\r\n total = total +1\r\n else:\r\n queryset = queryset | models.Doc.objects.filter(id=row).all()\r\n total = total + 1\r\n if queryset is None:\r\n return Response('查询结果为空')\r\n result_ = page_object.paginate_queryset(queryset, request, self)\r\n ser = DocPageSerializer(instance=result_, 
many=True)\r\n dict = ser.data\r\n for row in dict:\r\n row['total'] = total\r\n return Response(dict)\r\n\r\n # doc_id = kwargs.get('pk')\r\n # if not doc_id:\r\n # ser = FavoriteSerializer(instance=request.user, many=False)\r\n # return Response(ser.data)\r\n # else:\r\n # pass\r\n # return Response(\"失败\")\r\n\r\n # 添加收藏\r\n def put(self,request,*args,**kwargs):\r\n if not request.user:\r\n return Response(\"请先登录\")\r\n id = kwargs.get('pk')\r\n doc = models.Doc.objects.get(id=id)\r\n touser = doc.author\r\n message = models.Message(touser=touser,category=5, content=\"您的文档 \"+\"《\"+doc.title+\"》\"+\"被他人收藏\")\r\n message.save()\r\n request.user.favorite.add(doc)\r\n return Response('成功')\r\n\r\n # 取消收藏\r\n def delete(self,request,*args,**kwargs):\r\n if not request.user:\r\n return Response(\"请先登录\")\r\n id = kwargs.get('pk')\r\n doc = models.Doc.objects.get(id=id)\r\n request.user.favorite.remove(doc)\r\n return Response('成功')\r\n\r\nclass BrowseView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n\r\n def get(self, request, *args, **kwargs):\r\n ser = BrowseSerializer(instance=request.user, many=False)\r\n page_object = PageNumberPagination()\r\n queryset = None\r\n total = 0\r\n for row in ser.data['browse']:\r\n if queryset is None:\r\n queryset = models.Doc.objects.filter(id=row).all()\r\n total = total+1\r\n else:\r\n queryset = queryset | models.Doc.objects.filter(id=row).all()\r\n total = total + 1\r\n if queryset is None:\r\n return Response('查询结果为空')\r\n result_ = page_object.paginate_queryset(queryset, request, self)\r\n ser = DocPageSerializer(instance=result_, many=True)\r\n dict = ser.data\r\n for row in dict:\r\n row['total'] = total\r\n return Response(dict)\r\n # doc_id = kwargs.get('pk')\r\n # if not doc_id:\r\n # ser = BrowseSerializer(instance=request.user, many=False)\r\n # return Response(ser.data)\r\n # else:\r\n # pass\r\n # return Response(\"失败\")\r\n\r\n # 添加到最近浏览\r\n def put(self, request, *args, **kwargs):\r\n\r\n id = 
kwargs.get('pk')\r\n doc = models.Doc.objects.get(id=id)\r\n request.user.browse.add(doc)\r\n return Response('成功')\r\n\r\nclass GroupView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n # 创建团队\r\n def post(self,request,*args,**kwargs):\r\n if not request.user:\r\n return Response(\"请先登录\")\r\n group = models.Groups.objects.filter(name=request.data['name']).first()\r\n if group:\r\n return Response('小组名重复')\r\n ser = GroupCreateSerializer(data=request.data)\r\n if ser.is_valid():\r\n ser.save(leader_id=request.user.id)\r\n return Response('成功')\r\n return Response('失败')\r\n\r\n def get(self,request,*args,**kwargs):\r\n id = kwargs.get('pk')\r\n group = models.Groups.objects.get(id=id)\r\n isManager = False\r\n if group.leader.token == request.auth:\r\n isManager = True\r\n result = models.Groups.objects.filter(id=id).first()\r\n ser = GroupSerializer(instance=result, many=False)\r\n dict = ser.data\r\n dict['isManager'] = isManager\r\n return Response(dict)\r\n\r\n # 解散团队(权限)\r\n def delete(self,request,*args,**kwargs):\r\n id = kwargs.get('pk')\r\n group = models.Groups.objects.get(id=id)\r\n if request.user.id == group.leader.id:\r\n ser = GroupLessSerializer(instance=group)\r\n for row in ser.data['member']:\r\n touser = models.Users.objects.filter(id=row).first()\r\n message = models.Message(touser=touser, category=8, content=\"您加入的\" + group.name + \"团队已解散\")\r\n message.save()\r\n models.Groups.objects.get(id=id).delete()\r\n models.Doc.objects.filter(group=group).all().delete()\r\n return Response('解散成功')\r\n else:\r\n return Response('解散失败')\r\n\r\n # 加入团队\r\n def put(self,request,*args,**kwargs):\r\n if not request.user:\r\n return Response(\"请先登录\")\r\n id = kwargs.get('pk')\r\n group = models.Groups.objects.get(id=id)\r\n if request.data['decision'] is True:\r\n group.member.add(request.user)\r\n user_id = request.data['user_id']\r\n message = models.Message(touser_id=user_id, category=3, content=\"您已成功加入\"+group.name+\"小组\")\r\n 
message.save()\r\n else:\r\n user_id = request.data['user_id']\r\n message = models.Message(touser_id=user_id, category=3, content=\"您加入\" + group.name + \"小组的申请已被拒绝\")\r\n message.save()\r\n return Response('成功')\r\n\r\nclass GroupMemberView(APIView):\r\n # 退出团队\r\n authentication_classes = [UserAuthentication]\r\n def delete(self,request,*args,**kwargs):\r\n if not request.user:\r\n return Response(\"请先登录\")\r\n id = kwargs.get('pk')\r\n group = models.Groups.objects.get(id=id)\r\n group.member.remove(request.user)\r\n touser = group.leader\r\n message = models.Message(touser=touser, category=6, content=request.user.username+\"退出了您的\"+group.name+\"团队\")\r\n message.save()\r\n return Response('成功')\r\n\r\n # 移除成员(权限)\r\n def put(self,request,*args,**kwargs):\r\n group_id = kwargs.get('pk')\r\n member_id = kwargs.get('pkk')\r\n group = models.Groups.objects.get(id=group_id)\r\n if request.user.id == group.leader.id:\r\n user = models.Users.objects.get(id=member_id)\r\n group.member.remove(user)\r\n touser = user\r\n message = models.Message(touser=touser, category=7, content=\"您被移出了\" + group.name + \"团队\")\r\n message.save()\r\n return Response('成功')\r\n else:\r\n return Response('失败')\r\n\r\n\r\nclass CommentView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n\r\n def post(self,request,*args,**kwargs):\r\n if not request.user:\r\n return Response(\"请先登录\")\r\n id = kwargs.get('pk')\r\n doc = models.Doc.objects.get(id=id)\r\n ser = GroupLessSerializer(instance=doc.group)\r\n dict = ser.data['member']\r\n\r\n if (doc.group and ((request.user.id in dict) or request.user == doc.group.leader)) or doc.auth >= 2:\r\n ser = CommentCreateSerializer(data=request.data)\r\n if ser.is_valid():\r\n touser = doc.author\r\n message = models.Message(touser=touser, category=4, content=\"您的文档 \" + \"《\" + doc.title + \"》\" + \"被他人评论\")\r\n message.save()\r\n ser.save(commenter_id=request.user.id, document_id=id)\r\n return Response(ser.data)\r\n if doc.author == 
request.user and doc.group is None:\r\n ser = CommentCreateSerializer(data=request.data)\r\n if ser.is_valid():\r\n touser = doc.author\r\n if touser != request.user:\r\n message = models.Message(touser=touser, category=4, content=\"您的文档 \" + \"《\" + doc.title + \"》\" + \"被他人评论\")\r\n message.save()\r\n ser.save(commenter_id=request.user.id, document_id=id)\r\n return Response(ser.data)\r\n else:\r\n return Response(\"无权限\")\r\n\r\n\r\n def get(self,request,*args,**kwargs):\r\n id = kwargs.get('pk')\r\n result = models.Comment.objects.filter(document_id=id).all()\r\n ser = CommentSerializer(instance=result, many=True)\r\n return Response(ser.data)\r\n\r\n\r\nclass ModuleView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n\r\n # 获取个人及默认模板列表\r\n def get(self,request,*args,**kwargs):\r\n id = kwargs.get('pk')\r\n if id:\r\n result = models.Module.objects.filter(id=id).first()\r\n ser = ModuleSerializer(instance=result, many=False)\r\n return Response(ser.data)\r\n else:\r\n result = models.Module.objects.filter(creater=None).all() | models.Module.objects.filter(creater=request.user).all()\r\n ser = ModuleSerializer(instance=result, many=True)\r\n return Response(ser.data)\r\n\r\n def post(self,request,*args,**kwargs):\r\n ser = ModuleSerializer(data=request.data, many=False)\r\n if ser.is_valid():\r\n ser.save(creater=request.user)\r\n return Response(ser.data)\r\n\r\nclass MessageView(APIView):\r\n authentication_classes = [UserAuthentication]\r\n\r\n def get(self,request,*args,**kwargs):\r\n id = kwargs.get('pk')\r\n if id:\r\n result = models.Message.objects.filter(id=id).first()\r\n ser = MessageSerializer(instance=result, many=False)\r\n return Response(ser.data)\r\n else:\r\n result = models.Message.objects.filter(touser=request.user).all()\r\n ser = MessageSerializer(instance=result, many=True)\r\n count = 0\r\n for row in ser.data:\r\n if row['status'] == 1:\r\n count = count + 1\r\n dict = ser.data\r\n dict[0]['count'] = count # 
在第一条信息中存储了一共几条未读\r\n return Response(dict)\r\n\r\n def post(self,request,*args,**kwargs): # 申请加入团队生成给组长的消息 传过来小组ID、申请人为当前用户、消息类型:2\r\n group = models.Groups.objects.filter(id=request.data['group_id']).first()\r\n if group is None:\r\n return Response('团队不存在')\r\n ser = MessageCreateSerializer(data=request.data)\r\n ser_ = GroupLessSerializer(instance=group)\r\n dict = ser_.data['member']\r\n if request.user == group.leader or (request.user.id in dict):\r\n return Response('已在该组中')\r\n if ser.is_valid():\r\n ser.save(touser=group.leader, senduser=request.user, group=group,\r\n content=\"用户名为\"+request.user.username+\"的用户申请加入您的\"+group.name+\"小组。\")\r\n\r\n return Response('成功')\r\n\r\n def patch(self,request,*args,**kwargs):\r\n id = kwargs.get('pk')\r\n result = models.Message.objects.filter(id=id).first()\r\n ser = MessageSerializer(data=request.data, instance=result, partial=True)\r\n if ser.is_valid():\r\n ser.save()\r\n return Response('成功')\r\n\r\n return Response('失败')\r\n\r\n\r\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 20, "blob_id": "4bc35315fd8295793726b5a23e7be8545a551a2f", "content_id": "09892a3d070109d3f52d09a6e957f658ea801557", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/README.md", "repo_name": "ka233-stack/Diamond-doc-django", "src_encoding": "UTF-8", "text": "# Diamond-doc-django" }, { "alpha_fraction": 0.8219895362854004, "alphanum_fraction": 0.8219895362854004, "avg_line_length": 26.428571701049805, "blob_id": "196f26317b9468501f17b03000509a6fb9037ce4", "content_id": "3934dbc9a8247566d2c3f9c3578901fa6b914ff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 191, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/JGSWD/admin.py", "repo_name": 
"ka233-stack/Diamond-doc-django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom . import models\n\nadmin.site.register(models.Users)\nadmin.site.register(models.Doc)\nadmin.site.register(models.Groups)\nadmin.site.register(models.Comment)" }, { "alpha_fraction": 0.618850588798523, "alphanum_fraction": 0.6225287318229675, "avg_line_length": 47.431819915771484, "blob_id": "fe77873144b786eccf2285c7ba9411c9d8affad4", "content_id": "5e5c464c83ce97cf9fc49ada8554cd5bcf344baa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2175, "license_type": "no_license", "max_line_length": 90, "num_lines": 44, "path": "/JGS/JGS/urls.py", "repo_name": "ka233-stack/Diamond-doc-django", "src_encoding": "UTF-8", "text": "\"\"\"JGS URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, re_path\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n re_path(r'^doc/$', views.DocView.as_view()),\r\n re_path(r'^doc/(?P<pk>\\d+)/$', views.DocView.as_view()),\r\n re_path(r'^login/$', views.LoginView.as_view()),\r\n re_path(r'^register/$', views.RegisterView.as_view()),\r\n path('user/', views.UserView.as_view()),\r\n re_path(r'^group/$', views.GroupView.as_view()),\r\n re_path(r'^group/(?P<pk>\\d+)/$', views.GroupView.as_view()),\r\n re_path(r'^comment/(?P<pk>\\d+)/$', views.CommentView.as_view()),\r\n re_path(r'^group/(?P<pk>\\d+)/member/$', views.GroupMemberView.as_view()),\r\n re_path(r'^group/(?P<pk>\\d+)/member/(?P<pkk>\\d+)/$', views.GroupMemberView.as_view()),\r\n re_path(r'^favorite/$', views.FavoriteView.as_view()),\r\n # re_path(r'^favorite/(?P<pk>\\d+)/$', views.FavoriteView.as_view()),\r\n re_path(r'^browse/$', views.BrowseView.as_view()),\r\n # re_path(r'^browse/(?P<pk>\\d+)/$', views.BrowseView.as_view()),\r\n re_path(r'^doc/user/$', views.DocUserView.as_view()),\r\n re_path(r'^doc/bin/$', views.DocBinView.as_view()),\r\n re_path(r'^doc/group/(?P<pk>\\d+)/$', views.DocGroupView.as_view()),\r\n path('module/', views.ModuleView.as_view()),\r\n re_path(r'^module/(?P<pk>\\d+)/$', views.ModuleView.as_view()),\r\n path('message/', views.MessageView.as_view()),\r\n re_path(r'^message/(?P<pk>\\d+)/$', views.MessageView.as_view()),\r\n path('group/user/', views.UserGroupView.as_view()),\r\n]\r\n" }, { "alpha_fraction": 0.6576998233795166, "alphanum_fraction": 0.6682261228561401, "avg_line_length": 35.64285659790039, "blob_id": "5688977101d1ac71ed05bbbf563cd8c02472a443", "content_id": "1678298a3a3d8427b97c303e2670c9bee663449c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2865, "license_type": "no_license", "max_line_length": 97, "num_lines": 70, "path": "/JGSWD/models.py", "repo_name": "ka233-stack/Diamond-doc-django", "src_encoding": "UTF-8", "text": "from django.db import 
models\n\n\nclass Users(models.Model):\n id = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=20, verbose_name='昵称')\n username = models.CharField(max_length=25, verbose_name='用户名')\n password = models.CharField(max_length=20, verbose_name='密码')\n email = models.EmailField(verbose_name='电子邮箱')\n birthday = models.DateField(verbose_name='生日')\n sex = models.CharField(max_length=5, verbose_name='性别')\n\n def __str__(self):\n return self.username\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = '用户'\n\n\nclass Groups(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=20, verbose_name='组名称')\n leader = models.CharField(max_length=25, verbose_name='组长用户名')\n member = models.ManyToManyField(to='Users', verbose_name='小组成员')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = '小组'\n verbose_name_plural = '小组'\n\n\nclass Doc(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=25, verbose_name='文档名称')\n title = models.CharField(max_length=128, verbose_name='文档标题')\n content = models.TextField(verbose_name='文档内容')\n createtime = models.DateTimeField(auto_now_add=True, verbose_name='文档创建时间')\n updatetime = models.DateTimeField(auto_now=True, verbose_name='文档更新时间')\n updates = models.PositiveIntegerField(default=0, verbose_name='文档被修改次数')\n author = models.ForeignKey(to='Users', to_field='id', verbose_name='文档创建者')\n group = models.ForeignKey(to='Groups', to_field='id', verbose_name='文档所属组')\n delete = models.IntegerField(default=0, verbose_name='文档是否被删除') # 0表示未被删除,1表示在回收站,2表示彻底删除\n status = models.IntegerField(default=0, verbose_name='文档是否正被编辑') # 0表示未被编辑,1表示正在被编辑\n favorite = models.ManyToManyField(to='Users', verbose_name='收藏文档')\n browse = models.ManyToManyField(to='Users', verbose_name='浏览记录')\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ['-updatetime']\n verbose_name = '文档'\n verbose_name_plural = 
'文档'\n\n\nclass Comment(models.Model):\n id = models.AutoField(primary_key=True)\n content = models.CharField(max_length=128, verbose_name='评论')\n commenter = models.ForeignKey(to='Users', to_field='id', verbose_name='评论者')\n document = models.ForeignKey(to='Doc', to_field='id', verbose_name='评论文档')\n\n def __str__(self):\n return self.id\n\n class Meta:\n verbose_name = '评论'\n verbose_name_plural = '评论'\n" }, { "alpha_fraction": 0.6194753646850586, "alphanum_fraction": 0.6212720274925232, "avg_line_length": 24.254716873168945, "blob_id": "4b3723bf35cdb037f8b08918e0f3c35efb5d2bc9", "content_id": "6dbefcd59564e56613b769d3a32b4917c13b5a32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2839, "license_type": "no_license", "max_line_length": 111, "num_lines": 106, "path": "/JGS/JGS/serializer.py", "repo_name": "ka233-stack/Diamond-doc-django", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\r\nfrom web_models import models\r\n\r\n\r\nclass DocSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Doc\r\n exclude = ['author']\r\n\r\n\r\nclass DocListSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Doc\r\n exclude = []\r\n depth = 1\r\n\r\n\r\nclass UserInfoSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Users\r\n exclude = []\r\n\r\n\r\nclass UserRegSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Users\r\n exclude = ['nickname']\r\n\r\n\r\nclass FavoriteSerializer(serializers.ModelSerializer):\r\n\r\n class Meta:\r\n model = models.Users\r\n fields = ['favorite']\r\n\r\n\r\nclass BrowseSerializer(serializers.ModelSerializer):\r\n\r\n class Meta:\r\n model = models.Users\r\n fields = ['browse']\r\n # browse_info = serializers.SerializerMethodField()\r\n #\r\n # class Meta:\r\n # model = models.Users\r\n # fields = ['id', 'username', 'browse_info']\r\n #\r\n # def get_browse_info(self, 
obj):\r\n # return [row.nickname for row in obj.browse.all().values()]\r\n\r\nclass GroupCreateSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Groups\r\n exclude = ['leader', 'member']\r\n\r\n\r\nclass GroupSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Groups\r\n exclude = []\r\n depth = 1\r\n\r\nclass GroupLessSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Groups\r\n exclude = []\r\n\r\n\r\nclass CommentSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Comment\r\n exclude = []\r\n depth = 1\r\n\r\nclass CommentCreateSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Comment\r\n exclude = ['document', 'commenter']\r\n depth = 1\r\n\r\nclass ModuleSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Module\r\n exclude = []\r\n depth = 1\r\n\r\nclass MessageSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = models.Message\r\n exclude = []\r\n\r\n\r\nclass MessageCreateSerializer(serializers.ModelSerializer): # 只有在申请加入团队时产生消息\r\n class Meta:\r\n model = models.Message\r\n exclude = ['touser', 'senduser', 'content']\r\n\r\nclass DocPageSerializer(serializers.ModelSerializer): # 只有在申请加入团队时产生消息\r\n nickname = serializers.SerializerMethodField()\r\n class Meta:\r\n model = models.Doc\r\n fields = ['id','title','content','updates','delete','createtime','updatetime','status','auth','author',\r\n 'group','nickname']\r\n\r\n def get_nickname(self, obj):\r\n return obj.author.nickname\r\n" } ]
6
pran-jal/Discord-Bot
https://github.com/pran-jal/Discord-Bot
d3346533db95ed993791395feb9f9dc0ab405cc1
69b2eaf5810e161bea728f227cda01abcab61e52
574b04bd1736eabe1232fe18633948487c57ef4a
refs/heads/main
2023-07-07T08:33:32.178335
2021-07-26T12:58:36
2021-07-26T12:58:36
389,142,421
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6456692814826965, "alphanum_fraction": 0.6614173054695129, "avg_line_length": 19.16666603088379, "blob_id": "b1833af5c79893cbd5c054b638541735167ab5c9", "content_id": "161b35c35a9fd0b3e48c57b3a14526f696a9be85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 39, "num_lines": 6, "path": "/Audio_maker.py", "repo_name": "pran-jal/Discord-Bot", "src_encoding": "UTF-8", "text": "from gtts import gTTS\r\nimport os\r\n\r\ndef convert(text):\r\n gTTS(text=text).save(\"welcome.mp3\")\r\n os.system(\"welcome.mp3\")\r\n" }, { "alpha_fraction": 0.6360344290733337, "alphanum_fraction": 0.6423721313476562, "avg_line_length": 28.26027488708496, "blob_id": "5a9d922c5e01e3adc14ab3e5fd0699b42e07acca", "content_id": "7057e7555e529842b98b4ee714b864c24ea5b010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2209, "license_type": "no_license", "max_line_length": 77, "num_lines": 73, "path": "/discordbot.py", "repo_name": "pran-jal/Discord-Bot", "src_encoding": "UTF-8", "text": "#Discord Voice Assistent \r\n#Permissions = 36715520\r\n\r\nimport discord\r\nimport key\r\nfrom discord.ext import commands\r\n\r\nintents = discord.Intents.default()\r\nintents.members = True\r\nclient = commands.Bot(command_prefix = '', intents=intents)\r\n\r\ntextchannel = 0\r\nvoicechannel = 0\r\n\r\[email protected]\r\nasync def on_ready():\r\n global textchannel, voicechannel\r\n a=1\r\n b=1\r\n print(f'We have logged in as {client.user.name}')\r\n for guild in client.guilds:\r\n for channel in guild.channels:\r\n if str(channel.type) == 'text' and a:\r\n textchannel = channel.id\r\n a=0\r\n elif str(channel.type) == 'voice' and b:\r\n voicechannel = channel.id\r\n b=0\r\n channel = client.get_channel(textchannel)\r\n await channel.send('Bot Online', tts=True)\r\n\r\[email protected]\r\nasync def 
on_member_join(member):\r\n channel = client.get_channel(textchannel)\r\n await channel.send(f'Hello!, {member.name} welcome', tts=True)\r\n\r\[email protected]\r\nasync def on_message(message):\r\n if message.author == client.user:\r\n return\r\n if message.content.startswith('hi') :\r\n await message.channel.send(f'Hello! {message.author.name}', tts=True)\r\n else :\r\n await client.process_commands(message)\r\n\r\[email protected]\r\nasync def on_voice_state_update(member, before, after):\r\n channel = client.get_channel(textchannel)\r\n if before.channel is not None and after.channel is None:\r\n msg = 'left'\r\n elif before.channel is None and after.channel is not None:\r\n msg = 'joined'\r\n await channel.send(f'{member.name} has {msg} the voice', tts=True)\r\n\r\n\r\[email protected](pass_context=True)\r\nasync def join(ctx):\r\n if ctx.author.voice:\r\n await ctx.message.author.voice.channel.connect()\r\n elif voicechannel:\r\n channel = client.get_channel(voicechannel)\r\n await channel.connect()\r\n else:\r\n await ctx.send('Failed to Join. Join a voice channel and try again')\r\n\r\[email protected](pass_context=True)\r\nasync def leave(ctx):\r\n if ctx.voice_client:\r\n await ctx.guild.voice_client.disconnect()\r\n else:\r\n await ctx.send('I am not in voice')\r\n\r\nclient.run(key.token)\r\n" } ]
2
rohit7s/Crawlr
https://github.com/rohit7s/Crawlr
a60396f10cff189a1f663697dd7f1df9a04c12e1
c713e793a26c4f42cff3a5d902a39832643bfe2e
4be7b7d2ca58d0e521422022badc23a747245cf4
refs/heads/master
2020-09-25T16:22:09.098305
2019-12-14T05:10:32
2019-12-14T05:10:32
226,042,540
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6261407136917114, "alphanum_fraction": 0.6485133767127991, "avg_line_length": 35.537635803222656, "blob_id": "8ed1b9c8954d615964d53f0a88accd270982770c", "content_id": "ec9406881168cb1a1850ed7f580667151102ca7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3397, "license_type": "no_license", "max_line_length": 182, "num_lines": 93, "path": "/crawlr_web_app-master/search/views.py", "repo_name": "rohit7s/Crawlr", "src_encoding": "UTF-8", "text": "from django.shortcuts import render,redirect\nfrom django.http import HttpResponse,HttpResponseRedirect,Http404\nfrom django.shortcuts import reverse\nimport requests as re\nfrom crawlr_web import settings\nimport json\nfrom crawlr_web.decorators import login_required\n\n@login_required\ndef resultallpage(request):\n query = request.GET['q']\n search_text = request.GET['t']\n if query == None:\n return redirect('search:all')\n return render(request,'result.html',{'Question_id':query,'search_query':search_text})\n\n@login_required\ndef allSearch(request):\n headers = {'content-type': 'application/json','authorization': request.session.get('jwt_token')}\n pageNo = request.GET.get('page')\n if pageNo == None:\n pageNo = 1\n else:\n pageNo = int(pageNo)\n if pageNo < 0 or pageNo == 0:\n pageNo = 1\n try:\n responce = re.get(settings.API_URL+'/search/all',params={'pageNo':pageNo},headers=headers)\n except Exception as e:\n print(e)\n raise Http404('something went wrong')\n if responce.status_code == 200:\n res = responce.json()\n data = res['data']\n for i in data:\n i['id'] = i.pop('_id')\n if data:\n pageNo += 1\n next = True\n else:\n next = False\n return render(request, 'mysearch.html', {'searches':data,'pageNo': pageNo, 'next': next})\n if responce.status_code == 401:\n return redirect('auth:login')\n raise Http404('some error occurred')\n\n@login_required\ndef resultpage(request):\n query = request.POST['q']\n if len(query) == 0:\n return 
HttpResponseRedirect(reverse('homepage'))\n try:\n responce = re.post(settings.API_URL+'/search',data=json.dumps({'searchQuery':query}),headers={'authorization':request.session['jwt_token'],'content-type':'application/json'})\n except Exception as e:\n print(e)\n raise Http404('somerthing went wrong')\n if responce.status_code == 200:\n return render(request,'result.html',{'Question_id':responce.json()['id'],'search_query':query})\n if responce.status_code == 401:\n return redirect('auth:login')\n raise Http404('something went wrong')\n # return render(request,'result.html',{'Question_id':'5dd780c85ddf35001749ff73'})\n\n\n@login_required\ndef ResultApi(request):\n id = request.GET['id']\n try:\n responce = re.get(settings.API_URL+'/search',params={'searchID':id},headers={'content-type':'application/json','authorization':request.session['jwt_token']})\n except Exception as e:\n print(e)\n return HttpResponse(json.dumps({'code':400}))\n if responce.status_code == 200:\n res = responce.json()\n return HttpResponse(json.dumps(res))\n if responce.status_code == 401:\n return HttpResponse(json.dumps({'code':401}))\n return HttpResponse(json.dumps({'code':400}))\n\n\n@login_required\ndef cancelSearch(request):\n id = request.GET['id']\n try:\n responce = re.delete(settings.API_URL+'/search/cancel?id='+str(id),headers={'content-type':'application/json','authorization':request.session['jwt_token']})\n except Exception as e:\n print(e)\n raise Http404('something went wrong')\n if responce.status_code == 200:\n return redirect('search:all')\n if responce.status_code == 401:\n return redirect('auth:login')\n raise Http404('something went wrong')" }, { "alpha_fraction": 0.5102488994598389, "alphanum_fraction": 0.5226939916610718, "avg_line_length": 28.717391967773438, "blob_id": "30a8f4f3c945495e32ea36925abd589fb76bb956", "content_id": "6b9f6b55d08bcb484ea385741c2c578aa5a54ea0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", 
"length_bytes": 1366, "license_type": "no_license", "max_line_length": 66, "num_lines": 46, "path": "/crawlr_web_app-master/static/js/profile.js", "repo_name": "rohit7s/Crawlr", "src_encoding": "UTF-8", "text": "$(document).ready(function() {\n var $data = $(\"#profileurl\");\n $.ajax({\n type: \"GET\",\n url: $data.val(),\n success: function(res) {\n\n responce = JSON.parse(res);\n if (responce.status == 200) {\n $(\"#profileimage1\").html(\n '<img class=\"rounded-circle z-depth-2\" src=\"' +\n responce.image +\n '\" width=\"32\" height=\"32\"></img>'\n );\n $(\"#profileimage2\").html(\n '<img class=\"rounded-circle z-depth-2\" src=\"' +\n responce.image +\n '\" width=\"80\" height=\"80\"></img>'\n );\n $(\"#fullname1\").html(responce.fullName);\n $(\"#fullname2\").html(star(responce));\n $(\"#email\").html(responce.email);\n $(\"#about\").html(responce.bio);\n $(\"#searches\").html(responce.searches);\n $(\"#questions\").html(responce.questions);\n $(\"#replys\").html(responce.karma);\n $(\".loading\").hide();\n $(\"#profileformnavbar\").show();\n if (responce.isPremiumUser) {\n $('#premiumbutton').hide()\n }\n } else {\n console.log(\"some error occured\");\n }\n }\n });\n});\n\nfunction star(responce){\n var stringstr = responce.fullName\n if (responce.isPremiumUser) {\n stringstr += '<i style=\"color:gold\" data-feather=\"star\"></i>';\n stringstr += '<script>feather.replace()</script>';\n }\n return stringstr\n}" }, { "alpha_fraction": 0.5641392469406128, "alphanum_fraction": 0.5727229118347168, "avg_line_length": 36.12389373779297, "blob_id": "e803963dd62c01c87cc73f47663d00bdd290d78d", "content_id": "7e12197bb38fd4640ecdf9ced1ef8ac91a05532b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4194, "license_type": "no_license", "max_line_length": 119, "num_lines": 113, "path": "/crawlr_web_app-master/authentication/views.py", "repo_name": "rohit7s/Crawlr", "src_encoding": "UTF-8", "text": "from 
django.http import HttpResponse, Http404\nfrom django.shortcuts import redirect, render\nimport requests as re\nfrom crawlr_web import settings\nimport json\nfrom crawlr_web.decorators import login_required\n\n\n@login_required\ndef homepage(request):\n return render(request, 'index.html')\n\n\ndef logIn(request):\n if 'jwt_token' in request.session:\n headers = {'content-type': 'application/json',\n 'authorization': request.session['jwt_token']}\n url = settings.API_URL+'/auth/test'\n responce = re.post(url, headers=headers)\n if responce.status_code == 200:\n return redirect('homepage')\n return render(request, 'login.html')\n return render(request, 'login.html')\n\n\ndef logOut(request):\n try:\n del request.session['jwt_token']\n del request.session['UserID']\n except KeyError:\n pass\n return redirect('auth:login')\n\n\ndef linkedInTokenHandle(request):\n code = request.GET.get('code')\n state = request.GET.get('state')\n accessToken = '?code='+code+'&state='+state\n responce = re.post(settings.API_URL+'/auth/linkedin/callback/'+accessToken)\n if responce.status_code == 200:\n jwt_token = responce.json()\n if 'JWT' in jwt_token.keys():\n token = jwt_token['JWT']\n UserID = jwt_token['UserID']\n request.session['jwt_token'] = token\n request.session['user_id'] = UserID\n return redirect('homepage')\n else:\n return render(request, 'profile_comfirmation.html', {'jwt_token': jwt_token})\n return redirect('auth:login')\n\n\ndef profileComfirm(request):\n if request.method == 'POST':\n provider = request.POST.get('provider')\n id = request.POST.get('id')\n fullName = request.POST.get('fullName')\n email = request.POST.get('email')\n image = request.POST.get('image')\n data = {'provider': provider, 'id': id,\n 'fullName': fullName, 'image': image, 'email': email}\n headers = {'content-type': 'application/json'}\n responce = re.post(settings.API_URL+'/auth/confirm',\n data=json.dumps(data), headers=headers)\n if responce.status_code == 200:\n jwt_token = 
responce.json()\n if 'JWT' in jwt_token.keys():\n token = jwt_token['JWT']\n UserID = jwt_token['UserID']\n request.session['jwt_token'] = token\n request.session['user_id'] = UserID\n headers = {'content-type': 'application/json',\n 'authorization': token}\n url = settings.API_URL+'/auth/test'\n responce = re.post(url, headers=headers)\n return redirect('homepage')\n else:\n raise Http404('something went wrong')\n if responce.status_code == 401:\n return render(request, 'profile_comfirmation.html', {'jwt_token': data, 'error': 'Please give Valid Data'})\n return redirect('auth:login')\n raise Http404('something went wrong try to login again')\n\n\n@login_required\ndef getProfile(request):\n if request.is_ajax():\n try:\n responce = re.get(settings.API_URL+'/user', headers={\n 'authorization': request.session['jwt_token'], 'uid': request.session['user_id']})\n except Exception as e:\n print(e)\n return HttpResponse(json.dumps({'status': 500}))\n data = responce.json()\n if responce.status_code == 200:\n return HttpResponse(json.dumps({\n \"status\": 200,\n \"image\": data['image'],\n \"fullName\": data['fullName'],\n \"questions\": data['questions'],\n \"searches\": len(data['searches']),\n \"karma\": data['karma'],\n \"email\": data['email'],\n \"bio\": data['bio'],\n \"isPremiumUser\": data['isPremiumUser']\n }))\n else:\n return HttpResponse(json.dumps({'status': 500}))\n return HttpResponse(json.dumps({'status': 500}))\n\n@login_required\ndef editProfile(request):\n return HttpResponse('sdf')" }, { "alpha_fraction": 0.693617045879364, "alphanum_fraction": 0.693617045879364, "avg_line_length": 46, "blob_id": "bb7a09b461a9d6d15148423de84bad6047c92cbb", "content_id": "b35fcc59013344969a7299f3bc0ae08185c6ed3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 705, "license_type": "no_license", "max_line_length": 81, "num_lines": 15, "path": "/crawlr_web_app-master/question/urls.py", "repo_name": 
"rohit7s/Crawlr", "src_encoding": "UTF-8", "text": "from django.urls import path,re_path\nfrom . import views\n\napp_name = 'question'\nurlpatterns = [\n path('pay/', views.payment, name='payment_api'), \n path('payment_Success/', views.payment_success, name='payment_Success'),\n path('api/post',views.QuestionPost,name=\"post_api\"),\n path('api/postreply',views.ApiReplyPost,name=\"reply_post_api\"),\n path('api/deletereply',views.ApiReplyDelete,name=\"reply_delete_api\"),\n path('api/verifyreply',views.ApiReplyVerify,name=\"reply_verify_api\"),\n re_path('reply/(?P<question>\\w+)/',views.ReplyPost,name=\"reply\"),\n re_path('delete/(?P<question>\\w+)/',views.DeleteQuestion,name=\"delete_ques\"),\n re_path('all/',views.QuestionList,name=\"all\"),\n]\n" }, { "alpha_fraction": 0.6849315166473389, "alphanum_fraction": 0.6849315166473389, "avg_line_length": 32.181819915771484, "blob_id": "5a2f838044eed5e72115deb3ec4e222ea054455d", "content_id": "1570903f23464426ab1ddc7b0a3a30257bae13ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 66, "num_lines": 11, "path": "/crawlr_web_app-master/authentication/urls.py", "repo_name": "rohit7s/Crawlr", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . 
import views\n\napp_name='authentication'\nurlpatterns = [\n path('login/',views.logIn, name='login'),\n path('profile/',views.profileComfirm, name='profile_comfirm'),\n path('api/profile/',views.getProfile, name='profile'),\n path('logout/',views.logOut, name='logout'),\n path('edit/',views.editProfile, name='edit'),\n]\n" }, { "alpha_fraction": 0.7190183997154236, "alphanum_fraction": 0.7190183997154236, "avg_line_length": 41.94736862182617, "blob_id": "86a5eae5de91d6cf21668d4415e5f186fc60f2db", "content_id": "f4445475f1cf4f7d79d9dbe72c6329c1b4ca6f10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 815, "license_type": "no_license", "max_line_length": 105, "num_lines": 19, "path": "/crawlr_web_app-master/crawlr_web/urls.py", "repo_name": "rohit7s/Crawlr", "src_encoding": "UTF-8", "text": "from django.urls import path,include,re_path\nfrom authentication.views import linkedInTokenHandle,homepage\nfrom . import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('auth/',include('authentication.urls',namespace='auth')),\n path('ques/',include('question.urls',namespace='ques')),\n path('search/',include('search.urls',namespace='search')),\n re_path('login/',linkedInTokenHandle),\n path('',homepage,name=\"homepage\")\n]\nif settings.DEBUG:\n urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nurlpatterns += patterns('',\n (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),\n )" }, { "alpha_fraction": 0.6327160596847534, "alphanum_fraction": 0.6358024477958679, "avg_line_length": 26, "blob_id": "b55e72940e929b4b3b12bd455431ccb38428ea5b", "content_id": "2b155fbcde73ff745b86c1d05b9ff35e4e9c6d33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, 
"license_type": "no_license", "max_line_length": 42, "num_lines": 12, "path": "/crawlr_web_app-master/crawlr_web/decorators.py", "repo_name": "rohit7s/Crawlr", "src_encoding": "UTF-8", "text": "from django.shortcuts import redirect\nfrom django.urls import reverse_lazy\n\ndef login_required(func):\n def decorator(*args,**kwargs):\n request = args[0]\n if 'jwt_token' in request.session:\n return func(*args,**kwargs)\n else:\n return redirect('auth:login')\n\n return decorator\n" }, { "alpha_fraction": 0.6899441480636597, "alphanum_fraction": 0.6899441480636597, "avg_line_length": 31.545454025268555, "blob_id": "ccf9f37df30a47e537d07491154bd0d2df0695af", "content_id": "0abbb368298610db7f45cb28aaa35d43647be7ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 358, "license_type": "no_license", "max_line_length": 60, "num_lines": 11, "path": "/crawlr_web_app-master/search/urls.py", "repo_name": "rohit7s/Crawlr", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . 
import views\n\napp_name = 'search'\nurlpatterns = [\n path('result/',views.resultpage,name='result'),\n path('cancel/',views.cancelSearch,name='cancel'),\n path('resultall/',views.resultallpage,name='resultall'),\n path('all/',views.allSearch,name='all'),\n path('api/result/',views.ResultApi,name='result_api')\n]\n" }, { "alpha_fraction": 0.6238812804222107, "alphanum_fraction": 0.6389486789703369, "avg_line_length": 42.26960754394531, "blob_id": "6669ab22881a9a372cfd34703fddd2b320e57fe0", "content_id": "fc905d7b9e13a33c948b07db601af17bbd71c1b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8827, "license_type": "no_license", "max_line_length": 193, "num_lines": 204, "path": "/crawlr_web_app-master/question/views.py", "repo_name": "rohit7s/Crawlr", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom crawlr_web.decorators import login_required\nfrom django.http import HttpResponse, Http404\nimport requests as re\nfrom crawlr_web import settings\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib import messages\n\n\n@login_required\ndef payment(request):\n return render(request, 'payment.html')\n\n@login_required\ndef payment_success(request):\n razorpay_payment_id = request.POST.get('razorpay_payment_id')\n headers = {'content-type': 'application/json','authorization': request.session.get('jwt_token')}\n try:\n response = re.post(settings.API_URL + '/user', data=json.dumps({'paymentID': razorpay_payment_id}), headers=headers)\n except Exception as e:\n print(e)\n return HttpResponse(json.dumps({'status': 500}))\n return redirect('/auth/login')\n\n@login_required\ndef QuestionList(request):\n headers = {'content-type': 'application/json','authorization': request.session.get('jwt_token')}\n data1 = {}\n pageNo = request.GET.get('page')\n if pageNo == None:\n pageNo = 1\n else:\n pageNo = int(pageNo)\n if pageNo < 0 or pageNo == 0:\n 
pageNo = 1\n if pageNo == 1:\n try:\n responce = re.get(settings.API_URL+'/trending')\n except Exception as e:\n print(e)\n raise Http404('something went wrong')\n data1 = responce.json()\n params = {'pageNo': pageNo}\n try:\n responce = re.get(settings.API_URL + '/question/all',params=params, headers=headers)\n except re.ConnectionError as e:\n raise Http404('Check internet connection')\n except re.Timeout as e:\n raise Http404('Connection Timeout')\n except re.RequestException as e:\n raise Http404('Something went wrong')\n except KeyboardInterrupt:\n raise Http404('Someone closed the program')\n if responce.status_code == 200:\n json_res = responce.json()\n data = json_res['data']\n if data:\n pageNo += 1\n next = True\n else:\n next = False\n return render(request, 'question.html', {'trending':data1,'question': data, 'pageNo': pageNo, 'next': next, 'user': request.session['user_id']})\n if responce.status_code == 401:\n return redirect('auth:login')\n raise Http404('some error occurred')\n # return render(request,'question.html',{'question':'spdofsdopf','pageNo':0,'next':False})\n\n@login_required\ndef DeleteQuestion(request,question):\n headers = {'content-type': 'application/json','authorization': request.session.get('jwt_token')}\n params = {'QuestionID': question}\n try:\n responce = re.delete(settings.API_URL+'/question?QuestionID='+question,data=json.dumps(params),headers=headers)\n except Exception as e:\n print(e)\n raise Http404('someting went wrong ')\n if responce.status_code == 401:\n return redirect('auth:login')\n if responce.status_code == 200:\n messages.success(request, 'your question successfully deleted !', extra_tags='alert alert-info')\n return redirect('ques:all')\n raise Http404('something went wrong')\n\n@login_required\ndef ReplyPost(request, question):\n pageNo = request.GET.get('page')\n question_text = request.GET.get('question')\n asker = request.GET.get('asker')\n if pageNo == None:\n pageNo = 1\n else:\n pageNo = int(pageNo)\n if 
pageNo < 0 or pageNo == 0:\n pageNo = 1\n headers = {'content-type': 'application/json','authorization': request.session.get('jwt_token')}\n params = {'pageNo': pageNo, 'questionID': question}\n try:\n responce = re.get(settings.API_URL + '/reply',params=params, headers=headers)\n except Exception as r:\n print(r)\n raise Http404('something went wring')\n if responce.status_code == 200:\n json_res = responce.json()\n data = json_res['data']\n if data:\n pageNo += 1\n next = True\n else:\n next = False\n return render(request, 'reply.html', {'asker':asker,'question': question_text,'question_id':question, 'reply': data, 'pageNo': pageNo, 'next': next, 'user': request.session['user_id']})\n\n if responce.status_code == 401:\n return redirect('auth:login')\n raise Http404('something went wrong')\n # return render(request, 'reply.html', {'question': '', 'reply': '', 'pageNo': '', 'next': False, 'user': request.session.get('UserID')})\n\n\n@login_required\n@csrf_exempt\ndef QuestionPost(request):\n if request.is_ajax():\n question = request.POST['question']\n if len(question) == 0:\n messages.error(request, 'your question area is empty',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n try:\n responce = re.post(settings.API_URL + '/question', data={'question': question}, headers={'authorization': request.session['jwt_token']})\n except Exception as e:\n print(e)\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n if responce.status_code == 200:\n messages.success(request, 'your question successfully added!', extra_tags='alert alert-info')\n return HttpResponse(json.dumps({'status': 200}))\n else:\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 
500}))\n\n@login_required\n@csrf_exempt\ndef ApiReplyPost(request):\n if request.is_ajax():\n question = request.POST['questionid']\n reply = request.POST['reply']\n try:\n responce = re.post(settings.API_URL + '/reply', data={'QuestionID': question,'reply':reply}, headers={'authorization': request.session['jwt_token']})\n except Exception as e:\n print(e)\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n if responce.status_code == 200:\n messages.success(request, 'your reply successfully added!', extra_tags='alert alert-info')\n return HttpResponse(json.dumps({'status': 200}))\n else:\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n\n@login_required\n@csrf_exempt\ndef ApiReplyDelete(request):\n if request.is_ajax():\n question = request.POST['question']\n id = request.POST['id']\n try:\n responce = re.delete(settings.API_URL + '/reply?QuestionID='+question+'&ReplyID='+id, headers={'authorization': request.session['jwt_token']})\n except Exception as e:\n print(e)\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n if responce.status_code == 200:\n messages.success(request, 'your reply successfully deleted!', extra_tags='alert alert-info')\n return HttpResponse(json.dumps({'status': 200}))\n else:\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n\n@login_required\n@csrf_exempt\ndef ApiReplyVerify(request):\n if request.is_ajax():\n question = request.POST['question']\n id = 
request.POST['id']\n params = {'QuestionID':question,'ReplyID':id}\n try:\n responce = re.post(settings.API_URL + '/reply/verify',data=params,headers={'authorization': request.session['jwt_token']})\n except Exception as e:\n print(e)\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n if responce.status_code == 200:\n messages.success(request, 'your reply successfully verified!', extra_tags='alert alert-info')\n return HttpResponse(json.dumps({'status': 200}))\n else:\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n messages.error(request, 'some error occurred',extra_tags='alert alert-danger')\n return HttpResponse(json.dumps({'status': 500}))\n" }, { "alpha_fraction": 0.5696969628334045, "alphanum_fraction": 0.5787878632545471, "avg_line_length": 14.045454978942871, "blob_id": "46aed27d8565a3609bc239f0e04fc75313def666", "content_id": "9d8afc4d4ff29ae182877e0d02935699fb12b260", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 330, "license_type": "no_license", "max_line_length": 38, "num_lines": 22, "path": "/crawlr_web_app-master/Pipfile", "repo_name": "rohit7s/Crawlr", "src_encoding": "UTF-8", "text": "[[source]]\nurl = \"https://pypi.python.org/simple\"\nverify_ssl = true\n\n[requires]\npython_version = \"3.6\"\n\n[packages]\nDjango = \"*\"\npytz = \"*\"\nsqlparse = \"*\"\ndj-database-url = \"*\"\ndj-static = \"*\"\ngunicorn = \"*\"\nUnipath = \"*\"\npython-decouple = \"*\"\nPillow = \"*\"\nMarkdown = \"*\"\nbleach = \"*\"\npsycopg2 = \"*\"\nwhitenoise = \"*\"\nrequests = \"*\"" } ]
10
guillaume-havard/test_automation_heroku
https://github.com/guillaume-havard/test_automation_heroku
130b8ed985fb85acdbcd252426422fcebad02bdf
58f8f9f9b8646f0fd88efb1c4e976ecd55dd0888
9df6f4fcc3b882e170eb832cfce05612b8d75e69
refs/heads/main
2023-03-23T01:44:21.187137
2021-03-14T17:29:33
2021-03-14T17:35:44
347,686,111
0
0
MIT
2021-03-14T16:15:00
2021-03-14T17:27:32
2021-03-14T17:35:44
Python
[ { "alpha_fraction": 0.5810810923576355, "alphanum_fraction": 0.6216216087341309, "avg_line_length": 13.800000190734863, "blob_id": "57b84f42206fa601009957ab50a19de975274e98", "content_id": "dcb3007d6d43faa8643f37ab889e3f9913359eef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "permissive", "max_line_length": 35, "num_lines": 5, "path": "/tests/test_main.py", "repo_name": "guillaume-havard/test_automation_heroku", "src_encoding": "UTF-8", "text": "import main\n\n\ndef test_add_two_int():\n assert main.func_add(3, 3) == 6\n" }, { "alpha_fraction": 0.5630252361297607, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 12.222222328186035, "blob_id": "5e3c820d369d6a51b6218349c59422dd7f420833", "content_id": "aa05233ceca167bf2f62bc2c761daa760d22fda6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "permissive", "max_line_length": 50, "num_lines": 18, "path": "/main.py", "repo_name": "guillaume-havard/test_automation_heroku", "src_encoding": "UTF-8", "text": "import flask\n\n\ndef func_add(a: int, b: int) -> int:\n return a + b\n\n\napp = flask.Flask(__name__)\n\n\[email protected](\"/\")\ndef index():\n return \"<h1 style='color:black'>Coucou !</h1>\"\n\n\[email protected](\"/pouet\")\ndef pouet():\n return \"Pouet !\"\n" } ]
2
privm/TGbotsimpleaf
https://github.com/privm/TGbotsimpleaf
0cbf8b3767c7448730870236bedd59fdec7277d4
86d9dee7821ca9c98a0480724f16422daac7e3ce
cc3f2e9981c262c806c4df52327b5894ae5d154f
refs/heads/main
2023-08-15T10:56:24.940313
2021-10-02T22:03:17
2021-10-02T22:03:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8059701323509216, "alphanum_fraction": 0.8059701323509216, "avg_line_length": 21.33333396911621, "blob_id": "702b1a9fbac414d1e6e9dd636898b3af6e8142a1", "content_id": "7a353acf9183ea62dad9284894634bb6025e324c", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "permissive", "max_line_length": 28, "num_lines": 3, "path": "/README.md", "repo_name": "privm/TGbotsimpleaf", "src_encoding": "UTF-8", "text": "# simpleTGbot\na Telegram chatbot simple af\nyou can use with Heroku\n" }, { "alpha_fraction": 0.7916666865348816, "alphanum_fraction": 0.7916666865348816, "avg_line_length": 28.647058486938477, "blob_id": "bbefb20e9c7428a26a2951664aac4412e383dccc", "content_id": "1bd927661c16db29af569c54ee6a06c44df1db05", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 504, "license_type": "permissive", "max_line_length": 94, "num_lines": 17, "path": "/main.py", "repo_name": "privm/TGbotsimpleaf", "src_encoding": "UTF-8", "text": "from telegram import Update\nfrom telegram.ext import Updater, CommandHandler, CallbackContext\nfrom telegram import InputMediaPhoto\n\n# import modules\nimport helloworld\n\n# token for @yourTGbot\nupdater = Updater('bot_key_here')\n\n# calling a function from a module with a callback\n# the logic is (\"command's name\", callback = module name.function name)\nupdater.dispatcher.add_handler(CommandHandler('helloworld', callback = helloworld.helloworld))\n# add the next ones\n\nupdater.start_polling()\nupdater.idle()\n" }, { "alpha_fraction": 0.7772511839866638, "alphanum_fraction": 0.7772511839866638, "avg_line_length": 40.400001525878906, "blob_id": "604aca87d33b91ddefaa24ea1bfce6ddf277fe36", "content_id": "28f6834defd8eab8bee486df994c008c23603d06", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, 
"license_type": "permissive", "max_line_length": 65, "num_lines": 5, "path": "/helloworld.py", "repo_name": "privm/TGbotsimpleaf", "src_encoding": "UTF-8", "text": "from telegram import Update\nfrom telegram.ext import Updater, CommandHandler, CallbackContext\n\ndef helloworld(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Hello world!')\n \n" } ]
3
dirmyfirst/linear-regression
https://github.com/dirmyfirst/linear-regression
8205641720e05e90e437b0c712c8acbdb85ef2b5
18c4ec8e04abc5c9541c64bf1c3e0ea93ea66c64
fded4b32b83217e4103e69641a308c044f7352c4
refs/heads/master
2020-04-09T08:31:43.409546
2018-12-03T14:03:02
2018-12-03T14:03:02
160,197,517
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6660447716712952, "alphanum_fraction": 0.6884328126907349, "avg_line_length": 19.538461685180664, "blob_id": "34b0db59e08dafd41ed83f78302ee84990d013f4", "content_id": "3dca4651378b9177e40f9f2c686359584ade84fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 746, "license_type": "no_license", "max_line_length": 63, "num_lines": 26, "path": "/README.md", "repo_name": "dirmyfirst/linear-regression", "src_encoding": "UTF-8", "text": "# linear-regression\n简单的线性回归学习 \n将每次的机器学习记录上传,记录学习过程 \n\nimport pandas as pd\n\n#读取数据\npath = r'test.xlsx'\ndf = pd.read_excel(path)\nprint(df)\n\n#确定自变量为x和因变量为y\nx = df[['活动推广费']]\ny = df[['销售额']]\n\n#导入线性回归模型\nfrom sklearn.linear_model import LinearRegression\n\nlinreg = LinearRegression() #建立模型 \nlinreg.fit(x,y) #训练模型 \nprint(linreg.score(x,y)) #模型评估 \nprint(linreg.predict([[60]])) #模型预测,导入60成活动推广费用,预测可以达到多少销售额 \n\nprint(linreg.intercept_[0]) #查看截距 \nprint(linreg.coef_[0][0]) #查看参数 \nprint(linreg.intercept_[0] + linreg.coef_[0][0] * 60) \n" }, { "alpha_fraction": 0.6466346383094788, "alphanum_fraction": 0.6706730723381042, "avg_line_length": 17.909090042114258, "blob_id": "c46604b1bdb305931e74611a2c388220d26f155d", "content_id": "3d77c33bb72e9d61b5729f33d586bd77e7db0a93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 480, "license_type": "no_license", "max_line_length": 53, "num_lines": 22, "path": "/简单线性回归.py", "repo_name": "dirmyfirst/linear-regression", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\npath = r'test.xlsx'\ndf = pd.read_excel(path)\nprint(df)\n\nx = df[['活动推广费']]\n\ny = df[['销售额']]\n\nfrom sklearn.linear_model import LinearRegression\n\nlinreg = LinearRegression() #建立模型\nlinreg.fit(x,y) #训练模型\nprint(linreg.score(x,y)) #模型评估\nprint(linreg.predict([[60]])) #模型预测\n\n\nprint(linreg.intercept_[0]) #查看截距\nprint(linreg.coef_[0][0]) ##查看参数\nprint(linreg.intercept_[0] + 
linreg.coef_[0][0] * 60)\n" } ]
2
evanshort73/und
https://github.com/evanshort73/und
8b66197ef9e9709e835df5b57ee8752e9b850887
34d320cb4475a914af1d870848ffc35636516305
af79fa4f7a3c167e9a25baf8410139fd899234f5
refs/heads/master
2020-12-25T18:44:00.007064
2017-06-11T07:57:31
2017-06-11T07:57:31
93,990,196
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.837837815284729, "alphanum_fraction": 0.837837815284729, "avg_line_length": 36, "blob_id": "0a8461faef20ebad9763ded5d737eb6336374981", "content_id": "d825aeb753a9160d599607ef7e854427f29847e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 74, "license_type": "permissive", "max_line_length": 67, "num_lines": 2, "path": "/README.md", "repo_name": "evanshort73/und", "src_encoding": "UTF-8", "text": "# und\nconvenience functions for generating and analyzing sound with numpy\n" }, { "alpha_fraction": 0.5742341876029968, "alphanum_fraction": 0.6114130616188049, "avg_line_length": 29.749019622802734, "blob_id": "3f1f18016bbe432b66cd367b4d76a312037e0ce3", "content_id": "5c3eca2bcbf7dd752e87b265eabd9b247855526b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8096, "license_type": "permissive", "max_line_length": 101, "num_lines": 255, "path": "/und.py", "repo_name": "evanshort73/und", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy.io import wavfile\r\nfrom scipy.signal import argrelmax\r\nimport hashlib\r\nimport os\r\nfrom codenamize import codenamize\r\nfrom scipy import signal\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\ndef concat(*args):\r\n return np.concatenate(args, axis=-1)\r\n\r\ndef constant(value, duration, rate=44100, channels=1):\r\n samples = int(np.round(duration * rate))\r\n shape = (channels, samples) if channels > 1 else samples\r\n return np.full(shape, value, dtype=float)\r\n\r\ndef crossfade(a, b, phase):\r\n assert a.shape == b.shape == phase.shape\r\n assert 0 <= phase.min()\r\n assert phase.max() <= 1\r\n return a * (1 - phase) + b * phase\r\n\r\ndef line(v1, v2, duration, rate=44100):\r\n samples = int(np.round(duration * rate))\r\n return np.linspace(v1, v2, num=samples, endpoint=False)\r\n\r\ndef logline(v1, v2, duration, rate=44100):\r\n x = 
line(np.log(v1), np.log(v2), duration, rate)\r\n np.exp(x, out = x)\r\n return x\r\n\r\ndef lowpass(x, cutoff, order=5, rate=44100):\r\n nyquist = 0.5 * rate\r\n normalized_cutoff = cutoff / nyquist\r\n b, a = signal.butter(order, normalized_cutoff, btype=\"low\")\r\n return signal.lfilter(b, a, x)\r\n\r\ndef gaussian(duration, rate=44100):\r\n samples = int(np.round(duration * rate))\r\n return np.random.normal(0, 1, samples)\r\n\r\ndef exponential(scale, cutoff=10):\r\n # samples are truncated at cutoff * scale\r\n chosen = np.random.random(scale.shape)\r\n return -np.log(1 - (1 - np.exp(-cutoff)) * chosen) * scale\r\n\r\ndef exponential_poisson(spike_rate, rate=44100, cutoff=10):\r\n p = spike_rate / rate\r\n neverland = p < 1e-10\r\n p[neverland] = 0.5\r\n result = exponential(-1 / np.log(p), cutoff)\r\n result[neverland] = 0\r\n result[result < 1] = 0\r\n return result\r\n\r\ndef poisson(lam, rate=44100):\r\n warped_duration = lam.sum()\r\n chosen = np.empty(0)\r\n n = 1\r\n while chosen.sum() < warped_duration:\r\n chosen = concat(chosen, np.random.exponential(rate, n))\r\n n *= 2\r\n times = np.searchsorted(np.cumsum(lam), np.cumsum(chosen))\r\n result = np.zeros_like(lam)\r\n result[times[times < len(lam)]] = 1\r\n return result\r\n\r\ndef poisson(lam, rate=44100, interval_offset=0):\r\n t_offset = interval_offset * rate\r\n result = np.zeros_like(lam)\r\n for i in np.ndindex(*lam.shape[1:]):\r\n j = (np.s_[:],) + i\r\n f_table = concat([0], np.cumsum(lam[j]))\r\n def f(t):\r\n k = np.clip(int(t), 0, len(lam) - 1)\r\n phase = np.clip(t - k, 0, 1)\r\n return f_table[k] + lam[(k,) + i] * phase\r\n def af(s):\r\n k = np.searchsorted(f_table, s) - 1\r\n if k < 0:\r\n return 0\r\n if k >= len(lam):\r\n return len(lam)\r\n phase = np.clip((s - f_table[k]) / lam[(k,) + i], 0, 1)\r\n return k + phase\r\n times = []\r\n t = af(np.random.exponential(rate)) + interval_offset\r\n while t < len(lam):\r\n times.append(t)\r\n t = af(f(t + t_offset) + 
np.random.exponential(rate))\r\n result[(np.array(times, dtype=int),) + i] = 1\r\n return result\r\n\r\ndef local_max(x, min_interval=0, rate=44100):\r\n result = np.zeros_like(x)\r\n max_indices = argrelmax(x, order=int(max(min_interval * rate, 1)))\r\n result[max_indices] = x[max_indices]\r\n return result\r\n\r\ndef flip_coins(p):\r\n return np.random.random(p.shape) < p\r\n\r\ndef fade(duration, start=0, stop=1, rate=44100):\r\n samples = int(duration * rate)\r\n run = start - stop\r\n return np.clip(np.linspace(start / run, (start - 1) / run, samples), 0, 1)\r\n\r\ndef sinusoid(f, phase=0.25, rate=44100):\r\n x = np.empty_like(f, dtype=float)\r\n x[0] = phase * rate\r\n x[1:] = f[:-1]\r\n np.cumsum(x, out = x)\r\n x *= 2 * np.pi / rate\r\n np.cos(x, out = x)\r\n return x\r\n\r\ndef apply_reverb(x, reverb, origin=0):\r\n result = np.empty_like(x)\r\n for i in np.ndindex(*x.shape[1:]):\r\n j = (np.s_[:],) + i\r\n result[j] = np.convolve(x[j], reverb)[origin:len(x) + origin]\r\n return result\r\n\r\ndef apply_blur(x, blur):\r\n assert len(blur) % 2\r\n return apply_reverb(x, blur, origin=len(blur) // 2)\r\n\r\ndef gaussian_window(sd, rate=44100, threshold=1e-6):\r\n # sd is in seconds. 
the window generally sums to 1 but for small sd the\r\n # single center value can be greater than 1\r\n y_scale = 1 / (np.sqrt(2 * np.pi) * sd * rate)\r\n cutoff = int(sd * rate * np.sqrt(2 * np.log(y_scale / threshold)) + 1)\r\n x_scale = cutoff / (sd * rate)\r\n arg = np.linspace(-x_scale, x_scale, 2 * cutoff + 1)\r\n return y_scale * np.exp(-0.5 * arg * arg)\r\n\r\nmiddle_c = 220 * 2 ** 0.25\r\n\r\n#import matplotlib.pyplot as plt; x = saw1(0.01, 300, rate=1); plt.plot(x); plt.show()\r\ndef saw1(f, duration, phase=0, rate=44100):\r\n x = np.zeros(int(round(duration * rate)))\r\n for i in range(1, int(0.5 * rate / f) + 1):\r\n start = 2 * np.pi * i * phase\r\n stop = 2 * np.pi * i * (f * len(x) / rate + phase)\r\n factor = (1 - 2 * (i % 2)) / i\r\n x += np.sin(np.linspace(start, stop, num=len(x), endpoint=False)) * factor\r\n x *= -2 / np.pi\r\n return x\r\n\r\n#import matplotlib.pyplot as plt; x = square1(0.01, 300, rate=1); plt.plot(x); plt.show()\r\ndef square1(f, duration, phase=0, rate=44100):\r\n x = np.zeros(int(round(duration * rate)))\r\n for i in range(1, int(0.5 * rate / f) + 1, 2):\r\n start = 2 * np.pi * i * phase\r\n stop = 2 * np.pi * i * (f * len(x) / rate + phase)\r\n x += np.sin(np.linspace(start, stop, num=len(x), endpoint=False)) / i\r\n x *= 4 / np.pi\r\n return x\r\n\r\n#import matplotlib.pyplot as plt; x = square2(0.01, 300, pulse=0.25, rate=1); plt.plot(x); plt.show()\r\ndef square2(f, duration, pulse=0.5, phase=0, rate=44100):\r\n x = np.zeros(int(round(duration * rate)))\r\n for i in range(1, int(0.5 * rate / f) + 1):\r\n start = 2 * np.pi * i * (phase - 0.5 * pulse)\r\n stop = 2 * np.pi * i * (f * len(x) / rate + phase - 0.5 * pulse)\r\n factor = -4 * np.sin(np.pi * i * pulse) / (np.pi * i)\r\n x += np.cos(np.linspace(start, stop, num=len(x), endpoint=False)) * factor\r\n return x\r\n\r\ndef int16_normalize(data):\r\n if np.iscomplexobj(data):\r\n raise TypeError()\r\n normalized = data.T * (32767 / max(np.max(data), -np.min(data)))\r\n 
np.round(normalized, out=normalized)\r\n return normalized.astype(np.int16)\r\n\r\ndef get_codename(normalized):\r\n return codenamize(\r\n hashlib.sha1(normalized.ravel().view(np.uint8)).hexdigest()\r\n )\r\n\r\nlast_saved = float(\"-inf\")\r\n\r\ndef save_helper(normalized, filename, rate):\r\n global last_saved\r\n if not os.path.exists(\"sounds\"):\r\n os.makedirs(\"sounds\")\r\n if time.time() - last_saved < 1:\r\n time.sleep(1)\r\n wavfile.write(\"sounds/\" + filename, rate, normalized)\r\n last_saved = time.time()\r\n\r\ndef channel_iter(*args):\r\n return zip(*[x.reshape((-1, x.shape[-1])) for x in args])\r\n\r\ndef get_spectrum(x):\r\n spectrum = np.empty(x.shape[:-1] + (x.shape[-1] // 2 + 1,))\r\n for channel, spectrum_channel in channel_iter(x, spectrum):\r\n spectrum_channel[:] = np.abs(np.fft.rfft(channel))\r\n return spectrum\r\n\r\ndef save(data, rate=44100):\r\n normalized = int16_normalize(data)\r\n filename = get_codename(normalized) + \".wav\"\r\n save_helper(normalized, filename, rate)\r\n return filename\r\n\r\ndef save_spectrum(data, rate=44100):\r\n filename = get_codename(int16_normalize(data)) + \"_s.wav\"\r\n save_helper(int16_normalize(get_spectrum(data)), filename, rate)\r\n return filename\r\n\r\ndef save_fft(data, channel=None, rate=44100):\r\n if type(channel) is int:\r\n channel = [channel]\r\n if channel is None:\r\n assert data.ndim == 1\r\n suffix = \"\"\r\n else:\r\n assert data.ndim == 2\r\n suffix = \",\".join(str(i) for i in channel)\r\n filename = get_codename(int16_normalize(data)) + \"_f\" + suffix + \".wav\"\r\n fft = np.fft.rfft(data[channel].mean(axis=0))\r\n fft = np.array([fft.real, fft.imag])\r\n save_helper(int16_normalize(fft), filename, rate)\r\n return filename\r\n\r\ndef load(filename, rate=44100):\r\n actual_rate, x = wavfile.read(filename)\r\n assert actual_rate == rate\r\n assert x.dtype is np.int16\r\n x = x.T.astype(float)\r\n x /= 32768\r\n return x\r\n\r\ndef show(*args):\r\n for x in args:\r\n 
plt.plot(x)\r\n plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n np.random.seed(0)\r\n poisson(constant(10, 1))\r\n \r\n np.random.seed(0)\r\n\r\n #print(save(\r\n # sinusoid(\r\n # lowpass(gaussian(2), 200) * 200 + 440\r\n # )\r\n #))\r\n\r\n #input()\r\n" } ]
2
openstack/rally-openstack
https://github.com/openstack/rally-openstack
a1b9d778b1f0ac6252a9e1bb79903659ba265f51
9ff67887bf848c5966bb4a2f37018500d30dbe45
b9136249c7ceca23f4c1931c7245cdca7fa7b791
refs/heads/master
2023-08-24T01:25:56.543858
2023-08-20T14:36:19
2023-08-20T16:11:57
122,128,286
41
50
Apache-2.0
2018-02-19T22:24:48
2021-08-09T18:01:59
2021-08-16T07:28:09
Python
[ { "alpha_fraction": 0.6967535018920898, "alphanum_fraction": 0.7022832632064819, "avg_line_length": 42.123077392578125, "blob_id": "1003b8cff9010277c9b52300fe329a2c860bf484", "content_id": "781751d4b9058381413de42901a1e5a9812729bb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5606, "license_type": "permissive", "max_line_length": 78, "num_lines": 130, "path": "/rally_openstack/task/scenarios/barbican/secrets.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport base64\nimport datetime as dt\nimport os\n\nfrom rally.task import validation\nfrom rally.utils import encodeutils\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.barbican import utils\n\n\"\"\"Scenarios for Barbican secrets.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"BarbicanSecrets.list\")\nclass BarbicanSecretsList(utils.BarbicanBase):\n def run(self):\n \"\"\"List secrets.\"\"\"\n self.admin_barbican.list_secrets()\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"barbican\"]},\n name=\"BarbicanSecrets.create\")\nclass BarbicanSecretsCreate(utils.BarbicanBase):\n def run(self):\n \"\"\"Create secret.\"\"\"\n self.admin_barbican.create_secret()\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"barbican\"]},\n name=\"BarbicanSecrets.create_and_delete\")\nclass BarbicanSecretsCreateAndDelete(utils.BarbicanBase):\n def run(self):\n \"\"\"Create and Delete secret.\"\"\"\n secret = self.admin_barbican.create_secret()\n self.admin_barbican.delete_secret(secret.secret_ref)\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"barbican\"]},\n name=\"BarbicanSecrets.create_and_get\")\nclass BarbicanSecretsCreateAndGet(utils.BarbicanBase):\n def 
run(self):\n \"\"\"Create and Get Secret.\"\"\"\n secret = self.admin_barbican.create_secret()\n self.assertTrue(secret)\n secret_info = self.admin_barbican.get_secret(secret.secret_ref)\n self.assertEqual(secret.secret_ref, secret_info.secret_ref)\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"barbican\"]},\n name=\"BarbicanSecrets.get\")\nclass BarbicanSecretsGet(utils.BarbicanBase):\n def run(self, secret_ref=None):\n \"\"\"Create and Get Secret.\n\n :param secret_ref: Name of the secret to get\n \"\"\"\n if secret_ref is None:\n secret = self.admin_barbican.create_secret()\n self.admin_barbican.get_secret(secret.secret_ref)\n else:\n self.admin_barbican.get_secret(secret_ref)\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"barbican\"]},\n name=\"BarbicanSecrets.create_and_list\")\nclass BarbicanSecretsCreateAndList(utils.BarbicanBase):\n def run(self):\n \"\"\"Create and then list all secrets.\"\"\"\n self.admin_barbican.create_secret()\n self.admin_barbican.list_secrets()\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"barbican\"]},\n name=\"BarbicanSecrets.create_symmetric_and_delete\")\nclass BarbicanSecretsCreateSymmetricAndDelete(utils.BarbicanBase):\n def run(self, payload, algorithm, bit_length, mode):\n \"\"\"Create and delete symmetric secret\n\n :param payload: The unecrypted data\n :param algorithm: the algorithm associated with the secret key\n :param bit_length: the big length of the secret key\n :param mode: the algorithm mode used with the secret 
key\n \"\"\"\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives import hashes\n from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\n\n payload = encodeutils.safe_encode(payload)\n salt = os.urandom(16)\n kdf = PBKDF2HMAC(\n algorithm=hashes.SHA256(), length=32, salt=salt,\n iterations=1000, backend=default_backend())\n payload = base64.b64encode(kdf.derive(payload))\n payload = encodeutils.safe_decode(payload)\n expire_time = (dt.datetime.utcnow() + dt.timedelta(days=5))\n secret = self.admin_barbican.create_secret(\n expiration=expire_time.isoformat(), algorithm=algorithm,\n bit_length=bit_length, mode=mode, payload=payload,\n payload_content_type=\"application/octet-stream\",\n payload_content_encoding=\"base64\")\n self.admin_barbican.delete_secret(secret.secret_ref)\n" }, { "alpha_fraction": 0.7293814420700073, "alphanum_fraction": 0.7293814420700073, "avg_line_length": 34.272727966308594, "blob_id": "6c7d75c9da136b22a68bdd7540b011de162b3b00", "content_id": "8fb83bb1a8711f84d7026ade2662ecbf38c2a5ad", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 388, "license_type": "permissive", "max_line_length": 89, "num_lines": 11, "path": "/samples/tasks/support/README.rst", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "instance_linpack.sh\n===================\n\ninstance_linpack.sh, will kick off a CPU intensive workload within an OpenStack instance.\nThis script will return the avg gflops and max gflops Linpack reports in a JSON format.\nTo run this workload, the VM must have linpack installed prior to running.\n\ninstance_test.sh\n================\n\nThe script was absorbed by VMTasks.dd_load_test scenario.\n" }, { "alpha_fraction": 0.6584692001342773, "alphanum_fraction": 0.6645186543464661, "avg_line_length": 38.60416793823242, "blob_id": "10639f49d2bdedf927e4c09df4882356b0b03177", "content_id": 
"7cbb5af158ceff48d76c57ae8e6ad36193cc2168", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7604, "license_type": "permissive", "max_line_length": 79, "num_lines": 192, "path": "/tests/unit/task/scenarios/mistral/test_executions.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016: Nokia Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.mistral import executions\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.scenarios.mistral.executions\"\nMISTRAL_WBS_BASE = \"rally_openstack.task.scenarios.mistral.workbooks\"\n\n\nWB_DEFINITION = \"\"\"---\nversion: 2.0\nname: wb\nworkflows:\n wf1:\n type: direct\n tasks:\n noop_task:\n action: std.noop\n wf2:\n type: direct\n tasks:\n noop_task:\n action: std.noop\n wf3:\n type: direct\n tasks:\n noop_task:\n action: std.noop\n wf4:\n type: direct\n tasks:\n noop_task:\n action: std.noop\n\"\"\"\n\nWB_DEF_ONE_WF = \"\"\"---\nversion: 2.0\nname: wb\nworkflows:\n wf1:\n type: direct\n tasks:\n noop_task:\n action: std.noop\n\"\"\"\n\nPARAMS_EXAMPLE = {\"env\": {\"env_param\": \"env_param_value\"}}\nINPUT_EXAMPLE = \"\"\"{\"input1\": \"value1\", \"some_json_input\": {\"a\": \"b\"}}\"\"\"\n\nWB = type(\"obj\", (object,), {\"name\": \"wb\", \"definition\": WB_DEFINITION})()\nWB_ONE_WF = (\n type(\"obj\", 
(object,), {\"name\": \"wb\", \"definition\": WB_DEF_ONE_WF})()\n)\n\n\nclass MistralExecutionsTestCase(test.ScenarioTestCase):\n\n @mock.patch(\"%s.ListExecutions._list_executions\" % BASE)\n def test_list_executions(self, mock__list_executions):\n executions.ListExecutions(self.context).run()\n self.assertEqual(1, mock__list_executions.called)\n\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_execution\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_workbook\" % BASE,\n return_value=WB)\n def test_create_execution(self, mock__create_workbook,\n mock__create_execution):\n\n executions.CreateExecutionFromWorkbook(self.context).run(WB_DEFINITION)\n\n self.assertEqual(1, mock__create_workbook.called)\n self.assertEqual(1, mock__create_execution.called)\n\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_execution\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_workbook\" % BASE,\n return_value=WB)\n def test_create_execution_with_input(self, mock__create_workbook,\n mock__create_execution):\n\n executions.CreateExecutionFromWorkbook(self.context).run(\n WB_DEFINITION, wf_input=INPUT_EXAMPLE)\n\n self.assertEqual(1, mock__create_workbook.called)\n self.assertEqual(1, mock__create_execution.called)\n\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_execution\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_workbook\" % BASE,\n return_value=WB)\n @mock.patch(\"json.loads\", return_value=PARAMS_EXAMPLE)\n def test_create_execution_with_params(self, mock_loads,\n mock__create_workbook,\n mock__create_execution):\n\n executions.CreateExecutionFromWorkbook(self.context).run(\n WB_DEFINITION, params=str(PARAMS_EXAMPLE))\n\n self.assertEqual(1, mock_loads.called)\n self.assertEqual(1, mock__create_workbook.called)\n self.assertEqual(1, mock__create_execution.called)\n\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_execution\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_workbook\" % 
BASE,\n return_value=WB)\n def test_create_execution_with_wf_name(self, mock__create_workbook,\n mock__create_execution):\n\n executions.CreateExecutionFromWorkbook(self.context).run(\n WB_DEFINITION, \"wf4\")\n\n self.assertEqual(1, mock__create_workbook.called)\n self.assertEqual(1, mock__create_execution.called)\n\n # we concatenate workbook name with the workflow name in the test\n # the workbook name is not random because we mock the method that\n # adds the random part\n mock__create_execution.assert_called_once_with(\"wb.wf4\", None,)\n\n @mock.patch(\"%s.CreateExecutionFromWorkbook._delete_execution\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._delete_workbook\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_execution\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_workbook\" % BASE,\n return_value=WB)\n def test_create_delete_execution(\n self, mock__create_workbook, mock__create_execution,\n mock__delete_workbook, mock__delete_execution):\n\n executions.CreateExecutionFromWorkbook(self.context).run(\n WB_DEFINITION, do_delete=True)\n\n self.assertEqual(1, mock__create_workbook.called)\n self.assertEqual(1, mock__create_execution.called)\n self.assertEqual(1, mock__delete_workbook.called)\n self.assertEqual(1, mock__delete_execution.called)\n\n @mock.patch(\"%s.CreateExecutionFromWorkbook._delete_execution\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._delete_workbook\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_execution\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_workbook\" % BASE,\n return_value=WB)\n def test_create_delete_execution_with_wf_name(\n self, mock__create_workbook, mock__create_execution,\n mock__delete_workbook, mock__delete_execution):\n\n executions.CreateExecutionFromWorkbook(self.context).run(\n WB_DEFINITION, \"wf4\", do_delete=True)\n\n self.assertEqual(1, mock__create_workbook.called)\n self.assertEqual(1, 
mock__create_execution.called)\n self.assertEqual(1, mock__delete_workbook.called)\n self.assertEqual(1, mock__delete_execution.called)\n\n # we concatenate workbook name with the workflow name in the test\n # the workbook name is not random because we mock the method that\n # adds the random part\n mock__create_execution.assert_called_once_with(\"wb.wf4\", None)\n\n @mock.patch(\"%s.CreateExecutionFromWorkbook._delete_execution\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._delete_workbook\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_execution\" % BASE)\n @mock.patch(\"%s.CreateExecutionFromWorkbook._create_workbook\" % BASE,\n return_value=WB_ONE_WF)\n def test_create_delete_execution_without_wf_name(\n self, mock__create_workbook, mock__create_execution,\n mock__delete_workbook, mock__delete_execution):\n\n executions.CreateExecutionFromWorkbook(self.context).run(\n WB_DEF_ONE_WF, do_delete=True)\n\n self.assertEqual(1, mock__create_workbook.called)\n self.assertEqual(1, mock__create_execution.called)\n self.assertEqual(1, mock__delete_workbook.called)\n self.assertEqual(1, mock__delete_execution.called)\n\n # we concatenate workbook name with the workflow name in the test\n # the workbook name is not random because we mock the method that\n # adds the random part\n mock__create_execution.assert_called_once_with(\"wb.wf1\", None)\n" }, { "alpha_fraction": 0.5617510080337524, "alphanum_fraction": 0.5628991723060608, "avg_line_length": 40.47321319580078, "blob_id": "97f6f654e827d450516c0919f88e61da57670860", "content_id": "a382ebd90175f654ee29cff33c4719a9176fd7c1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13935, "license_type": "permissive", "max_line_length": 79, "num_lines": 336, "path": "/rally_openstack/task/contexts/keystone/users.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All 
Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport collections\nimport copy\nimport uuid\n\nfrom rally.common import broker\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import credential\nfrom rally_openstack.common import osclients\nfrom rally_openstack.common.services.identity import identity\nfrom rally_openstack.common.services.network import neutron\nfrom rally_openstack.task import context\n\n\nLOG = logging.getLogger(__name__)\n\nCONF = cfg.CONF\n\nRESOURCE_MANAGEMENT_WORKERS_DESCR = (\n \"The number of concurrent threads to use for serving users context.\")\nPROJECT_DOMAIN_DESCR = \"ID of domain in which projects will be created.\"\nUSER_DOMAIN_DESCR = \"ID of domain in which users will be created.\"\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"users\", platform=\"openstack\", order=100)\nclass UserGenerator(context.OpenStackContext):\n \"\"\"Creates specified amount of keystone users and tenants.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"anyOf\": [\n {\"description\": \"Create new temporary users and tenants.\",\n \"properties\": {\n \"tenants\": {\n \"type\": \"integer\",\n \"minimum\": 1,\n \"description\": \"The number of tenants to create.\"\n },\n 
\"users_per_tenant\": {\n \"type\": \"integer\",\n \"minimum\": 1,\n \"description\": \"The number of users to create per one \"\n \"tenant.\"},\n \"user_password\": {\n \"type\": \"string\",\n \"description\": \"Specify custom user password instead of \"\n \"randomly generated in case of password \"\n \"requirements.\"},\n \"resource_management_workers\": {\n \"type\": \"integer\",\n \"minimum\": 1,\n \"description\": RESOURCE_MANAGEMENT_WORKERS_DESCR},\n \"project_domain\": {\n \"type\": \"string\",\n \"description\": PROJECT_DOMAIN_DESCR},\n \"user_domain\": {\n \"type\": \"string\",\n \"description\": USER_DOMAIN_DESCR},\n \"user_choice_method\": {\n \"$ref\": \"#/definitions/user_choice_method\"}},\n \"additionalProperties\": False},\n # TODO(andreykurilin): add ability to specify users here.\n {\"description\": \"Use existing users and tenants.\",\n \"properties\": {\n \"user_choice_method\": {\n \"$ref\": \"#/definitions/user_choice_method\"}\n },\n \"additionalProperties\": False}\n ],\n \"definitions\": {\n \"user_choice_method\": {\n \"enum\": [\"random\", \"round_robin\"],\n \"description\": \"The mode of balancing usage of users between \"\n \"scenario iterations.\"}\n\n }\n }\n\n DEFAULT_CONFIG = {\"user_choice_method\": \"random\"}\n\n DEFAULT_FOR_NEW_USERS = {\n \"tenants\": 1,\n \"users_per_tenant\": 1,\n \"resource_management_workers\":\n cfg.CONF.openstack.users_context_resource_management_workers,\n }\n\n def __init__(self, context):\n super(UserGenerator, self).__init__(context)\n\n creds = self.env[\"platforms\"][\"openstack\"]\n if creds.get(\"admin\"):\n admin_cred = copy.deepcopy(creds[\"admin\"])\n api_info = copy.deepcopy(creds.get(\"api_info\", {}))\n if \"api_info\" in admin_cred:\n api_info.update(creds[\"admin\"][\"api_info\"])\n admin_cred[\"api_info\"] = api_info\n context[\"admin\"] = {\n \"credential\": credential.OpenStackCredential(**admin_cred)\n }\n\n if creds[\"users\"] and not (set(self.config) - {\"user_choice_method\"}):\n 
self.existing_users = creds[\"users\"]\n else:\n self.existing_users = []\n self.credential = context[\"admin\"][\"credential\"]\n project_domain = (self.credential[\"project_domain_name\"]\n or cfg.CONF.openstack.project_domain)\n user_domain = (self.credential[\"user_domain_name\"]\n or cfg.CONF.openstack.user_domain)\n self.DEFAULT_FOR_NEW_USERS[\"project_domain\"] = project_domain\n self.DEFAULT_FOR_NEW_USERS[\"user_domain\"] = user_domain\n with self.config.unlocked():\n for key, value in self.DEFAULT_FOR_NEW_USERS.items():\n self.config.setdefault(key, value)\n\n def _create_tenants(self, threads):\n tenants = collections.deque()\n\n def publish(queue):\n for i in range(self.config[\"tenants\"]):\n args = (self.config[\"project_domain\"], self.task[\"uuid\"], i)\n queue.append(args)\n\n def consume(cache, args):\n domain, task_id, i = args\n if \"client\" not in cache:\n clients = osclients.Clients(self.credential)\n cache[\"client\"] = identity.Identity(\n clients, name_generator=self.generate_random_name)\n tenant = cache[\"client\"].create_project(domain_name=domain)\n tenant_dict = {\"id\": tenant.id, \"name\": tenant.name, \"users\": []}\n tenants.append(tenant_dict)\n\n # NOTE(msdubov): consume() will fill the tenants list in the closure.\n broker.run(publish, consume, threads)\n tenants_dict = {}\n for t in tenants:\n tenants_dict[t[\"id\"]] = t\n\n return tenants_dict\n\n def _create_users(self, threads):\n # NOTE(msdubov): This should be called after _create_tenants().\n users_per_tenant = self.config[\"users_per_tenant\"]\n default_role = cfg.CONF.openstack.keystone_default_role\n\n users = collections.deque()\n\n def publish(queue):\n for tenant_id in self.context[\"tenants\"]:\n for user_id in range(users_per_tenant):\n username = self.generate_random_name()\n password = (str(uuid.uuid4())\n if self.config.get(\"user_password\") is None\n else self.config[\"user_password\"])\n args = (username, password, self.config[\"project_domain\"],\n 
self.config[\"user_domain\"], tenant_id)\n queue.append(args)\n\n def consume(cache, args):\n username, password, project_dom, user_dom, tenant_id = args\n if \"client\" not in cache:\n clients = osclients.Clients(self.credential)\n cache[\"client\"] = identity.Identity(\n clients, name_generator=self.generate_random_name)\n client = cache[\"client\"]\n user = client.create_user(username, password=password,\n project_id=tenant_id,\n domain_name=user_dom,\n default_role=default_role)\n user_credential = credential.OpenStackCredential(\n auth_url=self.credential[\"auth_url\"],\n username=user.name,\n password=password,\n tenant_name=self.context[\"tenants\"][tenant_id][\"name\"],\n permission=consts.EndpointPermission.USER,\n project_domain_name=project_dom,\n user_domain_name=user_dom,\n endpoint_type=self.credential[\"endpoint_type\"],\n https_insecure=self.credential[\"https_insecure\"],\n https_cacert=self.credential[\"https_cacert\"],\n region_name=self.credential[\"region_name\"],\n profiler_hmac_key=self.credential[\"profiler_hmac_key\"],\n profiler_conn_str=self.credential[\"profiler_conn_str\"],\n api_info=self.credential[\"api_info\"])\n users.append({\"id\": user.id,\n \"credential\": user_credential,\n \"tenant_id\": tenant_id})\n\n # NOTE(msdubov): consume() will fill the users list in the closure.\n broker.run(publish, consume, threads)\n return list(users)\n\n def create_users(self):\n \"\"\"Create tenants and users, using the broker pattern.\"\"\"\n\n threads = min(self.config[\"resource_management_workers\"],\n self.config[\"tenants\"])\n\n LOG.debug(\"Creating %(tenants)d tenants using %(threads)s threads\"\n % {\"tenants\": self.config[\"tenants\"], \"threads\": threads})\n self.context[\"tenants\"] = self._create_tenants(threads)\n\n if len(self.context[\"tenants\"]) < self.config[\"tenants\"]:\n raise exceptions.ContextSetupFailure(\n ctx_name=self.get_name(),\n msg=\"Failed to create the requested number of tenants.\")\n\n users_num = 
self.config[\"users_per_tenant\"] * self.config[\"tenants\"]\n threads = min(self.config[\"resource_management_workers\"], users_num)\n LOG.debug(\"Creating %(users)d users using %(threads)s threads\"\n % {\"users\": users_num, \"threads\": threads})\n self.context[\"users\"] = self._create_users(threads)\n for user in self.context[\"users\"]:\n self.context[\"tenants\"][user[\"tenant_id\"]][\"users\"].append(user)\n\n if len(self.context[\"users\"]) < users_num:\n raise exceptions.ContextSetupFailure(\n ctx_name=self.get_name(),\n msg=\"Failed to create the requested number of users.\")\n\n def use_existing_users(self):\n LOG.debug(\"Using existing users for OpenStack platform.\")\n api_info = copy.deepcopy(self.env[\"platforms\"][\"openstack\"].get(\n \"api_info\", {}))\n for user_credential in self.existing_users:\n user_credential = copy.deepcopy(user_credential)\n if \"api_info\" in user_credential:\n api_info.update(user_credential[\"api_info\"])\n user_credential[\"api_info\"] = api_info\n user_credential = credential.OpenStackCredential(**user_credential)\n user_clients = osclients.Clients(user_credential)\n user_id = user_clients.keystone.auth_ref.user_id\n tenant_id = user_clients.keystone.auth_ref.project_id\n\n if tenant_id not in self.context[\"tenants\"]:\n self.context[\"tenants\"][tenant_id] = {\n \"id\": tenant_id,\n \"name\": user_credential.tenant_name\n }\n\n self.context[\"users\"].append({\n \"credential\": user_credential,\n \"id\": user_id,\n \"tenant_id\": tenant_id\n })\n\n def setup(self):\n self.context[\"users\"] = []\n self.context[\"tenants\"] = {}\n self.context[\"user_choice_method\"] = self.config[\"user_choice_method\"]\n\n if self.existing_users:\n self.use_existing_users()\n else:\n self.create_users()\n\n def _remove_default_security_group(self):\n \"\"\"Delete default security group for tenants.\"\"\"\n\n admin_client = neutron.NeutronService(\n clients=osclients.Clients(self.credential),\n atomic_inst=self.atomic_actions()\n 
)\n\n if not admin_client.supports_extension(\"security-group\", silent=True):\n LOG.debug(\"Security group context is disabled.\")\n return\n\n security_groups = admin_client.list_security_groups(name=\"default\")\n for security_group in security_groups:\n if security_group[\"tenant_id\"] not in self.context[\"tenants\"]:\n continue\n admin_client.delete_security_group(security_group[\"id\"])\n\n def _get_consumer_for_deletion(self, func_name):\n def consume(cache, resource_id):\n if \"client\" not in cache:\n clients = osclients.Clients(self.credential)\n cache[\"client\"] = identity.Identity(clients)\n getattr(cache[\"client\"], func_name)(resource_id)\n return consume\n\n def _delete_tenants(self):\n threads = self.config[\"resource_management_workers\"]\n\n def publish(queue):\n for tenant_id in self.context[\"tenants\"]:\n queue.append(tenant_id)\n\n broker.run(publish, self._get_consumer_for_deletion(\"delete_project\"),\n threads)\n self.context[\"tenants\"] = {}\n\n def _delete_users(self):\n threads = self.config[\"resource_management_workers\"]\n\n def publish(queue):\n for user in self.context[\"users\"]:\n queue.append(user[\"id\"])\n\n broker.run(publish, self._get_consumer_for_deletion(\"delete_user\"),\n threads)\n self.context[\"users\"] = []\n\n def cleanup(self):\n \"\"\"Delete tenants and users, using the broker pattern.\"\"\"\n if self.existing_users:\n # nothing to do here.\n return\n else:\n self._remove_default_security_group()\n self._delete_users()\n self._delete_tenants()\n" }, { "alpha_fraction": 0.5623027086257935, "alphanum_fraction": 0.5645573139190674, "avg_line_length": 35.55494689941406, "blob_id": "b9f4d1dfb9a7d74c8e3879143aa55165aacb17db", "content_id": "edc761275b42d33b6e97ca20fb42cfc9589af342", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6653, "license_type": "permissive", "max_line_length": 78, "num_lines": 182, "path": 
"/tests/unit/task/contexts/sahara/test_sahara_image.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\nfrom rally_openstack.task.contexts.sahara import sahara_image\nfrom tests.unit import fakes\nfrom tests.unit import test\n\n\nBASE_CTX = \"rally.task.context\"\nCTX = \"rally_openstack.task.contexts.sahara.sahara_image\"\nBASE_SCN = \"rally.task.scenarios\"\n\n\nclass SaharaImageTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(SaharaImageTestCase, self).setUp()\n self.tenants_num = 2\n self.users_per_tenant = 2\n self.users = self.tenants_num * self.users_per_tenant\n self.task = mock.MagicMock()\n\n self.tenants = {}\n self.users_key = []\n\n for i in range(self.tenants_num):\n self.tenants[str(i)] = {\"id\": str(i), \"name\": str(i),\n \"sahara\": {\"image\": \"42\"}}\n for j in range(self.users_per_tenant):\n self.users_key.append({\"id\": \"%s_%s\" % (str(i), str(j)),\n \"tenant_id\": str(i),\n \"credential\": fakes.FakeCredential()})\n\n @property\n def url_image_context(self):\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": self.tenants_num,\n \"users_per_tenant\": self.users_per_tenant,\n },\n \"sahara_image\": {\n \"image_url\": \"http://somewhere\",\n \"plugin_name\": \"test_plugin\",\n \"hadoop_version\": \"test_version\",\n \"username\": \"test_user\"\n }\n 
},\n \"admin\": {\"credential\": fakes.FakeCredential()},\n \"users\": self.users_key,\n \"tenants\": self.tenants\n })\n return self.context\n\n @property\n def existing_image_context(self):\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": self.tenants_num,\n \"users_per_tenant\": self.users_per_tenant,\n },\n \"sahara_image\": {\n \"image_uuid\": \"some_id\"\n }\n },\n \"admin\": {\"credential\": fakes.FakeCredential()},\n \"users\": self.users_key,\n \"tenants\": self.tenants,\n })\n return self.context\n\n @mock.patch(\"rally_openstack.common.services.\"\n \"image.image.Image\")\n @mock.patch(\"%s.resource_manager.cleanup\" % CTX)\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_setup_and_cleanup_url_image(self, mock_clients,\n mock_cleanup, mock_image):\n\n ctx = self.url_image_context\n sahara_ctx = sahara_image.SaharaImage(ctx)\n sahara_ctx.generate_random_name = mock.Mock()\n image_service = mock.Mock()\n mock_image.return_value = image_service\n image_service.create_image.return_value = mock.Mock(id=42)\n clients = mock.Mock()\n mock_clients.return_value = clients\n sahara_client = mock.Mock()\n clients.sahara.return_value = sahara_client\n\n glance_calls = []\n\n for i in range(self.tenants_num):\n glance_calls.append(\n mock.call(container_format=\"bare\",\n image_location=\"http://somewhere\",\n disk_format=\"qcow2\"))\n\n sahara_update_image_calls = []\n sahara_update_tags_calls = []\n\n for i in range(self.tenants_num):\n sahara_update_image_calls.append(mock.call(image_id=42,\n user_name=\"test_user\",\n desc=\"\"))\n sahara_update_tags_calls.append(mock.call(\n image_id=42,\n new_tags=[\"test_plugin\", \"test_version\"]))\n\n sahara_ctx.setup()\n image_service.create_image.assert_has_calls(glance_calls)\n sahara_client.images.update_image.assert_has_calls(\n sahara_update_image_calls)\n sahara_client.images.update_tags.assert_has_calls(\n sahara_update_tags_calls)\n\n sahara_ctx.cleanup()\n 
mock_cleanup.assert_called_once_with(\n names=[\"glance.images\"],\n users=ctx[\"users\"],\n superclass=sahara_ctx.__class__,\n task_id=ctx[\"owner_id\"])\n\n @mock.patch(\"%s.resource_manager.cleanup\" % CTX)\n @mock.patch(\"%s.osclients.Clients\" % CTX)\n def test_setup_and_cleanup_existing_image(\n self, mock_clients, mock_cleanup):\n\n mock_clients.glance.images.get.return_value = mock.MagicMock(\n is_public=True)\n\n ctx = self.existing_image_context\n sahara_ctx = sahara_image.SaharaImage(ctx)\n sahara_ctx._create_image = mock.Mock()\n\n sahara_ctx.setup()\n for tenant_id in sahara_ctx.context[\"tenants\"]:\n image_id = (\n sahara_ctx.context[\"tenants\"][tenant_id][\"sahara\"][\"image\"])\n self.assertEqual(\"some_id\", image_id)\n\n self.assertFalse(sahara_ctx._create_image.called)\n\n sahara_ctx.cleanup()\n self.assertFalse(mock_cleanup.called)\n\n @mock.patch(\"%s.osclients.Glance.create_client\" % CTX)\n def test_check_existing_image(self, mock_glance_create_client):\n\n ctx = self.existing_image_context\n sahara_ctx = sahara_image.SaharaImage(ctx)\n sahara_ctx.setup()\n\n mock_glance_create_client.images.get.asser_called_once_with(\"some_id\")\n\n @mock.patch(\"%s.osclients.Glance.create_client\" % CTX)\n def test_check_existing_private_image_fail(self,\n mock_glance_create_client):\n\n mock_glance_create_client.return_value.images.get.return_value = (\n mock.MagicMock(is_public=False))\n\n ctx = self.existing_image_context\n sahara_ctx = sahara_image.SaharaImage(ctx)\n self.assertRaises(exceptions.ContextSetupFailure,\n sahara_ctx.setup)\n\n mock_glance_create_client.images.get.asser_called_once_with(\"some_id\")\n" }, { "alpha_fraction": 0.6408498883247375, "alphanum_fraction": 0.6417637467384338, "avg_line_length": 40.29245376586914, "blob_id": "520d1d2ac46f2e63928780ab618d6988f467060d", "content_id": "78129f8a97d3cee9b96d2e8d6862f400202431bd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 4377, "license_type": "permissive", "max_line_length": 79, "num_lines": 106, "path": "/rally_openstack/task/scenarios/mistral/executions.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\n\nfrom rally.task import types\nfrom rally.task import validation\nimport yaml\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.mistral import utils\n\n\n\"\"\"Scenarios for Mistral execution.\"\"\"\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_services\",\n services=[consts.Service.MISTRAL])\[email protected](name=\"MistralExecutions.list_executions\",\n platform=\"openstack\")\nclass ListExecutions(utils.MistralScenario):\n\n def run(self, marker=\"\", limit=None, sort_keys=\"\", sort_dirs=\"\"):\n \"\"\"Scenario test mistral execution-list command.\n\n This simple scenario tests the Mistral execution-list\n command by listing all the executions.\n :param marker: The last execution uuid of the previous page, displays\n list of executions after \"marker\".\n :param limit: number Maximum number of executions to return in a single\n result.\n :param sort_keys: id,description\n :param sort_dirs: [SORT_DIRS] Comma-separated list of sort directions.\n Default: asc.\n \"\"\"\n self._list_executions(marker=marker, 
limit=limit,\n sort_keys=sort_keys, sort_dirs=sort_dirs)\n\n\[email protected](definition={\"type\": \"file\"})\[email protected](params={\"type\": \"file\"})\[email protected](wf_input={\"type\": \"file\"})\[email protected](\"file_exists\", param_name=\"definition\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_services\",\n services=[consts.Service.MISTRAL])\[email protected](\"workbook_contains_workflow\",\n workbook_param=\"definition\",\n workflow_param=\"workflow_name\")\[email protected](name=\"MistralExecutions.create_execution_from_workbook\",\n context={\"cleanup@openstack\": [\"mistral\"]},\n platform=\"openstack\")\nclass CreateExecutionFromWorkbook(utils.MistralScenario):\n\n def run(self, definition, workflow_name=None, wf_input=None, params=None,\n do_delete=False):\n \"\"\"Scenario tests execution creation and deletion.\n\n This scenario is a very useful tool to measure the\n \"mistral execution-create\" and \"mistral execution-delete\"\n commands performance.\n :param definition: string (yaml string) representation of given file\n content (Mistral workbook definition)\n :param workflow_name: string the workflow name to execute. Should be\n one of the to workflows in the definition. 
If no\n workflow_name is passed, one of the workflows in\n the definition will be taken.\n :param wf_input: file containing a json string of mistral workflow\n input\n :param params: file containing a json string of mistral params\n (the string is the place to pass the environment)\n :param do_delete: if False than it allows to check performance\n in \"create only\" mode.\n \"\"\"\n\n wb = self._create_workbook(definition)\n wb_def = yaml.safe_load(wb.definition)\n\n if not workflow_name:\n workflow_name = next(iter(wb_def[\"workflows\"].keys()))\n\n workflow_identifier = \".\".join([wb.name, workflow_name])\n\n if not params:\n params = {}\n else:\n params = json.loads(params)\n\n ex = self._create_execution(workflow_identifier, wf_input, **params)\n\n if do_delete:\n self._delete_workbook(wb.name)\n self._delete_execution(ex)\n" }, { "alpha_fraction": 0.6560534834861755, "alphanum_fraction": 0.6603773832321167, "avg_line_length": 31.615385055541992, "blob_id": "631714c3b989bab1e7453d7ec6efd4b42d6c9c14", "content_id": "ff5634d3d4ee8ae194895866ee118519f01199dc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2544, "license_type": "permissive", "max_line_length": 78, "num_lines": 78, "path": "/rally_openstack/task/hooks/fault_injection.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.common import objects\nfrom rally.task import hook\n\nfrom rally_openstack.common import consts\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](name=\"fault_injection\", platform=\"openstack\")\nclass FaultInjectionHook(hook.HookAction):\n \"\"\"Performs fault injection using os-faults library.\n\n Configuration:\n\n * action - string that represents an action (more info in [1])\n * verify - whether to verify connection to cloud nodes or not\n\n This plugin discovers extra config of ExistingCloud\n and looks for \"cloud_config\" field. If cloud_config is present then\n it will be used to connect to the cloud by os-faults.\n\n Another option is to provide os-faults config file through\n OS_FAULTS_CONFIG env variable. Format of the config can\n be found in [1].\n\n [1] http://os-faults.readthedocs.io/en/latest/usage.html\n \"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"action\": {\"type\": \"string\"},\n \"verify\": {\"type\": \"boolean\"},\n },\n \"required\": [\n \"action\",\n ],\n \"additionalProperties\": False,\n }\n\n def get_cloud_config(self):\n deployment = objects.Deployment.get(self.task[\"deployment_uuid\"])\n deployment_config = deployment[\"config\"]\n extra_config = deployment_config.get(\"extra\", {})\n return extra_config.get(\"cloud_config\")\n\n def run(self):\n import os_faults\n\n # get cloud configuration\n cloud_config = self.get_cloud_config()\n\n # connect to the cloud\n injector = os_faults.connect(cloud_config)\n\n # verify that all nodes are available\n if self.config.get(\"verify\"):\n injector.verify()\n\n LOG.debug(\"Injecting fault: %s\" % self.config[\"action\"])\n os_faults.human_api(injector, self.config[\"action\"])\n" }, { "alpha_fraction": 0.5640292167663574, "alphanum_fraction": 0.5670479536056519, 
"avg_line_length": 40.13725662231445, "blob_id": "bfc8dcd45ad13b8be6c5c1548c148d7b8c02fdce", "content_id": "3d2c99019d269c49ab2c861f6e7aa3a752129f32", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6294, "license_type": "permissive", "max_line_length": 78, "num_lines": 153, "path": "/tests/unit/task/scenarios/loadbalancer/test_pools.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018: Red Hat Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.octavia import pools\nfrom tests.unit import test\n\n\nclass PoolsTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(PoolsTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.loadbalancer.octavia.Octavia\")\n self.addCleanup(patch.stop)\n self.mock_loadbalancers = patch.start()\n\n def _get_context(self):\n context = super(PoolsTestCase, self).get_test_context()\n context.update({\n \"user\": {\n \"id\": \"fake_user\",\n \"tenant_id\": \"fake_tenant\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake_tenant\",\n \"networks\": [{\"id\": \"fake_net\",\n \"subnets\": [\"fake_subnet\"]}]}})\n return context\n\n def test_create_and_list_pools(self):\n loadbalancer_service = self.mock_loadbalancers.return_value\n scenario = 
pools.CreateAndListPools(self._get_context())\n scenario.run(protocol=\"HTTP\", lb_algorithm=\"ROUND_ROBIN\")\n loadbalancer = [{\n \"loadbalancer\": {\n \"id\": \"loadbalancer-id\"\n }\n }]\n subnets = []\n mock_has_calls = []\n networks = self._get_context()[\"tenant\"][\"networks\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet in subnets:\n mock_has_calls.append(mock.call(subnet_id=\"fake_subnet\",\n project_id=\"fake_tenant\"))\n loadbalancer_service.load_balancer_create.assert_has_calls(\n mock_has_calls)\n for lb in loadbalancer:\n self.assertEqual(\n 1, loadbalancer_service.wait_for_loadbalancer_prov_status\n .call_count)\n self.assertEqual(1,\n loadbalancer_service.pool_create.call_count)\n loadbalancer_service.pool_list.assert_called_once_with()\n\n def test_create_and_delete_pools(self):\n loadbalancer_service = self.mock_loadbalancers.return_value\n scenario = pools.CreateAndDeletePools(self._get_context())\n scenario.run(protocol=\"HTTP\", lb_algorithm=\"ROUND_ROBIN\")\n loadbalancer = [{\n \"loadbalancer\": {\n \"id\": \"loadbalancer-id\"\n }\n }]\n subnets = []\n mock_has_calls = []\n networks = self._get_context()[\"tenant\"][\"networks\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet in subnets:\n mock_has_calls.append(mock.call(subnet_id=\"fake_subnet\",\n project_id=\"fake_tenant\"))\n loadbalancer_service.load_balancer_create.assert_has_calls(\n mock_has_calls)\n for lb in loadbalancer:\n self.assertEqual(\n 1, loadbalancer_service.wait_for_loadbalancer_prov_status\n .call_count)\n self.assertEqual(1,\n loadbalancer_service.pool_create.call_count)\n self.assertEqual(1,\n loadbalancer_service.pool_delete.call_count)\n\n def test_create_and_update_pools(self):\n loadbalancer_service = self.mock_loadbalancers.return_value\n scenario = pools.CreateAndUpdatePools(self._get_context())\n scenario.run(protocol=\"HTTP\", lb_algorithm=\"ROUND_ROBIN\")\n loadbalancer = [{\n 
\"loadbalancer\": {\n \"id\": \"loadbalancer-id\"\n }\n }]\n subnets = []\n mock_has_calls = []\n networks = self._get_context()[\"tenant\"][\"networks\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet in subnets:\n mock_has_calls.append(mock.call(subnet_id=\"fake_subnet\",\n project_id=\"fake_tenant\"))\n loadbalancer_service.load_balancer_create.assert_has_calls(\n mock_has_calls)\n for lb in loadbalancer:\n self.assertEqual(\n 1, loadbalancer_service.wait_for_loadbalancer_prov_status\n .call_count)\n self.assertEqual(1,\n loadbalancer_service.pool_create.call_count)\n self.assertEqual(1,\n loadbalancer_service.pool_set.call_count)\n\n def test_create_and_show_pools(self):\n loadbalancer_service = self.mock_loadbalancers.return_value\n scenario = pools.CreateAndShowPools(self._get_context())\n scenario.run(protocol=\"HTTP\", lb_algorithm=\"ROUND_ROBIN\")\n loadbalancer = [{\n \"loadbalancer\": {\n \"id\": \"loadbalancer-id\"\n }\n }]\n subnets = []\n mock_has_calls = []\n networks = self._get_context()[\"tenant\"][\"networks\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet in subnets:\n mock_has_calls.append(mock.call(subnet_id=\"fake_subnet\",\n project_id=\"fake_tenant\"))\n loadbalancer_service.load_balancer_create.assert_has_calls(\n mock_has_calls)\n for lb in loadbalancer:\n self.assertEqual(\n 1, loadbalancer_service.wait_for_loadbalancer_prov_status\n .call_count)\n self.assertEqual(1,\n loadbalancer_service.pool_create.call_count)\n self.assertEqual(1,\n loadbalancer_service.pool_show.call_count)\n" }, { "alpha_fraction": 0.5289255976676941, "alphanum_fraction": 0.5322313904762268, "avg_line_length": 36.477874755859375, "blob_id": "e58d22e23f211b382257f510ad8bb8a565ba063a", "content_id": "9e400e1e1b3eea91332939e6b2667c0081283393", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4235, "license_type": 
"permissive", "max_line_length": 79, "num_lines": 113, "path": "/rally_openstack/task/contexts/network/routers.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017: Orange\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.neutron import utils as neutron_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True,\n users=True)\[email protected](name=\"router\", platform=\"openstack\", order=351)\nclass Router(context.OpenStackContext):\n \"\"\"Create networking resources.\n\n This creates router for all tenants.\n \"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"routers_per_tenant\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"admin_state_up \": {\n \"description\": \"A human-readable description for the resource\",\n \"type\": \"boolean\",\n },\n \"external_gateway_info\": {\n \"description\": \"The external gateway information .\",\n \"type\": \"object\",\n \"properties\": {\n \"network_id\": {\"type\": \"string\"},\n \"enable_snat\": {\"type\": \"boolean\"}\n },\n \"additionalProperties\": False\n },\n \"network_id\": {\n \"description\": \"Network ID\",\n \"type\": 
\"string\"\n },\n \"external_fixed_ips\": {\n \"description\": \"Ip(s) of the external gateway interface.\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"ip_address\": {\"type\": \"string\"},\n \"subnet_id\": {\"type\": \"string\"}\n },\n \"additionalProperties\": False,\n }\n },\n \"distributed\": {\n \"description\": \"Distributed router. Require dvr extension.\",\n \"type\": \"boolean\"\n },\n \"ha\": {\n \"description\": \"Highly-available router. Require l3-ha.\",\n \"type\": \"boolean\"\n },\n \"availability_zone_hints\": {\n \"description\": \"Require router_availability_zone extension.\",\n \"type\": \"boolean\"\n }\n },\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\n \"routers_per_tenant\": 1,\n }\n\n def setup(self):\n kwargs = {}\n parameters = (\"admin_state_up\", \"external_gateway_info\", \"network_id\",\n \"external_fixed_ips\", \"distributed\", \"ha\",\n \"availability_zone_hints\")\n for parameter in parameters:\n if parameter in self.config:\n kwargs[parameter] = self.config[parameter]\n for user, tenant_id in self._iterate_per_tenants():\n self.context[\"tenants\"][tenant_id][\"routers\"] = []\n scenario = neutron_utils.NeutronScenario(\n context={\"user\": user, \"task\": self.context[\"task\"],\n \"owner_id\": self.context[\"owner_id\"]}\n )\n for i in range(self.config[\"routers_per_tenant\"]):\n router = scenario._create_router(kwargs)\n self.context[\"tenants\"][tenant_id][\"routers\"].append(router)\n\n def cleanup(self):\n resource_manager.cleanup(\n names=[\"neutron.router\"],\n users=self.context.get(\"users\", []),\n superclass=neutron_utils.NeutronScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.5850846767425537, "alphanum_fraction": 0.5933483242988586, "avg_line_length": 42.371681213378906, "blob_id": "915077850fa4ee2eaeb01a085a9bf3792b5b68c5", "content_id": "c72de780df9b9655b77b37e5be705a07c72c4e3c", "detected_licenses": [ "Apache-2.0" ], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 9802, "license_type": "permissive", "max_line_length": 79, "num_lines": 226, "path": "/tests/unit/task/scenarios/neutron/test_bgpvpn.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.scenarios.neutron import bgpvpn\nfrom tests.unit import test\n\n\[email protected]\nclass NeutronBgpvpnTestCase(test.TestCase):\n\n def _get_context(self, resource=None):\n context = test.get_test_context()\n if resource in (\"network\", \"router\"):\n context.update({\n \"user\": {\n \"id\": \"fake_user\",\n \"tenant_id\": \"fake_tenant\",\n \"credential\": mock.MagicMock()}\n })\n if resource == \"network\":\n context.update(\n {\"tenant\": {\"id\": \"fake_tenant\",\n resource + \"s\": [{\"id\": \"fake_net\",\n \"tenant_id\": \"fake_tenant\",\n \"router_id\": \"fake_router\"}]}\n })\n elif resource == \"router\":\n context.update(\n {\"tenant\": {\"id\": \"fake_tenant\",\n resource + \"s\": [\n {resource: {\"id\": \"fake_net\",\n \"tenant_id\": \"fake_tenant\"}}]}\n })\n return context\n\n def _get_bgpvpn_create_data(self):\n return {\n \"route_targets\": None,\n \"import_targets\": None,\n \"export_targets\": None,\n \"route_distinguishers\": None}\n\n def _get_bgpvpn_update_data(self):\n return {\n \"route_targets\": None,\n \"import_targets\": None,\n \"export_targets\": 
None,\n \"route_distinguishers\": None}\n\n @ddt.data(\n {},\n {\"bgpvpn_create_args\": None},\n {\"bgpvpn_create_args\": {}},\n )\n @ddt.unpack\n def test_create_and_delete_bgpvpns(self, bgpvpn_create_args=None):\n scenario = bgpvpn.CreateAndDeleteBgpvpns(self._get_context())\n bgpvpn_create_data = bgpvpn_create_args or {}\n create_data = self._get_bgpvpn_create_data()\n create_data.update(bgpvpn_create_data)\n scenario._create_bgpvpn = mock.Mock()\n scenario._delete_bgpvpn = mock.Mock()\n scenario.run(**create_data)\n scenario._create_bgpvpn.assert_called_once_with(\n type=\"l3\", **create_data)\n scenario._delete_bgpvpn.assert_called_once_with(\n scenario._create_bgpvpn.return_value)\n\n @ddt.data(\n {},\n {\"bgpvpn_create_args\": None},\n {\"bgpvpn_create_args\": {}},\n )\n @ddt.unpack\n def test_create_and_list_bgpvpns(self, bgpvpn_create_args=None):\n scenario = bgpvpn.CreateAndListBgpvpns(self._get_context())\n bgpvpn_create_data = bgpvpn_create_args or {}\n create_data = self._get_bgpvpn_create_data()\n create_data.update(bgpvpn_create_data)\n bgpvpn_created = {\"bgpvpn\": {\"id\": 1, \"name\": \"b1\"}}\n bgpvpn_listed = [{\"id\": 1}]\n scenario._create_bgpvpn = mock.Mock(return_value=bgpvpn_created)\n scenario._list_bgpvpns = mock.Mock(return_value=bgpvpn_listed)\n scenario.run(**create_data)\n scenario._create_bgpvpn.assert_called_once_with(\n type=\"l3\", **create_data)\n scenario._list_bgpvpns.assert_called_once_with()\n\n @ddt.data(\n {},\n {\"bgpvpn_create_args\": {}},\n {\"bgpvpn_update_args\": {}},\n {\"bgpvpn_update_args\": {\"update_name\": True}},\n {\"bgpvpn_update_args\": {\"update_name\": False}},\n )\n @ddt.unpack\n def test_create_and_update_bgpvpns(self, bgpvpn_create_args=None,\n bgpvpn_update_args=None):\n scenario = bgpvpn.CreateAndUpdateBgpvpns(self._get_context())\n bgpvpn_create_data = bgpvpn_create_args or {}\n bgpvpn_update_data = bgpvpn_update_args or {}\n create_data = self._get_bgpvpn_create_data()\n 
create_data.update(bgpvpn_create_data)\n update_data = self._get_bgpvpn_update_data()\n update_data.update(bgpvpn_update_data)\n if \"update_name\" not in update_data:\n update_data[\"update_name\"] = False\n bgpvpn_data = {}\n bgpvpn_data.update(bgpvpn_create_data)\n bgpvpn_data.update(bgpvpn_update_data)\n scenario._create_bgpvpn = mock.Mock()\n scenario._update_bgpvpn = mock.Mock()\n scenario.run(**bgpvpn_data)\n scenario._create_bgpvpn.assert_called_once_with(\n type=\"l3\", **create_data)\n scenario._update_bgpvpn.assert_called_once_with(\n scenario._create_bgpvpn.return_value, **update_data)\n\n @mock.patch.object(bgpvpn, \"random\")\n def test_create_and_associate_disassociate_networks(self, mock_random):\n scenario = bgpvpn.CreateAndAssociateDissassociateNetworks(\n self._get_context(\"network\"))\n create_data = self._get_bgpvpn_create_data()\n networks = self._get_context(\"network\")[\"tenant\"][\"networks\"]\n create_data[\"tenant_id\"] = networks[0][\"tenant_id\"]\n mock_random.randint.return_value = 12345\n create_data[\"route_targets\"] = \"12345:12345\"\n scenario._create_bgpvpn = mock.Mock()\n scenario._create_bgpvpn_network_assoc = mock.Mock()\n scenario._delete_bgpvpn_network_assoc = mock.Mock()\n scenario.run()\n scenario._create_bgpvpn.assert_called_once_with(\n type=\"l3\", **create_data)\n\n scenario._create_bgpvpn_network_assoc.assert_called_once_with(\n scenario._create_bgpvpn.return_value, networks[0])\n scenario._delete_bgpvpn_network_assoc.assert_called_once_with(\n scenario._create_bgpvpn.return_value,\n scenario._create_bgpvpn_network_assoc.return_value)\n\n @mock.patch.object(bgpvpn, \"random\")\n def test_create_and_associate_disassociate_routers(self, mock_random):\n scenario = bgpvpn.CreateAndAssociateDissassociateRouters(\n self._get_context(\"network\"))\n create_data = self._get_bgpvpn_create_data()\n router = {\"id\": self._get_context(\n \"network\")[\"tenant\"][\"networks\"][0][\"router_id\"]}\n create_data[\"tenant_id\"] = 
self._get_context(\"network\")[\"tenant\"][\"id\"]\n mock_random.randint.return_value = 12345\n create_data[\"route_targets\"] = \"12345:12345\"\n scenario._create_bgpvpn = mock.Mock()\n scenario._create_bgpvpn_router_assoc = mock.Mock()\n scenario._delete_bgpvpn_router_assoc = mock.Mock()\n scenario.run()\n\n scenario._create_bgpvpn.assert_called_once_with(\n type=\"l3\", **create_data)\n scenario._create_bgpvpn_router_assoc.assert_called_once_with(\n scenario._create_bgpvpn.return_value, router)\n scenario._delete_bgpvpn_router_assoc.assert_called_once_with(\n scenario._create_bgpvpn.return_value,\n scenario._create_bgpvpn_router_assoc.return_value)\n\n @mock.patch.object(bgpvpn, \"random\")\n def test_create_and_list_networks_assocs(self, mock_random):\n scenario = bgpvpn.CreateAndListNetworksAssocs(\n self._get_context(\"network\"))\n create_data = self._get_bgpvpn_create_data()\n networks = self._get_context(\"network\")[\"tenant\"][\"networks\"]\n create_data[\"tenant_id\"] = networks[0][\"tenant_id\"]\n network_assocs = {\n \"network_associations\": [{\"network_id\": networks[0][\"id\"]}]\n }\n mock_random.randint.return_value = 12345\n create_data[\"route_targets\"] = \"12345:12345\"\n scenario._create_bgpvpn = mock.Mock()\n scenario._create_bgpvpn_network_assoc = mock.Mock()\n scenario._list_bgpvpn_network_assocs = mock.Mock(\n return_value=network_assocs)\n scenario.run()\n\n scenario._create_bgpvpn.assert_called_once_with(\n type=\"l3\", **create_data)\n scenario._create_bgpvpn_network_assoc.assert_called_once_with(\n scenario._create_bgpvpn.return_value, networks[0])\n scenario._list_bgpvpn_network_assocs.assert_called_once_with(\n scenario._create_bgpvpn.return_value)\n\n @mock.patch.object(bgpvpn, \"random\")\n def test_create_and_list_routers_assocs(self, mock_random):\n scenario = bgpvpn.CreateAndListRoutersAssocs(\n self._get_context(\"network\"))\n create_data = self._get_bgpvpn_create_data()\n router = {\"id\": self._get_context(\n 
\"network\")[\"tenant\"][\"networks\"][0][\"router_id\"]}\n create_data[\"tenant_id\"] = self._get_context(\"network\")[\"tenant\"][\"id\"]\n router_assocs = {\n \"router_associations\": [{\"router_id\": router[\"id\"]}]\n }\n mock_random.randint.return_value = 12345\n create_data[\"route_targets\"] = \"12345:12345\"\n scenario._create_bgpvpn = mock.Mock()\n scenario._create_bgpvpn_router_assoc = mock.Mock()\n scenario._list_bgpvpn_router_assocs = mock.Mock(\n return_value=router_assocs)\n scenario.run()\n\n scenario._create_bgpvpn.assert_called_once_with(\n type=\"l3\", **create_data)\n scenario._create_bgpvpn_router_assoc.assert_called_once_with(\n scenario._create_bgpvpn.return_value, router)\n scenario._list_bgpvpn_router_assocs.assert_called_once_with(\n scenario._create_bgpvpn.return_value)\n" }, { "alpha_fraction": 0.5698369741439819, "alphanum_fraction": 0.5719479322433472, "avg_line_length": 43.76115417480469, "blob_id": "1fb120bd013b60ff1d9f779f1fe541e549440e00", "content_id": "4ee07778555344228cfd5b9f84496ddafb177e9e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17054, "license_type": "permissive", "max_line_length": 79, "num_lines": 381, "path": "/rally_openstack/verification/tempest/context.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport configparser\nimport os\nimport re\n\nimport requests\n\nfrom rally.common import logging\nfrom rally import exceptions\nfrom rally.task import utils as task_utils\nfrom rally.verification import context\nfrom rally.verification import utils\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import credential\nfrom rally_openstack.common.services.image import image\nfrom rally_openstack.common.services.network import neutron\nfrom rally_openstack.verification.tempest import config as conf\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"tempest\", order=900)\nclass TempestContext(context.VerifierContext):\n \"\"\"Context class to create/delete resources needed for Tempest.\"\"\"\n\n RESOURCE_NAME_FORMAT = \"rally_verify_XXXXXXXX_XXXXXXXX\"\n\n def __init__(self, ctx):\n super(TempestContext, self).__init__(ctx)\n\n openstack_platform = self.verifier.env.data[\"platforms\"][\"openstack\"]\n admin_creds = credential.OpenStackCredential(\n permission=consts.EndpointPermission.ADMIN,\n **openstack_platform[\"platform_data\"][\"admin\"])\n\n self.clients = admin_creds.clients()\n self.available_services = self.clients.services().values()\n\n self.conf = configparser.ConfigParser(allow_no_value=True)\n self.conf.optionxform = str\n self.conf_path = self.verifier.manager.configfile\n\n self.data_dir = self.verifier.manager.home_dir\n self.image_name = \"tempest-image\"\n\n self._created_roles = []\n self._created_images = []\n self._created_flavors = []\n self._created_networks = []\n\n def _configure_img_options(self):\n try:\n tempest_major_version = int(self.verifier.version.split(\".\", 1)[0])\n except ValueError:\n # use latest flow by default\n tempest_major_version = 27\n if tempest_major_version < 27:\n self._configure_option(\"scenario\", \"img_dir\", self.data_dir)\n img_file = self.image_name\n else:\n img_file 
= self.data_dir + \"/\" + self.image_name\n self._configure_option(\"scenario\", \"img_file\", img_file,\n helper_method=self._download_image)\n\n def setup(self):\n self.conf.read(self.conf_path)\n\n utils.create_dir(self.data_dir)\n\n self._create_tempest_roles()\n\n self._configure_option(\"DEFAULT\", \"log_file\",\n os.path.join(self.data_dir, \"tempest.log\"))\n self._configure_option(\"oslo_concurrency\", \"lock_path\",\n os.path.join(self.data_dir, \"lock_files\"))\n self._configure_img_options()\n self._configure_option(\"compute\", \"image_ref\",\n helper_method=self._discover_or_create_image)\n self._configure_option(\"compute\", \"image_ref_alt\",\n helper_method=self._discover_or_create_image)\n self._configure_option(\"compute\", \"flavor_ref\",\n helper_method=self._discover_or_create_flavor,\n flv_ram=conf.CONF.openstack.flavor_ref_ram,\n flv_disk=conf.CONF.openstack.flavor_ref_disk)\n self._configure_option(\"compute\", \"flavor_ref_alt\",\n helper_method=self._discover_or_create_flavor,\n flv_ram=conf.CONF.openstack.flavor_ref_alt_ram,\n flv_disk=conf.CONF.openstack.flavor_ref_alt_disk\n )\n if \"neutron\" in self.available_services:\n neutronclient = self.clients.neutron()\n if neutronclient.list_networks(shared=True)[\"networks\"]:\n # If the OpenStack cloud has some shared networks, we will\n # create our own shared network and specify its name in the\n # Tempest config file. Such approach will allow us to avoid\n # failures of Tempest tests with error \"Multiple possible\n # networks found\". Otherwise the default behavior defined in\n # Tempest will be used and Tempest itself will manage network\n # resources.\n LOG.debug(\"Shared networks found. 
\"\n \"'fixed_network_name' option should be configured.\")\n self._configure_option(\n \"compute\", \"fixed_network_name\",\n helper_method=self._create_network_resources)\n if \"heat\" in self.available_services:\n self._configure_option(\n \"orchestration\", \"instance_type\",\n helper_method=self._discover_or_create_flavor,\n flv_ram=conf.CONF.openstack.heat_instance_type_ram,\n flv_disk=conf.CONF.openstack.heat_instance_type_disk)\n\n with open(self.conf_path, \"w\") as configfile:\n self.conf.write(configfile)\n\n def cleanup(self):\n # Tempest tests may take more than 1 hour and we should remove all\n # cached clients sessions to avoid tokens expiration when deleting\n # Tempest resources.\n self.clients.clear()\n\n self._cleanup_tempest_roles()\n self._cleanup_images()\n self._cleanup_flavors()\n if \"neutron\" in self.available_services:\n self._cleanup_network_resources()\n\n with open(self.conf_path, \"w\") as configfile:\n self.conf.write(configfile)\n\n def _create_tempest_roles(self):\n keystoneclient = self.clients.verified_keystone()\n roles = [conf.CONF.openstack.swift_operator_role,\n conf.CONF.openstack.swift_reseller_admin_role,\n conf.CONF.openstack.heat_stack_owner_role,\n conf.CONF.openstack.heat_stack_user_role]\n existing_roles = set(role.name.lower()\n for role in keystoneclient.roles.list())\n\n for role in roles:\n if role.lower() not in existing_roles:\n LOG.debug(\"Creating role '%s'.\" % role)\n self._created_roles.append(keystoneclient.roles.create(role))\n\n def _configure_option(self, section, option, value=None,\n helper_method=None, *args, **kwargs):\n option_value = self.conf.get(section, option)\n if not option_value:\n LOG.debug(\"Option '%s' from '%s' section is not configured.\"\n % (option, section))\n if helper_method:\n res = helper_method(*args, **kwargs)\n if res:\n value = res[\"network\"][\"name\"] if (\"network\" in\n option) else res.id\n LOG.debug(\"Setting value '%s' to option '%s'.\" % (value, option))\n 
self.conf.set(section, option, value)\n LOG.debug(\"Option '{opt}' is configured. \"\n \"{opt} = {value}\".format(opt=option, value=value))\n else:\n LOG.debug(\"Option '{opt}' is already configured \"\n \"in Tempest config file. {opt} = {opt_val}\"\n .format(opt=option, opt_val=option_value))\n\n def _discover_image(self):\n LOG.debug(\"Trying to discover a public image with name matching \"\n \"regular expression '%s'. Note that case insensitive \"\n \"matching is performed.\"\n % conf.CONF.openstack.img_name_regex)\n image_service = image.Image(self.clients)\n images = image_service.list_images(status=\"active\",\n visibility=\"public\")\n for image_obj in images:\n if image_obj.name and re.match(conf.CONF.openstack.img_name_regex,\n image_obj.name, re.IGNORECASE):\n LOG.debug(\"The following public image discovered: '%s'.\"\n % image_obj.name)\n return image_obj\n\n LOG.debug(\"There is no public image with name matching regular \"\n \"expression '%s'.\" % conf.CONF.openstack.img_name_regex)\n\n def _download_image_from_source(self, target_path, image=None):\n if image:\n LOG.debug(\"Downloading image '%s' from Glance to %s.\"\n % (image.name, target_path))\n with open(target_path, \"wb\") as image_file:\n for chunk in self.clients.glance().images.data(image.id):\n image_file.write(chunk)\n else:\n LOG.debug(\"Downloading image from %s to %s.\"\n % (conf.CONF.openstack.img_url, target_path))\n try:\n response = requests.get(conf.CONF.openstack.img_url,\n stream=True)\n except requests.ConnectionError as err:\n msg = (\"Failed to download image. Possibly there is no \"\n \"connection to Internet. 
Error: %s.\"\n % (str(err) or \"unknown\"))\n raise exceptions.RallyException(msg)\n\n if response.status_code == 200:\n with open(target_path, \"wb\") as image_file:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n image_file.write(chunk)\n image_file.flush()\n else:\n if response.status_code == 404:\n msg = \"Failed to download image. Image was not found.\"\n else:\n msg = (\"Failed to download image. HTTP error code %d.\"\n % response.status_code)\n raise exceptions.RallyException(msg)\n\n LOG.debug(\"The image has been successfully downloaded!\")\n\n def _download_image(self):\n image_path = os.path.join(self.data_dir, self.image_name)\n if os.path.isfile(image_path):\n LOG.debug(\"Image is already downloaded to %s.\" % image_path)\n return\n\n if conf.CONF.openstack.img_name_regex:\n image = self._discover_image()\n if image:\n return self._download_image_from_source(image_path, image)\n\n self._download_image_from_source(image_path)\n\n def _discover_or_create_image(self):\n if conf.CONF.openstack.img_name_regex:\n image_obj = self._discover_image()\n if image_obj:\n LOG.debug(\"Using image '%s' (ID = %s) for the tests.\"\n % (image_obj.name, image_obj.id))\n return image_obj\n\n params = {\n \"image_name\": self.generate_random_name(),\n \"disk_format\": conf.CONF.openstack.img_disk_format,\n \"container_format\": conf.CONF.openstack.img_container_format,\n \"image_location\": os.path.join(self.data_dir, self.image_name),\n \"visibility\": \"public\"\n }\n LOG.debug(\"Creating image '%s'.\" % params[\"image_name\"])\n image_service = image.Image(self.clients)\n image_obj = image_service.create_image(**params)\n LOG.debug(\"Image '%s' (ID = %s) has been successfully created!\"\n % (image_obj.name, image_obj.id))\n self._created_images.append(image_obj)\n\n return image_obj\n\n def _discover_or_create_flavor(self, flv_ram, flv_disk):\n novaclient = self.clients.nova()\n\n LOG.debug(\"Trying to discover a 
flavor with the following properties: \"\n \"RAM = %(ram)dMB, VCPUs = 1, disk >= %(disk)dGiB.\" %\n {\"ram\": flv_ram, \"disk\": flv_disk})\n for flavor in novaclient.flavors.list():\n if (flavor.ram == flv_ram\n and flavor.vcpus == 1 and flavor.disk >= flv_disk):\n LOG.debug(\"The following flavor discovered: '{0}'. \"\n \"Using flavor '{0}' (ID = {1}) for the tests.\"\n .format(flavor.name, flavor.id))\n return flavor\n\n LOG.debug(\"There is no flavor with the mentioned properties.\")\n\n params = {\n \"name\": self.generate_random_name(),\n \"ram\": flv_ram,\n \"vcpus\": 1,\n \"disk\": flv_disk\n }\n LOG.debug(\"Creating flavor '%s' with the following properties: RAM \"\n \"= %dMB, VCPUs = 1, disk = %dGB.\" %\n (params[\"name\"], flv_ram, flv_disk))\n flavor = novaclient.flavors.create(**params)\n LOG.debug(\"Flavor '%s' (ID = %s) has been successfully created!\"\n % (flavor.name, flavor.id))\n self._created_flavors.append(flavor)\n\n return flavor\n\n def _create_network_resources(self):\n client = neutron.NeutronService(\n clients=self.clients,\n name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions()\n )\n tenant_id = self.clients.keystone.auth_ref.project_id\n router_create_args = {\"project_id\": tenant_id}\n public_net = None\n if self.conf.has_section(\"network\"):\n public_net = self.conf.get(\"network\", \"public_network_id\")\n if public_net:\n external_gateway_info = {\n \"network_id\": public_net\n }\n if client.supports_extension(\"ext-gw-mode\", silent=True):\n external_gateway_info[\"enable_snat\"] = True\n router_create_args[\"external_gateway_info\"] = external_gateway_info\n LOG.debug(\"Creating network resources: network, subnet, router.\")\n net = client.create_network_topology(\n subnets_count=1,\n router_create_args=router_create_args,\n subnet_create_args={\"project_id\": tenant_id},\n network_create_args={\"shared\": True, \"project_id\": tenant_id})\n LOG.debug(\"Network resources have been successfully 
created!\")\n self._created_networks.append(net)\n\n return net\n\n def _cleanup_tempest_roles(self):\n keystoneclient = self.clients.keystone()\n for role in self._created_roles:\n LOG.debug(\"Deleting role '%s'.\" % role.name)\n keystoneclient.roles.delete(role.id)\n LOG.debug(\"Role '%s' has been deleted.\" % role.name)\n\n def _cleanup_images(self):\n image_service = image.Image(self.clients)\n for image_obj in self._created_images:\n LOG.debug(\"Deleting image '%s'.\" % image_obj.name)\n self.clients.glance().images.delete(image_obj.id)\n task_utils.wait_for_status(\n image_obj, [\"deleted\", \"pending_delete\"],\n check_deletion=True,\n update_resource=image_service.get_image,\n timeout=conf.CONF.openstack.glance_image_delete_timeout,\n check_interval=conf.CONF.openstack.\n glance_image_delete_poll_interval)\n LOG.debug(\"Image '%s' has been deleted.\" % image_obj.name)\n self._remove_opt_value_from_config(\"compute\", image_obj.id)\n\n def _cleanup_flavors(self):\n novaclient = self.clients.nova()\n for flavor in self._created_flavors:\n LOG.debug(\"Deleting flavor '%s'.\" % flavor.name)\n novaclient.flavors.delete(flavor.id)\n LOG.debug(\"Flavor '%s' has been deleted.\" % flavor.name)\n self._remove_opt_value_from_config(\"compute\", flavor.id)\n self._remove_opt_value_from_config(\"orchestration\", flavor.id)\n\n def _cleanup_network_resources(self):\n client = neutron.NeutronService(\n clients=self.clients,\n name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions()\n )\n for topo in self._created_networks:\n LOG.debug(\"Deleting network resources: router, subnet, network.\")\n client.delete_network_topology(topo)\n self._remove_opt_value_from_config(\"compute\",\n topo[\"network\"][\"name\"])\n LOG.debug(\"Network resources have been deleted.\")\n\n def _remove_opt_value_from_config(self, section, opt_value):\n for option, value in self.conf.items(section):\n if opt_value == value:\n LOG.debug(\"Removing value '%s' of option '%s' \"\n 
\"from Tempest config file.\" % (opt_value, option))\n self.conf.set(section, option, \"\")\n LOG.debug(\"Value '%s' has been removed.\" % opt_value)\n" }, { "alpha_fraction": 0.6322457790374756, "alphanum_fraction": 0.6334197521209717, "avg_line_length": 45.408626556396484, "blob_id": "a78ab3a79c6695d21a34b6f406e6d63fbd7c9c18", "content_id": "169907f87cfed5f8528085cec9a13305108732f2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40886, "license_type": "permissive", "max_line_length": 79, "num_lines": 881, "path": "/rally_openstack/task/scenarios/cinder/volumes.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013 Huawei Technologies Co.,LTD.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.common import logging\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.cinder import utils as cinder_utils\nfrom rally_openstack.task.scenarios.glance import images\nfrom rally_openstack.task.scenarios.nova import utils as nova_utils\n\n\nLOG = logging.getLogger(__name__)\n\n\"\"\"Scenarios for Cinder Volumes.\"\"\"\n\n\[email protected](image={\"type\": \"glance_image\"})\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"image_exists\", param_name=\"image\", nullable=True)\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_and_list_volume\",\n platform=\"openstack\")\nclass CreateAndListVolume(cinder_utils.CinderBasic):\n\n def run(self, size, detailed=True, image=None, **kwargs):\n \"\"\"Create a volume and list all volumes.\n\n Measure the \"cinder volume-list\" command performance.\n\n If you have only 1 user in your context, you will\n add 1 volume on every iteration. 
So you will have more\n and more volumes and will be able to measure the\n performance of the \"cinder volume-list\" command depending on\n the number of images owned by users.\n\n :param size: volume size (integer, in GB) or\n dictionary, must contain two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n :param detailed: determines whether the volume listing should contain\n detailed information about all of them\n :param image: image to be used to create volume\n :param kwargs: optional args to create a volume\n \"\"\"\n if image:\n kwargs[\"imageRef\"] = image\n\n self.cinder.create_volume(size, **kwargs)\n self.cinder.list_volumes(detailed)\n\n\[email protected](image={\"type\": \"glance_image\"})\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"image_exists\", param_name=\"image\", nullable=True)\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_and_get_volume\",\n platform=\"openstack\")\nclass CreateAndGetVolume(cinder_utils.CinderBasic):\n\n def run(self, size, image=None, **kwargs):\n \"\"\"Create a volume and get the volume.\n\n Measure the \"cinder show\" command performance.\n\n :param size: volume size (integer, in GB) or\n dictionary, must contain two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n :param image: image to be used to create volume\n :param kwargs: optional args to create a volume\n \"\"\"\n if image:\n kwargs[\"imageRef\"] = image\n\n volume = self.cinder.create_volume(size, **kwargs)\n self.cinder.get_volume(volume.id)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", 
users=True)\[email protected](name=\"CinderVolumes.list_volumes\",\n platform=\"openstack\")\nclass ListVolumes(cinder_utils.CinderBasic):\n\n def run(self, detailed=True, search_opts=None, marker=None,\n limit=None, sort=None):\n \"\"\"List all volumes.\n\n This simple scenario tests the cinder list command by listing\n all the volumes.\n\n :param detailed: True if detailed information about volumes\n should be listed\n :param search_opts: Search options to filter out volumes.\n :param marker: Begin returning volumes that appear later in the volume\n list than that represented by this volume id.(For V2 or\n higher)\n :param limit: Maximum number of volumes to return.\n :param sort: Sort information\n \"\"\"\n\n self.cinder.list_volumes(detailed, search_opts=search_opts,\n marker=marker, limit=limit, sort=sort)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"CinderVolumes.list_types\", platform=\"openstack\")\nclass ListTypes(cinder_utils.CinderBasic):\n\n def run(self, search_opts=None, is_public=None):\n \"\"\"List all volume types.\n\n This simple scenario tests the cinder type-list command by listing\n all the volume types.\n\n :param search_opts: Options used when search for volume types\n :param is_public: If query public volume type\n \"\"\"\n\n self.cinder.list_types(search_opts, is_public=is_public)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"CinderVolumes.list_transfers\", platform=\"openstack\")\nclass ListTransfers(cinder_utils.CinderBasic):\n\n def run(self, detailed=True, search_opts=None):\n \"\"\"List all transfers.\n\n This simple scenario tests the \"cinder transfer-list\" command by\n listing all the volume transfers.\n\n :param detailed: If True, detailed information about volume 
transfer\n should be listed\n :param search_opts: Search options to filter out volume transfers.\n \"\"\"\n\n self.cinder.list_transfers(detailed, search_opts=search_opts)\n\n\[email protected](image={\"type\": \"glance_image\"})\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_volume_kwargs\")\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"update_volume_kwargs\")\[email protected](\"image_exists\", param_name=\"image\", nullable=True)\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_and_update_volume\",\n platform=\"openstack\")\nclass CreateAndUpdateVolume(cinder_utils.CinderBasic):\n\n def run(self, size, image=None, create_volume_kwargs=None,\n update_volume_kwargs=None):\n \"\"\"Create a volume and update its name and description.\n\n :param size: volume size (integer, in GB)\n :param image: image to be used to create volume\n :param create_volume_kwargs: dict, to be used to create volume\n :param update_volume_kwargs: dict, to be used to update volume\n update_volume_kwargs[\"update_name\"]=True, if updating the\n name of volume.\n update_volume_kwargs[\"description\"]=\"desp\", if updating the\n description of volume.\n \"\"\"\n create_volume_kwargs = create_volume_kwargs or {}\n update_volume_kwargs = update_volume_kwargs or {}\n if image:\n create_volume_kwargs[\"imageRef\"] = image\n\n if update_volume_kwargs.pop(\"update_name\", False):\n update_volume_kwargs[\"name\"] = self.generate_random_name()\n\n volume = self.cinder.create_volume(size, **create_volume_kwargs)\n self.cinder.update_volume(volume, **update_volume_kwargs)\n\n\[email protected](image={\"type\": \"glance_image\"})\[email protected](\"restricted_parameters\", param_names=[\"name\", 
\"display_name\"])\[email protected](\"image_exists\", param_name=\"image\", nullable=True)\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_and_delete_volume\",\n platform=\"openstack\")\nclass CreateAndDeleteVolume(cinder_utils.CinderBasic):\n\n def run(self, size, image=None, min_sleep=0, max_sleep=0, **kwargs):\n \"\"\"Create and then delete a volume.\n\n Good for testing a maximal bandwidth of cloud. Optional 'min_sleep'\n and 'max_sleep' parameters allow the scenario to simulate a pause\n between volume creation and deletion (of random duration from\n [min_sleep, max_sleep]).\n\n :param size: volume size (integer, in GB) or\n dictionary, must contain two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n :param image: image to be used to create volume\n :param min_sleep: minimum sleep time between volume creation and\n deletion (in seconds)\n :param max_sleep: maximum sleep time between volume creation and\n deletion (in seconds)\n :param kwargs: optional args to create a volume\n \"\"\"\n if image:\n kwargs[\"imageRef\"] = image\n\n volume = self.cinder.create_volume(size, **kwargs)\n self.sleep_between(min_sleep, max_sleep)\n self.cinder.delete_volume(volume)\n\n\[email protected](image={\"type\": \"glance_image\"})\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"image_exists\", param_name=\"image\", nullable=True)\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_volume\",\n platform=\"openstack\")\nclass CreateVolume(cinder_utils.CinderBasic):\n\n def 
run(self, size, image=None, **kwargs):\n \"\"\"Create a volume.\n\n Good test to check how influence amount of active volumes on\n performance of creating new.\n\n :param size: volume size (integer, in GB) or\n dictionary, must contain two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n :param image: image to be used to create volume\n :param kwargs: optional args to create a volume\n \"\"\"\n if image:\n kwargs[\"imageRef\"] = image\n\n self.cinder.create_volume(size, **kwargs)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=(\"volumes\"))\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.modify_volume_metadata\",\n platform=\"openstack\")\nclass ModifyVolumeMetadata(cinder_utils.CinderBasic):\n\n def run(self, sets=10, set_size=3, deletes=5, delete_size=3):\n \"\"\"Modify a volume's metadata.\n\n This requires a volume to be created with the volumes\n context. 
Additionally, ``sets * set_size`` must be greater\n than or equal to ``deletes * delete_size``.\n\n :param sets: how many set_metadata operations to perform\n :param set_size: number of metadata keys to set in each\n set_metadata operation\n :param deletes: how many delete_metadata operations to perform\n :param delete_size: number of metadata keys to delete in each\n delete_metadata operation\n \"\"\"\n if sets * set_size < deletes * delete_size:\n raise exceptions.InvalidArgumentsException(\n \"Not enough metadata keys will be created: \"\n \"Setting %(num_keys)s keys, but deleting %(num_deletes)s\" %\n {\"num_keys\": sets * set_size,\n \"num_deletes\": deletes * delete_size})\n\n volume = random.choice(self.context[\"tenant\"][\"volumes\"])\n keys = self.cinder.set_metadata(volume[\"id\"], sets=sets,\n set_size=set_size)\n self.cinder.delete_metadata(volume[\"id\"], keys=keys,\n deletes=deletes,\n delete_size=delete_size)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_and_extend_volume\",\n platform=\"openstack\")\nclass CreateAndExtendVolume(cinder_utils.CinderBasic):\n\n def run(self, size, new_size, min_sleep=0, max_sleep=0, **kwargs):\n \"\"\"Create and extend a volume and then delete it.\n\n\n :param size: volume size (in GB) or\n dictionary, must contain two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n :param new_size: volume new size (in GB) or\n dictionary, must contain two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n to extend.\n Notice: should be bigger volume size\n :param min_sleep: minimum sleep time between volume extension and\n deletion 
(in seconds)\n :param max_sleep: maximum sleep time between volume extension and\n deletion (in seconds)\n :param kwargs: optional args to extend the volume\n \"\"\"\n volume = self.cinder.create_volume(size, **kwargs)\n self.cinder.extend_volume(volume, new_size=new_size)\n self.sleep_between(min_sleep, max_sleep)\n self.cinder.delete_volume(volume)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"required_contexts\", contexts=(\"volumes\"))\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_from_volume_and_delete_volume\",\n platform=\"openstack\")\nclass CreateFromVolumeAndDeleteVolume(cinder_utils.CinderBasic):\n\n def run(self, size, min_sleep=0, max_sleep=0, **kwargs):\n \"\"\"Create volume from volume and then delete it.\n\n Scenario for testing volume clone.Optional 'min_sleep' and 'max_sleep'\n parameters allow the scenario to simulate a pause between volume\n creation and deletion (of random duration from [min_sleep, max_sleep]).\n\n :param size: volume size (in GB), or\n dictionary, must contain two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n Should be equal or bigger source volume size\n\n :param min_sleep: minimum sleep time between volume creation and\n deletion (in seconds)\n :param max_sleep: maximum sleep time between volume creation and\n deletion (in seconds)\n :param kwargs: optional args to create a volume\n \"\"\"\n source_vol = random.choice(self.context[\"tenant\"][\"volumes\"])\n volume = self.cinder.create_volume(size, source_volid=source_vol[\"id\"],\n **kwargs)\n self.sleep_between(min_sleep, max_sleep)\n self.cinder.delete_volume(volume)\n\n\[email protected](\"required_services\", 
services=[consts.Service.CINDER])\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"required_contexts\", contexts=(\"volumes\"))\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_and_delete_snapshot\",\n platform=\"openstack\")\nclass CreateAndDeleteSnapshot(cinder_utils.CinderBasic):\n\n def run(self, force=False, min_sleep=0, max_sleep=0, **kwargs):\n \"\"\"Create and then delete a volume-snapshot.\n\n Optional 'min_sleep' and 'max_sleep' parameters allow the scenario\n to simulate a pause between snapshot creation and deletion\n (of random duration from [min_sleep, max_sleep]).\n\n :param force: when set to True, allows snapshot of a volume when\n the volume is attached to an instance\n :param min_sleep: minimum sleep time between snapshot creation and\n deletion (in seconds)\n :param max_sleep: maximum sleep time between snapshot creation and\n deletion (in seconds)\n :param kwargs: optional args to create a snapshot\n \"\"\"\n volume = random.choice(self.context[\"tenant\"][\"volumes\"])\n snapshot = self.cinder.create_snapshot(volume[\"id\"], force=force,\n **kwargs)\n self.sleep_between(min_sleep, max_sleep)\n self.cinder.delete_snapshot(snapshot)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_volume_params\")\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\", \"nova\"]},\n name=\"CinderVolumes.create_and_attach_volume\",\n platform=\"openstack\")\nclass 
CreateAndAttachVolume(cinder_utils.CinderBasic,\n nova_utils.NovaScenario):\n\n @logging.log_deprecated_args(\n \"Use 'create_vm_params' for additional instance parameters.\",\n \"0.2.0\", [\"kwargs\"], once=True)\n def run(self, size, image, flavor, create_volume_params=None,\n create_vm_params=None, **kwargs):\n \"\"\"Create a VM and attach a volume to it.\n\n Simple test to create a VM and attach a volume, then\n detach the volume and delete volume/VM.\n\n :param size: volume size (integer, in GB) or\n dictionary, must contain two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n :param image: Glance image name to use for the VM\n :param flavor: VM flavor name\n :param create_volume_params: optional arguments for volume creation\n :param create_vm_params: optional arguments for VM creation\n :param kwargs: (deprecated) optional arguments for VM creation\n \"\"\"\n\n create_volume_params = create_volume_params or {}\n\n if kwargs and create_vm_params:\n raise ValueError(\"You can not set both 'kwargs' \"\n \"and 'create_vm_params' attributes.\"\n \"Please use 'create_vm_params'.\")\n\n create_vm_params = create_vm_params or kwargs or {}\n\n server = self._boot_server(image, flavor, **create_vm_params)\n volume = self.cinder.create_volume(size, **create_volume_params)\n\n self._attach_volume(server, volume)\n self._detach_volume(server, volume)\n\n self.cinder.delete_volume(volume)\n self._delete_server(server)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_vm_params\")\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email 
protected](\"volume_type_exists\", param_name=\"volume_type\", nullable=True)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\", \"nova\"]},\n name=\"CinderVolumes.create_snapshot_and_attach_volume\",\n platform=\"openstack\")\nclass CreateSnapshotAndAttachVolume(cinder_utils.CinderBasic,\n nova_utils.NovaScenario):\n\n def run(self, image, flavor, volume_type=None, size=None,\n create_vm_params=None, **kwargs):\n \"\"\"Create vm, volume, snapshot and attach/detach volume.\n\n :param image: Glance image name to use for the VM\n :param flavor: VM flavor name\n :param volume_type: Name of volume type to use\n :param size: Volume size - dictionary, contains two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n default values: {\"min\": 1, \"max\": 5}\n :param create_vm_params: optional arguments for VM creation\n :param kwargs: Optional parameters used during volume\n snapshot creation.\n \"\"\"\n if size is None:\n size = {\"min\": 1, \"max\": 5}\n\n volume = self.cinder.create_volume(size, volume_type=volume_type)\n snapshot = self.cinder.create_snapshot(volume.id, force=False,\n **kwargs)\n create_vm_params = create_vm_params or {}\n\n server = self._boot_server(image, flavor, **create_vm_params)\n\n self._attach_volume(server, volume)\n self._detach_volume(server, volume)\n\n self.cinder.delete_snapshot(snapshot)\n self.cinder.delete_volume(volume)\n self._delete_server(server)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_volume_kwargs\")\[email 
protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_snapshot_kwargs\")\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_vm_params\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\", \"nova\"]},\n name=\"CinderVolumes.create_nested_snapshots\"\n \"_and_attach_volume\",\n platform=\"openstack\")\nclass CreateNestedSnapshotsAndAttachVolume(cinder_utils.CinderBasic,\n nova_utils.NovaScenario):\n\n def run(self, image, flavor, size=None, nested_level=1,\n create_volume_kwargs=None, create_snapshot_kwargs=None,\n create_vm_params=None):\n \"\"\"Create a volume from snapshot and attach/detach the volume\n\n This scenario create vm, volume, create it's snapshot, attach volume,\n then create new volume from existing snapshot and so on,\n with defined nested level, after all detach and delete them.\n volume->snapshot->volume->snapshot->volume ...\n\n :param image: Glance image name to use for the VM\n :param flavor: VM flavor name\n :param size: Volume size - dictionary, contains two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n default values: {\"min\": 1, \"max\": 5}\n :param nested_level: amount of nested levels\n :param create_volume_kwargs: optional args to create a volume\n :param create_snapshot_kwargs: optional args to create a snapshot\n :param create_vm_params: optional arguments for VM creation\n \"\"\"\n if size is None:\n size = {\"min\": 1, \"max\": 5}\n\n # NOTE: Volume size cannot be smaller than the snapshot size, so\n # volume with specified size should be created to avoid\n # size mismatching between volume and snapshot due random\n # size in _create_volume method.\n size = random.randint(size[\"min\"], size[\"max\"])\n\n create_volume_kwargs = create_volume_kwargs or {}\n create_snapshot_kwargs = 
create_snapshot_kwargs or {}\n create_vm_params = create_vm_params or {}\n\n server = self._boot_server(image, flavor, **create_vm_params)\n\n source_vol = self.cinder.create_volume(size, **create_volume_kwargs)\n snapshot = self.cinder.create_snapshot(source_vol.id, force=False,\n **create_snapshot_kwargs)\n self._attach_volume(server, source_vol)\n\n nes_objs = [(server, source_vol, snapshot)]\n for i in range(nested_level - 1):\n volume = self.cinder.create_volume(size, snapshot_id=snapshot.id)\n snapshot = self.cinder.create_snapshot(volume.id, force=False,\n **create_snapshot_kwargs)\n self._attach_volume(server, volume)\n\n nes_objs.append((server, volume, snapshot))\n\n nes_objs.reverse()\n for server, volume, snapshot in nes_objs:\n self._detach_volume(server, volume)\n self.cinder.delete_snapshot(snapshot)\n self.cinder.delete_volume(volume)\n self._delete_server(server)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"required_contexts\", contexts=(\"volumes\"))\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_and_list_snapshots\",\n platform=\"openstack\")\nclass CreateAndListSnapshots(cinder_utils.CinderBasic,\n nova_utils.NovaScenario):\n\n def run(self, force=False, detailed=True, **kwargs):\n \"\"\"Create and then list a volume-snapshot.\n\n :param force: when set to True, allows snapshot of a volume when\n the volume is attached to an instance\n :param detailed: True if detailed information about snapshots\n should be listed\n :param kwargs: optional args to create a snapshot\n \"\"\"\n volume = random.choice(self.context[\"tenant\"][\"volumes\"])\n self.cinder.create_snapshot(volume[\"id\"], force=force, **kwargs)\n self.cinder.list_snapshots(detailed)\n\n\[email protected](image={\"type\": 
\"glance_image\"})\[email protected](\"required_services\", services=[consts.Service.CINDER,\n consts.Service.GLANCE])\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\", \"glance\"]},\n name=\"CinderVolumes.create_and_upload_volume_to_image\",\n platform=\"openstack\")\nclass CreateAndUploadVolumeToImage(cinder_utils.CinderBasic,\n images.GlanceBasic):\n\n def run(self, size, image=None, force=False, container_format=\"bare\",\n disk_format=\"raw\", do_delete=True, **kwargs):\n \"\"\"Create and upload a volume to image.\n\n :param size: volume size (integers, in GB), or\n dictionary, must contain two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n :param image: image to be used to create volume.\n :param force: when set to True volume that is attached to an instance\n could be uploaded to image\n :param container_format: image container format\n :param disk_format: disk format for image\n :param do_delete: deletes image and volume after uploading if True\n :param kwargs: optional args to create a volume\n \"\"\"\n if image:\n kwargs[\"imageRef\"] = image\n volume = self.cinder.create_volume(size, **kwargs)\n image = self.cinder.upload_volume_to_image(\n volume, force=force, container_format=container_format,\n disk_format=disk_format\n )\n\n if do_delete:\n self.cinder.delete_volume(volume)\n self.glance.delete_image(image.id)\n\n\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_volume_kwargs\")\[email protected](\"restricted_parameters\", param_names=\"name\",\n subdict=\"create_backup_kwargs\")\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_cinder_services\", services=\"cinder-backup\")\[email 
protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_volume_backup\",\n platform=\"openstack\")\nclass CreateVolumeBackup(cinder_utils.CinderBasic):\n\n def run(self, size, do_delete=True, create_volume_kwargs=None,\n create_backup_kwargs=None):\n \"\"\"Create a volume backup.\n\n :param size: volume size in GB\n :param do_delete: if True, a volume and a volume backup will\n be deleted after creation.\n :param create_volume_kwargs: optional args to create a volume\n :param create_backup_kwargs: optional args to create a volume backup\n \"\"\"\n create_volume_kwargs = create_volume_kwargs or {}\n create_backup_kwargs = create_backup_kwargs or {}\n\n volume = self.cinder.create_volume(size, **create_volume_kwargs)\n backup = self.cinder.create_backup(volume.id, **create_backup_kwargs)\n\n if do_delete:\n self.cinder.delete_volume(volume)\n self.cinder.delete_backup(backup)\n\n\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_volume_kwargs\")\[email protected](\"restricted_parameters\", param_names=\"name\",\n subdict=\"create_backup_kwargs\")\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_cinder_services\", services=\"cinder-backup\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_and_restore_volume_backup\",\n platform=\"openstack\")\nclass CreateAndRestoreVolumeBackup(cinder_utils.CinderBasic):\n\n def run(self, size, do_delete=True, create_volume_kwargs=None,\n create_backup_kwargs=None):\n \"\"\"Restore volume backup.\n\n :param size: volume size in GB\n :param do_delete: if True, the volume and the volume backup will\n be deleted after creation.\n :param create_volume_kwargs: optional args to create a volume\n :param 
create_backup_kwargs: optional args to create a volume backup\n \"\"\"\n create_volume_kwargs = create_volume_kwargs or {}\n create_backup_kwargs = create_backup_kwargs or {}\n\n volume = self.cinder.create_volume(size, **create_volume_kwargs)\n backup = self.cinder.create_backup(volume.id, **create_backup_kwargs)\n self.cinder.restore_backup(backup.id)\n\n if do_delete:\n self.cinder.delete_volume(volume)\n self.cinder.delete_backup(backup)\n\n\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_volume_kwargs\")\[email protected](\"restricted_parameters\", param_names=\"name\",\n subdict=\"create_backup_kwargs\")\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_cinder_services\", services=\"cinder-backup\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_and_list_volume_backups\",\n platform=\"openstack\")\nclass CreateAndListVolumeBackups(cinder_utils.CinderBasic):\n\n def run(self, size, detailed=True, do_delete=True,\n create_volume_kwargs=None, create_backup_kwargs=None):\n \"\"\"Create and then list a volume backup.\n\n :param size: volume size in GB\n :param detailed: True if detailed information about backup\n should be listed\n :param do_delete: if True, a volume backup will be deleted\n :param create_volume_kwargs: optional args to create a volume\n :param create_backup_kwargs: optional args to create a volume backup\n \"\"\"\n create_volume_kwargs = create_volume_kwargs or {}\n create_backup_kwargs = create_backup_kwargs or {}\n\n volume = self.cinder.create_volume(size, **create_volume_kwargs)\n backup = self.cinder.create_backup(volume.id, **create_backup_kwargs)\n self.cinder.list_backups(detailed)\n\n if do_delete:\n self.cinder.delete_volume(volume)\n self.cinder.delete_backup(backup)\n\n\[email 
protected](image={\"type\": \"glance_image\"})\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"image_exists\", param_name=\"image\", nullable=True)\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_volume_and_clone\",\n platform=\"openstack\")\nclass CreateVolumeAndClone(cinder_utils.CinderBasic):\n\n def run(self, size, image=None, nested_level=1, **kwargs):\n \"\"\"Create a volume, then clone it to another volume.\n\n This creates a volume, then clone it to anothor volume,\n and then clone the new volume to next volume...\n\n 1. create source volume (from image)\n 2. clone source volume to volume1\n 3. clone volume1 to volume2\n 4. clone volume2 to volume3\n 5. ...\n\n :param size: volume size (integer, in GB) or\n dictionary, must contain two values:\n min - minimum size volumes will be created as;\n max - maximum size volumes will be created as.\n :param image: image to be used to create initial volume\n :param nested_level: amount of nested levels\n :param kwargs: optional args to create volumes\n \"\"\"\n if image:\n kwargs[\"imageRef\"] = image\n\n source_vol = self.cinder.create_volume(size, **kwargs)\n\n kwargs.pop(\"imageRef\", None)\n for i in range(nested_level):\n with atomic.ActionTimer(self, \"cinder.clone_volume\"):\n source_vol = self.cinder.create_volume(\n source_vol.size, source_volid=source_vol.id,\n **kwargs)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_snapshot_kwargs\")\[email protected](\"required_contexts\", contexts=(\"volumes\"))\[email 
protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_volume_from_snapshot\",\n platform=\"openstack\")\nclass CreateVolumeFromSnapshot(cinder_utils.CinderBasic):\n\n def run(self, do_delete=True, create_snapshot_kwargs=None, **kwargs):\n \"\"\"Create a volume-snapshot, then create a volume from this snapshot.\n\n :param do_delete: if True, a snapshot and a volume will\n be deleted after creation.\n :param create_snapshot_kwargs: optional args to create a snapshot\n :param kwargs: optional args to create a volume\n \"\"\"\n create_snapshot_kwargs = create_snapshot_kwargs or {}\n src_volume = random.choice(self.context[\"tenant\"][\"volumes\"])\n\n snapshot = self.cinder.create_snapshot(src_volume[\"id\"],\n **create_snapshot_kwargs)\n volume = self.cinder.create_volume(src_volume[\"size\"],\n snapshot_id=snapshot.id,\n **kwargs)\n\n if do_delete:\n self.cinder.delete_volume(volume)\n self.cinder.delete_snapshot(snapshot)\n\n\[email protected](image={\"type\": \"glance_image\"})\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"image_exists\", param_name=\"image\", nullable=True)\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_volume_\"\n \"and_update_readonly_flag\",\n platform=\"openstack\")\nclass CreateVolumeAndUpdateReadonlyFlag(cinder_utils.CinderBasic):\n\n def run(self, size, image=None, read_only=True, **kwargs):\n \"\"\"Create a volume and then update its readonly flag.\n\n :param size: volume size (integer, in GB)\n :param image: image to be used to create volume\n :param read_only: The value to indicate whether to update volume to\n read-only access mode\n :param kwargs: optional args to create 
a volume\n \"\"\"\n if image:\n kwargs[\"imageRef\"] = image\n volume = self.cinder.create_volume(size, **kwargs)\n self.cinder.update_readonly_flag(volume.id, read_only=read_only)\n\n\[email protected](image={\"type\": \"glance_image\"})\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"])\[email protected](\"image_exists\", param_name=\"image\", nullable=True)\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumes.create_and_accept_transfer\",\n platform=\"openstack\")\nclass CreateAndAcceptTransfer(cinder_utils.CinderBasic):\n\n def run(self, size, image=None, **kwargs):\n \"\"\"Create a volume transfer, then accept it\n\n Measure the \"cinder transfer-create\" and \"cinder transfer-accept\"\n command performace.\n :param size: volume size (integer, in GB)\n :param image: image to be used to create initial volume\n :param kwargs: optional args to create a volume\n \"\"\"\n if image:\n kwargs[\"imageRef\"] = image\n volume = self.cinder.create_volume(size, **kwargs)\n transfer = self.cinder.transfer_create(volume.id)\n self.cinder.transfer_accept(transfer.id, auth_key=transfer.auth_key)\n" }, { "alpha_fraction": 0.5599755048751831, "alphanum_fraction": 0.5618115067481995, "avg_line_length": 37.904762268066406, "blob_id": "e638409cdd8d0c28fb562ee5244eb1fa8bcd237a", "content_id": "4e93b9b95f06381ca76ba68aaa2042e580b2786b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4902, "license_type": "permissive", "max_line_length": 77, "num_lines": 126, "path": "/rally_openstack/task/contexts/sahara/sahara_image.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); 
you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.common.services.image import image as image_services\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.sahara import utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"sahara_image\", platform=\"openstack\", order=440)\nclass SaharaImage(context.OpenStackContext):\n \"\"\"Context class for adding and tagging Sahara images.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"image_uuid\": {\n \"type\": \"string\"\n },\n \"image_url\": {\n \"type\": \"string\",\n },\n \"username\": {\n \"type\": \"string\"\n },\n \"plugin_name\": {\n \"type\": \"string\",\n },\n \"hadoop_version\": {\n \"type\": \"string\",\n }\n },\n \"oneOf\": [\n {\"description\": \"Create an image.\",\n \"required\": [\"image_url\", \"username\", \"plugin_name\",\n \"hadoop_version\"]},\n {\"description\": \"Use an existing image.\",\n \"required\": [\"image_uuid\"]}\n ],\n \"additionalProperties\": False\n }\n\n def _create_image(self, hadoop_version, image_url, plugin_name, user,\n user_name):\n clients = osclients.Clients(user[\"credential\"])\n image_service = image_services.Image(\n clients, 
name_generator=self.generate_random_name)\n image = image_service.create_image(container_format=\"bare\",\n image_location=image_url,\n disk_format=\"qcow2\")\n clients.sahara().images.update_image(\n image_id=image.id, user_name=user_name, desc=\"\")\n clients.sahara().images.update_tags(\n image_id=image.id, new_tags=[plugin_name, hadoop_version])\n return image.id\n\n def setup(self):\n utils.init_sahara_context(self)\n self.context[\"sahara\"][\"images\"] = {}\n\n # The user may want to use the existing image. In this case he should\n # make sure that the image is public and has all required metadata.\n image_uuid = self.config.get(\"image_uuid\")\n\n self.context[\"sahara\"][\"need_image_cleanup\"] = not image_uuid\n\n if image_uuid:\n # Using the first user to check the existing image.\n user = self.context[\"users\"][0]\n clients = osclients.Clients(user[\"credential\"])\n\n image = clients.glance().images.get(image_uuid)\n\n visibility = None\n if hasattr(image, \"is_public\"):\n visibility = \"public\" if image.is_public else \"private\"\n else:\n visibility = image[\"visibility\"]\n\n if visibility != \"public\":\n raise exceptions.ContextSetupFailure(\n ctx_name=self.get_name(),\n msg=\"Use only public image for sahara_image context\"\n )\n image_id = image_uuid\n\n for user, tenant_id in self._iterate_per_tenants():\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"image\"] = (\n image_id)\n else:\n for user, tenant_id in self._iterate_per_tenants():\n\n image_id = self._create_image(\n hadoop_version=self.config[\"hadoop_version\"],\n image_url=self.config[\"image_url\"],\n plugin_name=self.config[\"plugin_name\"],\n user=user,\n user_name=self.config[\"username\"])\n\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"image\"] = (\n image_id)\n\n def cleanup(self):\n if self.context[\"sahara\"][\"need_image_cleanup\"]:\n resource_manager.cleanup(names=[\"glance.images\"],\n users=self.context.get(\"users\", []),\n superclass=self.__class__,\n 
task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.5889328122138977, "alphanum_fraction": 0.5941015481948853, "avg_line_length": 36.804595947265625, "blob_id": "ea1b1a74722cfe29532f772df3b85bcdc8f7aa38", "content_id": "d028e8f760693a12bcced1fb444832f835f2774b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3289, "license_type": "permissive", "max_line_length": 79, "num_lines": 87, "path": "/tests/unit/doc/test_docker_readme.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom docutils import frontend\nfrom docutils import nodes\nfrom docutils.parsers import rst\nfrom docutils import utils\nimport os\nimport re\nimport sys\nfrom unittest import mock\n\nimport rally_openstack\nfrom tests.unit import test\n\n\nROOT_DIR = os.path.dirname(os.path.dirname(rally_openstack.__file__))\n\n\nclass DockerReadmeTestCase(test.TestCase):\n RE_RELEASE = re.compile(r\"\\[(?P<version>[0-9]+\\.[0-9]+.[0-9]+)\\]\")\n\n def get_releases(self):\n full_path = os.path.join(ROOT_DIR, \"CHANGELOG.rst\")\n with open(full_path) as f:\n changelog = f.read()\n with mock.patch.object(sys, \"stderr\"):\n parser = rst.Parser()\n settings = frontend.OptionParser(\n components=(rst.Parser,)).get_default_values()\n document = utils.new_document(changelog, settings)\n parser.parse(changelog, document)\n changelog = document.children\n if len(changelog) != 1:\n self.fail(\"'%s' file should contain one global section \"\n \"with subsections for each release.\" % full_path)\n\n releases = []\n for node in changelog[0].children:\n if not isinstance(node, nodes.section):\n continue\n title = node.astext().split(\"\\n\", 1)[0]\n result = self.RE_RELEASE.match(title)\n if result:\n releases.append(result.groupdict()[\"version\"])\n if not releases:\n self.fail(\"'%s' doesn't mention any releases...\" % full_path)\n return releases\n\n def test_mentioned_latest_version(self):\n full_path = os.path.join(ROOT_DIR, \"DOCKER_README.md\")\n with open(full_path) as f:\n readme = f.read()\n\n releases = self.get_releases()\n latest_release = releases[0]\n previous_release = releases[1]\n print(\"All discovered releases: %s\" % \", \".join(releases))\n\n found = False\n for i, line in enumerate(readme.split(\"\\n\"), 1):\n if latest_release in line:\n found = True\n elif previous_release in line:\n self.fail(\n \"You need to change %s to %s in all places where the \"\n \"latest 
release is mentioned.\"\n \"\\n Filename: %s\"\n \"\\n Line Number: %s\"\n \"\\n Line: %s\" %\n (previous_release, latest_release, full_path, i, line))\n\n if not found:\n self.fail(\"No latest nor previous release is found at README file \"\n \"for our Docker image. It looks like the format of it \"\n \"had changed. Please adopt the current test suite.\")\n" }, { "alpha_fraction": 0.597455620765686, "alphanum_fraction": 0.6039115190505981, "avg_line_length": 36.752689361572266, "blob_id": "751d8d8890b504c6e1c53ba18a1594b9fa667302", "content_id": "49b1ffc25bf4caba513b4fc02aadef75524dce77", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10533, "license_type": "permissive", "max_line_length": 78, "num_lines": 279, "path": "/rally_openstack/task/scenarios/magnum/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport random\nimport string\nimport time\n\nfrom kubernetes import client as k8s_config\nfrom kubernetes.client.api import core_v1_api\nfrom kubernetes.client import api_client\nfrom kubernetes.client.rest import ApiException\n\nfrom rally.common import cfg\nfrom rally.common import utils as common_utils\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import utils\n\nfrom rally_openstack.task import scenario\n\n\nCONF = cfg.CONF\n\n\nclass MagnumScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Magnum scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"magnum.list_cluster_templates\")\n def _list_cluster_templates(self, **kwargs):\n \"\"\"Return list of cluster_templates.\n\n :param limit: (Optional) The maximum number of results to return\n per request, if:\n\n 1) limit > 0, the maximum number of cluster_templates to return.\n 2) limit param is NOT specified (None), the number of items\n returned respect the maximum imposed by the Magnum API\n (see Magnum's api.max_limit option).\n :param kwargs: Optional additional arguments for cluster_templates\n listing\n\n :returns: cluster_templates list\n \"\"\"\n\n return self.clients(\"magnum\").cluster_templates.list(**kwargs)\n\n @atomic.action_timer(\"magnum.create_cluster_template\")\n def _create_cluster_template(self, **kwargs):\n \"\"\"Create a cluster_template\n\n :param kwargs: optional additional arguments for cluster_template\n creation\n :returns: magnum cluster_template\n \"\"\"\n\n kwargs[\"name\"] = self.generate_random_name()\n\n return self.clients(\"magnum\").cluster_templates.create(**kwargs)\n\n @atomic.action_timer(\"magnum.get_cluster_template\")\n def _get_cluster_template(self, cluster_template):\n \"\"\"Return details of the specify cluster template.\n\n :param cluster_template: ID or name of the cluster template to 
show\n :returns: clustertemplate detail\n \"\"\"\n return self.clients(\"magnum\").cluster_templates.get(cluster_template)\n\n @atomic.action_timer(\"magnum.list_clusters\")\n def _list_clusters(self, limit=None, **kwargs):\n \"\"\"Return list of clusters.\n\n :param limit: Optional, the maximum number of results to return\n per request, if:\n\n 1) limit > 0, the maximum number of clusters to return.\n 2) limit param is NOT specified (None), the number of items\n returned respect the maximum imposed by the Magnum API\n (see Magnum's api.max_limit option).\n :param kwargs: Optional additional arguments for clusters listing\n\n :returns: clusters list\n \"\"\"\n return self.clients(\"magnum\").clusters.list(limit=limit, **kwargs)\n\n @atomic.action_timer(\"magnum.create_cluster\")\n def _create_cluster(self, cluster_template, node_count, **kwargs):\n \"\"\"Create a cluster\n\n :param cluster_template: cluster_template for the cluster\n :param node_count: the cluster node count\n :param kwargs: optional additional arguments for cluster creation\n :returns: magnum cluster\n \"\"\"\n\n name = self.generate_random_name()\n cluster = self.clients(\"magnum\").clusters.create(\n name=name, cluster_template_id=cluster_template,\n node_count=node_count, **kwargs)\n\n common_utils.interruptable_sleep(\n CONF.openstack.magnum_cluster_create_prepoll_delay)\n cluster = utils.wait_for_status(\n cluster,\n ready_statuses=[\"CREATE_COMPLETE\"],\n failure_statuses=[\"CREATE_FAILED\", \"ERROR\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.magnum_cluster_create_timeout,\n check_interval=CONF.openstack.magnum_cluster_create_poll_interval,\n id_attr=\"uuid\"\n )\n return cluster\n\n @atomic.action_timer(\"magnum.get_cluster\")\n def _get_cluster(self, cluster):\n \"\"\"Return details of the specify cluster.\n\n :param cluster: ID or name of the cluster to show\n :returns: cluster detail\n \"\"\"\n return self.clients(\"magnum\").clusters.get(cluster)\n\n 
@atomic.action_timer(\"magnum.get_ca_certificate\")\n def _get_ca_certificate(self, cluster_uuid):\n \"\"\"Get CA certificate for this cluster\n\n :param cluster_uuid: uuid of the cluster\n \"\"\"\n return self.clients(\"magnum\").certificates.get(cluster_uuid)\n\n @atomic.action_timer(\"magnum.create_ca_certificate\")\n def _create_ca_certificate(self, csr_req):\n \"\"\"Send csr to Magnum to have it signed\n\n :param csr_req: {\"cluster_uuid\": <uuid>, \"csr\": <csr file content>}\n \"\"\"\n return self.clients(\"magnum\").certificates.create(**csr_req)\n\n def _get_k8s_api_client(self):\n cluster_uuid = self.context[\"tenant\"][\"cluster\"]\n cluster = self._get_cluster(cluster_uuid)\n cluster_template = self._get_cluster_template(\n cluster.cluster_template_id)\n key_file = None\n cert_file = None\n ca_certs = None\n if not cluster_template.tls_disabled:\n dir = self.context[\"ca_certs_directory\"]\n key_file = cluster_uuid + \".key\"\n key_file = os.path.join(dir, key_file)\n cert_file = cluster_uuid + \".crt\"\n cert_file = os.path.join(dir, cert_file)\n ca_certs = cluster_uuid + \"_ca.crt\"\n ca_certs = os.path.join(dir, ca_certs)\n if hasattr(k8s_config, \"ConfigurationObject\"):\n # k8sclient < 4.0.0\n config = k8s_config.ConfigurationObject()\n else:\n config = k8s_config.Configuration()\n config.host = cluster.api_address\n config.ssl_ca_cert = ca_certs\n config.cert_file = cert_file\n config.key_file = key_file\n if hasattr(k8s_config, \"ConfigurationObject\"):\n # k8sclient < 4.0.0\n client = api_client.ApiClient(config=config)\n else:\n client = api_client.ApiClient(config)\n\n return core_v1_api.CoreV1Api(client)\n\n @atomic.action_timer(\"magnum.k8s_list_v1pods\")\n def _list_v1pods(self):\n \"\"\"List all pods.\n\n \"\"\"\n k8s_api = self._get_k8s_api_client()\n return k8s_api.list_node(namespace=\"default\")\n\n @atomic.action_timer(\"magnum.k8s_create_v1pod\")\n def _create_v1pod(self, manifest):\n \"\"\"Create a pod on the specify cluster.\n\n 
:param manifest: manifest use to create the pod\n \"\"\"\n k8s_api = self._get_k8s_api_client()\n podname = manifest[\"metadata\"][\"name\"] + \"-\"\n for i in range(5):\n podname = podname + random.choice(string.ascii_lowercase)\n manifest[\"metadata\"][\"name\"] = podname\n\n for i in range(150):\n try:\n k8s_api.create_namespaced_pod(body=manifest,\n namespace=\"default\")\n break\n except ApiException as e:\n if e.status != 403:\n raise\n time.sleep(2)\n\n start = time.time()\n while True:\n resp = k8s_api.read_namespaced_pod(\n name=podname, namespace=\"default\")\n\n if resp.status.conditions:\n for condition in resp.status.conditions:\n if condition.type.lower() == \"ready\" and \\\n condition.status.lower() == \"true\":\n return resp\n\n if (time.time() - start > CONF.openstack.k8s_pod_create_timeout):\n raise exceptions.TimeoutException(\n desired_status=\"Ready\",\n resource_name=podname,\n resource_type=\"Pod\",\n resource_id=resp.metadata.uid,\n resource_status=resp.status,\n timeout=CONF.openstack.k8s_pod_create_timeout)\n common_utils.interruptable_sleep(\n CONF.openstack.k8s_pod_create_poll_interval)\n\n @atomic.action_timer(\"magnum.k8s_list_v1rcs\")\n def _list_v1rcs(self):\n \"\"\"List all rcs.\n\n \"\"\"\n k8s_api = self._get_k8s_api_client()\n return k8s_api.list_namespaced_replication_controller(\n namespace=\"default\")\n\n @atomic.action_timer(\"magnum.k8s_create_v1rc\")\n def _create_v1rc(self, manifest):\n \"\"\"Create rc on the specify cluster.\n\n :param manifest: manifest use to create the replication controller\n \"\"\"\n k8s_api = self._get_k8s_api_client()\n suffix = \"-\"\n for i in range(5):\n suffix = suffix + random.choice(string.ascii_lowercase)\n rcname = manifest[\"metadata\"][\"name\"] + suffix\n manifest[\"metadata\"][\"name\"] = rcname\n resp = k8s_api.create_namespaced_replication_controller(\n body=manifest,\n namespace=\"default\")\n expectd_status = resp.spec.replicas\n start = time.time()\n while True:\n resp = 
k8s_api.read_namespaced_replication_controller(\n name=rcname,\n namespace=\"default\")\n status = resp.status.replicas\n if status == expectd_status:\n return resp\n else:\n if time.time() - start > CONF.openstack.k8s_rc_create_timeout:\n raise exceptions.TimeoutException(\n desired_status=expectd_status,\n resource_name=rcname,\n resource_type=\"ReplicationController\",\n resource_id=resp.metadata.uid,\n resource_status=status,\n timeout=CONF.openstack.k8s_rc_create_timeout)\n common_utils.interruptable_sleep(\n CONF.openstack.k8s_rc_create_poll_interval)\n" }, { "alpha_fraction": 0.7206266522407532, "alphanum_fraction": 0.7241079211235046, "avg_line_length": 30.91666603088379, "blob_id": "fd8399b769b7cd205deedfd93c5cc411b7e64692", "content_id": "6d98c6b9a6f293b131054c5a9d498495acffcb37", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1149, "license_type": "permissive", "max_line_length": 75, "num_lines": 36, "path": "/tests/unit/task/scenarios/monasca/test_metrics.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.scenarios.monasca import metrics\nfrom tests.unit import test\n\n\[email protected]\nclass MonascaMetricsTestCase(test.ScenarioTestCase):\n\n @ddt.data(\n {\"region\": None},\n {\"region\": \"fake_region\"},\n )\n @ddt.unpack\n def test_list_metrics(self, region=None):\n scenario = metrics.ListMetrics(self.context)\n self.region = region\n scenario._list_metrics = mock.MagicMock()\n scenario.run(region=self.region)\n scenario._list_metrics.assert_called_once_with(region=self.region)\n" }, { "alpha_fraction": 0.6955113410949707, "alphanum_fraction": 0.6992133259773254, "avg_line_length": 39.77358627319336, "blob_id": "6aa46683da5650f7b38050cb44fba2292d262952", "content_id": "c21e3658953d356aeca36258db45d3a7de1e06ee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2161, "license_type": "permissive", "max_line_length": 78, "num_lines": 53, "path": "/rally_openstack/task/scenarios/gnocchi/resource.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils\n\n\"\"\"Scenarios for Gnocchi resource.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"gnocchi.resource\"]},\n name=\"GnocchiResource.create_resource\")\nclass CreateResource(gnocchiutils.GnocchiBase):\n\n def run(self, resource_type=\"generic\"):\n \"\"\"Create resource.\n\n :param resource_type: Type of the resource\n \"\"\"\n name = self.generate_random_name()\n self.gnocchi.create_resource(name, resource_type=resource_type)\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"gnocchi.resource\"]},\n name=\"GnocchiResource.create_delete_resource\")\nclass CreateDeleteResource(gnocchiutils.GnocchiBase):\n\n def run(self, resource_type=\"generic\"):\n \"\"\"Create resource and then delete it.\n\n :param resource_type: Type of the resource\n \"\"\"\n name = self.generate_random_name()\n resource = self.gnocchi.create_resource(name,\n resource_type=resource_type)\n self.gnocchi.delete_resource(resource[\"id\"])\n" }, { "alpha_fraction": 0.58415287733078, "alphanum_fraction": 0.5906153321266174, "avg_line_length": 36.463157653808594, "blob_id": "0883b09c17797603d26cf862b28f483b7ab1dec0", "content_id": "74f5bdefb018dad87e02bb93bf80f8a9e41bf689", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3559, "license_type": "permissive", "max_line_length": 78, "num_lines": 95, "path": 
"/tests/unit/task/contexts/vm/test_image_command_customizer.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Tests for the image customizer using a command execution.\"\"\"\n\nfrom unittest import mock\n\nfrom rally import exceptions\n\nfrom rally_openstack.task.contexts.vm import image_command_customizer\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.contexts.vm.image_command_customizer\"\n\n\nclass ImageCommandCustomizerContextVMTestCase(test.TestCase):\n\n def setUp(self):\n super(ImageCommandCustomizerContextVMTestCase, self).setUp()\n\n self.context = {\n \"task\": mock.MagicMock(),\n \"config\": {\n \"image_command_customizer\": {\n \"image\": {\"name\": \"image\"},\n \"flavor\": {\"name\": \"flavor\"},\n \"username\": \"fedora\",\n \"password\": \"foo_password\",\n \"floating_network\": \"floating\",\n \"port\": 1022,\n \"command\": {\n \"interpreter\": \"foo_interpreter\",\n \"script_file\": \"foo_script\"\n }\n }\n },\n \"admin\": {\n \"credential\": \"credential\",\n }\n }\n\n self.user = {\"keypair\": {\"private\": \"foo_private\"}}\n self.fip = {\"ip\": \"foo_ip\"}\n\n @mock.patch(\"%s.vm_utils.VMScenario\" % BASE)\n def test_customize_image(self, mock_vm_scenario):\n mock_vm_scenario.return_value._run_command.return_value = (\n 0, \"foo_stdout\", \"foo_stderr\")\n\n customizer = 
image_command_customizer.ImageCommandCustomizerContext(\n self.context)\n\n retval = customizer.customize_image(server=None, ip=self.fip,\n user=self.user)\n\n mock_vm_scenario.assert_called_once_with(customizer.context)\n mock_vm_scenario.return_value._run_command.assert_called_once_with(\n \"foo_ip\", 1022, \"fedora\", \"foo_password\", pkey=\"foo_private\",\n command={\"interpreter\": \"foo_interpreter\",\n \"script_file\": \"foo_script\"})\n\n self.assertEqual((0, \"foo_stdout\", \"foo_stderr\"), retval)\n\n @mock.patch(\"%s.vm_utils.VMScenario\" % BASE)\n def test_customize_image_fail(self, mock_vm_scenario):\n mock_vm_scenario.return_value._run_command.return_value = (\n 1, \"foo_stdout\", \"foo_stderr\")\n\n customizer = image_command_customizer.ImageCommandCustomizerContext(\n self.context)\n\n exc = self.assertRaises(\n exceptions.ScriptError, customizer.customize_image,\n server=None, ip=self.fip, user=self.user)\n\n str_exc = str(exc)\n self.assertIn(\"foo_stdout\", str_exc)\n self.assertIn(\"foo_stderr\", str_exc)\n\n mock_vm_scenario.return_value._run_command.assert_called_once_with(\n \"foo_ip\", 1022, \"fedora\", \"foo_password\", pkey=\"foo_private\",\n command={\"interpreter\": \"foo_interpreter\",\n \"script_file\": \"foo_script\"})\n" }, { "alpha_fraction": 0.4591040015220642, "alphanum_fraction": 0.46526920795440674, "avg_line_length": 28.670732498168945, "blob_id": "004497553c087d63b8f6f1f3f627adf74a1efafe", "content_id": "ebb221b8b08c81fdf75b1244f0e468e07b201180", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2433, "license_type": "permissive", "max_line_length": 78, "num_lines": 82, "path": "/rally_openstack/task/contexts/quotas/neutron_quotas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with 
the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nclass NeutronQuotas(object):\n \"\"\"Management of Neutron quotas.\"\"\"\n\n QUOTAS_SCHEMA = {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\n \"network\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"subnet\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"port\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"router\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"floatingip\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"security_group\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"security_group_rule\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"pool\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"vip\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"health_monitor\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"trunk\": {\n \"type\": \"integer\",\n \"minimum\": -1\n }\n }\n }\n\n def __init__(self, clients):\n self.clients = clients\n\n def update(self, tenant_id, **kwargs):\n body = {\"quota\": kwargs}\n self.clients.neutron().update_quota(tenant_id, body=body)\n\n def delete(self, tenant_id):\n # Reset quotas to defaults and tag database objects as deleted\n self.clients.neutron().delete_quota(tenant_id)\n\n def get(self, tenant_id):\n return self.clients.neutron().show_quota(tenant_id)[\"quota\"]\n" }, { "alpha_fraction": 0.5082412362098694, "alphanum_fraction": 0.5125683546066284, "avg_line_length": 44.3946418762207, "blob_id": "82a94ec8bea31f2881864733c237efa04b68e7d5", "content_id": 
"10c2ccc6dee03b84bd086d1b06bf02661415f19b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25421, "license_type": "permissive", "max_line_length": 79, "num_lines": 560, "path": "/tests/unit/task/scenarios/vm/test_vmtasks.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Rackspace UK\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nfrom unittest import mock\n\nimport ddt\n\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.task.scenarios.vm import vmtasks\nfrom tests.unit import test\n\n\nBASE = \"rally_openstack.task.scenarios.vm.vmtasks\"\n\n\[email protected]\nclass VMTasksTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(VMTasksTestCase, self).setUp()\n self.context.update({\"user\": {\"keypair\": {\"name\": \"keypair_name\"},\n \"credential\": mock.MagicMock()}})\n\n cinder_patcher = mock.patch(\n \"rally_openstack.common.services.storage.block.BlockStorage\")\n self.cinder = cinder_patcher.start().return_value\n self.cinder.create_volume.return_value = mock.Mock(id=\"foo_volume\")\n self.addCleanup(cinder_patcher.stop)\n\n def create_env(self, scenario):\n self.ip = {\"id\": \"foo_id\", \"ip\": \"foo_ip\", \"is_floating\": True}\n scenario._boot_server_with_fip = mock.Mock(\n return_value=(\"foo_server\", self.ip))\n scenario._wait_for_ping = 
mock.Mock()\n scenario._delete_server_with_fip = mock.Mock()\n scenario._run_command = mock.MagicMock(\n return_value=(0, \"{\\\"foo\\\": 42}\", \"foo_err\"))\n scenario.add_output = mock.Mock()\n return scenario\n\n def test_boot_runcommand_delete(self):\n scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))\n scenario._run_command = mock.MagicMock(\n return_value=(0, \"{\\\"foo\\\": 42}\", \"foo_err\"))\n scenario.run(\"foo_flavor\", image=\"foo_image\",\n command={\"script_file\": \"foo_script\",\n \"interpreter\": \"foo_interpreter\"},\n username=\"foo_username\",\n password=\"foo_password\",\n use_floating_ip=\"use_fip\",\n floating_network=\"ext_network\",\n force_delete=\"foo_force\",\n volume_args={\"size\": 16},\n foo_arg=\"foo_value\")\n\n self.cinder.create_volume.assert_called_once_with(16, imageRef=None)\n scenario._boot_server_with_fip.assert_called_once_with(\n \"foo_image\", \"foo_flavor\", key_name=\"keypair_name\",\n use_floating_ip=\"use_fip\", floating_network=\"ext_network\",\n block_device_mapping={\"vdrally\": \"foo_volume:::1\"},\n foo_arg=\"foo_value\")\n\n scenario._wait_for_ping.assert_called_once_with(\"foo_ip\")\n scenario._run_command.assert_called_once_with(\n \"foo_ip\", 22, \"foo_username\", \"foo_password\",\n command={\"script_file\": \"foo_script\",\n \"interpreter\": \"foo_interpreter\"})\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", self.ip, force_delete=\"foo_force\")\n scenario.add_output.assert_called_once_with(\n complete={\"chart_plugin\": \"TextArea\",\n \"data\": [\n \"StdErr: foo_err\",\n \"StdOut:\",\n \"{\\\"foo\\\": 42}\"],\n \"title\": \"Script Output\"})\n\n @ddt.data(\n {\"output\": (0, \"\", \"\"),\n \"expected\": [{\"complete\": {\"chart_plugin\": \"TextArea\",\n \"data\": [\n \"StdErr: (none)\",\n \"StdOut:\",\n \"\"],\n \"title\": \"Script Output\"}}]},\n {\"output\": (1, \"{\\\"foo\\\": 42}\", \"\"), \"raises\": exceptions.ScriptError},\n {\"output\": (\"\", 
1, \"\"), \"raises\": TypeError},\n {\"output\": (0, \"{\\\"foo\\\": 42}\", \"\"),\n \"expected\": [{\"complete\": {\"chart_plugin\": \"TextArea\",\n \"data\": [\n \"StdErr: (none)\",\n \"StdOut:\",\n \"{\\\"foo\\\": 42}\"],\n \"title\": \"Script Output\"}}]},\n {\"output\": (0, \"{\\\"additive\\\": [1, 2]}\", \"\"),\n \"expected\": [{\"complete\": {\"chart_plugin\": \"TextArea\",\n \"data\": [\n \"StdErr: (none)\",\n \"StdOut:\", \"{\\\"additive\\\": [1, 2]}\"],\n \"title\": \"Script Output\"}}]},\n {\"output\": (0, \"{\\\"complete\\\": [3, 4]}\", \"\"),\n \"expected\": [{\"complete\": {\"chart_plugin\": \"TextArea\",\n \"data\": [\n \"StdErr: (none)\",\n \"StdOut:\",\n \"{\\\"complete\\\": [3, 4]}\"],\n \"title\": \"Script Output\"}}]},\n {\"output\": (0, \"{\\\"additive\\\": [1, 2], \\\"complete\\\": [3, 4]}\", \"\"),\n \"expected\": [{\"additive\": 1}, {\"additive\": 2},\n {\"complete\": 3}, {\"complete\": 4}]}\n )\n @ddt.unpack\n def test_boot_runcommand_delete_add_output(self, output,\n expected=None, raises=None):\n scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))\n\n scenario._run_command.return_value = output\n kwargs = {\"flavor\": \"foo_flavor\",\n \"image\": \"foo_image\",\n \"command\": {\"remote_path\": \"foo\"},\n \"username\": \"foo_username\",\n \"password\": \"foo_password\",\n \"use_floating_ip\": \"use_fip\",\n \"floating_network\": \"ext_network\",\n \"force_delete\": \"foo_force\",\n \"volume_args\": {\"size\": 16},\n \"foo_arg\": \"foo_value\"}\n if raises:\n self.assertRaises(raises, scenario.run, **kwargs)\n self.assertFalse(scenario.add_output.called)\n else:\n scenario.run(**kwargs)\n calls = [mock.call(**kw) for kw in expected]\n scenario.add_output.assert_has_calls(calls, any_order=True)\n\n self.cinder.create_volume.assert_called_once_with(\n 16, imageRef=None)\n scenario._boot_server_with_fip.assert_called_once_with(\n \"foo_image\", \"foo_flavor\", key_name=\"keypair_name\",\n use_floating_ip=\"use_fip\", 
floating_network=\"ext_network\",\n block_device_mapping={\"vdrally\": \"foo_volume:::1\"},\n foo_arg=\"foo_value\")\n\n scenario._run_command.assert_called_once_with(\n \"foo_ip\", 22, \"foo_username\", \"foo_password\",\n command={\"remote_path\": \"foo\"})\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", self.ip, force_delete=\"foo_force\")\n\n def test_boot_runcommand_delete_command_timeouts(self):\n scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))\n\n scenario._run_command.side_effect = exceptions.SSHTimeout()\n self.assertRaises(exceptions.SSHTimeout,\n scenario.run,\n \"foo_flavor\", \"foo_image\", \"foo_interpreter\",\n \"foo_script\", \"foo_username\")\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", self.ip, force_delete=False)\n self.assertFalse(scenario.add_output.called)\n\n def test_boot_runcommand_delete_ping_wait_timeouts(self):\n scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))\n\n scenario._wait_for_ping.side_effect = exceptions.TimeoutException(\n resource_type=\"foo_resource\",\n resource_name=\"foo_name\",\n resource_id=\"foo_id\",\n desired_status=\"foo_desired_status\",\n resource_status=\"foo_resource_status\",\n timeout=2)\n exc = self.assertRaises(exceptions.TimeoutException,\n scenario.run,\n \"foo_image\", \"foo_flavor\", \"foo_interpreter\",\n \"foo_script\", \"foo_username\",\n wait_for_ping=True)\n self.assertEqual(exc.kwargs[\"resource_type\"], \"foo_resource\")\n self.assertEqual(exc.kwargs[\"resource_name\"], \"foo_name\")\n self.assertEqual(exc.kwargs[\"resource_id\"], \"foo_id\")\n self.assertEqual(exc.kwargs[\"desired_status\"], \"foo_desired_status\")\n self.assertEqual(exc.kwargs[\"resource_status\"], \"foo_resource_status\")\n\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", self.ip, force_delete=False)\n self.assertFalse(scenario.add_output.called)\n\n @mock.patch(\"%s.json\" % BASE)\n def 
test_boot_runcommand_delete_json_fails(self, mock_json):\n scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))\n\n mock_json.loads.side_effect = ValueError()\n scenario.run(\"foo_image\", \"foo_flavor\", \"foo_interpreter\",\n \"foo_script\", \"foo_username\")\n scenario.add_output.assert_called_once_with(complete={\n \"chart_plugin\": \"TextArea\", \"data\": [\"StdErr: foo_err\",\n \"StdOut:\", \"{\\\"foo\\\": 42}\"],\n \"title\": \"Script Output\"})\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", self.ip, force_delete=False)\n\n def test_boot_runcommand_delete_custom_image(self):\n context = {\n \"user\": {\n \"tenant_id\": \"tenant_id\",\n \"keypair\": {\"name\": \"foo_keypair_name\"},\n \"credential\": mock.Mock()\n },\n \"tenant\": {\n \"custom_image\": {\"id\": \"image_id\"}\n }\n }\n\n scenario = self.create_env(vmtasks.BootRuncommandDelete(context))\n scenario._run_command = mock.MagicMock(\n return_value=(0, \"{\\\"foo\\\": 42}\", \"foo_err\"))\n scenario.run(\"foo_flavor\",\n command={\"script_file\": \"foo_script\",\n \"interpreter\": \"foo_interpreter\"},\n username=\"foo_username\",\n password=\"foo_password\",\n use_floating_ip=\"use_fip\",\n floating_network=\"ext_network\",\n force_delete=\"foo_force\",\n volume_args={\"size\": 16},\n foo_arg=\"foo_value\")\n\n self.cinder.create_volume.assert_called_once_with(16, imageRef=None)\n scenario._boot_server_with_fip.assert_called_once_with(\n \"image_id\", \"foo_flavor\", key_name=\"foo_keypair_name\",\n use_floating_ip=\"use_fip\", floating_network=\"ext_network\",\n block_device_mapping={\"vdrally\": \"foo_volume:::1\"},\n foo_arg=\"foo_value\")\n\n scenario._wait_for_ping.assert_called_once_with(\"foo_ip\")\n scenario._run_command.assert_called_once_with(\n \"foo_ip\", 22, \"foo_username\", \"foo_password\",\n command={\"script_file\": \"foo_script\",\n \"interpreter\": \"foo_interpreter\"})\n scenario._delete_server_with_fip.assert_called_once_with(\n 
\"foo_server\", self.ip, force_delete=\"foo_force\")\n scenario.add_output.assert_called_once_with(\n complete={\"chart_plugin\": \"TextArea\",\n \"data\": [\n \"StdErr: foo_err\",\n \"StdOut:\", \"{\\\"foo\\\": 42}\"],\n \"title\": \"Script Output\"})\n\n @mock.patch(\"%s.heat\" % BASE)\n @mock.patch(\"%s.sshutils\" % BASE)\n def test_runcommand_heat(self, mock_sshutils, mock_heat):\n fake_ssh = mock.Mock()\n fake_ssh.execute.return_value = [0, \"key:val\", \"\"]\n mock_sshutils.SSH.return_value = fake_ssh\n fake_stack = mock.Mock()\n fake_stack.stack.outputs = [{\"output_key\": \"gate_node\",\n \"output_value\": \"ok\"}]\n mock_heat.main.Stack.return_value = fake_stack\n context = {\n \"user\": {\"keypair\": {\"name\": \"name\", \"private\": \"pk\"},\n \"credential\": mock.MagicMock()},\n \"tenant\": {\"networks\": [{\"router_id\": \"1\"}]}\n }\n scenario = vmtasks.RuncommandHeat(context)\n scenario.generate_random_name = mock.Mock(return_value=\"name\")\n scenario.add_output = mock.Mock()\n workload = {\"username\": \"admin\",\n \"resource\": [\"foo\", \"bar\"]}\n scenario.run(workload, \"template\",\n {\"file_key\": \"file_value\"},\n {\"param_key\": \"param_value\"})\n expected = {\"chart_plugin\": \"Table\",\n \"data\": {\"rows\": [[\"key\", \"val\"]],\n \"cols\": [\"key\", \"value\"]},\n \"description\": \"Data generated by workload\",\n \"title\": \"Workload summary\"}\n scenario.add_output.assert_called_once_with(complete=expected)\n\n def create_env_for_designate(self, zone_config=None):\n scenario = vmtasks.CheckDesignateDNSResolving(self.context)\n self.ip = {\"id\": \"foo_id\", \"ip\": \"foo_ip\", \"is_floating\": True}\n scenario._boot_server_with_fip = mock.Mock(\n return_value=(\"foo_server\", self.ip))\n scenario._delete_server_with_fip = mock.Mock()\n scenario._run_command = mock.MagicMock(\n return_value=(0, \"ANSWER SECTION\", \"foo_err\"))\n scenario.add_output = mock.Mock()\n if zone_config is None:\n zone_config = {\n 
\"test_existing_designate_from_VM\": {\n \"bind_ip\": \"192.168.1.123\"\n }\n }\n self.context.update(\n {\n \"config\": {\n \"zones@openstack\": zone_config\n },\n \"user\": {\n \"keypair\": {\"name\": \"keypair_name\"},\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\n \"id\": \"0\",\n \"name\": \"tenant1\",\n \"zones\": [\n {\"name\": \"zone1.com.\"}\n ],\n \"networks\": [\n {\n \"name\": \"net1\",\n \"subnets\": [\n {\n \"name\": \"subnet1\",\n \"dns_nameservers\": \"1.2.3.4\"\n }\n ]\n }\n ]\n }\n }\n )\n args = {\"image\": \"some_image\", \"flavor\": \"m1.small\",\n \"username\": \"chuck norris\"}\n return scenario, args\n\n @mock.patch(\"rally.task.utils.get_from_manager\")\n @mock.patch(\"rally.task.utils.wait_for_status\")\n def test_check_designate_dns_resolving_ok(\n self,\n mock_rally_task_utils_wait_for_status,\n mock_rally_task_utils_get_from_manager):\n scenario, args = self.create_env_for_designate()\n scenario.run(**args)\n\n scenario._boot_server_with_fip.assert_called_once_with(\n \"some_image\", \"m1.small\", floating_network=None,\n key_name=\"keypair_name\", use_floating_ip=True)\n mock_rally_task_utils_wait_for_status.assert_called_once_with(\n \"foo_server\", ready_statuses=[\"ACTIVE\"], update_resource=mock.ANY)\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", {\"id\": \"foo_id\", \"ip\": \"foo_ip\",\n \"is_floating\": True},\n force_delete=False)\n scenario.add_output.assert_called_with(\n complete={\"chart_plugin\": \"TextArea\",\n \"data\": [\n \"foo_err\"],\n \"title\": \"Script StdErr\"})\n\n @mock.patch(\"rally.task.utils.get_from_manager\")\n @mock.patch(\"rally.task.utils.wait_for_status\")\n def test_test_existing_designate_from_vm_command_timeout(\n self,\n mock_rally_task_utils_wait_for_status,\n mock_rally_task_utils_get_from_manager):\n scenario, _ = self.create_env_for_designate()\n\n scenario._run_command.side_effect = exceptions.SSHTimeout()\n self.assertRaises(exceptions.SSHTimeout,\n 
scenario.run,\n \"foo_flavor\", \"foo_image\", \"foo_interpreter\",\n \"foo_script\", \"foo_username\")\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", self.ip, force_delete=False)\n self.assertFalse(scenario.add_output.called)\n\n @mock.patch(\"rally.task.utils.get_from_manager\")\n @mock.patch(\"rally.task.utils.wait_for_status\")\n def test_test_existing_designate_from_vm_wait_timeout(\n self,\n mock_rally_task_utils_wait_for_status,\n mock_rally_task_utils_get_from_manager):\n scenario, args = self.create_env_for_designate()\n\n mock_rally_task_utils_wait_for_status.side_effect = \\\n exceptions.TimeoutException(\n resource_type=\"foo_resource\",\n resource_name=\"foo_name\",\n resource_id=\"foo_id\",\n desired_status=\"foo_desired_status\",\n resource_status=\"foo_resource_status\",\n timeout=2)\n self.assertRaises(exceptions.TimeoutException,\n scenario.run,\n \"foo_flavor\", \"foo_image\", \"foo_interpreter\",\n \"foo_script\", \"foo_username\")\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", self.ip, force_delete=False)\n self.assertFalse(scenario.add_output.called)\n\n @ddt.data(\n {\"output\": (1, \"x y z\", \"error message\"),\n \"raises\": exceptions.ScriptError},\n {\"output\": (0, \"[1, 2, 3, 4]\", \"\"),\n \"raises\": exceptions.ScriptError}\n )\n @ddt.unpack\n def test_test_existing_designate_from_vm_add_output(self, output,\n expected=None,\n raises=None):\n scenario, _ = self.create_env_for_designate()\n\n scenario._run_command.return_value = output\n kwargs = {\"flavor\": \"foo_flavor\",\n \"image\": \"foo_image\",\n \"username\": \"foo_username\",\n \"password\": \"foo_password\",\n \"use_floating_ip\": \"use_fip\",\n \"floating_network\": \"ext_network\",\n \"force_delete\": \"foo_force\"}\n\n self.assertRaises(raises, scenario.run, **kwargs)\n self.assertFalse(scenario.add_output.called)\n\n\[email protected]\nclass ValidCommandValidatorTestCase(test.TestCase):\n\n def setUp(self):\n 
super(ValidCommandValidatorTestCase, self).setUp()\n self.context = {\"admin\": {\"credential\": mock.MagicMock()},\n \"users\": [{\"credential\": mock.MagicMock()}]}\n\n @ddt.data({\"command\": {\"script_inline\": \"foobar\",\n \"interpreter\": [\"ENV=bar\", \"/bin/foo\"],\n \"local_path\": \"bar\",\n \"remote_path\": \"/bin/foo\"}},\n {\"command\": {\"script_inline\": \"foobar\", \"interpreter\": \"foo\"}})\n @ddt.unpack\n def test_check_command_dict(self, command=None):\n validator = vmtasks.ValidCommandValidator(param_name=\"p\",\n required=True)\n self.assertIsNone(validator.check_command_dict(command))\n\n @ddt.data({\"raises_message\": \"Command must be a dictionary\"},\n {\"command\": \"foo\",\n \"raises_message\": \"Command must be a dictionary\"},\n {\"command\": {\"interpreter\": \"foobar\", \"script_file\": \"foo\",\n \"script_inline\": \"bar\"},\n \"raises_message\": \"Exactly one of \"},\n {\"command\": {\"script_file\": \"foobar\"},\n \"raises_message\": \"Supplied dict specifies no\"},\n {\"command\": {\"script_inline\": \"foobar\",\n \"interpreter\": \"foo\",\n \"local_path\": \"bar\"},\n \"raises_message\": \"When uploading an interpreter its path\"},\n {\"command\": {\"interpreter\": \"/bin/bash\",\n \"script_path\": \"foo\"},\n \"raises_message\": (\"Unexpected command parameters: \"\n \"script_path\")})\n @ddt.unpack\n def test_check_command_dict_failed(\n self, command=None, raises_message=None):\n validator = vmtasks.ValidCommandValidator(param_name=\"p\",\n required=True)\n e = self.assertRaises(\n validation.ValidationError,\n validator.check_command_dict, command)\n self.assertIn(raises_message, e.message)\n\n @mock.patch(\"rally.plugins.common.validators.FileExistsValidator\"\n \"._file_access_ok\")\n def test_validate(self, mock__file_access_ok):\n validator = vmtasks.ValidCommandValidator(param_name=\"p\",\n required=True)\n mock__file_access_ok.return_value = None\n command = {\"script_file\": \"foobar\", \"interpreter\": \"foo\"}\n 
result = validator.validate(self.context, {\"args\": {\"p\": command}},\n None, None)\n self.assertIsNone(result)\n mock__file_access_ok.assert_called_once_with(\n filename=\"foobar\", mode=os.R_OK, param_name=\"p\",\n required=True)\n\n def test_valid_command_not_required(self):\n validator = vmtasks.ValidCommandValidator(param_name=\"p\",\n required=False)\n result = validator.validate(self.context, {\"args\": {\"p\": None}},\n None, None)\n self.assertIsNone(result)\n\n def test_valid_command_required(self):\n validator = vmtasks.ValidCommandValidator(param_name=\"p\",\n required=True)\n\n e = self.assertRaises(\n validation.ValidationError,\n validator.validate, {\"args\": {\"p\": None}},\n self.context, None, None)\n self.assertEqual(\"Command must be a dictionary\", e.message)\n\n @mock.patch(\"rally.plugins.common.validators.FileExistsValidator\"\n \"._file_access_ok\")\n def test_valid_command_unreadable_script_file(self, mock__file_access_ok):\n mock__file_access_ok.side_effect = validation.ValidationError(\"O_o\")\n\n validator = vmtasks.ValidCommandValidator(param_name=\"p\",\n required=True)\n\n command = {\"script_file\": \"foobar\", \"interpreter\": \"foo\"}\n e = self.assertRaises(\n validation.ValidationError,\n validator.validate, self.context, {\"args\": {\"p\": command}},\n None, None)\n self.assertEqual(\"O_o\", e.message)\n\n @mock.patch(\"%s.ValidCommandValidator.check_command_dict\" % BASE)\n def test_valid_command_fail_check_command_dict(self,\n mock_check_command_dict):\n validator = vmtasks.ValidCommandValidator(param_name=\"p\",\n required=True)\n\n mock_check_command_dict.side_effect = validation.ValidationError(\n \"foobar\")\n e = self.assertRaises(\n validation.ValidationError,\n validator.validate, {\"args\": {\"p\": {\"foo\": \"bar\"}}},\n self.context, None, None)\n self.assertEqual(\"foobar\", e.message)\n\n def test_valid_command_script_inline(self):\n validator = vmtasks.ValidCommandValidator(param_name=\"p\",\n 
required=True)\n\n command = {\"script_inline\": \"bar\", \"interpreter\": \"/bin/sh\"}\n result = validator.validate(self.context, {\"args\": {\"p\": command}},\n None, None)\n self.assertIsNone(result)\n\n @mock.patch(\"rally.plugins.common.validators.FileExistsValidator\"\n \"._file_access_ok\")\n def test_valid_command_local_path(self, mock__file_access_ok):\n mock__file_access_ok.side_effect = validation.ValidationError(\"\")\n\n validator = vmtasks.ValidCommandValidator(param_name=\"p\",\n required=True)\n\n command = {\"remote_path\": \"bar\", \"local_path\": \"foobar\"}\n self.assertRaises(\n validation.ValidationError,\n validator.validate, self.context, {\"args\": {\"p\": command}},\n None, None)\n mock__file_access_ok.assert_called_once_with(\n filename=\"foobar\", mode=os.R_OK, param_name=\"p\",\n required=True)\n" }, { "alpha_fraction": 0.512751042842865, "alphanum_fraction": 0.5179417729377747, "avg_line_length": 41.81159591674805, "blob_id": "005f7ba17639496b687d779c6e7f57bbeb969dfb", "content_id": "1ef4d50732db33b1f5c73636d5c289fd3df7916f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8862, "license_type": "permissive", "max_line_length": 79, "num_lines": 207, "path": "/rally_openstack/task/contexts/glance/images.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.common import utils as rutils\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.common.services.image import image\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\n\n\nCONF = cfg.CONF\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"images\", platform=\"openstack\", order=410)\nclass ImageGenerator(context.OpenStackContext):\n \"\"\"Uploads specified Glance images to every tenant.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"image_url\": {\n \"type\": \"string\",\n \"description\": \"Location of the source to create image from.\"\n },\n \"disk_format\": {\n \"description\": \"The format of the disk.\",\n \"enum\": [\"qcow2\", \"raw\", \"vhd\", \"vmdk\", \"vdi\", \"iso\", \"aki\",\n \"ari\", \"ami\"]\n },\n \"container_format\": {\n \"description\": \"Format of the image container.\",\n \"enum\": [\"aki\", \"ami\", \"ari\", \"bare\", \"docker\", \"ova\", \"ovf\"]\n },\n \"image_name\": {\n \"type\": \"string\",\n \"description\": \"The name of image to create. 
NOTE: it will be \"\n \"ignored in case when `images_per_tenant` is \"\n \"bigger then 1.\"\n },\n \"min_ram\": {\n \"description\": \"Amount of RAM in MB\",\n \"type\": \"integer\",\n \"minimum\": 0\n },\n \"min_disk\": {\n \"description\": \"Amount of disk space in GB\",\n \"type\": \"integer\",\n \"minimum\": 0\n },\n \"visibility\": {\n \"description\": \"Visibility for this image ('shared' and \"\n \"'community' are available only in case of \"\n \"Glance V2).\",\n \"enum\": [\"public\", \"private\", \"shared\", \"community\"]\n },\n \"images_per_tenant\": {\n \"description\": \"The number of images to create per one single \"\n \"tenant.\",\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"image_args\": {\n \"description\": \"This param is deprecated since Rally-0.10.0, \"\n \"specify exact arguments in a root section of \"\n \"context instead.\",\n \"type\": \"object\",\n \"additionalProperties\": True\n },\n \"image_container\": {\n \"description\": \"This param is deprecated since Rally-0.10.0, \"\n \"use `container_format` instead.\",\n \"type\": \"string\",\n },\n \"image_type\": {\n \"description\": \"This param is deprecated since Rally-0.10.0, \"\n \"use `disk_format` instead.\",\n \"enum\": [\"qcow2\", \"raw\", \"vhd\", \"vmdk\", \"vdi\", \"iso\", \"aki\",\n \"ari\", \"ami\"],\n },\n },\n \"oneOf\": [{\"description\": \"It is been used since Rally 0.10.0\",\n \"required\": [\"image_url\", \"disk_format\",\n \"container_format\"]},\n {\"description\": \"One of backward compatible way\",\n \"required\": [\"image_url\", \"image_type\",\n \"container_format\"]},\n {\"description\": \"One of backward compatible way\",\n \"required\": [\"image_url\", \"disk_format\",\n \"image_container\"]},\n {\"description\": \"One of backward compatible way\",\n \"required\": [\"image_url\", \"image_type\",\n \"image_container\"]}],\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\"images_per_tenant\": 1}\n\n def setup(self):\n image_url = 
self.config.get(\"image_url\")\n disk_format = self.config.get(\"disk_format\")\n container_format = self.config.get(\"container_format\")\n images_per_tenant = self.config.get(\"images_per_tenant\")\n visibility = self.config.get(\"visibility\", \"private\")\n min_disk = self.config.get(\"min_disk\", 0)\n min_ram = self.config.get(\"min_ram\", 0)\n image_args = self.config.get(\"image_args\", {})\n\n if \"image_type\" in self.config:\n LOG.warning(\"The 'image_type' argument is deprecated since \"\n \"Rally 0.10.0, use disk_format argument instead\")\n if not disk_format:\n disk_format = self.config[\"image_type\"]\n\n if \"image_container\" in self.config:\n LOG.warning(\"The 'image_container' argument is deprecated since \"\n \"Rally 0.10.0; use container_format argument instead\")\n if not container_format:\n container_format = self.config[\"image_container\"]\n\n if image_args:\n LOG.warning(\n \"The 'image_args' argument is deprecated since Rally 0.10.0; \"\n \"specify arguments in a root section of context instead\")\n\n if \"is_public\" in image_args:\n if \"visibility\" not in self.config:\n visibility = (\"public\" if image_args[\"is_public\"]\n else \"private\")\n if \"min_ram\" in image_args:\n if \"min_ram\" not in self.config:\n min_ram = image_args[\"min_ram\"]\n\n if \"min_disk\" in image_args:\n if \"min_disk\" not in self.config:\n min_disk = image_args[\"min_disk\"]\n\n # None image_name means that image.Image will generate a random name\n image_name = None\n if \"image_name\" in self.config and images_per_tenant == 1:\n image_name = self.config[\"image_name\"]\n\n for user, tenant_id in self._iterate_per_tenants():\n current_images = []\n clients = osclients.Clients(user[\"credential\"])\n image_service = image.Image(\n clients, name_generator=self.generate_random_name)\n\n for i in range(images_per_tenant):\n image_obj = image_service.create_image(\n image_name=image_name,\n container_format=container_format,\n image_location=image_url,\n 
disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram)\n current_images.append(image_obj.id)\n\n self.context[\"tenants\"][tenant_id][\"images\"] = current_images\n\n def cleanup(self):\n if self.context.get(\"admin\", {}):\n # NOTE(andreykurilin): Glance does not require the admin for\n # listing tenant images, but the admin is required for\n # discovering Cinder volumes which might be created for the\n # purpose of caching. Removing such volumes are optional step,\n # since Cinder should have own mechanism like garbage collector,\n # but if we can, let's remove everything and make the cloud as\n # close as possible to the original state.\n admin = self.context[\"admin\"]\n admin_required = None\n else:\n admin = None\n admin_required = False\n\n if \"image_name\" in self.config:\n matcher = rutils.make_name_matcher(self.config[\"image_name\"])\n else:\n matcher = self.__class__\n\n resource_manager.cleanup(names=[\"glance.images\",\n \"cinder.image_volumes_cache\"],\n admin=admin,\n admin_required=admin_required,\n users=self.context.get(\"users\", []),\n superclass=matcher,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.6234643459320068, "alphanum_fraction": 0.6382063627243042, "avg_line_length": 39.197532653808594, "blob_id": "7ea16efd51857627ecbb402582287289804b5490", "content_id": "720fb623dfb20f02e37602bdda59a47254870707", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3256, "license_type": "permissive", "max_line_length": 79, "num_lines": 81, "path": "/tests/unit/task/scenarios/quotas/test_quotas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Kylin Cloud\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.quotas import quotas\nfrom tests.unit import test\n\n\nclass QuotasTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(QuotasTestCase, self).setUp()\n self.context.update({\n \"user\": {\n \"tenant_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake\"}\n })\n\n def test_nova_get(self):\n scenario = quotas.NovaGet(self.context)\n scenario._get_quotas = mock.MagicMock()\n scenario.run()\n scenario._get_quotas.assert_called_once_with(\"nova\", \"fake\")\n\n def test_cinder_get(self):\n scenario = quotas.CinderGet(self.context)\n scenario._get_quotas = mock.MagicMock()\n scenario.run()\n scenario._get_quotas.assert_called_once_with(\"cinder\", \"fake\")\n\n def test_nova_update(self):\n scenario = quotas.NovaUpdate(self.context)\n scenario._update_quotas = mock.MagicMock()\n scenario.run(max_quota=1024)\n scenario._update_quotas.assert_called_once_with(\"nova\", \"fake\", 1024)\n\n def test_nova_update_and_delete(self):\n scenario = quotas.NovaUpdateAndDelete(self.context)\n scenario._update_quotas = mock.MagicMock()\n scenario._delete_quotas = mock.MagicMock()\n scenario.run(max_quota=1024)\n scenario._update_quotas.assert_called_once_with(\"nova\", \"fake\", 1024)\n scenario._delete_quotas.assert_called_once_with(\"nova\", \"fake\")\n\n def test_cinder_update(self):\n scenario = quotas.CinderUpdate(self.context)\n scenario._update_quotas = mock.MagicMock()\n scenario.run(max_quota=1024)\n scenario._update_quotas.assert_called_once_with(\"cinder\", 
\"fake\", 1024)\n\n def test_cinder_update_and_delete(self):\n scenario = quotas.CinderUpdateAndDelete(self.context)\n scenario._update_quotas = mock.MagicMock()\n scenario._delete_quotas = mock.MagicMock()\n scenario.run(max_quota=1024)\n scenario._update_quotas.assert_called_once_with(\"cinder\", \"fake\", 1024)\n scenario._delete_quotas.assert_called_once_with(\"cinder\", \"fake\")\n\n def test_neutron_update(self):\n scenario = quotas.NeutronUpdate(self.context)\n scenario._update_quotas = mock.MagicMock()\n mock_quota_update_fn = self.admin_clients(\"neutron\").update_quota\n scenario.run(max_quota=1024)\n scenario._update_quotas.assert_called_once_with(\"neutron\", \"fake\",\n 1024,\n mock_quota_update_fn)\n" }, { "alpha_fraction": 0.5666804313659668, "alphanum_fraction": 0.5689513087272644, "avg_line_length": 38.064517974853516, "blob_id": "76eaf5badaf29c469ebae49ae0d0167d6daea08d", "content_id": "3321ef14c00f2f92920ff59d8a56aa8728c18bdf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4844, "license_type": "permissive", "max_line_length": 79, "num_lines": 124, "path": "/rally_openstack/task/contexts/keystone/roles.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import broker\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.common.services.identity import identity\nfrom rally_openstack.task import context\n\n\nLOG = logging.getLogger(__name__)\n\nCONF = cfg.CONF\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"roles\", platform=\"openstack\", order=330)\nclass RoleGenerator(context.OpenStackContext):\n \"\"\"Context class for assigning roles for users.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"array\",\n \"$schema\": consts.JSON_SCHEMA,\n \"items\": {\n \"type\": \"string\",\n \"description\": \"The name of role to assign to user\"\n }\n }\n\n def __init__(self, ctx):\n super(RoleGenerator, self).__init__(ctx)\n self.credential = self.context[\"admin\"][\"credential\"]\n self.workers = (\n cfg.CONF.openstack.roles_context_resource_management_workers)\n\n def _get_role_object(self, context_role):\n \"\"\"Check if role exists.\n\n :param context_role: name of existing role.\n \"\"\"\n keystone = identity.Identity(osclients.Clients(self.credential))\n default_roles = keystone.list_roles()\n for def_role in default_roles:\n if str(def_role.name) == context_role:\n return def_role\n else:\n raise exceptions.NotFoundException(\n \"There is no role with name `%s`\" % context_role)\n\n def _get_user_role_ids(self, user_id, project_id):\n keystone = identity.Identity(osclients.Clients(self.credential))\n user_roles = keystone.list_roles(user_id=user_id,\n project_id=project_id)\n return [role.id for role in user_roles]\n\n def _get_consumer(self, func_name):\n def consume(cache, args):\n role_id, user_id, project_id = args\n if \"client\" not in cache:\n 
clients = osclients.Clients(self.credential)\n cache[\"client\"] = identity.Identity(clients)\n getattr(cache[\"client\"], func_name)(role_id=role_id,\n user_id=user_id,\n project_id=project_id)\n return consume\n\n def setup(self):\n \"\"\"Add all roles to users.\"\"\"\n threads = self.workers\n roles_dict = {}\n\n def publish(queue):\n for context_role in self.config:\n role = self._get_role_object(context_role)\n roles_dict[role.id] = role.name\n LOG.debug(\"Adding role %(role_name)s having ID %(role_id)s \"\n \"to all users using %(threads)s threads\"\n % {\"role_name\": role.name,\n \"role_id\": role.id,\n \"threads\": threads})\n for user in self.context[\"users\"]:\n if \"roles\" not in user:\n user[\"roles\"] = self._get_user_role_ids(\n user[\"id\"],\n user[\"tenant_id\"])\n user[\"assigned_roles\"] = []\n if role.id not in user[\"roles\"]:\n args = (role.id, user[\"id\"], user[\"tenant_id\"])\n queue.append(args)\n user[\"assigned_roles\"].append(role.id)\n\n broker.run(publish, self._get_consumer(\"add_role\"), threads)\n self.context[\"roles\"] = roles_dict\n\n def cleanup(self):\n \"\"\"Remove assigned roles from users.\"\"\"\n threads = self.workers\n\n def publish(queue):\n for role_id in self.context[\"roles\"]:\n LOG.debug(\"Removing assigned role %s from all users\" % role_id)\n for user in self.context[\"users\"]:\n if role_id in user[\"assigned_roles\"]:\n args = (role_id, user[\"id\"], user[\"tenant_id\"])\n queue.append(args)\n\n broker.run(publish, self._get_consumer(\"revoke_role\"), threads)\n" }, { "alpha_fraction": 0.7133617401123047, "alphanum_fraction": 0.7323973178863525, "avg_line_length": 32.598941802978516, "blob_id": "bf8113812d15730c6a8aca1c97d7fbad29b99f4a", "content_id": "abef6c9a83c145e4a1c20fbc97feef080240218e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 19017, "license_type": "permissive", "max_line_length": 151, "num_lines": 566, "path": 
"/CHANGELOG.rst", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "=========\nChangelog\n=========\n\n.. Changelogs are for humans, not machines. The end users of Rally project are\n human beings who care about what's is changing, why and how it affects them.\n Please leave these notes as much as possible human oriented.\n\n.. Each release can use the next sections:\n - **Added** for new features.\n - **Changed** for changes in existing functionality.\n - **Deprecated** for soon-to-be removed features/plugins.\n - **Removed** for now removed features/plugins.\n - **Fixed** for any bug fixes.\n\n.. Release notes for existing releases are MUTABLE! If there is something that\n was missed or can be improved, feel free to change it!\n\n\n[2.3.0] - 2023-08-01\n--------------------\n\nDeprecated\n~~~~~~~~~~\nThis is the last release with support of Python 3.6 and Python 3.7\n\nChanged\n~~~~~~~\n\n* Bumped min required version of Rally to 3.4.0.\n* Switch docker image to rally 3.4.0 base.\n\nRemoved\n~~~~~~~\n\n* Nova API doesn't include listing agents for a long time, so no need to\n provide *NovaAgents.list_agents* scenario any more.\n\nFixed\n~~~~~\n\n* Two cinder scenarios *CinderVolumeTypes.create_and_update_volume_type* and\n *CinderVolumeTypes.create_volume_type_add_and_list_type_access* were\n incompatible with Cinder API v3\n\n[2.2.0] - 2021-10-25\n--------------------\n\nChanged\n~~~~~~~\n\n* Switch docker image to rally 3.3.0 base.\n* Update upper-constraints file\n\nRemoved\n~~~~~~~\n\nCeilometer service was splitted into Gnocchi, Aodh, Panko long time ago.\nWe kept ceilometer api plugins for backward compatibility, but it is time to\ngo forward (ceilometerclient was deprecated and is not compatible with modern\npython libraries).\n\nFixed\n~~~~~\n\nCompatibility with Tempest >= 27\n\n[2.1.0] - 2020-11-03\n--------------------\n\nAdded\n~~~~~\n\n* openstack.pre_newton_neutron config option can be configured via environment\n spec.\n\n 
.. code-block:: json\n\n {\n \"openstack\": {\n \"api_info\": {\n \"neutron\": {\"pre_newton\": true}\n },\n \"auth_url\": \"http://example.net:5000/v3/\",\n \"region_name\": \"RegionOne\",\n \"endpoint_type\": \"public\",\n \"admin\": {\n \"username\": \"admin\",\n \"password\": \"myadminpass\",\n \"user_domain_name\": \"admin\",\n \"project_name\": \"admin\",\n \"project_domain_name\": \"admin\"\n },\n \"https_insecure\": false,\n \"https_cacert\": \"\",\n }\n }\n\n Reminder: Based on this option, Rally will use or not some external fields\n that can help to identify own resources during cleanup.\n\n\nChanged\n~~~~~~~\n\n* Location of rally-openstack code with docs and samples inside the docker\n image changed from /rally/xrally_opentstack to /rally/xrally_openstack\n (there was a typo in work openstack)\n\n* *network@openstack* context does not require admin credentials anymore and\n work with regular users as well\n\nFixed\n~~~~~\n\n* Some neutron scenarios accepted *name* parameter for create or update actions\n on various entities. 
The value of the parameter was always ignored, but\n anyway it gave wrong assumption.\n\n* [verification component] Make config parser case sensitivity in\n TempestContext and TempestConfigfileManager.\n\n `Launchpad-bug #1881456 <https://launchpad.net/bugs/1881456>`_\n\nRemoved\n~~~~~~~\n\n* Remove deprecated api version 1 for designate\n\n[2.0.0] - 2020-05-08\n--------------------\n\nAdded\n~~~~~\n\n* The *rally_openstack.task.context.OpenStackContext* class which provides\n helpers for all OpenStack context.\n\n* Declare Python 3.8 support\n\n* ManilaShares.create_share_and_access_from_vm scenario which allows to check\n share access from within a VM.\n\n* Regular automated builds for `docker image\n <https://hub.docker.com/r/xrally/xrally-openstack>`_\n\n* VMTasks.check_designate_dns_resolving scenario which tests resolving\n hostname from within a VM using existing designate DNS.\n\nChanged\n~~~~~~~\n\n* `xrally-openstack docker image\n <https://hub.docker.com/r/xrally/xrally-openstack>`_ is switched to use\n `xrally/xrally docker image <https://hub.docker.com/r/xrally/xrally>`_ as\n a base user that brings use python 3.6 and ubuntu bionic.\n\n* Bump min supported Rally framework version to 3.1.0 (rally>=3.1.0)\n\n* Extend *network@openstack* context to save information about created subnets\n and *existing_network@openstack* context with listing subnets.\n\nDeprecated\n~~~~~~~~~~\n\n* a huge project restructure had happened. 
Old paths are deprecated now.\n\n rally_openstack.cfg -> rally_openstack.common.cfg\n\n rally_openstack.cleanup -> rally_openstack.task.cleanup\n\n rally_openstack.consts -> rally_openstack.common.consts\n\n rally_openstack.contexts -> rally_openstack.task.contexts\n\n rally_openstack.credential -> rally_openstack.common.credential\n\n rally_openstack.embedcharts -> rally_openstack.task.ui.charts\n\n rally_openstack.exceptions -> rally_openstack.common.exceptions\n\n rally_openstack.hook -> rally_openstack.task.hooks\n\n rally_openstack.osclients -> rally_openstack.common.osclients\n\n rally_openstack.platforms -> rally_openstack.environment.platforms\n\n rally_openstack.scenario -> rally_openstack.task.scenario\n\n rally_openstack.scenarios -> rally_openstack.task.scenarios\n\n rally_openstack.service -> rally_openstack.common.service\n\n rally_openstack.services -> rally_openstack.common.services\n\n rally_openstack.types -> rally_openstack.task.types\n\n rally_openstack.validators -> rally_openstack.common.validators\n\n rally_openstack.wrappers -> rally_openstack.common.wrappers\n\n\nRemoved\n~~~~~~~\n\n* Support for Python < 3.6\n\n* *required_clients* validator was deprecated since Rally 0.10.0 (at the time\n when OpenStack plugins were part of Rally framework).\n\n* `api_info` argument of OSClient plugins since it was merged into credentials\n object long time ago.\n\n* The keyword arguments for *GlanceImages.create_image_and_boot_instances*\n scenario. They were deprecated since Rally 0.8.0 (at the time when OpenStack\n plugins were part of Rally framework). 
Use *boot_server_kwargs* for\n additional parameters when booting servers.\n\n* *server_kwargs* alias for *boot_server_kwargs* of\n *NovaKeypair.boot_and_delete_server_with_keypair* scenario was deprecated\n since Rally 0.3.2 (at the time when OpenStack plugins were part of Rally\n framework).\n\n* *api_versions* argument of cleanup manager.\n\n[1.7.0] - 2019-12-25\n--------------------\n\nAdded\n~~~~~\n\n* An ability to specify Primary and Alternate reference flavor disk sized.\n\n* Support to upload an image from a https server\n\nFixed\n~~~~~\n\n* [tempest] Only volume-backed servers are allowed for flavors with zero disk\n on stein\n\n `Launchpad-bug #1841609 <https://launchpad.net/bugs/1841609>`_\n\n* [tempest] Failing to configure Tempest with nullable fields at Python 3 envs\n\n `Launchpad-bug #1863945 <https://launchpad.net/bugs/1863945>`_\n\n[1.6.0] - 2019-11-29\n--------------------\n\nPlease note that Python 2.7 will reach the end of its life on\nJanuary 1st, 2020. A future version of Rally will drop support for Python 2.7,\nit will happen soon. 
Also, the same will happen with support of Python 3.4 and\nPython 3.5\n\nAdded\n~~~~~\n\nScenarios:\n\n* NeutronNetworks.create_and_bind_ports\n* BarbicanOrders.list\n* BarbicanOrders.create_key_and_delete\n* BarbicanOrders.create_certificate_and_delete\n* BarbicanOrders.create_asymmetric_and_delete\n\nRemoved\n~~~~~~~\n\n* Removed the former multiattach support dropped in Cinder Train (5.0.0)\n* Removed the former ``sort_key`` and ``sort_dir`` support at listing cinder\n volumes.\n\nChanged\n~~~~~~~\n\n* Improved logging message for the number of used threads while creating\n keystone users and projects/tenants at *users@openstack* context.\n* Updated upper-constraints\n* Improved check for existing rules at *allow_ssh* context.\n\nFixed\n~~~~~\n\n* Handling of errors while cleaning up octavia resources\n* Missing project_id key for several Octavia API calls\n\n `Launchpad-bug #1833235 <https://launchpad.net/bugs/1833235>`_\n\n[1.5.0] - 2019-05-29\n--------------------\n\nAdded\n~~~~~\n\n* libpq-dev dependency to docker image for supporting external PostgreSQL\n backend\n\n* Extend configuration of identity section for tempest with endpoint type\n\n* A new option *user_password* is added to users context for specifying certain\n password for new users.\n\nChanged\n~~~~~~~\n\n* Default Cinder service type is switched to **block-storage** as it is\n new unversioned endpoint. ``api_versions@openstack`` context or ``api_info``\n property of environment configuration should be used for selecting another\n service type.\n\n* Rally 1.5.1 is used by default. Minimum required version is not changed.\n\n* Default source of tempest is switched from git.openstack.org to\n git.opendev.org due to recent infrastructure changes.\n\nFixed\n~~~~~~~\n\n* For performance optimization some calls from python-barbicanclient to\n Barbican API are lazy. 
In case of secret representation, until any property\n is invoked on it, no real call to API is made which affects timings of\n obtaining the resource and slows down cleanup process.\n\n `Launchpad-bug #1819284 <https://launchpad.net/bugs/1819284>`_\n\n* Tempest configurator was case sensitive while filtering roles by name.\n\n* python 3 incompatibility while uploading glance images\n\n `Launchpad-bug #1819274 <https://launchpad.net/bugs/1819274>`_\n\n[1.4.0] - 2019-03-07\n--------------------\n\nAdded\n~~~~~\n\n* Added neutron trunk scenarios\n* Added barbican scenarios\n * [scenario plugin] BarbicanContainers.list\n * [scenario plugin] BarbicanContainers.create_and_delete\n * [scenario plugin] BarbicanContainers.create_and_add\n * [scenario plugin] BarbicanContainers.create_certificate_and_delete\n * [scenario plugin] BarbicanContainers.create_rsa_and_delete\n * [scenario plugin] BarbicanSecrets.list\n * [scenario plugin] BarbicanSecrets.create\n * [scenario plugin] BarbicanSecrets.create_and_delete\n * [scenario plugin] BarbicanSecrets.create_and_get\n * [scenario plugin] BarbicanSecrets.get\n * [scenario plugin] BarbicanSecrets.create_and_list\n * [scenario plugin] BarbicanSecrets.create_symmetric_and_delete\n* Added octavia scenarios\n * [scenario plugin] Octavia.create_and_list_loadbalancers\n * [scenario plugin] Octavia.create_and_delete_loadbalancers\n * [scenario plugin] Octavia.create_and_update_loadbalancers\n * [scenario plugin] Octavia.create_and_stats_loadbalancers\n * [scenario plugin] Octavia.create_and_show_loadbalancers\n * [scenario plugin] Octavia.create_and_list_pools\n * [scenario plugin] Octavia.create_and_delete_pools\n * [scenario plugin] Octavia.create_and_update_pools\n * [scenario plugin] Octavia.create_and_show_pools\n* Support for osprofiler config in Devstack plugin.\n* Added property 'floating_ip_enabled' in magnum cluster_templates context.\n* Enhanced neutron trunk port scenario to create multiple trunks\n* Enhanced 
NeutronSecurityGroup.create_and_list_security_group_rules\n* Added three new trunk port related scenarios\n * [scenario plugin] NeutronTrunks.boot_server_with_subports\n * [scenario plugin] NeutronTrunks.boot_server_and_add_subports\n * [scenario plugin] NeutronTrunks.boot_server_and_batch_add_subports\n* Added neutron scenarios\n [scenario plugin] NeutronNetworks.associate_and_dissociate_floating_ips\n\nChanged\n~~~~~~~\n\n* Extend CinderVolumes.list_volumes scenario arguments.\n\nFixed\n~~~~~\n\n* Ignoring ``region_name`` from environment specification while\n initializing keystone client.\n* Fetching OSProfiler trace-info for some drivers.\n* ``https_insecure`` is not passed to manilaclient\n\n[1.3.0] - 2018-10-08\n--------------------\n\nAdded\n~~~~~\n\n* Support Python 3.7 environment.\n* New options ``https_cert`` and ``https_key`` are added to the spec for\n ``existing@openstack`` platform to represent client certificate bundle and\n key files. Also the support for appropriate system environment variables (\n ``OS_CERT``, ``OS_KEY``) is added.\n* ``existing@openstack`` plugin now supports a new field ``api_info`` for\n specifying not default API version/service_type to use. The format and\n purpose is similar to `api_versions\n <https://xrally.org/plugins/openstack/plugins/#api_versions-context>`_ task\n context.\n* Added Cinder V3 support and use it as the default version. 
You could use\n api_versions context or api_info option of the spec to choose the proper\n version.\n* The documentation for ``existing@openstack`` plugin is extended with\n information about accepted system environment variables via\n ``rally env create --from-sysenv`` command.\n\nChanged\n~~~~~~~\n\n* Our requirements are updated as like upper-constraints (the list of\n suggested tested versions to use)\n* Error messages become more user-friendly in ``rally env check``.\n* Deprecate api_info argument of all clients plugins which inherits from\n OSClient and deprecate api_version argument of\n ``rally_openstack.cleanup.manager.cleanup``. API information (not default\n version/service_type to use) has been included into credentials dictionary.\n* The proper packages are added to `docker image\n <https://hub.docker.com/r/xrally/xrally-openstack>`_ to support MySQL and\n PostgreSQL as DB backends.\n* Rename an action ``nova.create_image`` to ``nova.snapshot_server`` for better\n understanding for what is actually done.\n\nRemoved\n~~~~~~~\n\n* Remove deprecated wrappers (rally_openstack.wrappers) and\n helpers (scenario utils) for Keystone, Cinder, Glance\n services. The new service model should be used instead\n (see ``rally_openstack.services`` module for more details)\n while developing custom plugins. All the inner plugins have been using\n the new code for a long time.\n* Remove deprecated properties *insecure*, *cacert* (use *https_insecure* and\n *https_cacert* properties instead) and method *list_services* (use\n appropriate method of Clients object) from\n *rally_openstack.credentials.OpenStackCredentials* object.\n* Remove deprecated in Rally 0.10.0 ``NovaImages.list_images`` scenario.\n\nFixed\n~~~~~\n\n* Keypairs are now properly cleaned up after the execution of Magnum\n workloads.\n\n\n[1.2.0] - 2018-06-25\n--------------------\n\nRally 1.0.0 has released. This is a major release which doesn't contain\nin-tree OpenStack plugins. 
Also, this release extends flexibility of\nvalidating required platforms which means that logic of required admin/users\nfor the plugin can be implemented at **rally-openstack** side and this is\ndone in rally-openstack 1.2.0\n\nChanged\n~~~~~~~\n\nAlso, it is sad to mention, but due to OpenStack policies we need to stop\nduplicating release notes at ``git tag message``. At least for now.\n\n[1.1.0] - 2018-05-11\n--------------------\n\nAdded\n~~~~~\n\n* [scenario plugin] GnocchiMetric.list_metric\n* [scenario plugin] GnocchiMetric.create_metric\n* [scenario plugin] GnocchiMetric.create_delete_metric\n* [scenario plugin] GnocchiResource.create_resource\n* [scenario plugin] GnocchiResource.create_delete_resource\n* Introduce *__version__*, *__version_tuple__* at *rally_openstack* module.\n As like other python packages each release of *rally-openstack* package can\n introduce new things, deprecate or even remove other ones. To simplify\n integration with other plugins which depends on *rally-openstack*, the new\n properties can be used with proper checks.\n\nChanged\n~~~~~~~\n\n* `Docker image <https://hub.docker.com/r/xrally/xrally-openstack>`_ ported\n to publish images from `rally-openstack\n <https://github.com/openstack/rally-openstack>`_ repo instead of using the\n rally framework repository.\n Also, the CI is extended to check ability to build Docker image for any of\n changes.\n* An interface of ResourceType plugins is changed since Rally 0.12. All our\n plugins are adopted to support it.\n The port is done in a backward compatible way, so the minimum required\n version of Rally still is 0.11.0, but we suggest you to use the latest\n release of Rally.\n\nRemoved\n~~~~~~~\n\n* Calculation of the duration for \"nova.bind_actions\" action. 
It shows\n only duration of initialization Rally inner class and can be easily\n misunderstood as some kind of \"Nova operation\".\n Affects 1 inner scenario \"NovaServers.boot_and_bounce_server\".\n\nFixed\n~~~~~\n\n* ``required_services`` validator should not check services which are\n configured via ``api_versions@openstack`` context since the proper validation\n is done at the context itself.\n The inner check for ``api_versions@openstack`` in ``required_services``\n checked only ``api_versions@openstack``, but ``api_versions`` string is also\n valid name for the context (if there is no other ``api_versions`` contexts\n for other platforms, but the case of name conflict is covered by another\n check).\n\n[1.0.0] - 2018-03-28\n--------------------\nA start of a fork from `rally/plugins/openstack module of original OpenStack\nRally project\n<https://github.com/openstack/rally/tree/0.11.1/rally/plugins/openstack>`_\n\nAdded\n~~~~~\n\n* [scenario plugin] GnocchiArchivePolicy.list_archive_policy\n* [scenario plugin] GnocchiArchivePolicy.create_archive_policy\n* [scenario plugin] GnocchiArchivePolicy.create_delete_archive_policy\n* [scenario plugin] GnocchiResourceType.list_resource_type\n* [scenario plugin] GnocchiResourceType.create_resource_type\n* [scenario plugin] GnocchiResourceType.create_delete_resource_type\n* [scenario plugin] NeutronSubnets.delete_subnets\n* [ci] New Zuul V3 native jobs\n* Extend existing@openstack platform to support creating a specification based\n on system environment variables. This feature should be available with\n Rally>0.11.1\n\nChanged\n~~~~~~~\n\n* Methods for association and dissociation floating ips were deprecated in\n novaclient a year ago and latest major release (python-novaclient 10)\n `doesn't include them\n <https://github.com/openstack/python-novaclient/blob/10.0.0/releasenotes/notes/remove-virt-interfaces-add-rm-fixed-floating-398c905d9c91cca8.yaml>`_.\n These actions should be performed via neutronclient now. 
It is not as simple\n as it was via Nova-API and you can find more neutron-related atomic actions\n in results of workloads.\n\nRemoved\n~~~~~~~\n\n* *os-hosts* CLIs and python API bindings had been deprecated in\n python-novaclient 9.0.0 and became removed in `10.0.0 release\n <https://github.com/openstack/python-novaclient/blob/10.0.0/releasenotes/notes/remove-hosts-d08855550c40b9c6.yaml>`_.\n This decision affected 2 scenarios `NovaHosts.list_hosts\n <https://rally.readthedocs.io/en/0.11.1/plugins/plugin_reference.html#novahosts-list-hosts-scenario>`_\n and `NovaHosts.list_and_get_hosts\n <https://rally.readthedocs.io/en/0.11.1/plugins/plugin_reference.html#novahosts-list-and-get-hosts-scenario>`_\n which become redundant and we cannot leave them (python-novaclient doesn't\n have proper interfaces any more).\n\nFixed\n~~~~~\n\n* The support of `kubernetes python client\n <https://pypi.org/project/kubernetes>`_ (which is used by Magnum plugins)\n is not limited by 3.0.0 max version. You can use more modern releases of that\n library.\n" }, { "alpha_fraction": 0.4823744595050812, "alphanum_fraction": 0.4838709533214569, "avg_line_length": 46.730159759521484, "blob_id": "302e70a987b2cd40ae5e5dd7e597028daf180ad7", "content_id": "d61f3747e1805f5b75e1d96cb0d18f0d1a30bc03", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6014, "license_type": "permissive", "max_line_length": 78, "num_lines": 126, "path": "/tests/unit/task/scenarios/murano/test_environments.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.murano import environments\nfrom tests.unit import test\n\nMURANO_SCENARIO = (\"rally_openstack.task.scenarios.murano.\"\n \"environments\")\n\n\nclass MuranoEnvironmentsTestCase(test.ScenarioTestCase):\n\n def _get_context(self):\n self.context.update({\n \"tenant\": {\n \"packages\": [mock.MagicMock(fully_qualified_name=\"fake\")]\n },\n \"user\": {\n \"tenant_id\": \"fake_tenant_id\"\n },\n \"config\": {\n \"murano_packages\": {\n \"app_package\": (\n \"rally-jobs/extra/murano/\"\n \"applications/HelloReporter/\"\n \"io.murano.apps.HelloReporter.zip\")\n }\n }\n })\n return self.context\n\n def test_list_environments(self):\n TEST_TARGET = \"ListEnvironments\"\n list_env_module = (\"{}.{}.{}\").format(MURANO_SCENARIO,\n TEST_TARGET,\n \"_list_environments\")\n scenario = environments.ListEnvironments(self.context)\n with mock.patch(list_env_module) as mock_list_env:\n scenario.run()\n mock_list_env.assert_called_once_with()\n\n def test_create_and_delete_environment(self):\n TEST_TARGET = \"CreateAndDeleteEnvironment\"\n generate_random_name_module = (\"{}.{}.{}\").format(\n MURANO_SCENARIO, TEST_TARGET, \"generate_random_name\")\n create_env_module = (\"{}.{}.{}\").format(MURANO_SCENARIO,\n TEST_TARGET,\n \"_create_environment\")\n create_session_module = (\"{}.{}.{}\").format(MURANO_SCENARIO,\n TEST_TARGET,\n \"_create_session\")\n delete_env_module = (\"{}.{}.{}\").format(MURANO_SCENARIO,\n TEST_TARGET,\n \"_delete_environment\")\n scenario = 
environments.CreateAndDeleteEnvironment(self.context)\n with mock.patch(generate_random_name_module) as mock_random_name:\n with mock.patch(create_env_module) as mock_create_env:\n with mock.patch(create_session_module) as mock_create_session:\n with mock.patch(delete_env_module) as mock_delete_env:\n fake_env = mock.Mock(id=\"fake_id\")\n mock_create_env.return_value = fake_env\n mock_random_name.return_value = \"foo\"\n scenario.run()\n mock_create_env.assert_called_once_with()\n mock_create_session.assert_called_once_with(\n fake_env.id)\n mock_delete_env.assert_called_once_with(\n fake_env)\n\n def test_create_and_deploy_environment(self):\n TEST_TARGET = \"CreateAndDeployEnvironment\"\n create_env_module = (\"{}.{}.{}\").format(MURANO_SCENARIO,\n TEST_TARGET,\n \"_create_environment\")\n create_session_module = (\"{}.{}.{}\").format(MURANO_SCENARIO,\n TEST_TARGET,\n \"_create_session\")\n create_service_module = (\"{}.{}.{}\").format(MURANO_SCENARIO,\n TEST_TARGET,\n \"_create_service\")\n deploy_env_module = (\"{}.{}.{}\").format(MURANO_SCENARIO,\n TEST_TARGET,\n \"_deploy_environment\")\n scenario = environments.CreateAndDeployEnvironment(self.context)\n with mock.patch(create_env_module) as mock_create_env:\n with mock.patch(create_session_module) as mock_create_session:\n with mock.patch(create_service_module) as mock_create_service:\n with mock.patch(deploy_env_module) as mock_deploy_env:\n fake_env = mock.MagicMock(id=\"fake_env_id\")\n mock_create_env.return_value = fake_env\n\n fake_session = mock.Mock(id=\"fake_session_id\")\n mock_create_session.return_value = fake_session\n\n scenario.context = self._get_context()\n scenario.context[\"tenants\"] = {\n \"fake_tenant_id\": {\n \"packages\": [mock.MagicMock()]\n }\n }\n\n scenario.run(1)\n\n mock_create_env.assert_called_once_with()\n mock_create_session.assert_called_once_with(\n fake_env.id)\n mock_create_service.assert_called_once_with(\n fake_env,\n fake_session,\n \"fake\")\n 
mock_deploy_env.assert_called_once_with(\n fake_env, fake_session)\n" }, { "alpha_fraction": 0.6183428764343262, "alphanum_fraction": 0.6224128007888794, "avg_line_length": 34.45293045043945, "blob_id": "5dfa8d6cc5fc4693aa8535c5f7c03a6c825250aa", "content_id": "0855090389b3c3b8ddbe30c910d5be6b9d07ba63", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45947, "license_type": "permissive", "max_line_length": 79, "num_lines": 1296, "path": "/tests/unit/task/cleanup/test_resources.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nfrom unittest import mock\n\nimport ddt\nfrom neutronclient.common import exceptions as neutron_exceptions\nfrom novaclient import exceptions as nova_exc\nfrom watcherclient.common.apiclient import exceptions as watcher_exceptions\n\nfrom rally_openstack.task.cleanup import resources\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.cleanup.resources\"\nGLANCE_V2_PATH = (\"rally_openstack.common.services.image.glance_v2.\"\n \"GlanceV2Service\")\n\n\nclass SynchronizedDeletionTestCase(test.TestCase):\n\n def test_is_deleted(self):\n self.assertTrue(resources.SynchronizedDeletion().is_deleted())\n\n\nclass QuotaMixinTestCase(test.TestCase):\n\n @mock.patch(\"%s.identity.Identity\" % BASE)\n def test_list(self, mock_identity):\n quota = resources.QuotaMixin()\n quota.tenant_uuid = None\n quota.user = mock.MagicMock()\n self.assertEqual([], quota.list())\n self.assertFalse(mock_identity.called)\n\n quota.tenant_uuid = mock.MagicMock()\n self.assertEqual([mock_identity.return_value.get_project.return_value],\n quota.list())\n mock_identity.assert_called_once_with(quota.user)\n\n\nclass MagnumMixinTestCase(test.TestCase):\n\n def test_id(self):\n magnum = resources.MagnumMixin()\n magnum._service = \"magnum\"\n magnum.raw_resource = mock.MagicMock()\n self.assertEqual(magnum.raw_resource.uuid, magnum.id())\n\n def test_list(self):\n magnum = resources.MagnumMixin()\n magnum._service = \"magnum\"\n some_resources = [mock.MagicMock(), mock.MagicMock(),\n mock.MagicMock(), mock.MagicMock()]\n magnum._manager = mock.MagicMock()\n magnum._manager.return_value.list.side_effect = (\n some_resources[:2], some_resources[2:4], [])\n self.assertEqual(some_resources, magnum.list())\n self.assertEqual(\n [mock.call(marker=None), mock.call(marker=some_resources[1].uuid),\n mock.call(marker=some_resources[3].uuid)],\n 
magnum._manager.return_value.list.call_args_list)\n\n\nclass NovaServerTestCase(test.TestCase):\n\n def test_list(self):\n server = resources.NovaServer()\n server._manager = mock.MagicMock()\n\n server.list()\n\n server._manager.return_value.list.assert_called_once_with(limit=-1)\n\n def test_delete(self):\n server = resources.NovaServer()\n server.raw_resource = mock.Mock()\n server._manager = mock.Mock()\n server.delete()\n\n server._manager.return_value.delete.assert_called_once_with(\n server.raw_resource.id)\n\n def test_delete_locked(self):\n server = resources.NovaServer()\n server.raw_resource = mock.Mock()\n setattr(server.raw_resource, \"OS-EXT-STS:locked\", True)\n server._manager = mock.Mock()\n server.delete()\n\n server.raw_resource.unlock.assert_called_once_with()\n server._manager.return_value.delete.assert_called_once_with(\n server.raw_resource.id)\n\n\nclass NovaFlavorsTestCase(test.TestCase):\n\n @mock.patch(\"%s.base.ResourceManager._manager\" % BASE)\n def test_is_deleted(self, mock_resource_manager__manager):\n exc = nova_exc.NotFound(404)\n mock_resource_manager__manager().get.side_effect = exc\n flavor = resources.NovaFlavors()\n flavor.raw_resource = mock.MagicMock()\n self.assertTrue(flavor.is_deleted())\n\n @mock.patch(\"%s.base.ResourceManager._manager\" % BASE)\n def test_is_deleted_fail(self, mock_resource_manager__manager):\n mock_resource_manager__manager().get.side_effect = TypeError()\n flavor = resources.NovaFlavors()\n flavor.raw_resource = mock.MagicMock()\n self.assertRaises(TypeError, flavor.is_deleted)\n\n\nclass NovaServerGroupsTestCase(test.TestCase):\n\n @mock.patch(\"%s.base.ResourceManager._manager\" % BASE)\n @mock.patch(\"rally.common.utils.name_matches_object\")\n def test_list(self, mock_name_matches_object,\n mock_resource_manager__manager):\n server_groups = [mock.MagicMock(name=\"rally_foo1\"),\n mock.MagicMock(name=\"rally_foo2\"),\n mock.MagicMock(name=\"foo3\")]\n mock_name_matches_object.side_effect = 
[False, True, True]\n mock_resource_manager__manager().list.return_value = server_groups\n self.assertEqual(server_groups, resources.NovaServerGroups().list())\n\n\nclass NeutronMixinTestCase(test.TestCase):\n\n def get_neutron_mixin(self):\n neut = resources.NeutronMixin()\n neut._service = \"neutron\"\n return neut\n\n def test_manager(self):\n neut = self.get_neutron_mixin()\n neut.user = mock.MagicMock()\n self.assertEqual(neut.user.neutron.return_value, neut._manager())\n\n def test_id(self):\n neut = self.get_neutron_mixin()\n neut.raw_resource = {\"id\": \"test\"}\n self.assertEqual(\"test\", neut.id())\n\n def test_name(self):\n neutron = self.get_neutron_mixin()\n neutron.raw_resource = {\"id\": \"test_id\", \"name\": \"test_name\"}\n self.assertEqual(\"test_name\", neutron.name())\n\n def test_delete(self):\n neut = self.get_neutron_mixin()\n neut.user = mock.MagicMock()\n neut._resource = \"some_resource\"\n neut.raw_resource = {\"id\": \"42\"}\n\n neut.delete()\n neut.user.neutron().delete_some_resource.assert_called_once_with(\"42\")\n\n def test_list(self):\n neut = self.get_neutron_mixin()\n neut.user = mock.MagicMock()\n neut._resource = \"some_resource\"\n neut.tenant_uuid = \"user_tenant\"\n\n some_resources = [{\"tenant_id\": neut.tenant_uuid}, {\"tenant_id\": \"a\"}]\n neut.user.neutron().list_some_resources.return_value = {\n \"some_resources\": some_resources\n }\n\n self.assertEqual([some_resources[0]], list(neut.list()))\n\n neut.user.neutron().list_some_resources.assert_called_once_with(\n tenant_id=neut.tenant_uuid)\n\n\nclass NeutronLbaasV1MixinTestCase(test.TestCase):\n\n def get_neutron_lbaasv1_mixin(self, extensions=None):\n if extensions is None:\n extensions = []\n user = mock.MagicMock()\n neut = resources.NeutronLbaasV1Mixin(user=user)\n neut._service = \"neutron\"\n neut._resource = \"some_resource\"\n neut._manager = mock.Mock()\n user.neutron.return_value.list_extensions.return_value = {\n \"extensions\": [{\"alias\": ext} for 
ext in extensions]\n }\n return neut\n\n def test_list_lbaas_available(self):\n neut = self.get_neutron_lbaasv1_mixin(extensions=[\"lbaas\"])\n neut.tenant_uuid = \"user_tenant\"\n\n some_resources = [{\"tenant_id\": neut.tenant_uuid}, {\"tenant_id\": \"a\"}]\n neut._manager().list_some_resources.return_value = {\n \"some_resources\": some_resources\n }\n\n self.assertEqual([some_resources[0]], list(neut.list()))\n neut._manager().list_some_resources.assert_called_once_with(\n tenant_id=neut.tenant_uuid)\n\n def test_list_lbaas_unavailable(self):\n neut = self.get_neutron_lbaasv1_mixin()\n\n self.assertEqual([], list(neut.list()))\n self.assertFalse(neut._manager().list_some_resources.called)\n\n\nclass NeutronLbaasV2MixinTestCase(test.TestCase):\n\n def get_neutron_lbaasv2_mixin(self, extensions=None):\n if extensions is None:\n extensions = []\n\n user = mock.MagicMock()\n neut = resources.NeutronLbaasV2Mixin(user=user)\n neut._service = \"neutron\"\n neut._resource = \"some_resource\"\n neut._manager = mock.Mock()\n user.neutron.return_value.list_extensions.return_value = {\n \"extensions\": [{\"alias\": ext} for ext in extensions]\n }\n return neut\n\n def test_list_lbaasv2_available(self):\n neut = self.get_neutron_lbaasv2_mixin(extensions=[\"lbaasv2\"])\n neut.tenant_uuid = \"user_tenant\"\n\n some_resources = [{\"tenant_id\": neut.tenant_uuid}, {\"tenant_id\": \"a\"}]\n neut._manager().list_some_resources.return_value = {\n \"some_resources\": some_resources\n }\n\n self.assertEqual([some_resources[0]], list(neut.list()))\n neut._manager().list_some_resources.assert_called_once_with(\n tenant_id=neut.tenant_uuid)\n\n def test_list_lbaasv2_unavailable(self):\n neut = self.get_neutron_lbaasv2_mixin()\n\n self.assertEqual([], list(neut.list()))\n self.assertFalse(neut._manager().list_some_resources.called)\n\n\nclass NeutronV2LoadbalancerTestCase(test.TestCase):\n\n def get_neutron_lbaasv2_lb(self):\n neutron_lb = resources.NeutronV2Loadbalancer()\n 
neutron_lb.raw_resource = {\"id\": \"1\", \"name\": \"s_rally\"}\n neutron_lb._manager = mock.Mock()\n return neutron_lb\n\n def test_is_deleted_true(self):\n from neutronclient.common import exceptions as n_exceptions\n neutron_lb = self.get_neutron_lbaasv2_lb()\n neutron_lb._manager().show_loadbalancer.side_effect = (\n n_exceptions.NotFound)\n\n self.assertTrue(neutron_lb.is_deleted())\n\n neutron_lb._manager().show_loadbalancer.assert_called_once_with(\n neutron_lb.id())\n\n def test_is_deleted_false(self):\n from neutronclient.common import exceptions as n_exceptions\n neutron_lb = self.get_neutron_lbaasv2_lb()\n neutron_lb._manager().show_loadbalancer.return_value = (\n neutron_lb.raw_resource)\n\n self.assertFalse(neutron_lb.is_deleted())\n neutron_lb._manager().show_loadbalancer.assert_called_once_with(\n neutron_lb.id())\n\n neutron_lb._manager().show_loadbalancer.reset_mock()\n\n neutron_lb._manager().show_loadbalancer.side_effect = (\n n_exceptions.Forbidden)\n\n self.assertFalse(neutron_lb.is_deleted())\n neutron_lb._manager().show_loadbalancer.assert_called_once_with(\n neutron_lb.id())\n\n\nclass NeutronBgpvpnTestCase(test.TestCase):\n\n def get_neutron_bgpvpn_mixin(self, extensions=None):\n if extensions is None:\n extensions = []\n admin = mock.Mock()\n neut = resources.NeutronBgpvpn(admin=admin)\n neut._manager = mock.Mock()\n nc = admin.neutron.return_value\n nc.list_extensions.return_value = {\n \"extensions\": [{\"alias\": ext} for ext in extensions]\n }\n return neut\n\n def test_list_user(self):\n neut = self.get_neutron_bgpvpn_mixin(extensions=[\"bgpvpn\"])\n user_bgpvpns = {\"bgpvpns\": [{\"tenant_id\": \"foo\", \"id\": \"bgpvpn_id\"}]}\n neut._manager().list_bgpvpns.return_value = user_bgpvpns\n\n bgpvpns_list = neut.list()\n self.assertEqual(\"bgpvpn\", neut._resource)\n neut._manager().list_bgpvpns.assert_called_once_with()\n self.assertEqual(bgpvpns_list, user_bgpvpns[\"bgpvpns\"])\n\n def test_list_admin(self):\n neut = 
self.get_neutron_bgpvpn_mixin(extensions=[\"bgpvpn\"])\n admin_bgpvpns = {\"bgpvpns\": [{\"tenant_id\": \"foo\", \"id\": \"bgpvpn_id\"}]}\n neut._manager().list_bgpvpns.return_value = admin_bgpvpns\n\n self.assertEqual(\"bgpvpn\", neut._resource)\n self.assertEqual(neut.list(), admin_bgpvpns[\"bgpvpns\"])\n\n\nclass NeutronFloatingIPTestCase(test.TestCase):\n\n def test_name(self):\n fips = resources.NeutronFloatingIP({\"name\": \"foo\",\n \"description\": \"OoO\"})\n self.assertEqual(fips.name(), \"OoO\")\n\n def test_list(self):\n fips = {\"floatingips\": [{\"tenant_id\": \"foo\", \"id\": \"foo\"}]}\n\n user = mock.MagicMock()\n user.neutron.return_value.list_floatingips.return_value = fips\n\n self.assertEqual(fips[\"floatingips\"], list(\n resources.NeutronFloatingIP(user=user, tenant_uuid=\"foo\").list()))\n user.neutron.return_value.list_floatingips.assert_called_once_with(\n tenant_id=\"foo\")\n\n\nclass NeutronTrunkTestcase(test.TestCase):\n\n def test_list(self):\n user = mock.MagicMock()\n trunk = resources.NeutronTrunk(user=user)\n user.neutron().list_trunks.return_value = {\n \"trunks\": [\"trunk\"]}\n self.assertEqual([\"trunk\"], trunk.list())\n user.neutron().list_trunks.assert_called_once_with(\n tenant_id=None)\n\n def test_list_with_not_found(self):\n\n class NotFound(Exception):\n status_code = 404\n\n user = mock.MagicMock()\n trunk = resources.NeutronTrunk(user=user)\n user.neutron().list_trunks.side_effect = NotFound()\n\n self.assertEqual([], trunk.list())\n user.neutron().list_trunks.assert_called_once_with(\n tenant_id=None)\n\n\nclass NeutronPortTestCase(test.TestCase):\n\n def test_delete(self):\n raw_res = {\"device_owner\": \"abbabaab\", \"id\": \"some_id\"}\n user = mock.MagicMock()\n\n resources.NeutronPort(resource=raw_res, user=user).delete()\n\n user.neutron().delete_port.assert_called_once_with(raw_res[\"id\"])\n\n def test_delete_port_raise_exception(self):\n raw_res = {\"device_owner\": \"abbabaab\", \"id\": \"some_id\"}\n user 
= mock.MagicMock()\n user.neutron().delete_port.side_effect = (\n neutron_exceptions.PortNotFoundClient)\n\n resources.NeutronPort(resource=raw_res, user=user).delete()\n\n user.neutron().delete_port.assert_called_once_with(raw_res[\"id\"])\n\n def test_delete_port_device_owner(self):\n raw_res = {\n \"device_owner\": \"network:router_interface\",\n \"id\": \"some_id\",\n \"device_id\": \"dev_id\"\n }\n user = mock.MagicMock()\n\n resources.NeutronPort(resource=raw_res, user=user).delete()\n\n user.neutron().remove_interface_router.assert_called_once_with(\n raw_res[\"device_id\"], {\"port_id\": raw_res[\"id\"]})\n\n def test_name(self):\n raw_res = {\n \"id\": \"some_id\",\n \"device_id\": \"dev_id\",\n }\n\n # automatically created or manually created port. No name field\n self.assertEqual(\n resources.NeutronPort(resource=raw_res,\n user=mock.MagicMock()).name(),\n \"\")\n\n raw_res[\"name\"] = \"foo\"\n self.assertEqual(\"foo\", resources.NeutronPort(\n resource=raw_res, user=mock.MagicMock()).name())\n\n raw_res[\"parent_name\"] = \"bar\"\n self.assertEqual(\"bar\", resources.NeutronPort(\n resource=raw_res, user=mock.MagicMock()).name())\n\n del raw_res[\"name\"]\n self.assertEqual(\"bar\", resources.NeutronPort(\n resource=raw_res, user=mock.MagicMock()).name())\n\n def test_list(self):\n\n tenant_uuid = \"uuuu-uuuu-iiii-dddd\"\n\n ports = [\n # the case when 'name' is present, so 'device_owner' field is not\n # required\n {\"tenant_id\": tenant_uuid, \"id\": \"id1\", \"name\": \"foo\"},\n # 3 different cases when router_interface is an owner\n {\"tenant_id\": tenant_uuid, \"id\": \"id2\",\n \"device_owner\": \"network:router_interface\",\n \"device_id\": \"router-1\"},\n {\"tenant_id\": tenant_uuid, \"id\": \"id3\",\n \"device_owner\": \"network:router_interface_distributed\",\n \"device_id\": \"router-1\"},\n {\"tenant_id\": tenant_uuid, \"id\": \"id4\",\n \"device_owner\": \"network:ha_router_replicated_interface\",\n \"device_id\": \"router-2\"},\n # the 
case when gateway router is an owner\n {\"tenant_id\": tenant_uuid, \"id\": \"id5\",\n \"device_owner\": \"network:router_gateway\",\n \"device_id\": \"router-3\"},\n # the case when gateway router is an owner, but device_id is\n # invalid\n {\"tenant_id\": tenant_uuid, \"id\": \"id6\",\n \"device_owner\": \"network:router_gateway\",\n \"device_id\": \"aaaa\"},\n # the case when port was auto-created with floating-ip\n {\"tenant_id\": tenant_uuid, \"id\": \"id7\",\n \"device_owner\": \"network:dhcp\",\n \"device_id\": \"asdasdasd\"},\n # the case when port is from another tenant\n {\"tenant_id\": \"wrong tenant\", \"id\": \"id8\", \"name\": \"foo\"},\n # WTF port without any parent and name\n {\"tenant_id\": tenant_uuid, \"id\": \"id9\", \"device_owner\": \"\"},\n ]\n\n routers = [\n {\"id\": \"router-1\", \"name\": \"Router-1\", \"tenant_id\": tenant_uuid},\n {\"id\": \"router-2\", \"name\": \"Router-2\", \"tenant_id\": tenant_uuid},\n {\"id\": \"router-3\", \"name\": \"Router-3\", \"tenant_id\": tenant_uuid},\n {\"id\": \"router-4\", \"name\": \"Router-4\", \"tenant_id\": tenant_uuid},\n {\"id\": \"router-5\", \"name\": \"Router-5\", \"tenant_id\": tenant_uuid},\n ]\n\n expected_ports = []\n for port in ports:\n if port[\"tenant_id\"] == tenant_uuid:\n expected_ports.append(copy.deepcopy(port))\n if (\"device_id\" in port\n and port[\"device_id\"].startswith(\"router\")):\n expected_ports[-1][\"parent_name\"] = [\n r for r in routers\n if r[\"id\"] == port[\"device_id\"]][0][\"name\"]\n\n class FakeNeutronClient(object):\n\n list_ports = mock.Mock()\n list_routers = mock.Mock()\n\n neutron = FakeNeutronClient\n neutron.list_ports.return_value = {\"ports\": ports}\n neutron.list_routers.return_value = {\"routers\": routers}\n\n user = mock.Mock(neutron=neutron)\n self.assertEqual(expected_ports, resources.NeutronPort(\n user=user, tenant_uuid=tenant_uuid).list())\n neutron.list_ports.assert_called_once_with()\n 
neutron.list_routers.assert_called_once_with()\n\n\[email protected]\nclass NeutronSecurityGroupTestCase(test.TestCase):\n\n @ddt.data(\n {\"admin\": mock.Mock(), \"admin_required\": True},\n {\"admin\": None, \"admin_required\": False})\n @ddt.unpack\n def test_list(self, admin, admin_required):\n sg_list = [{\"tenant_id\": \"user_tenant\", \"name\": \"default\"},\n {\"tenant_id\": \"user_tenant\", \"name\": \"foo_sg\"}]\n\n neut = resources.NeutronSecurityGroup()\n neut.user = mock.MagicMock()\n neut._resource = \"security_group\"\n neut.tenant_uuid = \"user_tenant\"\n\n neut.user.neutron().list_security_groups.return_value = {\n \"security_groups\": sg_list\n }\n\n expected_result = [sg_list[1]]\n self.assertEqual(expected_result, list(neut.list()))\n\n neut.user.neutron().list_security_groups.assert_called_once_with(\n tenant_id=neut.tenant_uuid)\n\n def test_list_with_not_found(self):\n\n class NotFound(Exception):\n status_code = 404\n\n neut = resources.NeutronSecurityGroup()\n neut.user = mock.MagicMock()\n neut._resource = \"security_group\"\n neut.tenant_uuid = \"user_tenant\"\n\n neut.user.neutron().list_security_groups.side_effect = NotFound()\n\n expected_result = []\n self.assertEqual(expected_result, list(neut.list()))\n\n neut.user.neutron().list_security_groups.assert_called_once_with(\n tenant_id=neut.tenant_uuid)\n\n\nclass NeutronQuotaTestCase(test.TestCase):\n\n def test_delete(self):\n admin = mock.MagicMock()\n resources.NeutronQuota(admin=admin, tenant_uuid=\"fake\").delete()\n admin.neutron.return_value.delete_quota.assert_called_once_with(\"fake\")\n\n\[email protected]\nclass GlanceImageTestCase(test.TestCase):\n\n @mock.patch(\"rally_openstack.common.services.image.image.Image\")\n def test__client_admin(self, mock_image):\n admin = mock.Mock()\n glance = resources.GlanceImage(admin=admin)\n client = glance._client()\n\n mock_image.assert_called_once_with(admin)\n self.assertEqual(client, mock_image.return_value)\n\n 
@mock.patch(\"rally_openstack.common.services.image.image.Image\")\n def test__client_user(self, mock_image):\n user = mock.Mock()\n glance = resources.GlanceImage(user=user)\n wrapper = glance._client()\n\n mock_image.assert_called_once_with(user)\n self.assertEqual(wrapper, mock_image.return_value)\n\n @mock.patch(\"rally_openstack.common.services.image.image.Image\")\n def test__client_admin_preferred(self, mock_image):\n admin = mock.Mock()\n user = mock.Mock()\n glance = resources.GlanceImage(admin=admin, user=user)\n client = glance._client()\n\n mock_image.assert_called_once_with(admin)\n self.assertEqual(client, mock_image.return_value)\n\n def test_list(self):\n glance = resources.GlanceImage()\n glance._client = mock.Mock()\n list_images = glance._client.return_value.list_images\n list_images.side_effect = (\n [\"active-image1\", \"active-image2\"],\n [\"deactivated-image1\"])\n glance.tenant_uuid = mock.Mock()\n\n self.assertEqual(\n glance.list(),\n [\"active-image1\", \"active-image2\", \"deactivated-image1\"])\n list_images.assert_has_calls([\n mock.call(owner=glance.tenant_uuid),\n mock.call(status=\"deactivated\", owner=glance.tenant_uuid)])\n\n def test_delete(self):\n glance = resources.GlanceImage()\n glance._client = mock.Mock()\n glance._wrapper = mock.Mock()\n glance.raw_resource = mock.Mock()\n\n client = glance._client.return_value\n\n deleted_image = mock.Mock(status=\"DELETED\")\n client.get_image.side_effect = [glance.raw_resource, deleted_image]\n\n glance.delete()\n client.delete_image.assert_called_once_with(glance.raw_resource.id)\n self.assertFalse(client.reactivate_image.called)\n\n @mock.patch(\"%s.reactivate_image\" % GLANCE_V2_PATH)\n def test_delete_deactivated_image(self, mock_reactivate_image):\n glance = resources.GlanceImage()\n glance._client = mock.Mock()\n glance._wrapper = mock.Mock()\n glance.raw_resource = mock.Mock(status=\"deactivated\")\n\n client = glance._client.return_value\n\n deleted_image = 
mock.Mock(status=\"DELETED\")\n client.get_image.side_effect = [glance.raw_resource, deleted_image]\n\n glance.delete()\n\n mock_reactivate_image.assert_called_once_with(glance.raw_resource.id)\n client.delete_image.assert_called_once_with(glance.raw_resource.id)\n\n\nclass CeilometerTestCase(test.TestCase):\n\n def test_id(self):\n ceil = resources.CeilometerAlarms()\n ceil.raw_resource = mock.MagicMock()\n self.assertEqual(ceil.raw_resource.alarm_id, ceil.id())\n\n @mock.patch(\"%s.CeilometerAlarms._manager\" % BASE)\n def test_list(self, mock_ceilometer_alarms__manager):\n\n ceil = resources.CeilometerAlarms()\n ceil.tenant_uuid = mock.MagicMock()\n mock_ceilometer_alarms__manager().list.return_value = [\"a\", \"b\", \"c\"]\n mock_ceilometer_alarms__manager.reset_mock()\n\n self.assertEqual([\"a\", \"b\", \"c\"], ceil.list())\n mock_ceilometer_alarms__manager().list.assert_called_once_with(\n q=[{\"field\": \"project_id\", \"op\": \"eq\", \"value\": ceil.tenant_uuid}])\n\n\nclass ZaqarQueuesTestCase(test.TestCase):\n\n def test_list(self):\n user = mock.Mock()\n zaqar = resources.ZaqarQueues(user=user)\n zaqar.list()\n user.zaqar().queues.assert_called_once_with()\n\n\nclass KeystoneMixinTestCase(test.TestCase):\n\n def test_is_deleted(self):\n self.assertTrue(resources.KeystoneMixin().is_deleted())\n\n def get_keystone_mixin(self):\n kmixin = resources.KeystoneMixin()\n kmixin._service = \"keystone\"\n return kmixin\n\n @mock.patch(\"%s.identity\" % BASE)\n def test_manager(self, mock_identity):\n keystone_mixin = self.get_keystone_mixin()\n keystone_mixin.admin = mock.MagicMock()\n self.assertEqual(mock_identity.Identity.return_value,\n keystone_mixin._manager())\n mock_identity.Identity.assert_called_once_with(\n keystone_mixin.admin)\n\n @mock.patch(\"%s.identity\" % BASE)\n def test_delete(self, mock_identity):\n keystone_mixin = self.get_keystone_mixin()\n keystone_mixin._resource = \"some_resource\"\n keystone_mixin.id = lambda: \"id_a\"\n 
keystone_mixin.admin = mock.MagicMock()\n\n keystone_mixin.delete()\n mock_identity.Identity.assert_called_once_with(keystone_mixin.admin)\n identity_service = mock_identity.Identity.return_value\n identity_service.delete_some_resource.assert_called_once_with(\"id_a\")\n\n @mock.patch(\"%s.identity\" % BASE)\n def test_list(self, mock_identity):\n keystone_mixin = self.get_keystone_mixin()\n keystone_mixin._resource = \"some_resource2\"\n keystone_mixin.admin = mock.MagicMock()\n identity = mock_identity.Identity\n\n self.assertSequenceEqual(\n identity.return_value.list_some_resource2s.return_value,\n keystone_mixin.list())\n identity.assert_called_once_with(keystone_mixin.admin)\n identity.return_value.list_some_resource2s.assert_called_once_with()\n\n\nclass KeystoneEc2TestCase(test.TestCase):\n def test_user_id_property(self):\n user_client = mock.Mock()\n admin_client = mock.Mock()\n\n manager = resources.KeystoneEc2(user=user_client, admin=admin_client)\n\n self.assertEqual(user_client.keystone.auth_ref.user_id,\n manager.user_id)\n\n def test_list(self):\n user_client = mock.Mock()\n admin_client = mock.Mock()\n\n with mock.patch(\"%s.identity.Identity\" % BASE, autospec=True) as p:\n identity = p.return_value\n manager = resources.KeystoneEc2(user=user_client,\n admin=admin_client)\n self.assertEqual(identity.list_ec2credentials.return_value,\n manager.list())\n p.assert_called_once_with(user_client)\n identity.list_ec2credentials.assert_called_once_with(\n manager.user_id)\n\n def test_delete(self):\n user_client = mock.Mock()\n admin_client = mock.Mock()\n raw_resource = mock.Mock()\n\n with mock.patch(\"%s.identity.Identity\" % BASE, autospec=True) as p:\n manager = resources.KeystoneEc2(user=user_client,\n admin=admin_client,\n resource=raw_resource)\n manager.delete()\n\n p.assert_called_once_with(user_client)\n p.return_value.delete_ec2credential.assert_called_once_with(\n manager.user_id, access=raw_resource.access)\n\n\nclass 
SwiftMixinTestCase(test.TestCase):\n\n def get_swift_mixin(self):\n swift_mixin = resources.SwiftMixin()\n swift_mixin._service = \"swift\"\n return swift_mixin\n\n def test_manager(self):\n swift_mixin = self.get_swift_mixin()\n swift_mixin.user = mock.MagicMock()\n self.assertEqual(swift_mixin.user.swift.return_value,\n swift_mixin._manager())\n\n def test_id(self):\n swift_mixin = self.get_swift_mixin()\n swift_mixin.raw_resource = mock.MagicMock()\n self.assertEqual(swift_mixin.raw_resource, swift_mixin.id())\n\n def test_name(self):\n swift = self.get_swift_mixin()\n swift.raw_resource = [\"name1\", \"name2\"]\n self.assertEqual(\"name2\", swift.name())\n\n def test_delete(self):\n swift_mixin = self.get_swift_mixin()\n swift_mixin.user = mock.MagicMock()\n swift_mixin._resource = \"some_resource\"\n swift_mixin.raw_resource = mock.MagicMock()\n swift_mixin.delete()\n swift_mixin.user.swift().delete_some_resource.assert_called_once_with(\n *swift_mixin.raw_resource)\n\n\nclass SwiftObjectTestCase(test.TestCase):\n\n @mock.patch(\"%s.SwiftMixin._manager\" % BASE)\n def test_list(self, mock_swift_mixin__manager):\n containers = [mock.MagicMock(), mock.MagicMock()]\n objects = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]\n mock_swift_mixin__manager().get_account.return_value = (\n \"header\", containers)\n mock_swift_mixin__manager().get_container.return_value = (\n \"header\", objects)\n self.assertEqual(len(containers),\n len(resources.SwiftContainer().list()))\n self.assertEqual(len(containers) * len(objects),\n len(resources.SwiftObject().list()))\n\n\nclass SwiftContainerTestCase(test.TestCase):\n\n @mock.patch(\"%s.SwiftMixin._manager\" % BASE)\n def test_list(self, mock_swift_mixin__manager):\n containers = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]\n mock_swift_mixin__manager().get_account.return_value = (\n \"header\", containers)\n self.assertEqual(len(containers),\n len(resources.SwiftContainer().list()))\n\n\nclass 
ManilaShareTestCase(test.TestCase):\n\n def test_list(self):\n share_resource = resources.ManilaShare()\n share_resource._manager = mock.MagicMock()\n\n share_resource.list()\n\n self.assertEqual(\"shares\", share_resource._resource)\n share_resource._manager.return_value.list.assert_called_once_with()\n\n def test_delete(self):\n share_resource = resources.ManilaShare()\n share_resource._manager = mock.MagicMock()\n share_resource.id = lambda: \"fake_id\"\n\n share_resource.delete()\n\n self.assertEqual(\"shares\", share_resource._resource)\n share_resource._manager.return_value.delete.assert_called_once_with(\n \"fake_id\")\n\n\nclass ManilaShareNetworkTestCase(test.TestCase):\n\n def test_list(self):\n sn_resource = resources.ManilaShareNetwork()\n sn_resource._manager = mock.MagicMock()\n\n sn_resource.list()\n\n self.assertEqual(\"share_networks\", sn_resource._resource)\n sn_resource._manager.return_value.list.assert_called_once_with()\n\n def test_delete(self):\n sn_resource = resources.ManilaShareNetwork()\n sn_resource._manager = mock.MagicMock()\n sn_resource.id = lambda: \"fake_id\"\n\n sn_resource.delete()\n\n self.assertEqual(\"share_networks\", sn_resource._resource)\n sn_resource._manager.return_value.delete.assert_called_once_with(\n \"fake_id\")\n\n\nclass ManilaSecurityServiceTestCase(test.TestCase):\n\n def test_list(self):\n ss_resource = resources.ManilaSecurityService()\n ss_resource._manager = mock.MagicMock()\n\n ss_resource.list()\n\n self.assertEqual(\"security_services\", ss_resource._resource)\n ss_resource._manager.return_value.list.assert_called_once_with()\n\n def test_delete(self):\n ss_resource = resources.ManilaSecurityService()\n ss_resource._manager = mock.MagicMock()\n ss_resource.id = lambda: \"fake_id\"\n\n ss_resource.delete()\n\n self.assertEqual(\"security_services\", ss_resource._resource)\n ss_resource._manager.return_value.delete.assert_called_once_with(\n \"fake_id\")\n\n\nclass 
MistralWorkbookTestCase(test.TestCase):\n\n def test_delete(self):\n clients = mock.MagicMock()\n resource = mock.Mock()\n resource.name = \"TEST_NAME\"\n\n mistral = resources.MistralWorkbooks(\n user=clients,\n resource=resource)\n\n mistral.delete()\n\n clients.mistral().workbooks.delete.assert_called_once_with(\n \"TEST_NAME\")\n\n\nclass MistralExecutionsTestCase(test.TestCase):\n\n def test_name(self):\n execution = mock.MagicMock(workflow_name=\"bar\")\n execution.name = \"foo\"\n self.assertEqual(\"bar\", resources.MistralExecutions(execution).name())\n\n\nclass SenlinMixinTestCase(test.TestCase):\n\n def test_id(self):\n senlin = resources.SenlinMixin()\n senlin.raw_resource = {\"id\": \"TEST_ID\"}\n self.assertEqual(\"TEST_ID\", senlin.id())\n\n def test__manager(self):\n senlin = resources.SenlinMixin()\n senlin._service = \"senlin\"\n senlin.user = mock.MagicMock()\n self.assertEqual(senlin.user.senlin.return_value, senlin._manager())\n\n def test_list(self):\n senlin = resources.SenlinMixin()\n senlin._service = \"senlin\"\n senlin.user = mock.MagicMock()\n senlin._resource = \"some_resources\"\n\n some_resources = [{\"name\": \"resource1\"}, {\"name\": \"resource2\"}]\n senlin.user.senlin().some_resources.return_value = some_resources\n\n self.assertEqual(some_resources, senlin.list())\n senlin.user.senlin().some_resources.assert_called_once_with()\n\n def test_delete(self):\n senlin = resources.SenlinMixin()\n senlin._service = \"senlin\"\n senlin.user = mock.MagicMock()\n senlin._resource = \"some_resources\"\n senlin.raw_resource = {\"id\": \"TEST_ID\"}\n senlin.user.senlin().delete_some_resource.return_value = None\n\n senlin.delete()\n senlin.user.senlin().delete_some_resource.assert_called_once_with(\n \"TEST_ID\")\n\n\nclass WatcherTemplateTestCase(test.TestCase):\n\n def test_id(self):\n watcher = resources.WatcherTemplate()\n watcher.raw_resource = mock.MagicMock(uuid=100)\n self.assertEqual(100, watcher.id())\n\n 
@mock.patch(\"%s.WatcherTemplate._manager\" % BASE)\n def test_is_deleted(self, mock__manager):\n mock__manager.return_value.get.return_value = None\n watcher = resources.WatcherTemplate()\n watcher.id = mock.Mock()\n self.assertFalse(watcher.is_deleted())\n mock__manager.side_effect = [watcher_exceptions.NotFound()]\n self.assertTrue(watcher.is_deleted())\n\n def test_list(self):\n watcher = resources.WatcherTemplate()\n watcher._manager = mock.MagicMock()\n\n watcher.list()\n\n self.assertEqual(\"audit_template\", watcher._resource)\n watcher._manager().list.assert_called_once_with(limit=0)\n\n\nclass WatcherAuditTestCase(test.TestCase):\n\n def test_id(self):\n watcher = resources.WatcherAudit()\n watcher.raw_resource = mock.MagicMock(uuid=100)\n self.assertEqual(100, watcher.id())\n\n def test_name(self):\n watcher = resources.WatcherAudit()\n watcher.raw_resource = mock.MagicMock(uuid=\"name\")\n self.assertEqual(\"name\", watcher.name())\n\n @mock.patch(\"%s.WatcherAudit._manager\" % BASE)\n def test_is_deleted(self, mock__manager):\n mock__manager.return_value.get.return_value = None\n watcher = resources.WatcherAudit()\n watcher.id = mock.Mock()\n self.assertFalse(watcher.is_deleted())\n mock__manager.side_effect = [watcher_exceptions.NotFound()]\n self.assertTrue(watcher.is_deleted())\n\n def test_list(self):\n watcher = resources.WatcherAudit()\n watcher._manager = mock.MagicMock()\n\n watcher.list()\n\n self.assertEqual(\"audit\", watcher._resource)\n watcher._manager().list.assert_called_once_with(limit=0)\n\n\nclass WatcherActionPlanTestCase(test.TestCase):\n\n def test_id(self):\n watcher = resources.WatcherActionPlan()\n watcher.raw_resource = mock.MagicMock(uuid=100)\n self.assertEqual(100, watcher.id())\n\n def test_name(self):\n watcher = resources.WatcherActionPlan()\n self.assertIsInstance(watcher.name(), resources.base.NoName)\n\n @mock.patch(\"%s.WatcherActionPlan._manager\" % BASE)\n def test_is_deleted(self, mock__manager):\n 
mock__manager.return_value.get.return_value = None\n watcher = resources.WatcherActionPlan()\n watcher.id = mock.Mock()\n self.assertFalse(watcher.is_deleted())\n mock__manager.side_effect = [watcher_exceptions.NotFound()]\n self.assertTrue(watcher.is_deleted())\n\n def test_list(self):\n watcher = resources.WatcherActionPlan()\n watcher._manager = mock.MagicMock()\n\n watcher.list()\n\n self.assertEqual(\"action_plan\", watcher._resource)\n watcher._manager().list.assert_called_once_with(limit=0)\n\n\nclass CinderImageVolumeCacheTestCase(test.TestCase):\n\n class Resource(object):\n def __init__(self, id=None, name=None):\n self.id = id\n self.name = name\n\n @mock.patch(\"rally_openstack.common.services.image.image.Image\")\n def test_list(self, mock_image):\n admin = mock.Mock()\n\n glance = mock_image.return_value\n cinder = admin.cinder.return_value\n\n image_1 = self.Resource(\"foo\", name=\"foo-name\")\n image_2 = self.Resource(\"bar\", name=\"bar-name\")\n glance.list_images.return_value = [image_1, image_2]\n volume_1 = self.Resource(name=\"v1\")\n volume_2 = self.Resource(name=\"image-foo\")\n volume_3 = self.Resource(name=\"foo\")\n volume_4 = self.Resource(name=\"bar\")\n cinder.volumes.list.return_value = [volume_1, volume_2, volume_3,\n volume_4]\n\n manager = resources.CinderImageVolumeCache(admin=admin)\n\n self.assertEqual([{\"volume\": volume_2, \"image\": image_1}],\n manager.list())\n\n mock_image.assert_called_once_with(admin)\n glance.list_images.assert_called_once_with()\n cinder.volumes.list.assert_called_once_with(\n search_opts={\"all_tenants\": 1})\n\n def test_id_and_name(self):\n\n res = resources.CinderImageVolumeCache(\n {\"volume\": self.Resource(\"volume-id\", \"volume-name\"),\n \"image\": self.Resource(\"image-id\", \"image-name\")})\n\n self.assertEqual(\"volume-id\", res.id())\n self.assertEqual(\"image-name\", res.name())\n\n\nclass GnocchiMixinTestCase(test.TestCase):\n\n def get_gnocchi(self):\n gnocchi = 
resources.GnocchiMixin()\n gnocchi._service = \"gnocchi\"\n return gnocchi\n\n def test_id(self):\n gnocchi = self.get_gnocchi()\n gnocchi.raw_resource = {\"name\": \"test_name\"}\n self.assertEqual(\"test_name\", gnocchi.id())\n\n def test_name(self):\n gnocchi = self.get_gnocchi()\n gnocchi.raw_resource = {\"name\": \"test_name\"}\n self.assertEqual(\"test_name\", gnocchi.name())\n\n\nclass GnocchiMetricTestCase(test.TestCase):\n\n def get_gnocchi(self):\n gnocchi = resources.GnocchiMetric()\n gnocchi._service = \"gnocchi\"\n return gnocchi\n\n def test_id(self):\n gnocchi = self.get_gnocchi()\n gnocchi.raw_resource = {\"id\": \"test_id\"}\n self.assertEqual(\"test_id\", gnocchi.id())\n\n def test_list(self):\n gnocchi = self.get_gnocchi()\n gnocchi._manager = mock.MagicMock()\n metrics = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),\n mock.MagicMock()]\n gnocchi._manager.return_value.list.side_effect = (\n metrics[:2], metrics[2:4], [])\n self.assertEqual(metrics, gnocchi.list())\n self.assertEqual(\n [mock.call(marker=None), mock.call(marker=metrics[1][\"id\"]),\n mock.call(marker=metrics[3][\"id\"])],\n gnocchi._manager.return_value.list.call_args_list)\n\n\nclass GnocchiResourceTestCase(test.TestCase):\n\n def get_gnocchi(self):\n gnocchi = resources.GnocchiResource()\n gnocchi._service = \"gnocchi\"\n return gnocchi\n\n def test_id(self):\n gnocchi = self.get_gnocchi()\n gnocchi.raw_resource = {\"id\": \"test_id\"}\n self.assertEqual(\"test_id\", gnocchi.id())\n\n def test_list(self):\n gnocchi = self.get_gnocchi()\n gnocchi._manager = mock.MagicMock()\n res = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),\n mock.MagicMock()]\n gnocchi._manager.return_value.list.side_effect = (\n res[:2], res[2:4], [])\n self.assertEqual(res, gnocchi.list())\n self.assertEqual(\n [mock.call(marker=None), mock.call(marker=res[1][\"id\"]),\n mock.call(marker=res[3][\"id\"])],\n gnocchi._manager.return_value.list.call_args_list)\n\n\nclass 
BarbicanSecretsTestCase(test.TestCase):\n\n def test_id(self):\n barbican = resources.BarbicanSecrets()\n barbican.raw_resource = mock.MagicMock(secret_ref=\"fake_uuid\")\n self.assertEqual(\"fake_uuid\", barbican.id())\n\n def test_list(self):\n barbican = resources.BarbicanSecrets()\n barbican._manager = mock.MagicMock()\n\n barbican.list()\n barbican._manager.assert_called_once_with()\n\n def test_delete(self):\n barbican = resources.BarbicanSecrets()\n barbican._manager = mock.MagicMock()\n barbican.raw_resource = mock.MagicMock(uuid=\"fake_uuid\")\n\n barbican.delete()\n barbican._manager.assert_called_once_with()\n\n def test_is_deleted(self):\n barbican = resources.BarbicanSecrets()\n barbican._manager = mock.MagicMock()\n barbican.raw_resource = mock.MagicMock(uuid=\"fake_uuid\")\n self.assertFalse(barbican.is_deleted())\n\n\[email protected](\"octavia\", \"some\", order=3)\nclass OctaviaSimpleResource(resources.OctaviaMixIn):\n pass\n\n\nclass OctaviaResourceTestCase(test.TestCase):\n\n def test_name(self):\n resource = OctaviaSimpleResource({\"name\": \"test_name\"})\n self.assertEqual(\"test_name\", resource.name())\n\n def test_id(self):\n resource = OctaviaSimpleResource({\"id\": \"test_id\"})\n self.assertEqual(\"test_id\", resource.id())\n\n def test_delete(self):\n clients = mock.MagicMock()\n octavia_client = clients.octavia.return_value\n resource = OctaviaSimpleResource(\n user=clients, resource={\"id\": \"test_id\"})\n\n resource.delete()\n\n octavia_client.some_delete.assert_called_once_with(\"test_id\")\n\n def test_delete_load_balancers(self):\n clients = mock.MagicMock()\n octavia_client = clients.octavia.return_value\n resource = resources.OctaviaLoadBalancers(\n user=clients, resource={\"id\": \"test_id\"})\n\n resource.delete()\n\n octavia_client.load_balancer_delete.assert_called_once_with(\n \"test_id\", cascade=True)\n\n def test_delete_with_exception(self):\n clients = mock.MagicMock()\n octavia_client = clients.octavia.return_value\n 
resource = OctaviaSimpleResource(\n user=clients, resource={\"id\": \"test_id\"})\n\n # case #1: random exception is raised\n octavia_client.some_delete.side_effect = ValueError(\"asd\")\n\n self.assertRaises(ValueError, resource.delete)\n\n # case #2: octaviaclient inner exception with random message\n from octaviaclient.api.v2 import octavia as octavia_exc\n\n e = octavia_exc.OctaviaClientException(409, \"bla bla bla\")\n octavia_client.some_delete.side_effect = e\n\n self.assertRaises(octavia_exc.OctaviaClientException, resource.delete)\n\n # case #3: octaviaclient inner exception with specific message\n e = octavia_exc.OctaviaClientException(\n 409, \"Invalid state PENDING_DELETE bla bla\")\n octavia_client.some_delete.side_effect = e\n\n resource.delete()\n\n def test_delete_load_balancer_with_exception(self):\n clients = mock.MagicMock()\n octavia_client = clients.octavia.return_value\n resource = resources.OctaviaLoadBalancers(\n user=clients, resource={\"id\": \"test_id\"})\n\n # case #1: random exception is raised\n octavia_client.load_balancer_delete.side_effect = ValueError(\"asd\")\n\n self.assertRaises(ValueError, resource.delete)\n\n # case #2: octaviaclient inner exception with random message\n from octaviaclient.api.v2 import octavia as octavia_exc\n\n e = octavia_exc.OctaviaClientException(409, \"bla bla bla\")\n octavia_client.load_balancer_delete.side_effect = e\n\n self.assertRaises(octavia_exc.OctaviaClientException, resource.delete)\n\n # case #3: octaviaclient inner exception with specific message\n e = octavia_exc.OctaviaClientException(\n 409, \"Invalid state PENDING_DELETE bla bla\")\n octavia_client.load_balancer_delete.side_effect = e\n\n resource.delete()\n\n def test_is_deleted_false(self):\n clients = mock.MagicMock()\n octavia_client = clients.octavia.return_value\n resource = OctaviaSimpleResource(\n user=clients, resource={\"id\": \"test_id\"})\n self.assertFalse(resource.is_deleted())\n 
octavia_client.some_show.assert_called_once_with(\"test_id\")\n\n def test_is_deleted_true(self):\n from osc_lib import exceptions as osc_exc\n\n clients = mock.MagicMock()\n octavia_client = clients.octavia.return_value\n octavia_client.some_show.side_effect = osc_exc.NotFound(404, \"foo\")\n resource = OctaviaSimpleResource(\n user=clients, resource={\"id\": \"test_id\"})\n\n self.assertTrue(resource.is_deleted())\n\n octavia_client.some_show.assert_called_once_with(\"test_id\")\n\n def test_list(self):\n clients = mock.MagicMock()\n octavia_client = clients.octavia.return_value\n octavia_client.some_list.return_value = {\"somes\": [1, 2]}\n manager = OctaviaSimpleResource(user=clients)\n\n self.assertEqual([1, 2], manager.list())\n\n octavia_client.some_list.assert_called_once_with()\n\n octavia_client.l7policy_list.return_value = {\"l7policies\": [3, 4]}\n manager = resources.OctaviaL7Policies(user=clients)\n\n self.assertEqual([3, 4], manager.list())\n\n octavia_client.l7policy_list.assert_called_once_with()\n" }, { "alpha_fraction": 0.6312224864959717, "alphanum_fraction": 0.6332542896270752, "avg_line_length": 36.85897445678711, "blob_id": "ecbbd6b7c6a57b58f8284c3da57994ea3ab86eb4", "content_id": "af2c429be7403638828f4486b928b1dc8c47dd1a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2953, "license_type": "permissive", "max_line_length": 79, "num_lines": 78, "path": "/rally_openstack/common/services/heat/main.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import utils as common_utils\nfrom rally.task import atomic\nfrom rally.task import utils\n\nCONF = cfg.CONF\n\n\nclass Stack(common_utils.RandomNameGeneratorMixin):\n \"\"\"Represent heat stack.\n\n Usage:\n >>> stack = Stack(scenario, task, \"template.yaml\", parameters={\"nodes\": 3})\n >>> do_testing(stack)\n >>> stack.update(nodes=4)\n >>> do_testing(stack)\n \"\"\"\n\n def __init__(self, scenario, task, template, files, parameters=None):\n \"\"\"Init heat wrapper.\n\n :param Scenario scenario: scenario instance\n :param Task task: task instance\n :param str template: template file path\n :param dict files: dict with file name and path\n :param dict parameters: parameters for template\n\n \"\"\"\n self.scenario = scenario\n self.task = task\n self.template = open(template).read()\n self.files = {}\n self.parameters = parameters\n for name, path in files.items():\n self.files[name] = open(path).read()\n\n def _wait(self, ready_statuses, failure_statuses):\n self.stack = utils.wait_for_status(\n self.stack,\n check_interval=CONF.openstack.heat_stack_create_poll_interval,\n timeout=CONF.openstack.heat_stack_create_timeout,\n ready_statuses=ready_statuses,\n failure_statuses=failure_statuses,\n update_resource=utils.get_from_manager(),\n )\n\n def create(self):\n with atomic.ActionTimer(self.scenario, \"heat.create\"):\n self.stack = self.scenario.clients(\"heat\").stacks.create(\n stack_name=self.scenario.generate_random_name(),\n template=self.template,\n files=self.files,\n 
parameters=self.parameters)\n self.stack_id = self.stack[\"stack\"][\"id\"]\n self.stack = self.scenario.clients(\n \"heat\").stacks.get(self.stack_id)\n self._wait([\"CREATE_COMPLETE\"], [\"CREATE_FAILED\"])\n\n def update(self, data):\n self.parameters.update(data)\n with atomic.ActionTimer(self.scenario, \"heat.update\"):\n self.scenario.clients(\"heat\").stacks.update(\n self.stack_id, template=self.template,\n files=self.files, parameters=self.parameters)\n self._wait([\"UPDATE_COMPLETE\"], [\"UPDATE_FAILED\"])\n" }, { "alpha_fraction": 0.5778260827064514, "alphanum_fraction": 0.5891304612159729, "avg_line_length": 43.230770111083984, "blob_id": "a820ddb6f1c2eb0ed51a5d7a1b30f96a96aa8f4b", "content_id": "a6170f52bae79da99f500fc65dd32ee07b569c95", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2300, "license_type": "permissive", "max_line_length": 78, "num_lines": 52, "path": "/rally_openstack/common/cfg/glance.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\n\nOPTS = {\"openstack\": [\n cfg.FloatOpt(\"glance_image_delete_timeout\",\n default=120.0,\n deprecated_group=\"benchmark\",\n help=\"Time to wait for glance image to be deleted.\"),\n cfg.FloatOpt(\"glance_image_delete_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Interval between checks when waiting for image \"\n \"deletion.\"),\n cfg.FloatOpt(\"glance_image_create_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after creating a resource before \"\n \"polling for it status\"),\n cfg.FloatOpt(\"glance_image_create_timeout\",\n default=120.0,\n deprecated_group=\"benchmark\",\n help=\"Time to wait for glance image to be created.\"),\n cfg.FloatOpt(\"glance_image_create_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Interval between checks when waiting for image \"\n \"creation.\"),\n cfg.FloatOpt(\"glance_image_create_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after creating a resource before \"\n \"polling for it status\"),\n cfg.FloatOpt(\"glance_image_create_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Interval between checks when waiting for image \"\n \"creation.\")\n]}\n" }, { "alpha_fraction": 0.5952540040016174, "alphanum_fraction": 0.6002005338668823, "avg_line_length": 35.57701873779297, "blob_id": "0b3d0887cd20fec3d47bc03fcd8cec3c8093002c", "content_id": "2beb3493d124aadb6fba1a486a90d7dbc399fa15", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14960, "license_type": "permissive", "max_line_length": 79, "num_lines": 409, "path": "/rally_openstack/common/wrappers/network.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis 
Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\nfrom neutronclient.common import exceptions as neutron_exceptions\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common.services.network import net_utils\nfrom rally_openstack.common.services.network import neutron\n\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\n\n\ndef generate_cidr(start_cidr=\"10.2.0.0/24\"):\n \"\"\"Generate next CIDR for network or subnet, without IP overlapping.\n\n This is process and thread safe, because `cidr_incr' points to\n value stored directly in RAM. This guarantees that CIDRs will be\n serial and unique even under hard multiprocessing/threading load.\n\n :param start_cidr: start CIDR str\n :returns: next available CIDR str\n \"\"\"\n ip_version, cidr = net_utils.generate_cidr(start_cidr=start_cidr)\n return cidr\n\n\nclass NetworkWrapperException(exceptions.RallyException):\n error_code = 532\n msg_fmt = \"%(message)s\"\n\n\nclass NetworkWrapper(object, metaclass=abc.ABCMeta):\n \"\"\"Base class for network service implementations.\n\n We actually have two network services implementations, with different API:\n NovaNetwork and Neutron. 
The idea is (at least to try) to use unified\n service, which hides most differences and routines behind the scenes.\n This allows to significantly re-use and simplify code.\n \"\"\"\n START_CIDR = \"10.2.0.0/24\"\n START_IPV6_CIDR = \"dead:beaf::/64\"\n SERVICE_IMPL = None\n\n def __init__(self, clients, owner, config=None):\n \"\"\"Returns available network wrapper instance.\n\n :param clients: rally.plugins.openstack.osclients.Clients instance\n :param owner: The object that owns resources created by this\n wrapper instance. It will be used to generate\n random names, so must implement\n rally.common.utils.RandomNameGeneratorMixin\n :param config: The configuration of the network\n wrapper. Currently only two config options are\n recognized, 'start_cidr' and 'start_ipv6_cidr'.\n :returns: NetworkWrapper subclass instance\n \"\"\"\n self.clients = clients\n if hasattr(clients, self.SERVICE_IMPL):\n self.client = getattr(clients, self.SERVICE_IMPL)()\n else:\n self.client = clients(self.SERVICE_IMPL)\n self.config = config or {}\n self.owner = owner\n self.start_cidr = self.config.get(\"start_cidr\", self.START_CIDR)\n self.start_ipv6_cidr = self.config.get(\n \"start_ipv6_cidr\", self.START_IPV6_CIDR)\n\n @abc.abstractmethod\n def create_network(self):\n \"\"\"Create network.\"\"\"\n\n @abc.abstractmethod\n def delete_network(self):\n \"\"\"Delete network.\"\"\"\n\n @abc.abstractmethod\n def list_networks(self):\n \"\"\"List networks.\"\"\"\n\n @abc.abstractmethod\n def create_floating_ip(self):\n \"\"\"Create floating IP.\"\"\"\n\n @abc.abstractmethod\n def delete_floating_ip(self):\n \"\"\"Delete floating IP.\"\"\"\n\n @abc.abstractmethod\n def supports_extension(self):\n \"\"\"Checks whether a network extension is supported.\"\"\"\n\n\nclass NeutronWrapper(NetworkWrapper):\n SERVICE_IMPL = consts.Service.NEUTRON\n SUBNET_IP_VERSION = 4\n SUBNET_IPV6_VERSION = 6\n LB_METHOD = \"ROUND_ROBIN\"\n LB_PROTOCOL = \"HTTP\"\n\n def __init__(self, *args, **kwargs):\n 
super(NeutronWrapper, self).__init__(*args, **kwargs)\n\n class _SingleClientWrapper(object):\n def neutron(_self):\n return self.client\n\n @property\n def credential(_self):\n return self.clients.credential\n\n self.neutron = neutron.NeutronService(\n clients=_SingleClientWrapper(),\n name_generator=self.owner.generate_random_name,\n atomic_inst=getattr(self.owner, \"_atomic_actions\", [])\n )\n\n @property\n def external_networks(self):\n return self.neutron.list_networks(router_external=True)\n\n @property\n def ext_gw_mode_enabled(self):\n \"\"\"Determine if the ext-gw-mode extension is enabled.\n\n Without this extension, we can't pass the enable_snat parameter.\n \"\"\"\n return self.neutron.supports_extension(\"ext-gw-mode\", silent=True)\n\n def get_network(self, net_id=None, name=None):\n net = None\n try:\n if net_id:\n net = self.neutron.get_network(net_id)\n else:\n networks = self.neutron.list_networks(name=name)\n if networks:\n net = networks[0]\n except neutron_exceptions.NeutronClientException:\n pass\n\n if net:\n return {\"id\": net[\"id\"],\n \"name\": net[\"name\"],\n \"tenant_id\": net.get(\"tenant_id\",\n net.get(\"project_id\", None)),\n \"status\": net[\"status\"],\n \"external\": net.get(\"router:external\", False),\n \"subnets\": net.get(\"subnets\", []),\n \"router_id\": None}\n else:\n raise NetworkWrapperException(\n \"Network not found: %s\" % (name or net_id))\n\n def create_router(self, external=False, **kwargs):\n \"\"\"Create neutron router.\n\n :param external: bool, whether to set setup external_gateway_info\n :param **kwargs: POST /v2.0/routers request options\n :returns: neutron router dict\n \"\"\"\n kwargs.pop(\"name\", None)\n if \"tenant_id\" in kwargs and \"project_id\" not in kwargs:\n kwargs[\"project_id\"] = kwargs.pop(\"tenant_id\")\n\n return self.neutron.create_router(\n discover_external_gw=external, **kwargs)\n\n def create_v1_pool(self, tenant_id, subnet_id, **kwargs):\n \"\"\"Create LB Pool (v1).\n\n :param 
tenant_id: str, pool tenant id\n :param subnet_id: str, neutron subnet-id\n :param **kwargs: extra options\n :returns: neutron lb-pool dict\n \"\"\"\n pool_args = {\n \"pool\": {\n \"tenant_id\": tenant_id,\n \"name\": self.owner.generate_random_name(),\n \"subnet_id\": subnet_id,\n \"lb_method\": kwargs.get(\"lb_method\", self.LB_METHOD),\n \"protocol\": kwargs.get(\"protocol\", self.LB_PROTOCOL)\n }\n }\n return self.client.create_pool(pool_args)\n\n def _generate_cidr(self, ip_version=4):\n # TODO(amaretskiy): Generate CIDRs unique for network, not cluster\n ip_version, cidr = net_utils.generate_cidr(\n start_cidr=self.start_cidr if ip_version == 4\n else self.start_ipv6_cidr)\n return cidr\n\n def _create_network_infrastructure(self, tenant_id, **kwargs):\n \"\"\"Create network.\n\n The following keyword arguments are accepted:\n\n * add_router: Deprecated, please use router_create_args instead.\n Create an external router and add an interface to each\n subnet created. Default: False\n * subnets_num: Number of subnets to create per network. Default: 0\n * dualstack: Whether subnets should be of both IPv4 and IPv6\n * dns_nameservers: Nameservers for each subnet. 
Default:\n 8.8.8.8, 8.8.4.4\n * network_create_args: Additional network creation arguments.\n * router_create_args: Additional router creation arguments.\n\n :param tenant_id: str, tenant ID\n :param kwargs: Additional options, left open-ended for compatbilitiy.\n See above for recognized keyword args.\n :returns: dict, network data\n \"\"\"\n network_args = dict(kwargs.get(\"network_create_args\", {}))\n network_args[\"project_id\"] = tenant_id\n\n router_args = dict(kwargs.get(\"router_create_args\", {}))\n add_router = kwargs.get(\"add_router\", False)\n if not (router_args or add_router):\n router_args = None\n else:\n router_args[\"project_id\"] = tenant_id\n router_args[\"discover_external_gw\"] = router_args.pop(\n \"external\", False) or add_router\n subnet_create_args = {\"project_id\": tenant_id}\n if \"dns_nameservers\" in kwargs:\n subnet_create_args[\"dns_nameservers\"] = kwargs[\"dns_nameservers\"]\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=network_args,\n router_create_args=router_args,\n subnet_create_args=subnet_create_args,\n subnets_dualstack=kwargs.get(\"dualstack\", False),\n subnets_count=kwargs.get(\"subnets_num\", 0)\n )\n network = net_topo[\"network\"]\n subnets = net_topo[\"subnets\"]\n if net_topo[\"routers\"]:\n router = net_topo[\"routers\"][0]\n else:\n router = None\n\n return {\n \"network\": {\n \"id\": network[\"id\"],\n \"name\": network[\"name\"],\n \"status\": network[\"status\"],\n \"subnets\": [s[\"id\"] for s in subnets],\n \"external\": network.get(\"router:external\", False),\n \"router_id\": router and router[\"id\"] or None,\n \"tenant_id\": tenant_id\n },\n \"subnets\": subnets,\n \"router\": router\n }\n\n def create_network(self, tenant_id, **kwargs):\n \"\"\"Create network.\n\n The following keyword arguments are accepted:\n\n * add_router: Deprecated, please use router_create_args instead.\n Create an external router and add an interface to each\n subnet created. 
Default: False\n * subnets_num: Number of subnets to create per network. Default: 0\n * dualstack: Whether subnets should be of both IPv4 and IPv6\n * dns_nameservers: Nameservers for each subnet. Default:\n 8.8.8.8, 8.8.4.4\n * network_create_args: Additional network creation arguments.\n * router_create_args: Additional router creation arguments.\n\n :param tenant_id: str, tenant ID\n :param kwargs: Additional options, left open-ended for compatbilitiy.\n See above for recognized keyword args.\n :returns: dict, network data\n \"\"\"\n return self._create_network_infrastructure(\n tenant_id, **kwargs)[\"network\"]\n\n def delete_v1_pool(self, pool_id):\n \"\"\"Delete LB Pool (v1)\n\n :param pool_id: str, Lb-Pool-id\n \"\"\"\n self.client.delete_pool(pool_id)\n\n def delete_network(self, network):\n \"\"\"Delete network\n\n :param network: network object returned by create_network method\n \"\"\"\n\n router = {\"id\": network[\"router_id\"]} if network[\"router_id\"] else None\n # delete_network_topology uses only IDs, but let's transmit as much as\n # possible info\n topo = {\n \"network\": {\n \"id\": network[\"id\"],\n \"name\": network[\"name\"],\n \"status\": network[\"status\"],\n \"subnets\": network[\"subnets\"],\n \"router:external\": network[\"external\"]\n },\n \"subnets\": [{\"id\": s} for s in network[\"subnets\"]],\n \"routers\": [router] if router else []\n }\n\n self.neutron.delete_network_topology(topo)\n\n def _delete_subnet(self, subnet_id):\n self.neutron.delete_subnet(subnet_id)\n\n def list_networks(self):\n return self.neutron.list_networks()\n\n def create_port(self, network_id, **kwargs):\n \"\"\"Create neutron port.\n\n :param network_id: neutron network id\n :param **kwargs: POST /v2.0/ports request options\n :returns: neutron port dict\n \"\"\"\n return self.neutron.create_port(network_id=network_id, **kwargs)\n\n def create_floating_ip(self, ext_network=None,\n tenant_id=None, port_id=None, **kwargs):\n \"\"\"Create Neutron floating 
IP.\n\n :param ext_network: floating network name or dict\n :param tenant_id: str tenant id\n :param port_id: str port id\n :param **kwargs: for compatibility, not used here\n :returns: floating IP dict\n \"\"\"\n if not tenant_id:\n raise ValueError(\"Missed tenant_id\")\n try:\n fip = self.neutron.create_floatingip(\n floating_network=ext_network, project_id=tenant_id,\n port_id=port_id)\n except (exceptions.NotFoundException,\n exceptions.GetResourceFailure) as e:\n raise NetworkWrapperException(str(e)) from None\n return {\"id\": fip[\"id\"], \"ip\": fip[\"floating_ip_address\"]}\n\n def delete_floating_ip(self, fip_id, **kwargs):\n \"\"\"Delete floating IP.\n\n :param fip_id: int floating IP id\n :param **kwargs: for compatibility, not used here\n \"\"\"\n self.neutron.delete_floatingip(fip_id)\n\n def supports_extension(self, extension):\n \"\"\"Check whether a neutron extension is supported\n\n :param extension: str, neutron extension\n :returns: result tuple\n :rtype: (bool, string)\n \"\"\"\n try:\n self.neutron.supports_extension(extension)\n except exceptions.NotFoundException as e:\n return False, str(e)\n\n return True, \"\"\n\n\ndef wrap(clients, owner, config=None):\n \"\"\"Returns available network wrapper instance.\n\n :param clients: rally.plugins.openstack.osclients.Clients instance\n :param owner: The object that owns resources created by this\n wrapper instance. It will be used to generate random\n names, so must implement\n rally.common.utils.RandomNameGeneratorMixin\n :param config: The configuration of the network wrapper. 
Currently\n only one config option is recognized, 'start_cidr',\n and only for Nova network.\n :returns: NetworkWrapper subclass instance\n \"\"\"\n if hasattr(clients, \"services\"):\n services = clients.services()\n else:\n services = clients(\"services\")\n\n if consts.Service.NEUTRON in services.values():\n return NeutronWrapper(clients, owner, config=config)\n LOG.warning(\"NovaNetworkWrapper is deprecated since 0.9.0\")\n" }, { "alpha_fraction": 0.5817787051200867, "alphanum_fraction": 0.5917497873306274, "avg_line_length": 39.50814437866211, "blob_id": "070e040da1a90f1777a3aaef8a4b73e0c547092d", "content_id": "65ce41a431f143a2a1638ef9dd1caf56b3741962", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12436, "license_type": "permissive", "max_line_length": 78, "num_lines": 307, "path": "/tests/unit/task/scenarios/vm/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport subprocess\nfrom unittest import mock\n\nimport netaddr\n\nfrom rally.common import cfg\nfrom rally_openstack.task.scenarios.vm import utils\nfrom tests.unit import test\n\nVMTASKS_UTILS = \"rally_openstack.task.scenarios.vm.utils\"\nCONF = cfg.CONF\n\n\nclass VMScenarioTestCase(test.ScenarioTestCase):\n\n @mock.patch(\"%s.open\" % VMTASKS_UTILS,\n side_effect=mock.mock_open(), create=True)\n def test__run_command_over_ssh_script_file(self, mock_open):\n mock_ssh = mock.MagicMock()\n vm_scenario = utils.VMScenario(self.context)\n vm_scenario._run_command_over_ssh(\n mock_ssh,\n {\n \"script_file\": \"foobar\",\n \"interpreter\": [\"interpreter\", \"interpreter_arg\"],\n \"command_args\": [\"arg1\", \"arg2\"]\n }\n )\n mock_ssh.execute.assert_called_once_with(\n [\"interpreter\", \"interpreter_arg\", \"arg1\", \"arg2\"],\n stdin=mock_open.side_effect())\n mock_open.assert_called_once_with(\"foobar\", \"rb\")\n\n @mock.patch(\"%s.io.StringIO\" % VMTASKS_UTILS)\n def test__run_command_over_ssh_script_inline(self, mock_string_io):\n mock_ssh = mock.MagicMock()\n vm_scenario = utils.VMScenario(self.context)\n vm_scenario._run_command_over_ssh(\n mock_ssh,\n {\n \"script_inline\": \"foobar\",\n \"interpreter\": [\"interpreter\", \"interpreter_arg\"],\n \"command_args\": [\"arg1\", \"arg2\"]\n }\n )\n mock_ssh.execute.assert_called_once_with(\n [\"interpreter\", \"interpreter_arg\", \"arg1\", \"arg2\"],\n stdin=mock_string_io.return_value)\n mock_string_io.assert_called_once_with(\"foobar\")\n\n def test__run_command_over_ssh_remote_path(self):\n mock_ssh = mock.MagicMock()\n vm_scenario = utils.VMScenario(self.context)\n vm_scenario._run_command_over_ssh(\n mock_ssh,\n {\n \"remote_path\": [\"foo\", \"bar\"],\n \"command_args\": [\"arg1\", \"arg2\"]\n }\n )\n mock_ssh.execute.assert_called_once_with(\n [\"foo\", \"bar\", \"arg1\", \"arg2\"],\n 
stdin=None)\n\n def test__run_command_over_ssh_remote_path_copy(self):\n mock_ssh = mock.MagicMock()\n vm_scenario = utils.VMScenario(self.context)\n vm_scenario._run_command_over_ssh(\n mock_ssh,\n {\n \"remote_path\": [\"foo\", \"bar\"],\n \"local_path\": \"/bin/false\",\n \"command_args\": [\"arg1\", \"arg2\"]\n }\n )\n mock_ssh.put_file.assert_called_once_with(\n \"/bin/false\", \"bar\", mode=0o755\n )\n mock_ssh.execute.assert_called_once_with(\n [\"foo\", \"bar\", \"arg1\", \"arg2\"],\n stdin=None)\n\n def test__wait_for_ssh(self):\n ssh = mock.MagicMock()\n vm_scenario = utils.VMScenario(self.context)\n vm_scenario._wait_for_ssh(ssh)\n ssh.wait.assert_called_once_with(120, 1)\n\n def test__wait_for_ping(self):\n vm_scenario = utils.VMScenario(self.context)\n vm_scenario._ping_ip_address = mock.Mock(return_value=True)\n vm_scenario._wait_for_ping(netaddr.IPAddress(\"1.2.3.4\"))\n self.mock_wait_for_status.mock.assert_called_once_with(\n utils.Host(\"1.2.3.4\"),\n ready_statuses=[utils.Host.ICMP_UP_STATUS],\n update_resource=utils.Host.update_status,\n timeout=CONF.openstack.vm_ping_timeout,\n check_interval=CONF.openstack.vm_ping_poll_interval)\n\n @mock.patch(VMTASKS_UTILS + \".VMScenario._run_command_over_ssh\")\n @mock.patch(\"rally.utils.sshutils.SSH\")\n def test__run_command(self, mock_sshutils_ssh,\n mock_vm_scenario__run_command_over_ssh):\n vm_scenario = utils.VMScenario(self.context)\n vm_scenario.context = {\"user\": {\"keypair\": {\"private\": \"ssh\"}}}\n vm_scenario._run_command(\"1.2.3.4\", 22, \"username\", \"password\",\n command={\"script_file\": \"foo\",\n \"interpreter\": \"bar\"})\n\n mock_sshutils_ssh.assert_called_once_with(\n \"username\", \"1.2.3.4\",\n port=22, pkey=\"ssh\", password=\"password\")\n mock_sshutils_ssh.return_value.wait.assert_called_once_with(120, 1)\n mock_vm_scenario__run_command_over_ssh.assert_called_once_with(\n mock_sshutils_ssh.return_value,\n {\"script_file\": \"foo\", \"interpreter\": \"bar\"})\n\n def 
get_scenario(self):\n server = mock.Mock(\n networks={\"foo_net\": \"foo_data\"},\n addresses={\"foo_net\": [{\"addr\": \"foo_ip\"}]},\n tenant_id=\"foo_tenant\"\n )\n scenario = utils.VMScenario(\n self.context,\n clients=mock.MagicMock(credential=mock.MagicMock(api_info={})))\n\n scenario._boot_server = mock.Mock(return_value=server)\n scenario._delete_server = mock.Mock()\n scenario._associate_floating_ip = mock.Mock()\n scenario._wait_for_ping = mock.Mock()\n\n return scenario, server\n\n def test__boot_server_with_fip_without_networks(self):\n scenario, server = self.get_scenario()\n server.networks = {}\n self.assertRaises(RuntimeError,\n scenario._boot_server_with_fip,\n \"foo_image\", \"foo_flavor\", foo_arg=\"foo_value\")\n scenario._boot_server.assert_called_once_with(\n \"foo_image\", \"foo_flavor\",\n foo_arg=\"foo_value\", auto_assign_nic=True)\n\n def test__boot_server_with_fixed_ip(self):\n scenario, server = self.get_scenario()\n scenario._attach_floating_ip = mock.Mock()\n server, ip = scenario._boot_server_with_fip(\n \"foo_image\", \"foo_flavor\", floating_network=\"ext_network\",\n use_floating_ip=False, foo_arg=\"foo_value\")\n\n self.assertEqual(ip, {\"ip\": \"foo_ip\", \"id\": None,\n \"is_floating\": False})\n scenario._boot_server.assert_called_once_with(\n \"foo_image\", \"foo_flavor\",\n auto_assign_nic=True, foo_arg=\"foo_value\")\n self.assertEqual(scenario._attach_floating_ip.mock_calls, [])\n\n def test__boot_server_with_fip(self):\n scenario, server = self.get_scenario()\n scenario._attach_floating_ip = mock.Mock(\n return_value={\"id\": \"foo_id\", \"ip\": \"foo_ip\"})\n server, ip = scenario._boot_server_with_fip(\n \"foo_image\", \"foo_flavor\", floating_network=\"ext_network\",\n use_floating_ip=True, foo_arg=\"foo_value\")\n self.assertEqual(ip, {\"ip\": \"foo_ip\", \"id\": \"foo_id\",\n \"is_floating\": True})\n\n scenario._boot_server.assert_called_once_with(\n \"foo_image\", \"foo_flavor\",\n auto_assign_nic=True, 
foo_arg=\"foo_value\")\n scenario._attach_floating_ip.assert_called_once_with(\n server, \"ext_network\")\n\n def test__delete_server_with_fixed_ip(self):\n ip = {\"ip\": \"foo_ip\", \"id\": None, \"is_floating\": False}\n scenario, server = self.get_scenario()\n scenario._delete_floating_ip = mock.Mock()\n scenario._delete_server_with_fip(server, ip, force_delete=True)\n\n self.assertEqual(scenario._delete_floating_ip.mock_calls, [])\n scenario._delete_server.assert_called_once_with(server, force=True)\n\n def test__delete_server_with_fip(self):\n fip = {\"ip\": \"foo_ip\", \"id\": \"foo_id\", \"is_floating\": True}\n scenario, server = self.get_scenario()\n scenario._delete_floating_ip = mock.Mock()\n scenario._delete_server_with_fip(server, fip, force_delete=True)\n\n scenario._delete_floating_ip.assert_called_once_with(server, fip)\n scenario._delete_server.assert_called_once_with(server, force=True)\n\n def test__attach_floating_ip(self):\n scenario, server = self.get_scenario()\n nc = scenario._clients.neutron.return_value\n\n fip = {\"id\": \"foo_id\", \"floating_ip_address\": \"foo_ip\"}\n nc.create_floatingip.return_value = {\"floatingip\": fip}\n\n floating_network = {\"id\": \"floating-network-id\",\n \"name\": \"floating-network\"}\n scenario._attach_floating_ip(\n server, floating_network=floating_network)\n\n nc.create_floatingip.assert_called_once_with({\n \"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": floating_network[\"id\"]}\n })\n\n scenario._associate_floating_ip.assert_called_once_with(\n server, fip, fixed_address=fip[\"floating_ip_address\"])\n\n def test__delete_floating_ip(self):\n scenario, server = self.get_scenario()\n nc = scenario._clients.neutron.return_value\n\n _check_addr = mock.Mock(return_value=True)\n scenario.check_ip_address = mock.Mock(return_value=_check_addr)\n scenario._dissociate_floating_ip = mock.Mock()\n\n fip = {\"id\": \"foo_id\", \"ip\": \"foo_ip\"}\n scenario._delete_floating_ip(server, 
fip=fip)\n\n scenario.check_ip_address.assert_called_once_with(\n \"foo_ip\")\n _check_addr.assert_called_once_with(server)\n scenario._dissociate_floating_ip.assert_called_once_with(\n server, fip)\n nc.delete_floatingip.assert_called_once_with(\"foo_id\")\n\n\nclass HostTestCase(test.TestCase):\n\n @mock.patch(VMTASKS_UTILS + \".sys\")\n @mock.patch(\"subprocess.Popen\")\n def test__ping_ip_address_linux(self, mock_popen, mock_sys):\n mock_popen.return_value.returncode = 0\n mock_sys.platform = \"linux2\"\n\n host = utils.Host(\"1.2.3.4\")\n self.assertEqual(utils.Host.ICMP_UP_STATUS,\n utils.Host.update_status(host).status)\n\n mock_popen.assert_called_once_with(\n [\"ping\", \"-c1\", \"-w1\", str(host.ip)],\n stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n mock_popen.return_value.wait.assert_called_once_with()\n\n @mock.patch(VMTASKS_UTILS + \".sys\")\n @mock.patch(\"subprocess.Popen\")\n def test__ping_ip_address_linux_ipv6(self, mock_popen, mock_sys):\n mock_popen.return_value.returncode = 0\n mock_sys.platform = \"linux2\"\n\n host = utils.Host(\"1ce:c01d:bee2:15:a5:900d:a5:11fe\")\n self.assertEqual(utils.Host.ICMP_UP_STATUS,\n utils.Host.update_status(host).status)\n\n mock_popen.assert_called_once_with(\n [\"ping6\", \"-c1\", \"-w1\", str(host.ip)],\n stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n mock_popen.return_value.wait.assert_called_once_with()\n\n @mock.patch(VMTASKS_UTILS + \".sys\")\n @mock.patch(\"subprocess.Popen\")\n def test__ping_ip_address_other_os(self, mock_popen, mock_sys):\n mock_popen.return_value.returncode = 0\n mock_sys.platform = \"freebsd10\"\n\n host = utils.Host(\"1.2.3.4\")\n self.assertEqual(utils.Host.ICMP_UP_STATUS,\n utils.Host.update_status(host).status)\n\n mock_popen.assert_called_once_with(\n [\"ping\", \"-c1\", str(host.ip)],\n stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n mock_popen.return_value.wait.assert_called_once_with()\n\n @mock.patch(VMTASKS_UTILS + \".sys\")\n @mock.patch(\"subprocess.Popen\")\n def 
test__ping_ip_address_other_os_ipv6(self, mock_popen, mock_sys):\n mock_popen.return_value.returncode = 0\n mock_sys.platform = \"freebsd10\"\n\n host = utils.Host(\"1ce:c01d:bee2:15:a5:900d:a5:11fe\")\n self.assertEqual(utils.Host.ICMP_UP_STATUS,\n utils.Host.update_status(host).status)\n\n mock_popen.assert_called_once_with(\n [\"ping6\", \"-c1\", str(host.ip)],\n stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n mock_popen.return_value.wait.assert_called_once_with()\n" }, { "alpha_fraction": 0.5808805227279663, "alphanum_fraction": 0.5866666436195374, "avg_line_length": 39.15151596069336, "blob_id": "80cd63a355f81e07070cdb94716361d63ee95df7", "content_id": "f06c17265c55cf1581ce9ca28f4d631e88f9055c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3975, "license_type": "permissive", "max_line_length": 79, "num_lines": 99, "path": "/rally_openstack/task/contexts/swift/objects.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Cisco Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import context\nfrom rally_openstack.task.contexts.swift import utils as swift_utils\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"swift_objects\", platform=\"openstack\", order=360)\nclass SwiftObjectGenerator(swift_utils.SwiftObjectMixin,\n context.OpenStackContext):\n \"\"\"Create containers and objects in each tenant.\"\"\"\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"containers_per_tenant\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"objects_per_container\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"object_size\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"resource_management_workers\": {\n \"type\": \"integer\",\n \"minimum\": 1\n }\n },\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\n \"containers_per_tenant\": 1,\n \"objects_per_container\": 1,\n \"object_size\": 1024,\n \"resource_management_workers\": 30\n }\n\n def setup(self):\n \"\"\"Create containers and objects, using the broker pattern.\"\"\"\n threads = self.config[\"resource_management_workers\"]\n\n containers_per_tenant = self.config[\"containers_per_tenant\"]\n containers_num = len(self.context[\"tenants\"]) * containers_per_tenant\n LOG.debug(\"Creating %d containers using %d threads.\"\n % (containers_num, threads))\n containers_count = len(self._create_containers(containers_per_tenant,\n threads))\n if containers_count != containers_num:\n raise exceptions.ContextSetupFailure(\n ctx_name=self.get_name(),\n msg=\"Failed to create the requested number of containers, \"\n \"expected %(expected)s but got %(actual)s.\"\n % 
{\"expected\": containers_num, \"actual\": containers_count})\n\n objects_per_container = self.config[\"objects_per_container\"]\n objects_num = containers_num * objects_per_container\n LOG.debug(\"Creating %d objects using %d threads.\"\n % (objects_num, threads))\n objects_count = len(self._create_objects(objects_per_container,\n self.config[\"object_size\"],\n threads))\n if objects_count != objects_num:\n raise exceptions.ContextSetupFailure(\n ctx_name=self.get_name(),\n msg=\"Failed to create the requested number of objects, \"\n \"expected %(expected)s but got %(actual)s.\"\n % {\"expected\": objects_num, \"actual\": objects_count})\n\n def cleanup(self):\n \"\"\"Delete containers and objects, using the broker pattern.\"\"\"\n threads = self.config[\"resource_management_workers\"]\n\n self._delete_objects(threads)\n self._delete_containers(threads)\n" }, { "alpha_fraction": 0.5945584177970886, "alphanum_fraction": 0.5972346067428589, "avg_line_length": 40.51852035522461, "blob_id": "1216b3acb3fd06498e4eb4c6c710d9a09543f219", "content_id": "728d1a7797ad70972abc5b545532925d459c5592", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4484, "license_type": "permissive", "max_line_length": 79, "num_lines": 108, "path": "/rally_openstack/task/contexts/quotas/quotas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Dassault Systemes\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task import context\nfrom rally_openstack.task.contexts.quotas import cinder_quotas\nfrom rally_openstack.task.contexts.quotas import designate_quotas\nfrom rally_openstack.task.contexts.quotas import manila_quotas\nfrom rally_openstack.task.contexts.quotas import neutron_quotas\nfrom rally_openstack.task.contexts.quotas import nova_quotas\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"quotas\", platform=\"openstack\", order=300)\nclass Quotas(context.OpenStackContext):\n \"\"\"Sets OpenStack Tenants quotas.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"additionalProperties\": False,\n \"properties\": {\n \"nova\": nova_quotas.NovaQuotas.QUOTAS_SCHEMA,\n \"cinder\": cinder_quotas.CinderQuotas.QUOTAS_SCHEMA,\n \"manila\": manila_quotas.ManilaQuotas.QUOTAS_SCHEMA,\n \"designate\": designate_quotas.DesignateQuotas.QUOTAS_SCHEMA,\n \"neutron\": neutron_quotas.NeutronQuotas.QUOTAS_SCHEMA\n }\n }\n\n def __init__(self, ctx):\n super(Quotas, self).__init__(ctx)\n self.clients = osclients.Clients(\n self.context[\"admin\"][\"credential\"])\n\n self.manager = {\n \"nova\": nova_quotas.NovaQuotas(self.clients),\n \"cinder\": cinder_quotas.CinderQuotas(self.clients),\n \"manila\": manila_quotas.ManilaQuotas(self.clients),\n \"designate\": designate_quotas.DesignateQuotas(self.clients),\n \"neutron\": neutron_quotas.NeutronQuotas(self.clients)\n }\n self.original_quotas = []\n\n def _service_has_quotas(self, service):\n return len(self.config.get(service, {})) > 0\n\n def setup(self):\n for tenant_id in self.context[\"tenants\"]:\n for service in 
self.manager:\n if self._service_has_quotas(service):\n # NOTE(andreykurilin): in case of existing users it is\n # required to restore original quotas instead of reset\n # to default ones.\n if \"existing_users\" in self.context:\n self.original_quotas.append(\n (service, tenant_id,\n self.manager[service].get(tenant_id)))\n self.manager[service].update(tenant_id,\n **self.config[service])\n\n def _restore_quotas(self):\n for service, tenant_id, quotas in self.original_quotas:\n try:\n self.manager[service].update(tenant_id, **quotas)\n except Exception as e:\n LOG.warning(\"Failed to restore quotas for tenant %(tenant_id)s\"\n \" in service %(service)s \\n reason: %(exc)s\" %\n {\"tenant_id\": tenant_id, \"service\": service,\n \"exc\": e})\n\n def _delete_quotas(self):\n for service in self.manager:\n if self._service_has_quotas(service):\n for tenant_id in self.context[\"tenants\"]:\n try:\n self.manager[service].delete(tenant_id)\n except Exception as e:\n LOG.warning(\n \"Failed to remove quotas for tenant %(tenant)s \"\n \"in service %(service)s reason: %(e)s\" %\n {\"tenant\": tenant_id, \"service\": service, \"e\": e})\n\n def cleanup(self):\n if self.original_quotas:\n # existing users\n self._restore_quotas()\n else:\n self._delete_quotas()\n" }, { "alpha_fraction": 0.6002558469772339, "alphanum_fraction": 0.6114486455917358, "avg_line_length": 37.13414764404297, "blob_id": "6c982d157314c105fa180b1a4a183c377b02b8ba", "content_id": "3eca7c7218a5f0f11685050b764a45caad6e92d6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3127, "license_type": "permissive", "max_line_length": 79, "num_lines": 82, "path": "/tests/unit/task/scenarios/ironic/test_nodes.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file 
except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\n\nfrom rally_openstack.task.scenarios.ironic import nodes\nfrom tests.unit import test\n\n\nclass IronicNodesTestCase(test.ScenarioTestCase):\n\n def test_create_and_list_node(self):\n class Node(object):\n def __init__(self, name):\n self.name = name\n\n scenario = nodes.CreateAndListNode(self.context)\n scenario._create_node = mock.Mock(return_value=Node(\"node_obj1\"))\n scenario._list_nodes = mock.Mock(\n return_value=[Node(name)\n for name in (\"node_obj1\", \"node_obj2\", \"node_obj3\")])\n driver = \"foo\"\n properties = \"fake_prop\"\n fake_params = {\n \"sort_dir\": \"foo1\",\n \"associated\": \"foo2\",\n \"detail\": True,\n \"maintenance\": \"foo5\",\n \"fake_parameter1\": \"foo7\"\n }\n\n # Positive case:\n scenario.run(driver, properties, **fake_params)\n\n scenario._create_node.assert_called_once_with(driver, properties,\n fake_parameter1=\"foo7\")\n scenario._list_nodes.assert_called_once_with(\n sort_dir=\"foo1\", associated=\"foo2\", detail=True,\n maintenance=\"foo5\")\n\n # Negative case: created node not in the list of available nodes\n scenario._create_node = mock.Mock(uuid=\"foooo\")\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, driver, properties, **fake_params)\n\n scenario._create_node.assert_called_with(driver, properties,\n fake_parameter1=\"foo7\")\n scenario._list_nodes.assert_called_with(\n sort_dir=\"foo1\", associated=\"foo2\", detail=True,\n maintenance=\"foo5\")\n\n def 
test_create_and_delete_node(self):\n fake_node = mock.Mock(uuid=\"fake_uuid\")\n scenario = nodes.CreateAndDeleteNode(self.context)\n scenario._create_node = mock.Mock(return_value=fake_node)\n scenario._delete_node = mock.Mock()\n\n driver = \"fake\"\n properties = \"fake_prop\"\n\n scenario.run(driver, properties, fake_parameter1=\"fake1\",\n fake_parameter2=\"fake2\")\n scenario._create_node.assert_called_once_with(\n driver, properties, fake_parameter1=\"fake1\",\n fake_parameter2=\"fake2\")\n\n scenario._delete_node.assert_called_once_with(\n scenario._create_node.return_value)\n" }, { "alpha_fraction": 0.7484662532806396, "alphanum_fraction": 0.754601240158081, "avg_line_length": 31.600000381469727, "blob_id": "aa6c082679f2401334b7406f4642139791b814d6", "content_id": "f51dd55c92ad5fd608645e9b1a1feb4933d26f7f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 163, "license_type": "permissive", "max_line_length": 79, "num_lines": 5, "path": "/samples/tasks/support/instance_test.sh", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\necho \"The script was absorbed by VMTasks.dd_load_test scenario.\"\n# If we do not fail it, no one will found the warning message about deprecation\nexit 1\n" }, { "alpha_fraction": 0.6079714894294739, "alphanum_fraction": 0.6106133460998535, "avg_line_length": 45.932613372802734, "blob_id": "c4b563e25240fba4103d2d1c0d82924b20d355ad", "content_id": "08f293559f0817aef34b8c27d48cdc93b1c64a3a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17412, "license_type": "permissive", "max_line_length": 78, "num_lines": 371, "path": "/rally_openstack/task/scenarios/glance/images.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 
(the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common.services.image import glance_v2\nfrom rally_openstack.common.services.image import image\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.nova import utils as nova_utils\n\nLOG = logging.getLogger(__name__)\n\n\"\"\"Scenarios for Glance images.\"\"\"\n\n\nclass GlanceBasic(scenario.OpenStackScenario):\n def __init__(self, context=None, admin_clients=None, clients=None):\n super(GlanceBasic, self).__init__(context, admin_clients, clients)\n if hasattr(self, \"_admin_clients\"):\n self.admin_glance = image.Image(\n self._admin_clients, name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n if hasattr(self, \"_clients\"):\n self.glance = image.Image(\n self._clients, name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n\n\[email protected](\"enum\", param_name=\"container_format\",\n values=[\"ami\", \"ari\", \"aki\", \"bare\", \"ovf\"])\[email protected](\"enum\", param_name=\"disk_format\",\n values=[\"ami\", \"ari\", \"aki\", \"vhd\", \"vmdk\", \"raw\",\n \"qcow2\", \"vdi\", \"iso\"])\[email protected](image_location={\"type\": \"path_or_url\"},\n kwargs={\"type\": \"glance_image_args\"})\[email protected](\"required_services\", services=[consts.Service.GLANCE])\[email protected](\"required_platform\", 
platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"glance\"]},\n name=\"GlanceImages.create_and_list_image\",\n platform=\"openstack\")\nclass CreateAndListImage(GlanceBasic):\n\n def run(self, container_format, image_location, disk_format,\n visibility=\"private\", min_disk=0, min_ram=0, properties=None):\n \"\"\"Create an image and then list all images.\n\n Measure the \"glance image-list\" command performance.\n\n If you have only 1 user in your context, you will\n add 1 image on every iteration. So you will have more\n and more images and will be able to measure the\n performance of the \"glance image-list\" command depending on\n the number of images owned by users.\n\n :param container_format: container format of image. Acceptable\n formats: ami, ari, aki, bare, and ovf\n :param image_location: image file location\n :param disk_format: disk format of image. Acceptable formats:\n ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso\n :param visibility: The access permission for the created image\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n :param properties: A dict of image metadata properties to set\n on the image\n \"\"\"\n image = self.glance.create_image(\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram,\n properties=properties)\n self.assertTrue(image)\n image_list = self.glance.list_images()\n self.assertIn(image.id, [i.id for i in image_list])\n\n\[email protected](\"enum\", param_name=\"container_format\",\n values=[\"ami\", \"ari\", \"aki\", \"bare\", \"ovf\"])\[email protected](\"enum\", param_name=\"disk_format\",\n values=[\"ami\", \"ari\", \"aki\", \"vhd\", \"vmdk\", \"raw\",\n \"qcow2\", \"vdi\", \"iso\"])\[email protected](image_location={\"type\": \"path_or_url\"},\n kwargs={\"type\": \"glance_image_args\"})\[email 
protected](\"required_services\", services=[consts.Service.GLANCE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"glance\"]},\n name=\"GlanceImages.create_and_get_image\",\n platform=\"openstack\")\nclass CreateAndGetImage(GlanceBasic):\n\n def run(self, container_format, image_location, disk_format,\n visibility=\"private\", min_disk=0, min_ram=0, properties=None):\n \"\"\"Create and get detailed information of an image.\n\n :param container_format: container format of image. Acceptable\n formats: ami, ari, aki, bare, and ovf\n :param image_location: image file location\n :param disk_format: disk format of image. Acceptable formats:\n ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso\n :param visibility: The access permission for the created image\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n :param properties: A dict of image metadata properties to set\n on the image\n \"\"\"\n image = self.glance.create_image(\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram,\n properties=properties)\n self.assertTrue(image)\n image_info = self.glance.get_image(image)\n self.assertEqual(image.id, image_info.id)\n\n\[email protected](\"required_services\", services=[consts.Service.GLANCE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"GlanceImages.list_images\",\n platform=\"openstack\")\nclass ListImages(GlanceBasic):\n\n def run(self):\n \"\"\"List all images.\n\n This simple scenario tests the glance image-list command by listing\n all the images.\n\n Suppose if we have 2 users in context and each has 2 images\n uploaded for them we will be able to test the performance of\n glance image-list command in this case.\n \"\"\"\n self.glance.list_images()\n\n\[email 
protected](\"enum\", param_name=\"container_format\",\n values=[\"ami\", \"ari\", \"aki\", \"bare\", \"ovf\"])\[email protected](\"enum\", param_name=\"disk_format\",\n values=[\"ami\", \"ari\", \"aki\", \"vhd\", \"vmdk\", \"raw\",\n \"qcow2\", \"vdi\", \"iso\"])\[email protected](image_location={\"type\": \"path_or_url\"},\n kwargs={\"type\": \"glance_image_args\"})\[email protected](\"required_services\", services=[consts.Service.GLANCE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"glance\"]},\n name=\"GlanceImages.create_and_delete_image\",\n platform=\"openstack\")\nclass CreateAndDeleteImage(GlanceBasic):\n\n def run(self, container_format, image_location, disk_format,\n visibility=\"private\", min_disk=0, min_ram=0, properties=None):\n \"\"\"Create and then delete an image.\n\n :param container_format: container format of image. Acceptable\n formats: ami, ari, aki, bare, and ovf\n :param image_location: image file location\n :param disk_format: disk format of image. 
Acceptable formats:\n ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso\n :param visibility: The access permission for the created image\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n :param properties: A dict of image metadata properties to set\n on the image\n \"\"\"\n image = self.glance.create_image(\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram,\n properties=properties)\n self.glance.delete_image(image.id)\n\n\[email protected](flavor={\"type\": \"nova_flavor\"},\n image_location={\"type\": \"path_or_url\"},\n kwargs={\"type\": \"glance_image_args\"})\[email protected](\"enum\", param_name=\"container_format\",\n values=[\"ami\", \"ari\", \"aki\", \"bare\", \"ovf\"])\[email protected](\"enum\", param_name=\"disk_format\",\n values=[\"ami\", \"ari\", \"aki\", \"vhd\", \"vmdk\", \"raw\",\n \"qcow2\", \"vdi\", \"iso\"])\[email protected](\"restricted_parameters\", param_names=[\"image_name\", \"name\"])\[email protected](\"flavor_exists\", param_name=\"flavor\")\[email protected](\"required_services\", services=[consts.Service.GLANCE,\n consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"glance\", \"nova\"]},\n name=\"GlanceImages.create_image_and_boot_instances\",\n platform=\"openstack\")\nclass CreateImageAndBootInstances(GlanceBasic, nova_utils.NovaScenario):\n\n def run(self, container_format, image_location, disk_format,\n flavor, number_instances, visibility=\"private\", min_disk=0,\n min_ram=0, properties=None, boot_server_kwargs=None):\n \"\"\"Create an image and boot several instances from it.\n\n :param container_format: container format of image. Acceptable\n formats: ami, ari, aki, bare, and ovf\n :param image_location: image file location\n :param disk_format: disk format of image. 
Acceptable formats:\n ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso\n :param visibility: The access permission for the created image\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n :param properties: A dict of image metadata properties to set\n on the image\n :param flavor: Nova flavor to be used to launch an instance\n :param number_instances: number of Nova servers to boot\n :param boot_server_kwargs: optional parameters to boot server\n \"\"\"\n\n image = self.glance.create_image(\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram,\n properties=properties)\n\n self._boot_servers(image.id, flavor, number_instances,\n **(boot_server_kwargs or {}))\n\n\[email protected](\"enum\", param_name=\"container_format\",\n values=[\"ami\", \"ari\", \"aki\", \"bare\", \"ovf\"])\[email protected](\"enum\", param_name=\"disk_format\",\n values=[\"ami\", \"ari\", \"aki\", \"vhd\", \"vmdk\", \"raw\",\n \"qcow2\", \"vdi\", \"iso\"])\[email protected](image_location={\"type\": \"path_or_url\"},\n kwargs={\"type\": \"glance_image_args\"})\[email protected](\"required_services\", services=[consts.Service.GLANCE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"glance\"]},\n name=\"GlanceImages.create_and_update_image\",\n platform=\"openstack\")\nclass CreateAndUpdateImage(GlanceBasic):\n\n def run(self, container_format, image_location, disk_format,\n remove_props=None, visibility=\"private\", create_min_disk=0,\n create_min_ram=0, create_properties=None,\n update_min_disk=0, update_min_ram=0):\n \"\"\"Create an image then update it.\n\n Measure the \"glance image-create\" and \"glance image-update\" commands\n performance.\n\n :param container_format: container format of image. 
Acceptable\n formats: ami, ari, aki, bare, and ovf\n :param image_location: image file location\n :param disk_format: disk format of image. Acceptable formats:\n ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso\n :param remove_props: List of property names to remove.\n (It is only supported by Glance v2.)\n :param visibility: The access permission for the created image\n :param create_min_disk: The min disk of created images\n :param create_min_ram: The min ram of created images\n :param create_properties: A dict of image metadata properties to set\n on the created image\n :param update_min_disk: The min disk of updated images\n :param update_min_ram: The min ram of updated images\n \"\"\"\n image = self.glance.create_image(\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=create_min_disk,\n min_ram=create_min_ram,\n properties=create_properties)\n\n self.glance.update_image(image.id,\n min_disk=update_min_disk,\n min_ram=update_min_ram,\n remove_props=remove_props)\n\n\[email protected](\"required_services\", services=(consts.Service.GLANCE, ))\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_api_versions\", component=\"glance\", versions=[\"2\"])\[email protected](context={\"cleanup@openstack\": [\"glance\"]},\n name=\"GlanceImages.create_and_deactivate_image\",\n platform=\"openstack\")\nclass CreateAndDeactivateImage(GlanceBasic):\n def run(self, container_format, image_location, disk_format,\n visibility=\"private\", min_disk=0, min_ram=0):\n \"\"\"Create an image, then deactivate it.\n\n :param container_format: container format of image. Acceptable\n formats: ami, ari, aki, bare, and ovf\n :param image_location: image file location\n :param disk_format: disk format of image. 
Acceptable formats:\n ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso\n :param visibility: The access permission for the created image\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n \"\"\"\n service = glance_v2.GlanceV2Service(self._clients,\n self.generate_random_name,\n atomic_inst=self.atomic_actions())\n\n image = service.create_image(\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram)\n service.deactivate_image(image.id)\n\n\[email protected](\"enum\", param_name=\"container_format\",\n values=[\"ami\", \"ari\", \"aki\", \"bare\", \"ovf\"])\[email protected](\"enum\", param_name=\"disk_format\",\n values=[\"ami\", \"ari\", \"aki\", \"vhd\", \"vmdk\", \"raw\",\n \"qcow2\", \"vdi\", \"iso\"])\[email protected](image_location={\"type\": \"path_or_url\"},\n kwargs={\"type\": \"glance_image_args\"})\[email protected](\"required_services\", services=[consts.Service.GLANCE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"glance\"]},\n name=\"GlanceImages.create_and_download_image\",\n platform=\"openstack\")\nclass CreateAndDownloadImage(GlanceBasic):\n\n def run(self, container_format, image_location, disk_format,\n visibility=\"private\", min_disk=0, min_ram=0, properties=None):\n \"\"\"Create an image, then download data of the image.\n\n :param container_format: container format of image. Acceptable\n formats: ami, ari, aki, bare, and ovf\n :param image_location: image file location\n :param disk_format: disk format of image. 
Acceptable formats:\n ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso\n :param visibility: The access permission for the created image\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n :param properties: A dict of image metadata properties to set\n on the image\n \"\"\"\n image = self.glance.create_image(\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram,\n properties=properties)\n\n self.glance.download_image(image.id)\n" }, { "alpha_fraction": 0.5650256872177124, "alphanum_fraction": 0.5735978484153748, "avg_line_length": 32.19512176513672, "blob_id": "2403d373ee3f0282abb36eeaaab3bddf16128c24", "content_id": "47ffa6d4ef8a0ac2a88f2f1b9bd670c92941a8dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4083, "license_type": "permissive", "max_line_length": 78, "num_lines": 123, "path": "/tests/unit/task/contexts/nova/test_flavors.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nfrom unittest import mock\n\nfrom novaclient import exceptions as nova_exceptions\n\nfrom rally_openstack.task.contexts.nova import flavors\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.nova\"\n\n\nclass FlavorsGeneratorTestCase(test.TestCase):\n\n def setUp(self):\n super(FlavorsGeneratorTestCase, self).setUp()\n self.context = {\n \"config\": {\n \"flavors\": [{\n \"name\": \"flavor_name\",\n \"ram\": 2048,\n \"disk\": 10,\n \"vcpus\": 3,\n \"ephemeral\": 3,\n \"swap\": 5,\n \"extra_specs\": {\n \"key\": \"value\"\n }\n }]\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"task\": mock.MagicMock(),\n }\n\n @mock.patch(\"%s.flavors.osclients.Clients\" % CTX)\n def test_setup(self, mock_clients):\n # Setup and mock\n mock_create = mock_clients().nova().flavors.create\n mock_create().to_dict.return_value = {\"flavor_key\": \"flavor_value\"}\n\n # Run\n flavors_ctx = flavors.FlavorsGenerator(self.context)\n flavors_ctx.setup()\n\n # Assertions\n self.assertEqual({\"flavor_name\": {\"flavor_key\": \"flavor_value\"}},\n flavors_ctx.context[\"flavors\"])\n\n mock_clients.assert_called_with(self.context[\"admin\"][\"credential\"])\n\n mock_create.assert_called_with(\n name=\"flavor_name\", ram=2048, vcpus=3,\n disk=10, ephemeral=3, swap=5)\n mock_create().set_keys.assert_called_with({\"key\": \"value\"})\n mock_create().to_dict.assert_called_with()\n\n @mock.patch(\"%s.flavors.osclients.Clients\" % CTX)\n def test_setup_failexists(self, mock_clients):\n # Setup and mock\n new_context = copy.deepcopy(self.context)\n new_context[\"flavors\"] = {}\n\n mock_flavor_create = mock_clients().nova().flavors.create\n\n exception = nova_exceptions.Conflict(\"conflict\")\n mock_flavor_create.side_effect = exception\n\n # Run\n flavors_ctx = flavors.FlavorsGenerator(self.context)\n flavors_ctx.setup()\n\n # Assertions\n 
self.assertEqual(new_context, flavors_ctx.context)\n\n mock_clients.assert_called_with(self.context[\"admin\"][\"credential\"])\n\n mock_flavor_create.assert_called_once_with(\n name=\"flavor_name\", ram=2048, vcpus=3,\n disk=10, ephemeral=3, swap=5)\n\n @mock.patch(\"%s.flavors.rutils.make_name_matcher\" % CTX)\n @mock.patch(\"%s.flavors.resource_manager.cleanup\" % CTX)\n def test_cleanup(self, mock_cleanup, mock_make_name_matcher):\n # Setup and mock\n real_context = {\n \"config\": {\n \"flavors\": [\n {\"name\": \"flavor_name\"},\n ]\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"task\": mock.MagicMock(),\n }\n\n # Run\n flavors_ctx = flavors.FlavorsGenerator(real_context)\n flavors_ctx.cleanup()\n\n mock_cleanup.assert_called_once_with(\n names=[\"nova.flavors\"],\n admin=real_context[\"admin\"],\n superclass=mock_make_name_matcher.return_value,\n task_id=flavors_ctx.get_owner_id())\n\n mock_make_name_matcher.assert_called_once_with(\"flavor_name\")\n" }, { "alpha_fraction": 0.5897619128227234, "alphanum_fraction": 0.592380940914154, "avg_line_length": 34.74468231201172, "blob_id": "6b5aae684edd805d74f83eb7424d70b470925668", "content_id": "79c31175180286de37041cb66ea99c80b9bafa42", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8400, "license_type": "permissive", "max_line_length": 78, "num_lines": 235, "path": "/tests/unit/test.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport fixtures\nimport os\nfrom unittest import mock\nimport uuid\n\nimport testtools\n\nfrom rally.common import cfg\nfrom rally.common import db\nfrom rally import plugins\n\nfrom tests.unit import fakes\n\n\nplugins.load()\n\n\nclass DatabaseFixture(cfg.fixture.Config):\n \"\"\"Create clean DB before starting test.\"\"\"\n def setUp(self):\n super(DatabaseFixture, self).setUp()\n db_url = os.environ.get(\"RALLY_UNITTEST_DB_URL\", \"sqlite://\")\n db.engine_reset()\n self.conf.set_default(\"connection\", db_url, group=\"database\")\n db.schema_cleanup()\n db.schema_create()\n\n\nclass TestCase(testtools.TestCase):\n \"\"\"Test case base class for all unit tests.\"\"\"\n\n def setUp(self):\n super(TestCase, self).setUp()\n self.addCleanup(mock.patch.stopall)\n\n def _test_atomic_action_timer(self, atomic_actions, name, count=1,\n parent=[]):\n\n if parent:\n is_found = False\n for action in atomic_actions:\n if action[\"name\"] == parent[0]:\n is_found = True\n self._test_atomic_action_timer(action[\"children\"],\n name, count=count,\n parent=parent[1:])\n if not is_found:\n self.fail(\"The parent action %s can not be found.\"\n % parent[0])\n else:\n actual_count = 0\n for atomic_action in atomic_actions:\n if atomic_action[\"name\"] == name:\n self.assertIsInstance(atomic_action[\"started_at\"], float)\n self.assertIsInstance(atomic_action[\"finished_at\"], float)\n actual_count += 1\n if count != actual_count:\n self.fail(\"%(count)d count is expected for atomic action\"\n \" %(name)s, the actual count\"\n \" is %(actual_count)d.\"\n % {\"name\": 
name, \"count\": count,\n \"actual_count\": actual_count})\n\n def assertSequenceEqual(self, iterable_1, iterable_2, msg=None):\n self.assertEqual(tuple(iterable_1), tuple(iterable_2), msg)\n\n\nclass DBTestCase(TestCase):\n \"\"\"Base class for tests which use DB.\"\"\"\n\n def setUp(self):\n super(DBTestCase, self).setUp()\n self.useFixture(DatabaseFixture())\n\n\n# TODO(boris-42): This should be moved to test.plugins.test module\n# or similar\n\nclass ScenarioTestCase(TestCase):\n \"\"\"Base class for Scenario tests using mocked self.clients.\"\"\"\n task_utils = \"rally.task.utils\"\n patch_task_utils = True\n\n def client_factory(self, client_type, version=None, admin=False):\n \"\"\"Create a new client object.\"\"\"\n return mock.MagicMock(client_type=client_type,\n version=version,\n admin=admin)\n\n def clients(self, client_type, version=None, admin=False):\n \"\"\"Get a mocked client.\"\"\"\n key = (client_type, version, admin)\n if key not in self._clients:\n self._clients[key] = self.client_factory(client_type,\n version=version,\n admin=admin)\n return self._clients[key]\n\n def admin_clients(self, client_type, version=None):\n \"\"\"Get a mocked admin client.\"\"\"\n return self.clients(client_type, version=version, admin=True)\n\n def client_created(self, client_type, version=None, admin=False):\n \"\"\"Determine if a client has been created.\n\n This can be used to see if a scenario calls\n 'self.clients(\"foo\")', without checking to see what was done\n with the client object returned by that call.\n \"\"\"\n key = (client_type, version, admin)\n return key in self._clients\n\n def get_client_mocks(self):\n base_path = \"rally_openstack.task\"\n\n return [\n mock.patch(\n f\"{base_path}.scenario.OpenStackScenario.clients\",\n mock.Mock(side_effect=self.clients)),\n mock.patch(\n f\"{base_path}.scenario.OpenStackScenario.admin_clients\",\n mock.Mock(side_effect=self.admin_clients))\n ]\n\n def get_test_context(self):\n return get_test_context()\n\n 
def setUp(self):\n super(ScenarioTestCase, self).setUp()\n if self.patch_task_utils:\n self.mock_resource_is = fixtures.MockPatch(\n self.task_utils + \".resource_is\")\n self.mock_get_from_manager = fixtures.MockPatch(\n self.task_utils + \".get_from_manager\")\n self.mock_wait_for = fixtures.MockPatch(\n self.task_utils + \".wait_for\")\n self.mock_wait_for_delete = fixtures.MockPatch(\n self.task_utils + \".wait_for_delete\")\n self.mock_wait_for_status = fixtures.MockPatch(\n self.task_utils + \".wait_for_status\")\n self.useFixture(self.mock_resource_is)\n self.useFixture(self.mock_get_from_manager)\n self.useFixture(self.mock_wait_for)\n self.useFixture(self.mock_wait_for_delete)\n self.useFixture(self.mock_wait_for_status)\n\n self.mock_sleep = fixtures.MockPatch(\"time.sleep\")\n self.useFixture(self.mock_sleep)\n\n self._clients = {}\n self._client_mocks = self.get_client_mocks()\n\n for patcher in self._client_mocks:\n patcher.start()\n\n self.context = self.get_test_context()\n\n def tearDown(self):\n for patcher in self._client_mocks:\n patcher.stop()\n super(ScenarioTestCase, self).tearDown()\n\n\nclass ContextClientAdapter(object):\n def __init__(self, endpoint, test_case):\n self.endpoint = endpoint\n self.test_case = test_case\n\n def mock_client(self, name, version=None):\n admin = self.endpoint.startswith(\"admin\")\n client = self.test_case.clients(name, version=version, admin=admin)\n if not isinstance(client.return_value, mock.Mock):\n return client.return_value\n if client.side_effect is not None:\n # NOTE(pboldin): if a client has side_effects that means the\n # user wants some of the returned values overrided (look at\n # the test_existing_users for instance)\n return client()\n return client\n\n def __getattr__(self, name):\n # NOTE(pboldin): __getattr__ magic is called last, after the value\n # were looked up for in __dict__\n return lambda version=None: self.mock_client(name, version)\n\n\nclass ContextTestCase(ScenarioTestCase):\n def 
setUp(self):\n super(ContextTestCase, self).setUp()\n\n self._adapters = {}\n\n def context_client(self, endpoint, api_info=None):\n if endpoint not in self._adapters:\n self._adapters[endpoint] = ContextClientAdapter(endpoint, self)\n return self._adapters[endpoint]\n\n def get_client_mocks(self):\n return [\n mock.patch(\n \"rally_openstack.common.osclients.Clients\",\n mock.Mock(side_effect=self.context_client))\n ]\n\n\nclass FakeClientsScenarioTestCase(ScenarioTestCase):\n \"\"\"Base class for Scenario tests using fake (not mocked) self.clients.\"\"\"\n\n def client_factory(self, client_type, version=None, admin=False):\n return getattr(self._fake_clients, client_type)()\n\n def setUp(self):\n super(FakeClientsScenarioTestCase, self).setUp()\n self._fake_clients = fakes.FakeClients()\n\n\ndef get_test_context(**kwargs):\n kwargs[\"task\"] = {\"uuid\": str(uuid.uuid4())}\n kwargs[\"owner_id\"] = str(uuid.uuid4())\n return kwargs\n" }, { "alpha_fraction": 0.6480255126953125, "alphanum_fraction": 0.6568555235862732, "avg_line_length": 32.418033599853516, "blob_id": "5de46c52d5502cf6a18a46594081c87501c1f166", "content_id": "d2c03d4118bbd06e64fc8f91531f21169fda6796", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4077, "license_type": "permissive", "max_line_length": 78, "num_lines": 122, "path": "/tests/unit/task/cleanup/test_base.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.cleanup import base\nfrom tests.unit import test\n\n\nBASE = \"rally_openstack.task.cleanup.base\"\n\n\nclass ResourceDecoratorTestCase(test.TestCase):\n\n def test_resource(self):\n\n @base.resource(\"service\", \"res\")\n class Fake(object):\n pass\n\n self.assertEqual(\"service\", Fake._service)\n self.assertEqual(\"res\", Fake._resource)\n\n\nclass ResourceManagerTestCase(test.TestCase):\n\n def test__manager(self):\n user = mock.MagicMock()\n user.service1().resource1 = \"user_res\"\n\n manager = base.ResourceManager(user=user)\n manager._service = \"service1\"\n manager._resource = \"resource1\"\n\n self.assertEqual(\"user_res\", manager._manager())\n\n def test__manager_admin(self):\n admin = mock.MagicMock()\n admin.service1().resource1 = \"admin_res\"\n\n manager = base.ResourceManager(admin=admin)\n manager._service = \"service1\"\n manager._resource = \"resource1\"\n manager._admin_required = True\n\n self.assertEqual(\"admin_res\", manager._manager())\n\n def test_id(self):\n resource = mock.MagicMock(id=\"test_id\")\n\n manager = base.ResourceManager(resource=resource)\n self.assertEqual(resource.id, manager.id())\n\n def test_name(self):\n resource = mock.MagicMock(name=\"test_name\")\n\n manager = base.ResourceManager(resource=resource)\n self.assertEqual(resource.name, manager.name())\n\n @mock.patch(\"%s.ResourceManager._manager\" % BASE)\n def test_is_deleted(self, mock_resource_manager__manager):\n raw_res = mock.MagicMock(status=\"deleted\")\n 
mock_resource_manager__manager().get.return_value = raw_res\n mock_resource_manager__manager.reset_mock()\n\n resource = mock.MagicMock(id=\"test_id\")\n\n manager = base.ResourceManager(resource=resource)\n self.assertTrue(manager.is_deleted())\n raw_res.status = \"DELETE_COMPLETE\"\n self.assertTrue(manager.is_deleted())\n raw_res.status = \"ACTIVE\"\n self.assertFalse(manager.is_deleted())\n\n mock_resource_manager__manager.assert_has_calls(\n [mock.call(), mock.call().get(resource.id)] * 3)\n self.assertEqual(3, mock_resource_manager__manager.call_count)\n\n @mock.patch(\"%s.ResourceManager._manager\" % BASE)\n def test_is_deleted_exceptions(self, mock_resource_manager__manager):\n\n class Fake500Exc(Exception):\n code = 500\n\n class Fake404Exc(Exception):\n code = 404\n\n mock_resource_manager__manager.side_effect = [\n Exception, Fake500Exc, Fake404Exc]\n\n manager = base.ResourceManager(resource=mock.MagicMock())\n self.assertFalse(manager.is_deleted())\n self.assertFalse(manager.is_deleted())\n self.assertTrue(manager.is_deleted())\n\n @mock.patch(\"%s.ResourceManager._manager\" % BASE)\n def test_delete(self, mock_resource_manager__manager):\n res = mock.MagicMock(id=\"test_id\")\n\n manager = base.ResourceManager(resource=res)\n manager.delete()\n\n mock_resource_manager__manager.assert_has_calls(\n [mock.call(), mock.call().delete(res.id)])\n\n @mock.patch(\"%s.ResourceManager._manager\" % BASE)\n def test_list(self, mock_resource_manager__manager):\n base.ResourceManager().list()\n mock_resource_manager__manager.assert_has_calls(\n [mock.call(), mock.call().list()])\n" }, { "alpha_fraction": 0.7463150024414062, "alphanum_fraction": 0.7494181394577026, "avg_line_length": 41.96666717529297, "blob_id": "43befb177ce4d90533a6cd86f84d95eb2fceaa76", "content_id": "3758c2b54841b02e6ac5095993314af8a4b51533", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2578, "license_type": "permissive", 
"max_line_length": 78, "num_lines": 60, "path": "/rally_openstack/task/scenarios/barbican/orders.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.barbican import utils\n\n\"\"\"Scenarios for Barbican orders.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"BarbicanOrders.list\")\nclass BarbicanOrdersList(utils.BarbicanBase):\n def run(self):\n \"\"\"List Orders.\"\"\"\n self.admin_barbican.orders_list()\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"BarbicanOrders.create_key_and_delete\")\nclass BarbicanOrdersCreateKeyAndDelete(utils.BarbicanBase):\n def run(self):\n \"\"\"Create and delete key orders\"\"\"\n keys = self.admin_barbican.create_key()\n self.admin_barbican.orders_delete(keys.order_ref)\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email 
protected](name=\"BarbicanOrders.create_certificate_and_delete\")\nclass BarbicanOrdersCreateCertificateAndDelete(utils.BarbicanBase):\n def run(self):\n \"\"\"Create and delete certificate orders\"\"\"\n certificate = self.admin_barbican.create_certificate()\n self.admin_barbican.orders_delete(certificate.order_ref)\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"BarbicanOrders.create_asymmetric_and_delete\")\nclass BarbicanOrdersCreateAsymmetricAndDelete(utils.BarbicanBase):\n def run(self):\n \"\"\"Create and delete asymmetric order.\"\"\"\n certificate = self.admin_barbican.create_asymmetric()\n self.admin_barbican.orders_delete(certificate.order_ref)\n" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.4000000059604645, "avg_line_length": 20, "blob_id": "3fcbe7af0068ae1173e355b17de1695dbc796110", "content_id": "6d7d186a0f746aa586096cf91d3b0735a6b91e8e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 125, "license_type": "permissive", "max_line_length": 32, "num_lines": 6, "path": "/tasks/README.rst", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "================================\nRally Tasks For Production Usage\n================================\n\n\nDetailed Instruction TBD" }, { "alpha_fraction": 0.6043515801429749, "alphanum_fraction": 0.6066418290138245, "avg_line_length": 42.12345504760742, "blob_id": "b7b1ac0f4a72d92b369c6fea3a3211dd66f00b51", "content_id": "7ea11792b28b662932cfd5097f98fb1c34b570f7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3493, "license_type": "permissive", "max_line_length": 78, "num_lines": 81, "path": "/tests/unit/task/scenarios/sahara/test_node_group_templates.py", "repo_name": 
"openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.sahara import node_group_templates as ngts\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.scenarios.sahara.node_group_templates\"\n\n\nclass SaharaNodeGroupTemplatesTestCase(test.TestCase):\n\n def setUp(self):\n super(SaharaNodeGroupTemplatesTestCase, self).setUp()\n self.context = test.get_test_context()\n\n @mock.patch(\"%s.CreateAndListNodeGroupTemplates\"\n \"._list_node_group_templates\" % BASE)\n @mock.patch(\"%s.CreateAndListNodeGroupTemplates\"\n \"._create_master_node_group_template\" % BASE)\n @mock.patch(\"%s.CreateAndListNodeGroupTemplates\"\n \"._create_worker_node_group_template\" % BASE)\n def test_create_and_list_node_group_templates(self,\n mock_create_worker,\n mock_create_master,\n mock_list_group):\n ngts.CreateAndListNodeGroupTemplates(self.context).run(\n \"test_flavor\", \"test_plugin\", \"test_version\")\n\n mock_create_master.assert_called_once_with(\n flavor_id=\"test_flavor\",\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n use_autoconfig=True)\n mock_create_worker.assert_called_once_with(\n flavor_id=\"test_flavor\",\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n use_autoconfig=True)\n mock_list_group.assert_called_once_with()\n\n 
@mock.patch(\"%s.CreateDeleteNodeGroupTemplates\"\n \"._delete_node_group_template\" % BASE)\n @mock.patch(\"%s.CreateDeleteNodeGroupTemplates\"\n \"._create_master_node_group_template\" % BASE)\n @mock.patch(\"%s.CreateDeleteNodeGroupTemplates\"\n \"._create_worker_node_group_template\" % BASE)\n def test_create_delete_node_group_templates(self,\n mock_create_worker,\n mock_create_master,\n mock_delete_group):\n ngts.CreateDeleteNodeGroupTemplates(self.context).run(\n \"test_flavor\", \"test_plugin\", \"test_version\")\n\n mock_create_master.assert_called_once_with(\n flavor_id=\"test_flavor\",\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n use_autoconfig=True)\n mock_create_worker.assert_called_once_with(\n flavor_id=\"test_flavor\",\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n use_autoconfig=True)\n\n mock_delete_group.assert_has_calls(calls=[\n mock.call(mock_create_master.return_value),\n mock.call(mock_create_worker.return_value)])\n" }, { "alpha_fraction": 0.5560429096221924, "alphanum_fraction": 0.5640838146209717, "avg_line_length": 40.45454406738281, "blob_id": "22f8f725bce280997ca995f434fa5b683ae8bb02", "content_id": "a1bc975d76dee1eb9cf3bb7e0e3640e861182e7a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12312, "license_type": "permissive", "max_line_length": 79, "num_lines": 297, "path": "/tests/unit/verification/tempest/test_config.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally.common import cfg\nfrom rally_openstack.common import osclients\nfrom rally_openstack.verification.tempest import config\nfrom tests.unit import fakes\nfrom tests.unit import test\n\n\nCONF = cfg.CONF\n\n\nCRED = {\n \"username\": \"admin\",\n \"tenant_name\": \"admin\",\n \"password\": \"admin-12345\",\n \"auth_url\": \"http://test:5000/v2.0/\",\n \"region_name\": \"test\",\n \"https_insecure\": False,\n \"https_cacert\": \"/path/to/cacert/file\",\n \"user_domain_name\": \"admin\",\n \"project_domain_name\": \"admin\"\n}\n\nPATH = \"rally_openstack.verification.tempest.config\"\n\n\[email protected]\nclass TempestConfigfileManagerTestCase(test.TestCase):\n\n def setUp(self):\n super(TempestConfigfileManagerTestCase, self).setUp()\n env = fakes.FakeEnvironment(\n env_uuid=\"fake_env\",\n data={\n \"platforms\": {\n \"openstack\": {\n \"platform_data\": {\n \"admin\": CRED\n }\n }\n }\n }\n )\n with mock.patch(\"%s.credential.OpenStackCredential\" % PATH,\n return_value=fakes.FakeCredential(**CRED)):\n self.tempest = config.TempestConfigfileManager(env)\n\n def test__configure_auth(self):\n self.tempest.conf.add_section(\"auth\")\n self.tempest._configure_auth()\n\n expected = (\n (\"admin_username\", CRED[\"username\"]),\n (\"admin_password\", CRED[\"password\"]),\n (\"admin_project_name\", CRED[\"tenant_name\"]),\n (\"admin_domain_name\", CRED[\"user_domain_name\"]))\n result = self.tempest.conf.items(\"auth\")\n for item in expected:\n self.assertIn(item, result)\n\n @ddt.data(\"data_processing\", 
\"data-processing\")\n def test__configure_data_processing(self, service_type):\n self.tempest.available_services = [\"sahara\"]\n\n self.tempest.clients.services.return_value = {\n service_type: \"sahara\"}\n self.tempest.conf.add_section(\"data-processing\")\n self.tempest._configure_data_processing()\n self.assertEqual(service_type,\n self.tempest.conf.get(\"data-processing\",\n \"catalog_type\"))\n\n @ddt.data(\n # The prefix \"ex_\" is abbreviation of \"expected\"\n # case #1: both versions are discoverable; version is in the auth_url\n {\"auth_url\": \"http://example.com/v2.0\",\n \"data\": [{\"version\": (3, 0), \"url\": \"foo3.com\"},\n {\"version\": (2, 0), \"url\": \"foo2.com\"}],\n \"ex_uri\": \"http://example.com/v2.0\", \"ex_auth_version\": \"v2\",\n \"ex_uri_v3\": \"http://example.com/v3\"},\n # case #2: the same case, but v3 is in the url\n {\"auth_url\": \"http://example.com/v3\",\n \"data\": [{\"version\": (3, 0), \"url\": \"foo3.com\"},\n {\"version\": (2, 0), \"url\": \"foo2.com\"}],\n \"ex_uri\": \"http://example.com/v2.0\", \"ex_auth_version\": \"v3\",\n \"ex_uri_v3\": \"http://example.com/v3\"},\n # case #3: both versions are discoverable; version is not in auth_url\n {\"auth_url\": \"http://example.com\",\n \"data\": [{\"version\": (3, 0), \"url\": \"foo3.com\"},\n {\"version\": (2, 0), \"url\": \"foo2.com\"}],\n \"ex_uri\": \"foo2.com\", \"ex_uri_v3\": \"foo3.com\",\n \"ex_auth_version\": \"v3\"},\n # case #4: the same case, but data in the another sort.\n {\"auth_url\": \"http://example.com\",\n \"data\": [{\"version\": (2, 0), \"url\": \"foo2.com\"},\n {\"version\": (3, 0), \"url\": \"foo3.com\"}],\n \"ex_uri\": \"foo2.com\", \"ex_uri_v3\": \"foo3.com\",\n \"ex_auth_version\": \"v3\"},\n # case #5: only one version is discoverable;\n {\"auth_url\": \"http://example.com\",\n \"data\": [{\"version\": (2, 0), \"url\": \"foo2.com\"}],\n \"ex_uri\": \"foo2.com\", \"ex_auth_version\": \"v2\",\n \"ex_uri_v3\": \"http://example.com/v3\"},\n # case 
#6: the same case, but keystone v3 is discoverable\n {\"auth_url\": \"http://example.com\",\n \"data\": [{\"version\": (3, 0), \"url\": \"foo3.com\"}],\n \"ex_uri\": \"http://example.com/v2.0\", \"ex_auth_version\": \"v3\",\n \"ex_uri_v3\": \"foo3.com\",\n \"ex_v2_off\": True}\n )\n @ddt.unpack\n def test__configure_identity(self, auth_url, data, ex_uri,\n ex_uri_v3, ex_auth_version, ex_v2_off=False):\n self.tempest.conf.add_section(\"identity\")\n self.tempest.conf.add_section(\"identity-feature-enabled\")\n self.tempest.credential.auth_url = auth_url\n process_url = osclients.Keystone(\n self.tempest.credential, 0)._remove_url_version\n self.tempest.clients.keystone._remove_url_version = process_url\n\n from keystoneauth1 import discover\n from keystoneauth1 import session\n\n with mock.patch.object(discover, \"Discover\") as mock_discover:\n with mock.patch.object(session, \"Session\") as mock_session:\n mock_discover.return_value.version_data.return_value = data\n\n self.tempest._configure_identity()\n\n mock_discover.assert_called_once_with(\n mock_session.return_value, auth_url)\n\n expected = {\"region\": CRED[\"region_name\"],\n \"auth_version\": ex_auth_version,\n \"uri\": ex_uri, \"uri_v3\": ex_uri_v3,\n \"disable_ssl_certificate_validation\": str(\n CRED[\"https_insecure\"]),\n \"ca_certificates_file\": CRED[\"https_cacert\"]}\n self.assertEqual(expected, dict(self.tempest.conf.items(\"identity\")))\n if ex_v2_off:\n self.assertEqual(\n \"False\",\n self.tempest.conf.get(\"identity-feature-enabled\", \"api_v2\"))\n\n # Test a conf setting with a None value\n try:\n self.tempest.conf.set(\"identity\", \"region\", None)\n except TypeError as e:\n self.fail(\"self.tempest.conf.set('identity', 'region', None) \"\n \"raised a TypeError: \" + str(e))\n\n def test__configure_network_if_neutron(self):\n self.tempest.available_services = [\"neutron\"]\n client = self.tempest.clients.neutron()\n client.list_networks.return_value = {\n \"networks\": [\n {\n 
\"status\": \"ACTIVE\",\n \"id\": \"test_id\",\n \"name\": \"test_name\",\n \"router:external\": True\n }\n ]\n }\n\n self.tempest.conf.add_section(\"network\")\n self.tempest._configure_network()\n self.assertEqual(\"test_id\",\n self.tempest.conf.get(\"network\", \"public_network_id\"))\n self.assertEqual(\"test_name\",\n self.tempest.conf.get(\"network\",\n \"floating_network_name\"))\n\n def test__configure_network_if_nova(self):\n self.tempest.available_services = [\"nova\"]\n client = self.tempest.clients.nova()\n client.networks.list.return_value = [\n mock.MagicMock(human_id=\"fake-network\")]\n\n self.tempest.conf.add_section(\"compute\")\n self.tempest.conf.add_section(\"validation\")\n self.tempest._configure_network()\n\n expected = {\"compute\": (\"fixed_network_name\", \"fake-network\"),\n \"validation\": (\"network_for_ssh\", \"fake-network\")}\n for section, option in expected.items():\n result = self.tempest.conf.items(section)\n self.assertIn(option, result)\n\n def test__configure_network_feature_enabled(self):\n self.tempest.available_services = [\"neutron\"]\n client = self.tempest.clients.neutron()\n client.list_ext.return_value = {\n \"extensions\": [\n {\"alias\": \"dvr\"},\n {\"alias\": \"extra_dhcp_opt\"},\n {\"alias\": \"extraroute\"}\n ]\n }\n\n self.tempest.conf.add_section(\"network-feature-enabled\")\n self.tempest._configure_network_feature_enabled()\n client.list_ext.assert_called_once_with(\"extensions\", \"/extensions\",\n retrieve_all=True)\n self.assertEqual(\"dvr,extra_dhcp_opt,extraroute\",\n self.tempest.conf.get(\"network-feature-enabled\",\n \"api_extensions\"))\n\n def test__configure_object_storage(self):\n self.tempest.conf.add_section(\"object-storage\")\n self.tempest._configure_object_storage()\n\n expected = (\n (\"operator_role\", CONF.openstack.swift_operator_role),\n (\"reseller_admin_role\", CONF.openstack.swift_reseller_admin_role))\n result = self.tempest.conf.items(\"object-storage\")\n for item in expected:\n 
self.assertIn(item, result)\n\n def test__configure_orchestration(self):\n self.tempest.conf.add_section(\"orchestration\")\n self.tempest._configure_orchestration()\n\n expected = (\n (\"stack_owner_role\", CONF.openstack.heat_stack_owner_role),\n (\"stack_user_role\", CONF.openstack.heat_stack_user_role))\n result = self.tempest.conf.items(\"orchestration\")\n for item in expected:\n self.assertIn(item, result)\n\n def test__configure_service_available(self):\n available_services = (\"nova\", \"cinder\", \"glance\", \"sahara\")\n self.tempest.available_services = available_services\n self.tempest.conf.add_section(\"service_available\")\n self.tempest._configure_service_available()\n\n expected = (\n (\"neutron\", \"False\"), (\"heat\", \"False\"), (\"nova\", \"True\"),\n (\"swift\", \"False\"), (\"cinder\", \"True\"), (\"sahara\", \"True\"),\n (\"glance\", \"True\"))\n result = self.tempest.conf.items(\"service_available\")\n for item in expected:\n self.assertIn(item, result)\n\n @ddt.data({}, {\"service\": \"neutron\", \"connect_method\": \"floating\"})\n @ddt.unpack\n def test__configure_validation(self, service=\"nova\",\n connect_method=\"fixed\"):\n self.tempest.available_services = [service]\n self.tempest.conf.add_section(\"validation\")\n self.tempest._configure_validation()\n\n expected = ((\"connect_method\", connect_method), )\n result = self.tempest.conf.items(\"validation\")\n for item in expected:\n self.assertIn(item, result)\n\n @mock.patch(\"%s.io.StringIO\" % PATH)\n @mock.patch(\"%s.open\" % PATH, side_effect=mock.mock_open())\n @mock.patch(\"inspect.getmembers\")\n def test_create(self, mock_inspect_getmembers, mock_open, mock_string_io):\n configure_something_method = mock.MagicMock()\n mock_inspect_getmembers.return_value = [(\"_configure_something\",\n configure_something_method)]\n self.tempest.conf.read = mock.Mock()\n self.tempest.conf.write = mock.Mock()\n self.tempest.conf.read.return_value = \"[section]\\noption = value\"\n\n 
fake_extra_conf = {\"section2\": {\"option2\": \"value2\"}}\n self.tempest.create(\"/path/to/fake/conf\", fake_extra_conf)\n\n self.assertEqual(1, configure_something_method.call_count)\n self.assertIn((\"option2\", \"value2\"),\n self.tempest.conf.items(\"section2\"))\n mock_open.assert_called_once_with(\"/path/to/fake/conf\", \"w\")\n self.tempest.conf.write.assert_has_calls(\n [mock.call(mock_open.side_effect()),\n mock.call(mock_string_io.return_value)])\n mock_string_io.return_value.getvalue.assert_called_once_with()\n" }, { "alpha_fraction": 0.5766246318817139, "alphanum_fraction": 0.5792919397354126, "avg_line_length": 38.653846740722656, "blob_id": "401c8976b07701cd16c99f3bc544a830091100e3", "content_id": "2725eb5e00542569f6f249c3b4f3f2b2f362a446", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4124, "license_type": "permissive", "max_line_length": 77, "num_lines": 104, "path": "/rally_openstack/task/contexts/sahara/sahara_output_data_sources.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.sahara import utils\nfrom rally_openstack.task.scenarios.swift import utils as swift_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"sahara_output_data_sources\", platform=\"openstack\",\n order=444)\nclass SaharaOutputDataSources(context.OpenStackContext):\n \"\"\"Context class for setting up Output Data Sources for an EDP job.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"output_type\": {\n \"enum\": [\"swift\", \"hdfs\"],\n },\n \"output_url_prefix\": {\n \"type\": \"string\",\n }\n },\n \"additionalProperties\": False,\n \"required\": [\"output_type\", \"output_url_prefix\"]\n }\n\n def setup(self):\n utils.init_sahara_context(self)\n for user, tenant_id in self._iterate_per_tenants():\n\n clients = osclients.Clients(user[\"credential\"])\n sahara = clients.sahara()\n\n if self.config[\"output_type\"] == \"swift\":\n swift = swift_utils.SwiftScenario(clients=clients,\n context=self.context)\n container_name = self.generate_random_name()\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"container\"] = {\n \"name\": swift._create_container(\n container_name=container_name),\n \"output_swift_objects\": []\n }\n self.setup_outputs_swift(swift, sahara, tenant_id,\n container_name,\n user[\"credential\"].username,\n user[\"credential\"].password)\n else:\n self.setup_outputs_hdfs(sahara, tenant_id,\n self.config[\"output_url_prefix\"])\n\n def setup_outputs_hdfs(self, sahara, tenant_id, output_url):\n output_ds = sahara.data_sources.create(\n 
name=self.generate_random_name(),\n description=\"\",\n data_source_type=\"hdfs\",\n url=output_url)\n\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"output\"] = output_ds.id\n\n def setup_outputs_swift(self, swift, sahara, tenant_id, container_name,\n username, password):\n output_ds_swift = sahara.data_sources.create(\n name=self.generate_random_name(),\n description=\"\",\n data_source_type=\"swift\",\n url=\"swift://\" + container_name + \".sahara/\",\n credential_user=username,\n credential_pass=password)\n\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"output\"] = (\n output_ds_swift.id\n )\n\n def cleanup(self):\n resource_manager.cleanup(\n names=[\"swift.object\", \"swift.container\"],\n users=self.context.get(\"users\", []),\n superclass=self.__class__,\n task_id=self.get_owner_id())\n resource_manager.cleanup(\n names=[\"sahara.data_sources\"],\n users=self.context.get(\"users\", []),\n superclass=self.__class__,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.6801202297210693, "alphanum_fraction": 0.6857020258903503, "avg_line_length": 41.345455169677734, "blob_id": "9fcab185dc4655cf7ad31351ae48d0225c7de84b", "content_id": "65da30f764178cdb04352bf17c45e0a72c988063", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2329, "license_type": "permissive", "max_line_length": 78, "num_lines": 55, "path": "/tests/unit/task/scenarios/mistral/test_workbooks.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.mistral import workbooks\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.scenarios.mistral.workbooks\"\n\n\nclass MistralWorkbooksTestCase(test.ScenarioTestCase):\n\n @mock.patch(\"%s.ListWorkbooks._list_workbooks\" % BASE)\n def test_list_workbooks(self, mock_list_workbooks__list_workbooks):\n workbooks.ListWorkbooks(self.context).run()\n mock_list_workbooks__list_workbooks.assert_called_once_with()\n\n @mock.patch(\"%s.CreateWorkbook._create_workbook\" % BASE)\n def test_create_workbook(self, mock_create_workbook__create_workbook):\n definition = \"---\\nversion: \\\"2.0\\\"\\nname: wb\"\n fake_wb = mock.MagicMock()\n fake_wb.name = \"wb\"\n mock_create_workbook__create_workbook.return_value = fake_wb\n workbooks.CreateWorkbook(self.context).run(definition)\n\n self.assertEqual(1, mock_create_workbook__create_workbook.called)\n\n @mock.patch(\"%s.CreateWorkbook._delete_workbook\" % BASE)\n @mock.patch(\"%s.CreateWorkbook._create_workbook\" % BASE)\n def test_create_delete_workbook(self,\n mock_create_workbook__create_workbook,\n mock_create_workbook__delete_workbook):\n definition = \"---\\nversion: \\\"2.0\\\"\\nname: wb\"\n fake_wb = mock.MagicMock()\n fake_wb.name = \"wb\"\n mock_create_workbook__create_workbook.return_value = fake_wb\n\n workbooks.CreateWorkbook(self.context).run(definition, do_delete=True)\n\n self.assertTrue(mock_create_workbook__create_workbook.called)\n mock_create_workbook__delete_workbook.assert_called_once_with(\n fake_wb.name)\n" }, { 
"alpha_fraction": 0.538597583770752, "alphanum_fraction": 0.5411244630813599, "avg_line_length": 37.60975646972656, "blob_id": "476745b4e73c391db188caa1c8c86ecc911eb92c", "content_id": "dcf67af8dec9364e89841cec417b52089495cae5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7915, "license_type": "permissive", "max_line_length": 79, "num_lines": 205, "path": "/tests/unit/task/contexts/manila/test_manila_shares.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.common import consts as rally_consts\nfrom rally_openstack.task.contexts.manila import consts\nfrom rally_openstack.task.contexts.manila import manila_shares\nfrom rally_openstack.task.scenarios.manila import utils as manila_utils\nfrom tests.unit import test\n\nMANILA_UTILS_PATH = (\n \"rally_openstack.task.scenarios.manila.utils.ManilaScenario.\")\n\n\nclass Fake(object):\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __getitem__(self, item):\n return getattr(self, item)\n\n def to_dict(self):\n return self.__dict__\n\n\[email protected]\nclass SharesTestCase(test.TestCase):\n TENANTS_AMOUNT = 3\n USERS_PER_TENANT = 4\n SHARES_PER_TENANT = 7\n SHARE_NETWORKS = [{\"id\": \"sn_%s_id\" % d} for d in range(3)]\n\n def _get_context(self, use_share_networks=False, shares_per_tenant=None,\n share_size=1, share_proto=\"fake_proto\", share_type=None):\n tenants = {}\n for t_id in range(self.TENANTS_AMOUNT):\n tenants[str(t_id)] = {\"name\": str(t_id)}\n users = []\n for t_id in sorted(list(tenants.keys())):\n for i in range(self.USERS_PER_TENANT):\n users.append({\n \"id\": i, \"tenant_id\": t_id,\n \"credential\": mock.MagicMock()})\n context = {\n \"config\": {\n \"users\": {\n \"tenants\": self.TENANTS_AMOUNT,\n \"users_per_tenant\": self.USERS_PER_TENANT,\n \"user_choice_method\": \"round_robin\",\n },\n consts.SHARE_NETWORKS_CONTEXT_NAME: {\n \"use_share_networks\": use_share_networks,\n \"share_networks\": self.SHARE_NETWORKS,\n },\n consts.SHARES_CONTEXT_NAME: {\n \"shares_per_tenant\": (\n shares_per_tenant or self.SHARES_PER_TENANT),\n \"size\": share_size,\n \"share_proto\": share_proto,\n \"share_type\": share_type,\n },\n },\n \"admin\": {\n \"credential\": mock.MagicMock(),\n },\n \"task\": mock.MagicMock(),\n \"owner_id\": 
\"foo_uuid\",\n \"users\": users,\n \"tenants\": tenants,\n }\n if use_share_networks:\n for t in context[\"tenants\"].keys():\n context[\"tenants\"][t][consts.SHARE_NETWORKS_CONTEXT_NAME] = {\n \"share_networks\": self.SHARE_NETWORKS,\n }\n return context\n\n def test_init(self):\n ctxt = {\n \"task\": mock.MagicMock(),\n \"config\": {\n consts.SHARES_CONTEXT_NAME: {\"foo\": \"bar\"},\n \"fake\": {\"fake_key\": \"fake_value\"},\n },\n }\n\n inst = manila_shares.Shares(ctxt)\n\n self.assertEqual(\n {\"foo\": \"bar\", \"shares_per_tenant\": 1, \"size\": 1,\n \"share_proto\": \"NFS\", \"share_type\": None},\n inst.config)\n self.assertIn(\n rally_consts.JSON_SCHEMA, inst.CONFIG_SCHEMA.get(\"$schema\"))\n self.assertFalse(inst.CONFIG_SCHEMA.get(\"additionalProperties\"))\n self.assertEqual(\"object\", inst.CONFIG_SCHEMA.get(\"type\"))\n props = inst.CONFIG_SCHEMA.get(\"properties\", {})\n self.assertEqual(\n {\"minimum\": 1, \"type\": \"integer\"}, props.get(\"shares_per_tenant\"))\n self.assertEqual({\"minimum\": 1, \"type\": \"integer\"}, props.get(\"size\"))\n self.assertEqual({\"type\": \"string\"}, props.get(\"share_proto\"))\n self.assertEqual({\"type\": \"string\"}, props.get(\"share_type\"))\n self.assertEqual(455, inst.get_order())\n self.assertEqual(consts.SHARES_CONTEXT_NAME, inst.get_name())\n\n @mock.patch(MANILA_UTILS_PATH + \"_create_share\")\n @ddt.data(True, False)\n def test_setup(\n self,\n use_share_networks,\n mock_manila_scenario__create_share):\n share_type = \"fake_share_type\"\n ctxt = self._get_context(\n use_share_networks=use_share_networks, share_type=share_type)\n inst = manila_shares.Shares(ctxt)\n shares = [\n Fake(id=\"fake_share_id_%d\" % s_id)\n for s_id in range(self.TENANTS_AMOUNT * self.SHARES_PER_TENANT)\n ]\n mock_manila_scenario__create_share.side_effect = shares\n expected_ctxt = copy.deepcopy(ctxt)\n\n inst.setup()\n\n self.assertEqual(\n self.TENANTS_AMOUNT * self.SHARES_PER_TENANT,\n 
mock_manila_scenario__create_share.call_count)\n for d in range(self.TENANTS_AMOUNT):\n self.assertEqual(\n [\n s.to_dict() for s in shares[\n (d * self.SHARES_PER_TENANT):(\n d * self.SHARES_PER_TENANT + self.SHARES_PER_TENANT\n )\n ]\n ],\n inst.context.get(\"tenants\", {}).get(\"%s\" % d, {}).get(\"shares\")\n )\n self.assertEqual(expected_ctxt[\"task\"], inst.context.get(\"task\"))\n self.assertEqual(expected_ctxt[\"config\"], inst.context.get(\"config\"))\n self.assertEqual(expected_ctxt[\"users\"], inst.context.get(\"users\"))\n if use_share_networks:\n mock_calls = [\n mock.call(\n share_proto=ctxt[\"config\"][consts.SHARES_CONTEXT_NAME][\n \"share_proto\"],\n size=ctxt[\"config\"][consts.SHARES_CONTEXT_NAME][\"size\"],\n share_type=ctxt[\"config\"][consts.SHARES_CONTEXT_NAME][\n \"share_type\"],\n share_network=self.SHARE_NETWORKS[\n int(t_id) % len(self.SHARE_NETWORKS)][\"id\"]\n ) for t_id in expected_ctxt[\"tenants\"].keys()\n ]\n else:\n mock_calls = [\n mock.call(\n share_proto=ctxt[\"config\"][consts.SHARES_CONTEXT_NAME][\n \"share_proto\"],\n size=ctxt[\"config\"][consts.SHARES_CONTEXT_NAME][\"size\"],\n share_type=ctxt[\"config\"][consts.SHARES_CONTEXT_NAME][\n \"share_type\"],\n ) for t_id in expected_ctxt[\"tenants\"].keys()\n ]\n mock_manila_scenario__create_share.assert_has_calls(\n mock_calls, any_order=True)\n\n @mock.patch(MANILA_UTILS_PATH + \"_create_share\")\n @mock.patch(\"rally_openstack.task.cleanup.manager.cleanup\")\n def test_cleanup(\n self,\n mock_cleanup_manager_cleanup,\n mock_manila_scenario__create_share):\n ctxt = self._get_context()\n inst = manila_shares.Shares(ctxt)\n shares = [\n Fake(id=\"fake_share_id_%d\" % s_id)\n for s_id in range(self.TENANTS_AMOUNT * self.SHARES_PER_TENANT)\n ]\n mock_manila_scenario__create_share.side_effect = shares\n inst.setup()\n\n inst.cleanup()\n\n mock_cleanup_manager_cleanup.assert_called_once_with(\n names=[\"manila.shares\"],\n users=inst.context.get(\"users\", []),\n 
superclass=manila_utils.ManilaScenario,\n task_id=\"foo_uuid\")\n" }, { "alpha_fraction": 0.6290807723999023, "alphanum_fraction": 0.6323024034500122, "avg_line_length": 34.00751876831055, "blob_id": "de1137415f893592d024d2b8f7457d7290202fd3", "content_id": "86a940af31ee2ea061ec97a8322932554090ee35", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4656, "license_type": "permissive", "max_line_length": 78, "num_lines": 133, "path": "/rally_openstack/common/services/image/image.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally import exceptions\nfrom rally.task import service\n\n\nCONF = cfg.CONF\n\nUnifiedImage = service.make_resource_cls(\n \"Image\", properties=[\"id\", \"name\", \"visibility\", \"status\"])\n\n\nclass VisibilityException(exceptions.RallyException):\n \"\"\"Wrong visibility value exception.\n\n \"\"\"\n error_code = 531\n\n\nclass RemovePropsException(exceptions.RallyException):\n \"\"\"Remove Props it not supported exception.\n\n \"\"\"\n error_code = 560\n\n\nclass Image(service.UnifiedService):\n @classmethod\n def is_applicable(cls, clients):\n cloud_version = str(clients.glance().version).split(\".\")[0]\n return cloud_version == cls._meta_get(\"impl\")._meta_get(\"version\")\n\n @service.should_be_overridden\n def create_image(self, image_name=None, container_format=None,\n image_location=None, disk_format=None,\n visibility=\"private\", min_disk=0,\n min_ram=0, properties=None):\n \"\"\"Creates new image.\n\n :param image_name: Image name for which need to be created\n :param container_format: Container format\n :param image_location: The new image's location\n :param disk_format: Disk format\n :param visibility: The access permission for the created image.\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n :param properties: Dict of image properties\n \"\"\"\n properties = properties or {}\n image = self._impl.create_image(\n image_name=image_name,\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram,\n properties=properties)\n return image\n\n @service.should_be_overridden\n def update_image(self, image_id, image_name=None,\n min_disk=0, min_ram=0, remove_props=None):\n \"\"\"Update image.\n\n :param image_id: ID of image to update\n :param image_name: Image 
name to be updated to\n :param min_disk: The min disk of updated image\n :param min_ram: The min ram of updated image\n :param remove_props: List of property names to remove\n \"\"\"\n return self._impl.update_image(\n image_id,\n image_name=image_name,\n min_disk=min_disk,\n min_ram=min_ram,\n remove_props=remove_props)\n\n @service.should_be_overridden\n def list_images(self, status=\"active\", visibility=None, owner=None):\n \"\"\"List images.\n\n :param status: Filter in images for the specified status\n :param visibility: Filter in images for the specified visibility\n :param owner: Filter in images for tenant ID\n \"\"\"\n return self._impl.list_images(status=status,\n visibility=visibility,\n owner=owner)\n\n @service.should_be_overridden\n def set_visibility(self, image_id, visibility=\"public\"):\n \"\"\"Update visibility.\n\n :param image_id: ID of image to update\n :param visibility: The visibility of specified image\n \"\"\"\n self._impl.set_visibility(image_id, visibility=visibility)\n\n @service.should_be_overridden\n def get_image(self, image):\n \"\"\"Get specified image.\n\n :param image: ID or object with ID of image to obtain.\n \"\"\"\n return self._impl.get_image(image)\n\n @service.should_be_overridden\n def delete_image(self, image_id):\n \"\"\"delete image.\"\"\"\n self._impl.delete_image(image_id)\n\n @service.should_be_overridden\n def download_image(self, image, do_checksum=True):\n \"\"\"Download data for an image.\n\n :param image: image object or id to look up\n :param do_checksum: Enable/disable checksum validation\n :rtype: iterable containing image data or None\n \"\"\"\n return self._impl.download_image(image, do_checksum=do_checksum)\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 23.705883026123047, "blob_id": "ca2d4f5524e684ada0f4e7688e560fb69b5366dc", "content_id": "5e006bdea3ccaaa53565985408611e265ec5b6a3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, 
"is_vendor": false, "language": "reStructuredText", "length_bytes": 420, "license_type": "permissive", "max_line_length": 119, "num_lines": 17, "path": "/rally-jobs/extra/murano/applications/README.rst", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "Murano applications\n===================\n\nFiles for Murano plugins\n\nStructure\n---------\n\n* <application_name>/ directories. Each directory store a simple Murano package\n for environment deployment in Murano context. Also there can be other files\n needs for application.\n\n\nUseful links\n------------\n\n* `More about Murano package <https://wiki.openstack.org/wiki/Murano/Documentation/How_to_create_application_package>`_\n" }, { "alpha_fraction": 0.6036781668663025, "alphanum_fraction": 0.6050574779510498, "avg_line_length": 45.41768264770508, "blob_id": "4d9239a94173cd91fead413095893c021bd64062", "content_id": "3d5d9d63b7b919df3d060f25f9e2074ea7b35af7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15225, "license_type": "permissive", "max_line_length": 78, "num_lines": 328, "path": "/tests/unit/task/scenarios/heat/test_stacks.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\n\nfrom rally_openstack.task.scenarios.heat import stacks\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.scenarios.heat.stacks\"\n\n\nclass HeatStacksTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(HeatStacksTestCase, self).setUp()\n self.default_template = \"heat_template_version: 2013-05-23\"\n self.default_parameters = {\"dummy_param\": \"dummy_key\"}\n self.default_files = [\"dummy_file.yaml\"]\n self.default_environment = {\"env\": \"dummy_env\"}\n self.default_output_key = \"dummy_output_key\"\n\n @mock.patch(\"%s.CreateAndListStack._list_stacks\" % BASE)\n @mock.patch(\"%s.CreateAndListStack._create_stack\" % BASE)\n def test_create_and_list_stack(self,\n mock__create_stack,\n mock__list_stacks):\n stack = mock.Mock()\n mock__create_stack.return_value = stack\n mock__list_stacks.return_value = [stack] * 3\n\n # Positive case:\n stacks.CreateAndListStack(self.context).run(\n template_path=self.default_template,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n mock__create_stack.assert_called_once_with(\n self.default_template, self.default_parameters,\n self.default_files, self.default_environment)\n mock__list_stacks.assert_called_once_with()\n\n # Negative case1: stack isn't created\n mock__create_stack.return_value = None\n self.assertRaises(exceptions.RallyAssertionError,\n stacks.CreateAndListStack(self.context).run,\n template_path=self.default_template,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n mock__create_stack.assert_called_with(\n self.default_template, self.default_parameters,\n self.default_files, self.default_environment)\n\n # Negative case2: created stack not in the list of available stacks\n fake_stack = mock.Mock()\n 
mock__create_stack.return_value = fake_stack\n self.assertRaises(exceptions.RallyAssertionError,\n stacks.CreateAndListStack(self.context).run,\n template_path=self.default_template,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n mock__create_stack.assert_called_with(\n self.default_template, self.default_parameters,\n self.default_files, self.default_environment)\n mock__list_stacks.assert_called_with()\n\n @mock.patch(\"%s.ListStacksAndResources._list_stacks\" % BASE)\n def test_list_stack_and_resources(self, mock__list_stacks):\n stack = mock.Mock()\n heat_scenario = stacks.ListStacksAndResources(self.context)\n mock__list_stacks.return_value = [stack]\n heat_scenario.run()\n self.clients(\"heat\").resources.list.assert_called_once_with(\n stack.id)\n self._test_atomic_action_timer(heat_scenario.atomic_actions(),\n \"heat.list_resources\")\n\n @mock.patch(\"%s.ListStacksAndEvents._list_stacks\" % BASE)\n def test_list_stack_and_events(self, mock__list_stacks):\n stack = mock.Mock()\n mock__list_stacks.return_value = [stack]\n heat_scenario = stacks.ListStacksAndEvents(self.context)\n heat_scenario.run()\n self.clients(\"heat\").events.list.assert_called_once_with(stack.id)\n self._test_atomic_action_timer(\n heat_scenario.atomic_actions(), \"heat.list_events\")\n\n @mock.patch(\"%s.CreateAndDeleteStack._delete_stack\" % BASE)\n @mock.patch(\"%s.CreateAndDeleteStack._create_stack\" % BASE)\n @mock.patch(\"%s.CreateAndDeleteStack.generate_random_name\" % BASE,\n return_value=\"test-rally-stack\")\n def test_create_and_delete_stack(self,\n mock_generate_random_name,\n mock__create_stack,\n mock__delete_stack):\n fake_stack = object()\n mock__create_stack.return_value = fake_stack\n stacks.CreateAndDeleteStack(self.context).run(\n template_path=self.default_template,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n 
mock__create_stack.assert_called_once_with(\n self.default_template,\n self.default_parameters,\n self.default_files,\n self.default_environment)\n mock__delete_stack.assert_called_once_with(fake_stack)\n\n @mock.patch(\"%s.CreateCheckDeleteStack._delete_stack\" % BASE)\n @mock.patch(\"%s.CreateCheckDeleteStack._check_stack\" % BASE)\n @mock.patch(\"%s.CreateCheckDeleteStack._create_stack\" % BASE)\n def test_create_check_delete_stack(self,\n mock__create_stack,\n mock__check_stack,\n mock__delete_stack):\n stacks.CreateCheckDeleteStack(self.context).run(\n template_path=self.default_template,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n mock__create_stack.assert_called_once_with(\n self.default_template, self.default_parameters,\n self.default_files, self.default_environment)\n mock__check_stack.assert_called_once_with(\n mock__create_stack.return_value)\n mock__delete_stack.assert_called_once_with(\n mock__create_stack.return_value)\n\n @mock.patch(\"%s.CreateUpdateDeleteStack._delete_stack\" % BASE)\n @mock.patch(\"%s.CreateUpdateDeleteStack._update_stack\" % BASE)\n @mock.patch(\"%s.CreateUpdateDeleteStack._create_stack\" % BASE)\n @mock.patch(\"%s.CreateUpdateDeleteStack.generate_random_name\" % BASE,\n return_value=\"test-rally-stack\")\n def test_create_update_delete_stack(self,\n mock_generate_random_name,\n mock__create_stack,\n mock__update_stack,\n mock__delete_stack):\n fake_stack = object()\n mock__create_stack.return_value = fake_stack\n stacks.CreateUpdateDeleteStack(self.context).run(\n template_path=self.default_template,\n parameters=self.default_parameters,\n updated_template_path=self.default_template,\n files=self.default_files,\n environment=self.default_environment\n )\n\n mock__create_stack.assert_called_once_with(\n self.default_template,\n self.default_parameters,\n self.default_files,\n self.default_environment)\n mock__update_stack.assert_called_once_with(\n fake_stack, 
self.default_template,\n self.default_parameters,\n self.default_files,\n self.default_environment)\n mock__delete_stack.assert_called_once_with(fake_stack)\n\n def test_create_stack_and_scale(self):\n heat_scenario = stacks.CreateStackAndScale(self.context)\n stack = mock.Mock()\n heat_scenario._create_stack = mock.Mock(return_value=stack)\n heat_scenario._scale_stack = mock.Mock()\n\n heat_scenario.run(\n self.default_template, \"key\", -1,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n heat_scenario._create_stack.assert_called_once_with(\n self.default_template,\n self.default_parameters,\n self.default_files,\n self.default_environment)\n heat_scenario._scale_stack.assert_called_once_with(\n stack, \"key\", -1)\n\n @mock.patch(\"%s.CreateSuspendResumeDeleteStack._delete_stack\" % BASE)\n @mock.patch(\"%s.CreateSuspendResumeDeleteStack._resume_stack\" % BASE)\n @mock.patch(\"%s.CreateSuspendResumeDeleteStack._suspend_stack\" % BASE)\n @mock.patch(\"%s.CreateSuspendResumeDeleteStack._create_stack\" % BASE)\n def test_create_suspend_resume_delete_stack(self,\n mock__create_stack,\n mock__suspend_stack,\n mock__resume_stack,\n mock__delete_stack):\n stacks.CreateSuspendResumeDeleteStack(self.context).run(\n template_path=self.default_template,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n mock__create_stack.assert_called_once_with(\n self.default_template,\n self.default_parameters,\n self.default_files,\n self.default_environment\n )\n mock__suspend_stack.assert_called_once_with(\n mock__create_stack.return_value)\n mock__resume_stack.assert_called_once_with(\n mock__create_stack.return_value)\n mock__delete_stack.assert_called_once_with(\n mock__create_stack.return_value)\n\n @mock.patch(\"%s.CreateSnapshotRestoreDeleteStack._delete_stack\" % BASE)\n @mock.patch(\"%s.CreateSnapshotRestoreDeleteStack._restore_stack\" % BASE)\n 
@mock.patch(\"%s.CreateSnapshotRestoreDeleteStack._snapshot_stack\" % BASE,\n return_value={\"id\": \"dummy_id\"})\n @mock.patch(\"%s.CreateSnapshotRestoreDeleteStack._create_stack\" % BASE,\n return_value=object())\n def test_create_snapshot_restore_delete_stack(self,\n mock__create_stack,\n mock__snapshot_stack,\n mock__restore_stack,\n mock__delete_stack):\n\n stacks.CreateSnapshotRestoreDeleteStack(self.context).run(\n template_path=self.default_template,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n mock__create_stack.assert_called_once_with(\n self.default_template, self.default_parameters,\n self.default_files, self.default_environment)\n mock__snapshot_stack.assert_called_once_with(\n mock__create_stack.return_value)\n mock__restore_stack.assert_called_once_with(\n mock__create_stack.return_value, \"dummy_id\")\n mock__delete_stack.assert_called_once_with(\n mock__create_stack.return_value)\n\n @mock.patch(\"%s.CreateStackAndShowOutputViaAPI\"\n \"._stack_show_output_via_API\" % BASE)\n @mock.patch(\"%s.CreateStackAndShowOutputViaAPI._create_stack\" % BASE)\n def test_create_and_show_output_via_API(self,\n mock__create_stack,\n mock__stack_show_output_api):\n stacks.CreateStackAndShowOutputViaAPI(self.context).run(\n template_path=self.default_template,\n output_key=self.default_output_key,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n mock__create_stack.assert_called_once_with(\n self.default_template, self.default_parameters,\n self.default_files, self.default_environment)\n mock__stack_show_output_api.assert_called_once_with(\n mock__create_stack.return_value, self.default_output_key)\n\n @mock.patch(\"%s.CreateStackAndShowOutput._stack_show_output\" % BASE)\n @mock.patch(\"%s.CreateStackAndShowOutput._create_stack\" % BASE)\n def test_create_and_show_output(self,\n mock__create_stack,\n mock__stack_show_output):\n 
stacks.CreateStackAndShowOutput(self.context).run(\n template_path=self.default_template,\n output_key=self.default_output_key,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n mock__create_stack.assert_called_once_with(\n self.default_template, self.default_parameters,\n self.default_files, self.default_environment)\n mock__stack_show_output.assert_called_once_with(\n mock__create_stack.return_value, self.default_output_key)\n\n @mock.patch(\"%s.CreateStackAndListOutputViaAPI\"\n \"._stack_list_output_via_API\" % BASE)\n @mock.patch(\"%s.CreateStackAndListOutputViaAPI._create_stack\" % BASE)\n def test_create_and_list_output_via_API(self,\n mock__create_stack,\n mock__stack_list_output_api):\n stacks.CreateStackAndListOutputViaAPI(self.context).run(\n template_path=self.default_template,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n mock__create_stack.assert_called_once_with(\n self.default_template, self.default_parameters,\n self.default_files, self.default_environment)\n mock__stack_list_output_api.assert_called_once_with(\n mock__create_stack.return_value)\n\n @mock.patch(\"%s.CreateStackAndListOutput._stack_list_output\" % BASE)\n @mock.patch(\"%s.CreateStackAndListOutput._create_stack\" % BASE)\n def test_create_and_list_output(self,\n mock__create_stack,\n mock__stack_list_output):\n stacks.CreateStackAndListOutput(self.context).run(\n template_path=self.default_template,\n parameters=self.default_parameters,\n files=self.default_files,\n environment=self.default_environment)\n\n mock__create_stack.assert_called_once_with(\n self.default_template, self.default_parameters,\n self.default_files, self.default_environment)\n mock__stack_list_output.assert_called_once_with(\n mock__create_stack.return_value)\n" }, { "alpha_fraction": 0.6678564548492432, "alphanum_fraction": 0.6718223094940186, "avg_line_length": 36.917293548583984, 
"blob_id": "cf0215946495f2fa84c35d8f9b7da37c2cbde671", "content_id": "7623058d94ac0b3027aa96c309b18741860aa482", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5043, "license_type": "permissive", "max_line_length": 79, "num_lines": 133, "path": "/rally_openstack/task/cleanup/base.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom rally.common import cfg\nfrom rally.task import utils\n\nCONF = cfg.CONF\n\ncleanup_group = cfg.OptGroup(name=\"cleanup\", title=\"Cleanup Options\")\n\n\n# NOTE(andreykurilin): There are cases when there is no way to use any kind\n# of \"name\" for resource as an identifier of alignment resource to the\n# particular task run and even to Rally itself. 
Previously, we used empty\n# strings as a workaround for name matching specific templates, but\n# theoretically such behaviour can hide other cases when resource should have\n# a name property, but it is missed.\n# Let's use instances of specific class to return as a name of resources\n# which do not have names at all.\nclass NoName(object):\n def __init__(self, resource_type):\n self.resource_type = resource_type\n\n def __repr__(self):\n return \"<NoName %s resource>\" % self.resource_type\n\n\ndef resource(service, resource, order=0, admin_required=False,\n perform_for_admin_only=False, tenant_resource=False,\n max_attempts=3, timeout=CONF.openstack.resource_deletion_timeout,\n interval=1, threads=CONF.openstack.cleanup_threads):\n \"\"\"Decorator that overrides resource specification.\n\n Just put it on top of your resource class and specify arguments that you\n need.\n\n :param service: It is equal to client name for corresponding service.\n E.g. \"nova\", \"cinder\" or \"zaqar\"\n :param resource: Client manager name for resource. E.g. 
in case of\n nova.servers you should write here \"servers\"\n :param order: Used to adjust priority of cleanup for different resource\n types\n :param admin_required: Admin user is required\n :param perform_for_admin_only: Perform cleanup for admin user only\n :param tenant_resource: Perform deletion only 1 time per tenant\n :param max_attempts: Max amount of attempts to delete single resource\n :param timeout: Max duration of deletion in seconds\n :param interval: Resource status pooling interval\n :param threads: Amount of threads (workers) that are deleting resources\n simultaneously\n \"\"\"\n\n def inner(cls):\n # TODO(boris-42): This can be written better I believe =)\n cls._service = service\n cls._resource = resource\n cls._order = order\n cls._admin_required = admin_required\n cls._perform_for_admin_only = perform_for_admin_only\n cls._max_attempts = max_attempts\n cls._timeout = timeout\n cls._interval = interval\n cls._threads = threads\n cls._tenant_resource = tenant_resource\n\n return cls\n\n return inner\n\n\n@resource(service=None, resource=None)\nclass ResourceManager(object):\n \"\"\"Base class for cleanup plugins for specific resources.\n\n You should use @resource decorator to specify major configuration of\n resource manager. 
Usually you should specify: service, resource and order.\n\n If project python client is very specific, you can override delete(),\n list() and is_deleted() methods to make them fit to your case.\n \"\"\"\n\n def __init__(self, resource=None, admin=None, user=None, tenant_uuid=None):\n self.admin = admin\n self.user = user\n self.raw_resource = resource\n self.tenant_uuid = tenant_uuid\n\n def _manager(self):\n client = self._admin_required and self.admin or self.user\n return getattr(getattr(client, self._service)(), self._resource)\n\n def id(self):\n \"\"\"Returns id of resource.\"\"\"\n return self.raw_resource.id\n\n def name(self):\n \"\"\"Returns name of resource.\"\"\"\n return self.raw_resource.name\n\n def is_deleted(self):\n \"\"\"Checks if the resource is deleted.\n\n Fetch resource by id from service and check it status.\n In case of NotFound or status is DELETED or DELETE_COMPLETE returns\n True, otherwise False.\n \"\"\"\n try:\n resource = self._manager().get(self.id())\n except Exception as e:\n return getattr(e, \"code\", getattr(e, \"http_status\", 400)) == 404\n\n return utils.get_status(resource) in (\"DELETED\", \"DELETE_COMPLETE\")\n\n def delete(self):\n \"\"\"Delete resource that corresponds to instance of this class.\"\"\"\n self._manager().delete(self.id())\n\n def list(self):\n \"\"\"List all resources specific for admin or user.\"\"\"\n return self._manager().list()\n" }, { "alpha_fraction": 0.6197004914283752, "alphanum_fraction": 0.6264501214027405, "avg_line_length": 41.33035659790039, "blob_id": "e93636f86d8f1ac443c27588156699f6e0e1a9f8", "content_id": "fe8013b8f71adef130057b60300bf31658b0361e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4741, "license_type": "permissive", "max_line_length": 79, "num_lines": 112, "path": "/rally_openstack/task/scenarios/ironic/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 
2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport string\n\nfrom rally.common import cfg\nfrom rally.task import atomic\nfrom rally.task import utils\n\nfrom rally_openstack.task import scenario\n\n\nCONF = cfg.CONF\n\n\nclass IronicScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Ironic scenarios with basic atomic actions.\"\"\"\n\n # NOTE(stpierre): Ironic has two name checkers. The new-style\n # checker, in API v1.10+, is quite relaxed and will Just Work with\n # the default random name pattern. (See\n # https://bugs.launchpad.net/ironic/+bug/1434376.) The old-style\n # checker *claims* to implement RFCs 952 and 1123, but it doesn't\n # actually. (See https://bugs.launchpad.net/ironic/+bug/1468508\n # for details.) 
The default RESOURCE_NAME_FORMAT works fine for\n # the new-style checker, but the old-style checker only allows\n # underscores after the first dot, for reasons that I'm sure are\n # entirely obvious, so we have to supply a bespoke format for\n # Ironic names.\n RESOURCE_NAME_FORMAT = \"s-rally-XXXXXXXX-XXXXXXXX\"\n RESOURCE_NAME_ALLOWED_CHARACTERS = string.ascii_lowercase + string.digits\n\n @atomic.action_timer(\"ironic.create_node\")\n def _create_node(self, driver, properties, **kwargs):\n \"\"\"Create node immediately.\n\n :param driver: The name of the driver used to manage this Node.\n :param properties: Key/value pair describing the physical\n characteristics of the node.\n :param kwargs: optional parameters to create image\n :returns: node object\n \"\"\"\n kwargs[\"name\"] = self.generate_random_name()\n node = self.admin_clients(\"ironic\").node.create(driver=driver,\n properties=properties,\n **kwargs)\n\n self.sleep_between(CONF.openstack.ironic_node_create_poll_interval)\n node = utils.wait_for_status(\n node,\n ready_statuses=[\"AVAILABLE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.ironic_node_create_timeout,\n check_interval=CONF.openstack.ironic_node_poll_interval,\n id_attr=\"uuid\", status_attr=\"provision_state\"\n )\n\n return node\n\n @atomic.action_timer(\"ironic.list_nodes\")\n def _list_nodes(self, associated=None, maintenance=None, detail=False,\n sort_dir=None):\n \"\"\"Return list of nodes.\n\n :param associated: Optional. Either a Boolean or a string\n representation of a Boolean that indicates whether\n to return a list of associated (True or \"True\") or\n unassociated (False or \"False\") nodes.\n :param maintenance: Optional. 
Either a Boolean or a string\n representation of a Boolean that indicates whether\n to return nodes in maintenance mode (True or\n \"True\"), or not in maintenance mode (False or\n \"False\").\n :param detail: Optional, boolean whether to return detailed information\n about nodes.\n :param sort_dir: Optional, direction of sorting, either 'asc' (the\n default) or 'desc'.\n :returns: A list of nodes.\n \"\"\"\n return self.admin_clients(\"ironic\").node.list(\n associated=associated, maintenance=maintenance, detail=detail,\n sort_dir=sort_dir)\n\n @atomic.action_timer(\"ironic.delete_node\")\n def _delete_node(self, node):\n \"\"\"Delete the node with specific id.\n\n :param node: Ironic node object\n \"\"\"\n self.admin_clients(\"ironic\").node.delete(node.uuid)\n\n utils.wait_for_status(\n node,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.ironic_node_delete_timeout,\n check_interval=CONF.openstack.ironic_node_poll_interval,\n id_attr=\"uuid\", status_attr=\"provision_state\"\n )\n" }, { "alpha_fraction": 0.6162087917327881, "alphanum_fraction": 0.6176938414573669, "avg_line_length": 35.538761138916016, "blob_id": "1068265683bbe7ba0b1e7cfb0c22fd0f27fe1220", "content_id": "a8420ad36c7180b3c496e1ccad3926ab53371d01", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9427, "license_type": "permissive", "max_line_length": 79, "num_lines": 258, "path": "/tests/unit/common/services/identity/test_identity.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.common.services.identity import identity\nfrom tests.unit import test\n\n\[email protected]\nclass IdentityTestCase(test.TestCase):\n def setUp(self):\n super(IdentityTestCase, self).setUp()\n self.clients = mock.MagicMock()\n\n def get_service_with_fake_impl(self):\n path = \"rally_openstack.common.services.identity.identity\"\n with mock.patch(\"%s.Identity.discover_impl\" % path) as mock_discover:\n mock_discover.return_value = mock.MagicMock(), None\n service = identity.Identity(self.clients)\n return service\n\n def test_create_project(self):\n service = self.get_service_with_fake_impl()\n project_name = \"name\"\n domain_name = \"domain\"\n service.create_project(project_name, domain_name=domain_name)\n service._impl.create_project.assert_called_once_with(\n project_name, domain_name=domain_name)\n\n def test_update_project(self):\n service = self.get_service_with_fake_impl()\n\n project_id = \"id\"\n project_name = \"name\"\n description = \"descr\"\n enabled = False\n service.update_project(project_id=project_id, name=project_name,\n description=description, enabled=enabled)\n service._impl.update_project.assert_called_once_with(\n project_id, name=project_name, description=description,\n enabled=enabled)\n\n def test_delete_project(self):\n service = self.get_service_with_fake_impl()\n project = \"id\"\n service.delete_project(project)\n service._impl.delete_project.assert_called_once_with(project)\n\n def test_list_projects(self):\n service = self.get_service_with_fake_impl()\n 
service.list_projects()\n service._impl.list_projects.assert_called_once_with()\n\n def test_get_project(self):\n service = self.get_service_with_fake_impl()\n project = \"id\"\n service.get_project(project)\n service._impl.get_project.assert_called_once_with(project)\n\n def test_create_user(self):\n service = self.get_service_with_fake_impl()\n\n username = \"username\"\n password = \"password\"\n project_id = \"project_id\"\n domain_name = \"domain_name\"\n\n service.create_user(username=username, password=password,\n project_id=project_id, domain_name=domain_name)\n service._impl.create_user.assert_called_once_with(\n username=username, password=password, project_id=project_id,\n domain_name=domain_name, default_role=\"member\")\n\n def test_create_users(self):\n service = self.get_service_with_fake_impl()\n\n project_id = \"project_id\"\n n = 3\n user_create_args = {}\n\n service.create_users(project_id, number_of_users=n,\n user_create_args=user_create_args)\n service._impl.create_users.assert_called_once_with(\n project_id, number_of_users=n, user_create_args=user_create_args)\n\n def test_delete_user(self):\n service = self.get_service_with_fake_impl()\n user_id = \"fake_id\"\n service.delete_user(user_id)\n service._impl.delete_user.assert_called_once_with(user_id)\n\n def test_list_users(self):\n service = self.get_service_with_fake_impl()\n service.list_users()\n service._impl.list_users.assert_called_once_with()\n\n def test_update_user(self):\n service = self.get_service_with_fake_impl()\n\n user_id = \"id\"\n user_name = \"name\"\n email = \"mail\"\n password = \"pass\"\n enabled = False\n service.update_user(user_id, name=user_name, password=password,\n email=email, enabled=enabled)\n service._impl.update_user.assert_called_once_with(\n user_id, name=user_name, password=password, email=email,\n enabled=enabled)\n\n def test_get_user(self):\n service = self.get_service_with_fake_impl()\n user = \"id\"\n service.get_user(user)\n 
service._impl.get_user.assert_called_once_with(user)\n\n def test_create_service(self):\n service = self.get_service_with_fake_impl()\n\n service_name = \"name\"\n service_type = \"service_type\"\n description = \"descr\"\n service.create_service(service_name, service_type=service_type,\n description=description)\n service._impl.create_service.assert_called_once_with(\n name=service_name, service_type=service_type,\n description=description)\n\n def test_delete_service(self):\n service = self.get_service_with_fake_impl()\n service_id = \"id\"\n\n service.delete_service(service_id)\n service._impl.delete_service.assert_called_once_with(service_id)\n\n def test_list_services(self):\n service = self.get_service_with_fake_impl()\n service.list_services()\n service._impl.list_services.assert_called_once_with()\n\n def test_get_service(self):\n service = self.get_service_with_fake_impl()\n service_id = \"id\"\n service.get_service(service_id)\n service._impl.get_service.assert_called_once_with(service_id)\n\n def test_get_service_by_name(self):\n service = self.get_service_with_fake_impl()\n service_name = \"name\"\n service.get_service_by_name(service_name)\n service._impl.get_service_by_name.assert_called_once_with(service_name)\n\n def test_create_role(self):\n service = self.get_service_with_fake_impl()\n\n name = \"name\"\n service.create_role(name)\n service._impl.create_role.assert_called_once_with(\n name=name, domain_name=None)\n\n def test_add_role(self):\n service = self.get_service_with_fake_impl()\n\n role_id = \"id\"\n user_id = \"user_id\"\n project_id = \"project_id\"\n service.add_role(role_id, user_id=user_id, project_id=project_id)\n service._impl.add_role.assert_called_once_with(role_id=role_id,\n user_id=user_id,\n project_id=project_id)\n\n def test_delete_role(self):\n service = self.get_service_with_fake_impl()\n role = \"id\"\n service.delete_role(role)\n service._impl.delete_role.assert_called_once_with(role)\n\n def test_revoke_role(self):\n 
service = self.get_service_with_fake_impl()\n\n role_id = \"id\"\n user_id = \"user_id\"\n project_id = \"project_id\"\n\n service.revoke_role(role_id, user_id=user_id, project_id=project_id)\n\n service._impl.revoke_role.assert_called_once_with(\n role_id=role_id, user_id=user_id, project_id=project_id)\n\n @ddt.data((None, None, None), (\"user_id\", \"project_id\", \"domain\"))\n def test_list_roles(self, params):\n user, project, domain = params\n service = self.get_service_with_fake_impl()\n service.list_roles(user_id=user, project_id=project,\n domain_name=domain)\n service._impl.list_roles.assert_called_once_with(user_id=user,\n project_id=project,\n domain_name=domain)\n\n def test_get_role(self):\n service = self.get_service_with_fake_impl()\n role = \"id\"\n service.get_role(role)\n service._impl.get_role.assert_called_once_with(role)\n\n def test_create_ec2credentials(self):\n service = self.get_service_with_fake_impl()\n\n user_id = \"id\"\n project_id = \"project-id\"\n\n service.create_ec2credentials(user_id=user_id, project_id=project_id)\n service._impl.create_ec2credentials.assert_called_once_with(\n user_id=user_id, project_id=project_id)\n\n def test_list_ec2credentials(self):\n service = self.get_service_with_fake_impl()\n\n user_id = \"id\"\n\n service.list_ec2credentials(user_id=user_id)\n service._impl.list_ec2credentials.assert_called_once_with(user_id)\n\n def test_delete_ec2credential(self):\n service = self.get_service_with_fake_impl()\n\n user_id = \"id\"\n access = \"access\"\n\n service.delete_ec2credential(user_id=user_id, access=access)\n service._impl.delete_ec2credential.assert_called_once_with(\n user_id=user_id, access=access)\n\n def test_fetch_token(self):\n service = self.get_service_with_fake_impl()\n service.fetch_token()\n service._impl.fetch_token.assert_called_once_with()\n\n def test_validate_token(self):\n service = self.get_service_with_fake_impl()\n\n token = \"id\"\n service.validate_token(token)\n 
service._impl.validate_token.assert_called_once_with(token)\n" }, { "alpha_fraction": 0.6212534308433533, "alphanum_fraction": 0.6332424879074097, "avg_line_length": 41.67441940307617, "blob_id": "feeb0304bdcad5511ed09d0744145a6e0df240b6", "content_id": "4123fea863ce4c198eb59e4fb4c6d1281d48ca35", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1835, "license_type": "permissive", "max_line_length": 78, "num_lines": 43, "path": "/rally_openstack/common/cfg/sahara.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\n\nOPTS = {\"openstack\": [\n cfg.IntOpt(\"sahara_cluster_create_timeout\",\n default=1800,\n deprecated_group=\"benchmark\",\n help=\"A timeout in seconds for a cluster create operation\"),\n cfg.IntOpt(\"sahara_cluster_delete_timeout\",\n default=900,\n deprecated_group=\"benchmark\",\n help=\"A timeout in seconds for a cluster delete operation\"),\n cfg.IntOpt(\"sahara_cluster_check_interval\",\n default=5,\n deprecated_group=\"benchmark\",\n help=\"Cluster status polling interval in seconds\"),\n cfg.IntOpt(\"sahara_job_execution_timeout\",\n default=600,\n deprecated_group=\"benchmark\",\n help=\"A timeout in seconds for a Job Execution to complete\"),\n cfg.IntOpt(\"sahara_job_check_interval\",\n default=5,\n deprecated_group=\"benchmark\",\n help=\"Job Execution status polling interval in seconds\"),\n cfg.IntOpt(\"sahara_workers_per_proxy\",\n default=20,\n deprecated_group=\"benchmark\",\n help=\"Amount of workers one proxy should serve to.\")\n]}\n" }, { "alpha_fraction": 0.508474588394165, "alphanum_fraction": 0.5220618844032288, "avg_line_length": 39.33333206176758, "blob_id": "85797d91da30e76e46daf357f6c97fdf20c49e9e", "content_id": "0cca5327cea55468b8beb5d22e215b9c3d7ca77b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7139, "license_type": "permissive", "max_line_length": 81, "num_lines": 177, "path": "/tests/unit/task/contexts/network/test_network.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\nimport netaddr\n\nfrom rally_openstack.task.contexts.network import networks as network_context\nfrom tests.unit import test\n\nPATH = \"rally_openstack.task.contexts.network.networks\"\n\n\[email protected]\nclass NetworkTestCase(test.TestCase):\n def get_context(self, **kwargs):\n return {\"task\": {\"uuid\": \"foo_task\"},\n \"admin\": {\"credential\": \"foo_admin\"},\n \"config\": {\"network\": kwargs},\n \"users\": [{\"id\": \"foo_user\", \"tenant_id\": \"foo_tenant\",\n \"credential\": mock.MagicMock()},\n {\"id\": \"bar_user\", \"tenant_id\": \"bar_tenant\",\n \"credential\": mock.MagicMock()}],\n \"tenants\": {\"foo_tenant\": {\"networks\": [{\"id\": \"foo_net\"}]},\n \"bar_tenant\": {\"networks\": [{\"id\": \"bar_net\"}]}}}\n\n def test_default_start_cidr_is_valid(self):\n netaddr.IPNetwork(network_context.Network.DEFAULT_CONFIG[\"start_cidr\"])\n\n def test__init__default(self):\n context = network_context.Network(self.get_context())\n self.assertEqual(1, context.config[\"networks_per_tenant\"])\n self.assertEqual(network_context.Network.DEFAULT_CONFIG[\"start_cidr\"],\n context.config[\"start_cidr\"])\n\n def test__init__explicit(self):\n context = network_context.Network(\n self.get_context(start_cidr=\"foo_cidr\", networks_per_tenant=42,\n network_create_args={\"fakearg\": \"fake\"},\n dns_nameservers=[\"1.2.3.4\", \"5.6.7.8\"]))\n self.assertEqual(42, context.config[\"networks_per_tenant\"])\n self.assertEqual(\"foo_cidr\", context.config[\"start_cidr\"])\n self.assertEqual({\"fakearg\": 
\"fake\"},\n context.config[\"network_create_args\"])\n self.assertEqual((\"1.2.3.4\", \"5.6.7.8\"),\n context.config[\"dns_nameservers\"])\n\n def test_setup(self):\n ctx = self.get_context(networks_per_tenant=1,\n network_create_args={},\n subnets_per_network=2,\n dns_nameservers=None,\n external=True)\n user = ctx[\"users\"][0]\n nc = user[\"credential\"].clients.return_value.neutron.return_value\n network = {\"id\": \"net-id\", \"name\": \"s-1\"}\n subnets = [\n {\"id\": \"subnet1-id\", \"name\": \"subnet1-name\"},\n {\"id\": \"subnet2-id\", \"name\": \"subnet2-name\"}\n ]\n router = {\"id\": \"router\"}\n nc.create_network.return_value = {\"network\": network.copy()}\n nc.create_router.return_value = {\"router\": router.copy()}\n nc.create_subnet.side_effect = [{\"subnet\": s} for s in subnets]\n\n network_context.Network(ctx).setup()\n\n ctx_data = ctx[\"tenants\"][ctx[\"users\"][0][\"tenant_id\"]]\n self.assertEqual(\n [{\n \"id\": network[\"id\"],\n \"name\": network[\"name\"],\n \"router_id\": router[\"id\"],\n \"subnets\": [s[\"id\"] for s in subnets]\n }],\n ctx_data[\"networks\"]\n )\n\n nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY}})\n nc.create_router.assert_called_once_with(\n {\"router\": {\"name\": mock.ANY}})\n self.assertEqual(\n [\n mock.call({\"subnet\": {\n \"name\": mock.ANY, \"network_id\": network[\"id\"],\n \"dns_nameservers\": mock.ANY,\n \"ip_version\": 4,\n \"cidr\": mock.ANY}})\n for i in range(2)],\n nc.create_subnet.call_args_list\n )\n self.assertEqual(\n [\n mock.call(router[\"id\"], {\"subnet_id\": subnets[0][\"id\"]}),\n mock.call(router[\"id\"], {\"subnet_id\": subnets[1][\"id\"]})\n ],\n nc.add_interface_router.call_args_list\n )\n\n def test_setup_without_router(self):\n dns_nameservers = [\"1.2.3.4\", \"5.6.7.8\"]\n ctx = self.get_context(networks_per_tenant=1,\n network_create_args={},\n subnets_per_network=2,\n router=None,\n dns_nameservers=dns_nameservers)\n user = ctx[\"users\"][0]\n nc 
= user[\"credential\"].clients.return_value.neutron.return_value\n network = {\"id\": \"net-id\", \"name\": \"s-1\"}\n subnets = [\n {\"id\": \"subnet1-id\", \"name\": \"subnet1-name\"},\n {\"id\": \"subnet2-id\", \"name\": \"subnet2-name\"}\n ]\n router = {\"id\": \"router\"}\n nc.create_network.return_value = {\"network\": network.copy()}\n nc.create_router.return_value = {\"router\": router.copy()}\n nc.create_subnet.side_effect = [{\"subnet\": s} for s in subnets]\n\n network_context.Network(ctx).setup()\n\n ctx_data = ctx[\"tenants\"][ctx[\"users\"][0][\"tenant_id\"]]\n self.assertEqual(\n [{\n \"id\": network[\"id\"],\n \"name\": network[\"name\"],\n \"router_id\": None,\n \"subnets\": [s[\"id\"] for s in subnets]\n }],\n ctx_data[\"networks\"]\n )\n\n nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY}})\n self.assertEqual(\n [\n mock.call({\"subnet\": {\n \"name\": mock.ANY, \"network_id\": network[\"id\"],\n # rally.task.context.Context converts list to unchangeable\n # collection - tuple\n \"dns_nameservers\": tuple(dns_nameservers),\n \"ip_version\": 4,\n \"cidr\": mock.ANY}})\n for i in range(2)],\n nc.create_subnet.call_args_list\n )\n\n self.assertFalse(nc.create_router.called)\n self.assertFalse(nc.add_interface_router.called)\n\n @mock.patch(\"%s.resource_manager.cleanup\" % PATH)\n def test_cleanup(self, mock_cleanup):\n ctx = self.get_context()\n\n network_context.Network(ctx).cleanup()\n\n mock_cleanup.assert_called_once_with(\n names=[\"neutron.subnet\", \"neutron.network\", \"neutron.router\",\n \"neutron.port\"],\n superclass=network_context.Network,\n admin=ctx.get(\"admin\"),\n users=ctx.get(\"users\", []),\n task_id=ctx[\"task\"][\"uuid\"]\n )\n" }, { "alpha_fraction": 0.5000771880149841, "alphanum_fraction": 0.50614994764328, "avg_line_length": 34.85055160522461, "blob_id": "4772786a93d85e18ebd6756e9dd4f7f1422bf6bf", "content_id": "4bd1501d21ee4a4b537e0ee664e933fcb5280b34", "detected_licenses": [ "Apache-2.0" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19431, "license_type": "permissive", "max_line_length": 79, "num_lines": 542, "path": "/tests/unit/task/scenarios/sahara/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom oslo_utils import uuidutils\nfrom saharaclient.api import base as sahara_base\n\nfrom rally.common import cfg\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.scenarios.sahara import utils\nfrom tests.unit import test\n\n\nCONF = cfg.CONF\n\nSAHARA_UTILS = \"rally_openstack.task.scenarios.sahara.utils\"\n\n\nclass SaharaScenarioTestCase(test.ScenarioTestCase):\n # NOTE(stpierre): the Sahara utils generally do funny stuff with\n # wait_for() calls -- frequently the is_ready and\n # update_resource arguments are functions defined in the Sahara\n # utils themselves instead of the more standard resource_is() and\n # get_from_manager() calls. 
As a result, the tests below do more\n # integrated/functional testing of wait_for() calls, and we can't\n # just mock out wait_for and friends the way we usually do.\n patch_task_utils = False\n\n def setUp(self):\n super(SaharaScenarioTestCase, self).setUp()\n\n CONF.set_override(\"sahara_cluster_check_interval\", 0, \"openstack\")\n CONF.set_override(\"sahara_job_check_interval\", 0, \"openstack\")\n\n def test_list_node_group_templates(self):\n ngts = []\n self.clients(\"sahara\").node_group_templates.list.return_value = ngts\n\n scenario = utils.SaharaScenario(self.context)\n return_ngts_list = scenario._list_node_group_templates()\n\n self.assertEqual(ngts, return_ngts_list)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"sahara.list_node_group_templates\")\n\n @mock.patch(SAHARA_UTILS + \".SaharaScenario.generate_random_name\",\n return_value=\"random_name\")\n @mock.patch(SAHARA_UTILS + \".sahara_consts\")\n def test_create_node_group_templates(\n self, mock_sahara_consts,\n mock_generate_random_name):\n\n scenario = utils.SaharaScenario(self.context)\n mock_processes = {\n \"test_plugin\": {\n \"test_version\": {\n \"master\": [\"p1\"],\n \"worker\": [\"p2\"]\n }\n }\n }\n\n mock_sahara_consts.NODE_PROCESSES = mock_processes\n\n scenario._create_master_node_group_template(\n flavor_id=\"test_flavor\",\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n use_autoconfig=True\n )\n scenario._create_worker_node_group_template(\n flavor_id=\"test_flavor\",\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n use_autoconfig=True\n )\n\n create_calls = [\n mock.call(\n name=\"random_name\",\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n flavor_id=\"test_flavor\",\n node_processes=[\"p1\"],\n use_autoconfig=True),\n mock.call(\n name=\"random_name\",\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n flavor_id=\"test_flavor\",\n node_processes=[\"p2\"],\n use_autoconfig=True\n )]\n 
self.clients(\"sahara\").node_group_templates.create.assert_has_calls(\n create_calls)\n\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"sahara.create_master_node_group_template\")\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"sahara.create_worker_node_group_template\")\n\n def test_delete_node_group_templates(self):\n scenario = utils.SaharaScenario(self.context)\n ng = mock.MagicMock(id=42)\n\n scenario._delete_node_group_template(ng)\n\n delete_mock = self.clients(\"sahara\").node_group_templates.delete\n delete_mock.assert_called_once_with(42)\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"sahara.delete_node_group_template\")\n\n @mock.patch(SAHARA_UTILS + \".SaharaScenario.generate_random_name\",\n return_value=\"random_name\")\n @mock.patch(SAHARA_UTILS + \".sahara_consts\")\n def test_launch_cluster(self, mock_sahara_consts,\n mock_generate_random_name):\n\n self.context.update({\n \"tenant\": {\n \"networks\": [\n {\n \"id\": \"test_neutron_id\",\n \"router_id\": \"test_router_id\"\n }\n ]\n }\n })\n\n self.clients(\"services\").values.return_value = [\n consts.Service.NEUTRON\n ]\n\n scenario = utils.SaharaScenario(context=self.context)\n\n mock_processes = {\n \"test_plugin\": {\n \"test_version\": {\n \"master\": [\"p1\"],\n \"worker\": [\"p2\"]\n }\n }\n }\n\n mock_configs = {\n \"test_plugin\": {\n \"test_version\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n }\n }\n }\n\n floating_ip_pool_uuid = uuidutils.generate_uuid()\n node_groups = [\n {\n \"name\": \"master-ng\",\n \"flavor_id\": \"test_flavor_m\",\n \"node_processes\": [\"p1\"],\n \"floating_ip_pool\": floating_ip_pool_uuid,\n \"count\": 1,\n \"auto_security_group\": True,\n \"security_groups\": [\"g1\", \"g2\"],\n \"node_configs\": {\"HDFS\": {\"local_config\": \"local_value\"}},\n \"use_autoconfig\": True,\n }, {\n \"name\": \"worker-ng\",\n \"flavor_id\": \"test_flavor_w\",\n \"node_processes\": [\"p2\"],\n 
\"floating_ip_pool\": floating_ip_pool_uuid,\n \"volumes_per_node\": 5,\n \"volumes_size\": 10,\n \"count\": 42,\n \"auto_security_group\": True,\n \"security_groups\": [\"g1\", \"g2\"],\n \"node_configs\": {\"HDFS\": {\"local_config\": \"local_value\"}},\n \"use_autoconfig\": True,\n }\n ]\n\n mock_sahara_consts.NODE_PROCESSES = mock_processes\n mock_sahara_consts.REPLICATION_CONFIGS = mock_configs\n\n self.clients(\"sahara\").clusters.create.return_value.id = (\n \"test_cluster_id\")\n\n self.clients(\"sahara\").clusters.get.return_value.status = (\n \"active\")\n\n scenario._launch_cluster(\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n master_flavor_id=\"test_flavor_m\",\n worker_flavor_id=\"test_flavor_w\",\n image_id=\"test_image\",\n floating_ip_pool=floating_ip_pool_uuid,\n volumes_per_node=5,\n volumes_size=10,\n auto_security_group=True,\n security_groups=[\"g1\", \"g2\"],\n workers_count=42,\n node_configs={\"HDFS\": {\"local_config\": \"local_value\"}},\n use_autoconfig=True\n )\n\n self.clients(\"sahara\").clusters.create.assert_called_once_with(\n name=\"random_name\",\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n node_groups=node_groups,\n default_image_id=\"test_image\",\n cluster_configs={\"HDFS\": {\"dfs.replication\": 3}},\n net_id=\"test_neutron_id\",\n anti_affinity=None,\n use_autoconfig=True\n )\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"sahara.launch_cluster\")\n\n @mock.patch(SAHARA_UTILS + \".SaharaScenario.generate_random_name\",\n return_value=\"random_name\")\n @mock.patch(SAHARA_UTILS + \".sahara_consts\")\n def test_launch_cluster_with_proxy(self, mock_sahara_consts,\n mock_generate_random_name):\n\n context = {\n \"tenant\": {\n \"networks\": [\n {\n \"id\": \"test_neutron_id\",\n \"router_id\": \"test_router_id\"\n }\n ]\n }\n }\n\n self.clients(\"services\").values.return_value = [\n consts.Service.NEUTRON\n ]\n\n scenario = utils.SaharaScenario(context=context)\n\n 
mock_processes = {\n \"test_plugin\": {\n \"test_version\": {\n \"master\": [\"p1\"],\n \"worker\": [\"p2\"]\n }\n }\n }\n\n mock_configs = {\n \"test_plugin\": {\n \"test_version\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n }\n }\n }\n\n floating_ip_pool_uuid = uuidutils.generate_uuid()\n node_groups = [\n {\n \"name\": \"master-ng\",\n \"flavor_id\": \"test_flavor_m\",\n \"node_processes\": [\"p1\"],\n \"floating_ip_pool\": floating_ip_pool_uuid,\n \"count\": 1,\n \"auto_security_group\": True,\n \"security_groups\": [\"g1\", \"g2\"],\n \"node_configs\": {\"HDFS\": {\"local_config\": \"local_value\"}},\n \"is_proxy_gateway\": True,\n \"use_autoconfig\": True,\n }, {\n \"name\": \"worker-ng\",\n \"flavor_id\": \"test_flavor_w\",\n \"node_processes\": [\"p2\"],\n \"volumes_per_node\": 5,\n \"volumes_size\": 10,\n \"count\": 40,\n \"auto_security_group\": True,\n \"security_groups\": [\"g1\", \"g2\"],\n \"node_configs\": {\"HDFS\": {\"local_config\": \"local_value\"}},\n \"use_autoconfig\": True,\n }, {\n \"name\": \"proxy-ng\",\n \"flavor_id\": \"test_flavor_w\",\n \"node_processes\": [\"p2\"],\n \"floating_ip_pool\": floating_ip_pool_uuid,\n \"volumes_per_node\": 5,\n \"volumes_size\": 10,\n \"count\": 2,\n \"auto_security_group\": True,\n \"security_groups\": [\"g1\", \"g2\"],\n \"node_configs\": {\"HDFS\": {\"local_config\": \"local_value\"}},\n \"is_proxy_gateway\": True,\n \"use_autoconfig\": True,\n }\n ]\n\n mock_sahara_consts.NODE_PROCESSES = mock_processes\n mock_sahara_consts.REPLICATION_CONFIGS = mock_configs\n\n self.clients(\"sahara\").clusters.create.return_value = mock.MagicMock(\n id=\"test_cluster_id\")\n\n self.clients(\"sahara\").clusters.get.return_value = mock.MagicMock(\n status=\"active\")\n\n scenario._launch_cluster(\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n master_flavor_id=\"test_flavor_m\",\n worker_flavor_id=\"test_flavor_w\",\n image_id=\"test_image\",\n 
floating_ip_pool=floating_ip_pool_uuid,\n volumes_per_node=5,\n volumes_size=10,\n auto_security_group=True,\n security_groups=[\"g1\", \"g2\"],\n workers_count=42,\n node_configs={\"HDFS\": {\"local_config\": \"local_value\"}},\n enable_proxy=True,\n use_autoconfig=True\n )\n\n self.clients(\"sahara\").clusters.create.assert_called_once_with(\n name=\"random_name\",\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n node_groups=node_groups,\n default_image_id=\"test_image\",\n cluster_configs={\"HDFS\": {\"dfs.replication\": 3}},\n net_id=\"test_neutron_id\",\n anti_affinity=None,\n use_autoconfig=True\n )\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"sahara.launch_cluster\")\n\n @mock.patch(SAHARA_UTILS + \".SaharaScenario.generate_random_name\",\n return_value=\"random_name\")\n @mock.patch(SAHARA_UTILS + \".sahara_consts\")\n def test_launch_cluster_error(self, mock_sahara_consts,\n mock_generate_random_name):\n\n scenario = utils.SaharaScenario(self.context)\n mock_processes = {\n \"test_plugin\": {\n \"test_version\": {\n \"master\": [\"p1\"],\n \"worker\": [\"p2\"]\n }\n }\n }\n\n mock_configs = {\n \"test_plugin\": {\n \"test_version\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n }\n }\n }\n\n mock_sahara_consts.NODE_PROCESSES = mock_processes\n mock_sahara_consts.REPLICATION_CONFIGS = mock_configs\n\n self.clients(\"sahara\").clusters.create.return_value = mock.MagicMock(\n id=\"test_cluster_id\")\n\n self.clients(\"sahara\").clusters.get.return_value = mock.MagicMock(\n status=\"error\")\n\n self.assertRaises(exceptions.GetResourceErrorStatus,\n scenario._launch_cluster,\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n master_flavor_id=\"test_flavor_m\",\n worker_flavor_id=\"test_flavor_w\",\n image_id=\"test_image\",\n floating_ip_pool=\"test_pool\",\n volumes_per_node=5,\n volumes_size=10,\n workers_count=42,\n node_configs={\"HDFS\": {\"local_config\":\n 
\"local_value\"}})\n\n def test_scale_cluster(self):\n scenario = utils.SaharaScenario(self.context)\n cluster = mock.MagicMock(id=42, node_groups=[{\n \"name\": \"random_master\",\n \"count\": 1\n }, {\n \"name\": \"random_worker\",\n \"count\": 41\n }])\n self.clients(\"sahara\").clusters.get.return_value = mock.MagicMock(\n id=42,\n status=\"active\")\n\n expected_scale_object = {\n \"resize_node_groups\": [{\n \"name\": \"random_worker\",\n \"count\": 42\n }]\n }\n\n scenario._scale_cluster(cluster, 1)\n self.clients(\"sahara\").clusters.scale.assert_called_once_with(\n 42, expected_scale_object)\n\n def test_delete_cluster(self):\n scenario = utils.SaharaScenario(self.context)\n cluster = mock.MagicMock(id=42)\n self.clients(\"sahara\").clusters.get.side_effect = [\n cluster, sahara_base.APIException()\n ]\n\n scenario._delete_cluster(cluster)\n delete_mock = self.clients(\"sahara\").clusters.delete\n delete_mock.assert_called_once_with(42)\n\n cl_get_expected = mock.call(42)\n self.clients(\"sahara\").clusters.get.assert_has_calls([cl_get_expected,\n cl_get_expected])\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"sahara.delete_cluster\")\n\n @mock.patch(SAHARA_UTILS + \".SaharaScenario.generate_random_name\",\n return_value=\"42\")\n def test_create_output_ds(self, mock_generate_random_name):\n self.context.update({\n \"sahara\": {\n \"output_conf\": {\n \"output_type\": \"hdfs\",\n \"output_url_prefix\": \"hdfs://test_out/\"\n }\n }\n })\n\n scenario = utils.SaharaScenario(self.context)\n scenario._create_output_ds()\n\n self.clients(\"sahara\").data_sources.create.assert_called_once_with(\n name=\"42\",\n description=\"\",\n data_source_type=\"hdfs\",\n url=\"hdfs://test_out/42\"\n )\n\n @mock.patch(SAHARA_UTILS + \".SaharaScenario.generate_random_name\",\n return_value=\"42\")\n def test_create_output_ds_swift(self, mock_generate_random_name):\n self.context.update({\n \"sahara\": {\n \"output_conf\": {\n \"output_type\": \"swift\",\n 
\"output_url_prefix\": \"swift://test_out/\"\n }\n }\n })\n\n scenario = utils.SaharaScenario(self.context)\n self.assertRaises(exceptions.RallyException,\n scenario._create_output_ds)\n\n def test_run_job_execution(self):\n self.clients(\"sahara\").job_executions.get.side_effect = [\n mock.MagicMock(info={\"status\": \"pending\"}, id=\"42\"),\n mock.MagicMock(info={\"status\": \"SUCCESS\"}, id=\"42\")]\n\n self.clients(\"sahara\").job_executions.create.return_value = (\n mock.MagicMock(id=\"42\"))\n\n scenario = utils.SaharaScenario(self.context)\n scenario._run_job_execution(job_id=\"test_job_id\",\n cluster_id=\"test_cluster_id\",\n input_id=\"test_input_id\",\n output_id=\"test_output_id\",\n configs={\"k\": \"v\"},\n job_idx=0)\n\n self.clients(\"sahara\").job_executions.create.assert_called_once_with(\n job_id=\"test_job_id\",\n cluster_id=\"test_cluster_id\",\n input_id=\"test_input_id\",\n output_id=\"test_output_id\",\n configs={\"k\": \"v\"}\n )\n\n je_get_expected = mock.call(\"42\")\n self.clients(\"sahara\").job_executions.get.assert_has_calls(\n [je_get_expected, je_get_expected]\n )\n\n def test_run_job_execution_fail(self):\n self.clients(\"sahara\").job_executions.get.side_effect = [\n mock.MagicMock(info={\"status\": \"pending\"}, id=\"42\"),\n mock.MagicMock(info={\"status\": \"killed\"}, id=\"42\")]\n\n self.clients(\"sahara\").job_executions.create.return_value = (\n mock.MagicMock(id=\"42\"))\n\n scenario = utils.SaharaScenario(self.context)\n self.assertRaises(exceptions.RallyException,\n scenario._run_job_execution,\n job_id=\"test_job_id\",\n cluster_id=\"test_cluster_id\",\n input_id=\"test_input_id\",\n output_id=\"test_output_id\",\n configs={\"k\": \"v\"},\n job_idx=0)\n\n self.clients(\"sahara\").job_executions.create.assert_called_once_with(\n job_id=\"test_job_id\",\n cluster_id=\"test_cluster_id\",\n input_id=\"test_input_id\",\n output_id=\"test_output_id\",\n configs={\"k\": \"v\"}\n )\n" }, { "alpha_fraction": 0.5546615123748779, 
"alphanum_fraction": 0.5602023601531982, "avg_line_length": 38.684513092041016, "blob_id": "43b02e36d2dbd3e4eda64d5152fe2a3c7f0f7c33", "content_id": "796e39f26dabd7549390ff4317f8e36a52fbedaf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20755, "license_type": "permissive", "max_line_length": 79, "num_lines": 523, "path": "/tests/unit/task/contexts/keystone/test_users.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\n\nfrom rally_openstack.common import credential as oscredential\nfrom rally_openstack.task.contexts.keystone import users\nfrom tests.unit import test\n\nfrom rally_openstack.common import consts\n\nCTX = \"rally_openstack.task.contexts.keystone.users\"\n\n\nclass UserGeneratorBaseTestCase(test.ScenarioTestCase):\n def setUp(self):\n super(UserGeneratorBaseTestCase, self).setUp()\n self.osclients_patcher = mock.patch(\"%s.osclients\" % CTX)\n self.osclients = self.osclients_patcher.start()\n self.addCleanup(self.osclients_patcher.stop)\n\n self.deployment_uuid = \"deployment_id\"\n\n self.admin_cred = {\n \"username\": \"root\", \"password\": \"qwerty\",\n \"auth_url\": \"https://example.com\",\n \"project_domain_name\": \"foo\",\n \"user_domain_name\": \"bar\"}\n\n self.platforms = {\n \"openstack\": {\n \"admin\": self.admin_cred,\n \"users\": []\n }\n }\n\n self.context.update({\n \"config\": {\"users\": {}},\n \"env\": {\"platforms\": self.platforms},\n \"task\": {\"uuid\": \"task_id\",\n \"deployment_uuid\": self.deployment_uuid}\n })\n\n def test___init__for_new_users(self):\n self.context[\"config\"][\"users\"] = {\n \"tenants\": 1, \"users_per_tenant\": 1,\n \"resource_management_workers\": 1}\n\n user_generator = users.UserGenerator(self.context)\n\n self.assertEqual([], user_generator.existing_users)\n self.assertEqual(self.admin_cred[\"project_domain_name\"],\n user_generator.config[\"project_domain\"])\n self.assertEqual(self.admin_cred[\"user_domain_name\"],\n user_generator.config[\"user_domain\"])\n\n # the case #2 - existing users are presented in deployment but\n # the user forces to create new ones\n self.platforms[\"openstack\"][\"users\"] = [mock.Mock()]\n\n user_generator = users.UserGenerator(self.context)\n\n self.assertEqual([], user_generator.existing_users)\n 
self.assertEqual(self.admin_cred[\"project_domain_name\"],\n user_generator.config[\"project_domain\"])\n self.assertEqual(self.admin_cred[\"user_domain_name\"],\n user_generator.config[\"user_domain\"])\n\n def test___init__for_existing_users(self):\n foo_user = mock.Mock()\n\n self.platforms[\"openstack\"][\"users\"] = [foo_user]\n\n user_generator = users.UserGenerator(self.context)\n\n self.assertEqual([foo_user], user_generator.existing_users)\n self.assertEqual({\"user_choice_method\": \"random\"},\n user_generator.config)\n\n # the case #2: the config with `user_choice_method` option\n self.context[\"config\"][\"users\"] = {\"user_choice_method\": \"foo\"}\n\n user_generator = users.UserGenerator(self.context)\n\n self.assertEqual([foo_user], user_generator.existing_users)\n self.assertEqual({\"user_choice_method\": \"foo\"}, user_generator.config)\n\n def test_setup(self):\n user_generator = users.UserGenerator(self.context)\n user_generator.use_existing_users = mock.Mock()\n user_generator.create_users = mock.Mock()\n\n # no existing users -> new users should be created\n user_generator.existing_users = []\n\n user_generator.setup()\n\n user_generator.create_users.assert_called_once_with()\n self.assertFalse(user_generator.use_existing_users.called)\n\n user_generator.create_users.reset_mock()\n user_generator.use_existing_users.reset_mock()\n\n # existing_users is not empty -> existing users should be created\n user_generator.existing_users = [mock.Mock()]\n\n user_generator.setup()\n\n user_generator.use_existing_users.assert_called_once_with()\n self.assertFalse(user_generator.create_users.called)\n\n def test_cleanup(self):\n user_generator = users.UserGenerator(self.context)\n user_generator._remove_default_security_group = mock.Mock()\n user_generator._delete_users = mock.Mock()\n user_generator._delete_tenants = mock.Mock()\n\n # In case if existing users nothing should be done\n user_generator.existing_users = [mock.Mock]\n\n 
user_generator.cleanup()\n\n self.assertFalse(user_generator._remove_default_security_group.called)\n self.assertFalse(user_generator._delete_users.called)\n self.assertFalse(user_generator._delete_tenants.called)\n\n # In case when new users were created, the proper cleanup should be\n # performed\n user_generator.existing_users = []\n\n user_generator.cleanup()\n\n user_generator._remove_default_security_group.assert_called_once_with()\n user_generator._delete_users.assert_called_once_with()\n user_generator._delete_tenants.assert_called_once_with()\n\n\nclass UserGeneratorForExistingUsersTestCase(test.ScenarioTestCase):\n def setUp(self):\n super(UserGeneratorForExistingUsersTestCase, self).setUp()\n self.osclients_patcher = mock.patch(\"%s.osclients\" % CTX)\n self.osclients = self.osclients_patcher.start()\n self.addCleanup(self.osclients_patcher.stop)\n\n self.deployment_uuid = \"deployment_id\"\n\n self.platforms = {\n \"openstack\": {\n \"admin\": {\"username\": \"root\",\n \"password\": \"qwerty\",\n \"auth_url\": \"https://example.com\"},\n \"users\": []\n }\n }\n self.context.update({\n \"config\": {\"users\": {}},\n \"users\": [],\n \"env\": {\"platforms\": self.platforms},\n \"task\": {\"uuid\": \"task_id\",\n \"deployment_uuid\": self.deployment_uuid}\n })\n\n @mock.patch(\"%s.credential.OpenStackCredential\" % CTX)\n @mock.patch(\"%s.osclients.Clients\" % CTX)\n def test_use_existing_users(self, mock_clients,\n mock_open_stack_credential):\n user1 = {\"tenant_name\": \"proj\", \"username\": \"usr\",\n \"password\": \"pswd\", \"auth_url\": \"https://example.com\"}\n user2 = {\"tenant_name\": \"proj\", \"username\": \"usr\",\n \"password\": \"pswd\", \"auth_url\": \"https://example.com\"}\n user3 = {\"tenant_name\": \"proj\", \"username\": \"usr\",\n \"password\": \"pswd\", \"auth_url\": \"https://example.com\"}\n\n user_list = [user1, user2, user3]\n\n class AuthRef(object):\n USER_ID_COUNT = 0\n PROJECT_ID_COUNT = 0\n\n @property\n def 
user_id(self):\n self.USER_ID_COUNT += 1\n return \"u%s\" % self.USER_ID_COUNT\n\n @property\n def project_id(self):\n self.PROJECT_ID_COUNT += 1\n return \"p%s\" % (self.PROJECT_ID_COUNT % 2)\n\n auth_ref = AuthRef()\n\n mock_clients.return_value.keystone.auth_ref = auth_ref\n\n self.platforms[\"openstack\"][\"users\"] = user_list\n\n user_generator = users.UserGenerator(self.context)\n user_generator.setup()\n\n self.assertIn(\"users\", self.context)\n self.assertIn(\"tenants\", self.context)\n self.assertIn(\"user_choice_method\", self.context)\n self.assertEqual(\"random\", self.context[\"user_choice_method\"])\n\n creds = mock_open_stack_credential.return_value\n self.assertEqual(\n [{\"id\": \"u1\", \"credential\": creds, \"tenant_id\": \"p1\"},\n {\"id\": \"u2\", \"credential\": creds, \"tenant_id\": \"p0\"},\n {\"id\": \"u3\", \"credential\": creds, \"tenant_id\": \"p1\"}],\n self.context[\"users\"]\n )\n self.assertEqual({\"p0\": {\"id\": \"p0\", \"name\": creds.tenant_name},\n \"p1\": {\"id\": \"p1\", \"name\": creds.tenant_name}},\n self.context[\"tenants\"])\n\n\nclass UserGeneratorForNewUsersTestCase(test.ScenarioTestCase):\n\n tenants_num = 1\n users_per_tenant = 5\n users_num = tenants_num * users_per_tenant\n threads = 10\n\n def setUp(self):\n super(UserGeneratorForNewUsersTestCase, self).setUp()\n self.osclients_patcher = mock.patch(\"%s.osclients\" % CTX)\n self.osclients = self.osclients_patcher.start()\n self.addCleanup(self.osclients_patcher.stop)\n\n # Force the case of creating new users\n self.platforms = {\n \"openstack\": {\n \"admin\": {\"username\": \"root\",\n \"password\": \"qwerty\",\n \"auth_url\": \"https://example.com\"},\n \"users\": []\n }\n }\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": self.tenants_num,\n \"users_per_tenant\": self.users_per_tenant,\n \"resource_management_workers\": self.threads,\n }\n },\n \"env\": {\"platforms\": self.platforms},\n \"users\": [],\n \"task\": {\"uuid\": 
\"task_id\", \"deployment_uuid\": \"dep_uuid\"}\n })\n\n def test__remove_default_security_group(self):\n\n self.context.update(\n tenants={\n \"tenant-1\": {},\n \"tenant-2\": {}\n }\n )\n\n self.osclients.Clients.return_value = mock.Mock()\n neutron = self.osclients.Clients.return_value.neutron.return_value\n neutron.list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"security-group\"}]}\n\n neutron.list_security_groups.return_value = {\n \"security_groups\": [\n {\"id\": \"id-1\", \"name\": \"default\", \"tenant_id\": \"tenant-1\"},\n {\"id\": \"id-2\", \"name\": \"default\", \"tenant_id\": \"tenant-2\"},\n {\"id\": \"id-3\", \"name\": \"default\", \"tenant_id\": \"tenant-3\"}\n ]\n }\n\n users.UserGenerator(self.context)._remove_default_security_group()\n\n neutron.list_security_groups.assert_called_once_with(name=\"default\")\n self.assertEqual(\n [mock.call(\"id-1\"), mock.call(\"id-2\")],\n neutron.delete_security_group.call_args_list\n )\n\n def test__remove_default_security_group_no_sg(self):\n self.context.update(\n tenants={\n \"tenant-1\": {},\n \"tenant-2\": {}\n }\n )\n\n self.osclients.Clients.return_value = mock.Mock()\n neutron = self.osclients.Clients.return_value.neutron.return_value\n neutron.list_extensions.return_value = {\"extensions\": []}\n\n neutron.list_security_groups.return_value = {\n \"security_groups\": [\n {\"id\": \"id-1\", \"name\": \"default\", \"tenant_id\": \"tenant-1\"},\n {\"id\": \"id-2\", \"name\": \"default\", \"tenant_id\": \"tenant-2\"},\n {\"id\": \"id-3\", \"name\": \"default\", \"tenant_id\": \"tenant-3\"}\n ]\n }\n\n users.UserGenerator(self.context)._remove_default_security_group()\n\n self.assertFalse(neutron.list_security_groups.called)\n self.assertFalse(neutron.delete_security_group.called)\n\n @mock.patch(\"%s.identity\" % CTX)\n def test__create_tenants(self, mock_identity):\n self.context[\"config\"][\"users\"][\"tenants\"] = 1\n user_generator = users.UserGenerator(self.context)\n tenants = 
user_generator._create_tenants(1)\n self.assertEqual(1, len(tenants))\n id, tenant = tenants.popitem()\n self.assertIn(\"name\", tenant)\n\n @mock.patch(\"%s.identity\" % CTX)\n def test__create_users(self, mock_identity):\n self.context[\"config\"][\"users\"][\"users_per_tenant\"] = 2\n user_generator = users.UserGenerator(self.context)\n user_generator.context[\"tenants\"] = {\"t1\": {\"id\": \"t1\", \"name\": \"t1\"},\n \"t2\": {\"id\": \"t2\", \"name\": \"t2\"}}\n users_ = user_generator._create_users(4)\n self.assertEqual(4, len(users_))\n for user in users_:\n self.assertIn(\"id\", user)\n self.assertIn(\"credential\", user)\n\n @mock.patch(\"%s.identity\" % CTX)\n def test__create_users_user_password(self, mock_identity):\n self.context[\"config\"][\"users\"][\"users_per_tenant\"] = 2\n self.context[\"config\"][\"users\"][\"user_password\"] = \"TrustMe\"\n user_generator = users.UserGenerator(self.context)\n user_generator.context[\"tenants\"] = {\"t1\": {\"id\": \"t1\", \"name\": \"t1\"},\n \"t2\": {\"id\": \"t2\", \"name\": \"t2\"}}\n users_ = user_generator._create_users(4)\n self.assertEqual(4, len(users_))\n for user in users_:\n self.assertIn(\"id\", user)\n self.assertIn(\"credential\", user)\n self.assertEqual(\"TrustMe\", user[\"credential\"][\"password\"])\n\n @mock.patch(\"%s.identity\" % CTX)\n def test__delete_tenants(self, mock_identity):\n user_generator = users.UserGenerator(self.context)\n user_generator.context[\"tenants\"] = {\"t1\": {\"id\": \"t1\", \"name\": \"t1\"},\n \"t2\": {\"id\": \"t2\", \"name\": \"t2\"}}\n user_generator._delete_tenants()\n self.assertEqual(0, len(user_generator.context[\"tenants\"]))\n\n @mock.patch(\"%s.identity\" % CTX)\n def test__delete_tenants_failure(self, mock_identity):\n identity_service = mock_identity.Identity.return_value\n identity_service.delete_project.side_effect = Exception()\n user_generator = users.UserGenerator(self.context)\n user_generator.context[\"tenants\"] = {\"t1\": {\"id\": \"t1\", 
\"name\": \"t1\"},\n \"t2\": {\"id\": \"t2\", \"name\": \"t2\"}}\n user_generator._delete_tenants()\n self.assertEqual(0, len(user_generator.context[\"tenants\"]))\n\n @mock.patch(\"%s.identity\" % CTX)\n def test__delete_users(self, mock_identity):\n user_generator = users.UserGenerator(self.context)\n user1 = mock.MagicMock()\n user2 = mock.MagicMock()\n user_generator.context[\"users\"] = [user1, user2]\n user_generator._delete_users()\n self.assertEqual(0, len(user_generator.context[\"users\"]))\n\n @mock.patch(\"%s.identity\" % CTX)\n def test__delete_users_failure(self, mock_identity):\n identity_service = mock_identity.Identity.return_value\n identity_service.delete_user.side_effect = Exception()\n user_generator = users.UserGenerator(self.context)\n user1 = mock.MagicMock()\n user2 = mock.MagicMock()\n user_generator.context[\"users\"] = [user1, user2]\n user_generator._delete_users()\n self.assertEqual(0, len(user_generator.context[\"users\"]))\n\n @mock.patch(\"%s.identity\" % CTX)\n def test_setup_and_cleanup(self, mock_identity):\n with users.UserGenerator(self.context) as ctx:\n\n ctx.setup()\n\n self.assertEqual(self.users_num,\n len(ctx.context[\"users\"]))\n self.assertEqual(self.tenants_num,\n len(ctx.context[\"tenants\"]))\n\n self.assertEqual(\"random\", ctx.context[\"user_choice_method\"])\n\n # Cleanup (called by content manager)\n self.assertEqual(0, len(ctx.context[\"users\"]))\n self.assertEqual(0, len(ctx.context[\"tenants\"]))\n\n @mock.patch(\"rally.common.broker.LOG.warning\")\n @mock.patch(\"%s.identity\" % CTX)\n def test_setup_and_cleanup_with_error_during_create_user(\n self, mock_identity, mock_log_warning):\n identity_service = mock_identity.Identity.return_value\n identity_service.create_user.side_effect = Exception()\n with users.UserGenerator(self.context) as ctx:\n self.assertRaises(exceptions.ContextSetupFailure, ctx.setup)\n mock_log_warning.assert_called_with(\n \"Failed to consume a task from the queue: \")\n\n # Ensure 
that tenants get deleted anyway\n self.assertEqual(0, len(ctx.context[\"tenants\"]))\n\n @mock.patch(\"%s.identity\" % CTX)\n def test_users_and_tenants_in_context(self, mock_identity):\n identity_service = mock_identity.Identity.return_value\n\n credential = oscredential.OpenStackCredential(\n \"foo_url\", \"foo\", \"foo_pass\",\n https_insecure=True,\n https_cacert=\"cacert\")\n tmp_context = dict(self.context)\n tmp_context[\"config\"][\"users\"] = {\"tenants\": 1,\n \"users_per_tenant\": 2,\n \"resource_management_workers\": 1}\n tmp_context[\"env\"][\"platforms\"][\"openstack\"][\"admin\"] = credential\n\n credential_dict = credential.to_dict()\n user_list = [mock.MagicMock(id=\"id_%d\" % i)\n for i in range(self.users_num)]\n identity_service.create_user.side_effect = user_list\n\n with users.UserGenerator(tmp_context) as ctx:\n ctx.generate_random_name = mock.Mock()\n ctx.setup()\n\n create_tenant_calls = []\n for i, t in enumerate(ctx.context[\"tenants\"]):\n create_tenant_calls.append(\n mock.call(ctx.generate_random_name.return_value,\n ctx.config[\"project_domain\"]))\n\n for user in ctx.context[\"users\"]:\n self.assertEqual(set([\"id\", \"credential\", \"tenant_id\"]),\n set(user.keys()))\n\n user_credential_dict = user[\"credential\"].to_dict()\n\n excluded_keys = [\"auth_url\", \"username\", \"password\",\n \"tenant_name\", \"region_name\",\n \"project_domain_name\",\n \"user_domain_name\", \"permission\"]\n for key in (set(credential_dict.keys()) - set(excluded_keys)):\n self.assertEqual(credential_dict[key],\n user_credential_dict[key],\n \"The key '%s' differs.\" % key)\n\n tenants_ids = []\n for t in ctx.context[\"tenants\"].keys():\n tenants_ids.append(t)\n\n for (user, tenant_id, orig_user) in zip(ctx.context[\"users\"],\n tenants_ids, user_list):\n self.assertEqual(orig_user.id, user[\"id\"])\n self.assertEqual(tenant_id, user[\"tenant_id\"])\n\n @mock.patch(\"%s.identity\" % CTX)\n def test_users_contains_correct_endpoint_type(self, 
mock_identity):\n credential = oscredential.OpenStackCredential(\n \"foo_url\", \"foo\", \"foo_pass\",\n endpoint_type=consts.EndpointType.INTERNAL)\n config = {\n \"config\": {\n \"users\": {\n \"tenants\": 1,\n \"users_per_tenant\": 2,\n \"resource_management_workers\": 1\n }\n },\n \"env\": {\"platforms\": {\"openstack\": {\"admin\": credential,\n \"users\": []}}},\n \"task\": {\"uuid\": \"task_id\", \"deployment_uuid\": \"deployment_id\"}\n }\n\n user_generator = users.UserGenerator(config)\n users_ = user_generator._create_users(2)\n\n for user in users_:\n self.assertEqual(\"internal\", user[\"credential\"].endpoint_type)\n\n @mock.patch(\"%s.identity\" % CTX)\n def test_users_contains_default_endpoint_type(self, mock_identity):\n credential = oscredential.OpenStackCredential(\n \"foo_url\", \"foo\", \"foo_pass\")\n config = {\n \"config\": {\n \"users\": {\n \"tenants\": 1,\n \"users_per_tenant\": 2,\n \"resource_management_workers\": 1\n }\n },\n \"env\": {\"platforms\": {\"openstack\": {\"admin\": credential,\n \"users\": []}}},\n \"task\": {\"uuid\": \"task_id\", \"deployment_uuid\": \"deployment_id\"}\n }\n\n user_generator = users.UserGenerator(config)\n users_ = user_generator._create_users(2)\n\n for user in users_:\n self.assertEqual(\"public\", user[\"credential\"].endpoint_type)\n" }, { "alpha_fraction": 0.6008511781692505, "alphanum_fraction": 0.606069803237915, "avg_line_length": 40.377357482910156, "blob_id": "d873d6016addaeaedc810f06bccb1af1b6ecf071", "content_id": "0e48908a5c2966ce91426ca0bbd603eacab66ae4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19737, "license_type": "permissive", "max_line_length": 79, "num_lines": 477, "path": "/tests/unit/common/services/identity/test_keystone_v2.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you 
may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\nimport uuid\n\nimport ddt\n\nfrom rally_openstack.common.services.identity import identity\nfrom rally_openstack.common.services.identity import keystone_v2\nfrom tests.unit import test\n\n\nPATH = \"rally_openstack.common.services.identity.keystone_v2\"\n\n\[email protected]\nclass KeystoneV2ServiceTestCase(test.TestCase):\n def setUp(self):\n super(KeystoneV2ServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.kc = self.clients.keystone.return_value\n self.name_generator = mock.MagicMock()\n self.service = keystone_v2.KeystoneV2Service(\n self.clients, name_generator=self.name_generator)\n\n def test_create_tenant(self):\n name = \"name\"\n tenant = self.service.create_tenant(name)\n\n self.assertEqual(tenant, self.kc.tenants.create.return_value)\n self.kc.tenants.create.assert_called_once_with(name)\n\n @ddt.data({\"tenant_id\": \"fake_id\", \"name\": True, \"enabled\": True,\n \"description\": True},\n {\"tenant_id\": \"fake_id\", \"name\": \"some\", \"enabled\": False,\n \"description\": \"descr\"})\n @ddt.unpack\n def test_update_tenant(self, tenant_id, name, enabled, description):\n\n self.name_generator.side_effect = (\"foo\", \"bar\")\n self.service.update_tenant(tenant_id,\n name=name,\n description=description,\n enabled=enabled)\n\n name = \"foo\" if name is True else name\n description = \"bar\" if description is True else description\n\n self.kc.tenants.update.assert_called_once_with(\n tenant_id, name=name, 
description=description, enabled=enabled)\n\n def test_delete_tenant(self):\n tenant_id = \"fake_id\"\n self.service.delete_tenant(tenant_id)\n self.kc.tenants.delete.assert_called_once_with(tenant_id)\n\n def test_list_tenants(self):\n self.assertEqual(self.kc.tenants.list.return_value,\n self.service.list_tenants())\n self.kc.tenants.list.assert_called_once_with()\n\n def test_get_tenant(self):\n tenant_id = \"fake_id\"\n self.service.get_tenant(tenant_id)\n self.kc.tenants.get.assert_called_once_with(tenant_id)\n\n def test_create_user(self):\n name = \"name\"\n password = \"passwd\"\n email = \"[email protected]\"\n tenant_id = \"project\"\n\n user = self.service.create_user(name, password=password, email=email,\n tenant_id=tenant_id)\n\n self.assertEqual(user, self.kc.users.create.return_value)\n self.kc.users.create.assert_called_once_with(\n name=name, password=password, email=email, tenant_id=tenant_id,\n enabled=True)\n\n def test_create_users(self):\n self.service.create_user = mock.MagicMock()\n\n n = 2\n tenant_id = \"some\"\n self.assertEqual([self.service.create_user.return_value] * n,\n self.service.create_users(number_of_users=n,\n tenant_id=tenant_id))\n self.assertEqual([mock.call(tenant_id=tenant_id)] * n,\n self.service.create_user.call_args_list)\n\n def test_update_user_with_wrong_params(self):\n user_id = \"fake_id\"\n card_with_cvv2 = \"1234 5678 9000 0000 : 666\"\n self.assertRaises(NotImplementedError, self.service.update_user,\n user_id, card_with_cvv2=card_with_cvv2)\n\n def test_update_user(self):\n user_id = \"fake_id\"\n name = \"new name\"\n email = \"[email protected]\"\n enabled = True\n self.service.update_user(user_id, name=name, email=email,\n enabled=enabled)\n self.kc.users.update.assert_called_once_with(\n user_id, name=name, email=email, enabled=enabled)\n\n def test_update_user_password(self):\n user_id = \"fake_id\"\n password = \"qwerty123\"\n self.service.update_user_password(user_id, password=password)\n 
self.kc.users.update_password.assert_called_once_with(\n user_id, password=password)\n\n @ddt.data({\"name\": None, \"service_type\": None, \"description\": None},\n {\"name\": \"some\", \"service_type\": \"st\", \"description\": \"d\"})\n @ddt.unpack\n def test_create_service(self, name, service_type, description):\n self.assertEqual(self.kc.services.create.return_value,\n self.service.create_service(name=name,\n service_type=service_type,\n description=description))\n name = name or self.name_generator.return_value\n service_type = service_type or \"rally_test_type\"\n description = description or self.name_generator.return_value\n self.kc.services.create.assert_called_once_with(\n name, service_type=service_type, description=description)\n\n def test_create_role(self):\n name = \"some\"\n self.service.create_role(name)\n self.kc.roles.create.assert_called_once_with(name)\n\n def test_add_role(self):\n role_id = \"fake_id\"\n user_id = \"user_id\"\n tenant_id = \"tenant_id\"\n\n self.service.add_role(role_id, user_id=user_id, tenant_id=tenant_id)\n self.kc.roles.add_user_role.assert_called_once_with(\n user=user_id, role=role_id, tenant=tenant_id)\n\n def test_list_roles(self):\n self.assertEqual(self.kc.roles.list.return_value,\n self.service.list_roles())\n self.kc.roles.list.assert_called_once_with()\n\n def test_list_roles_for_user(self):\n user_id = \"user_id\"\n tenant_id = \"tenant_id\"\n self.assertEqual(self.kc.roles.roles_for_user.return_value,\n self.service.list_roles_for_user(user_id,\n tenant_id=tenant_id))\n self.kc.roles.roles_for_user.assert_called_once_with(user_id,\n tenant_id)\n\n def test_revoke_role(self):\n role_id = \"fake_id\"\n user_id = \"user_id\"\n tenant_id = \"tenant_id\"\n\n self.service.revoke_role(role_id, user_id=user_id,\n tenant_id=tenant_id)\n\n self.kc.roles.remove_user_role.assert_called_once_with(\n user=user_id, role=role_id, tenant=tenant_id)\n\n def test_create_ec2credentials(self):\n user_id = \"fake_id\"\n tenant_id = 
\"fake_id\"\n\n self.assertEqual(self.kc.ec2.create.return_value,\n self.service.create_ec2credentials(\n user_id, tenant_id=tenant_id))\n self.kc.ec2.create.assert_called_once_with(user_id,\n tenant_id=tenant_id)\n\n\[email protected]\nclass UnifiedKeystoneV2ServiceTestCase(test.TestCase):\n def setUp(self):\n super(UnifiedKeystoneV2ServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.service = keystone_v2.UnifiedKeystoneV2Service(self.clients)\n self.service._impl = mock.MagicMock()\n\n def test_init_identity_service(self):\n self.clients.keystone.return_value.version = \"v2.0\"\n self.assertIsInstance(identity.Identity(self.clients)._impl,\n keystone_v2.UnifiedKeystoneV2Service)\n\n def test__check_domain(self):\n self.service._check_domain(\"Default\")\n self.service._check_domain(\"default\")\n self.assertRaises(NotImplementedError, self.service._check_domain,\n \"non-default\")\n\n def test__unify_tenant(self):\n class KeystoneV2Tenant(object):\n def __init__(self, domain_id=\"domain_id\"):\n self.id = str(uuid.uuid4())\n self.name = str(uuid.uuid4())\n self.domain_id = domain_id\n\n tenant = KeystoneV2Tenant()\n project = self.service._unify_tenant(tenant)\n self.assertIsInstance(project, identity.Project)\n self.assertEqual(tenant.id, project.id)\n self.assertEqual(tenant.name, project.name)\n self.assertEqual(\"default\", project.domain_id)\n self.assertNotEqual(tenant.domain_id, project.domain_id)\n\n def test__unify_user(self):\n class KeystoneV2User(object):\n def __init__(self, tenantId=None):\n self.id = str(uuid.uuid4())\n self.name = str(uuid.uuid4())\n if tenantId is not None:\n self.tenantId = tenantId\n\n user = KeystoneV2User()\n\n unified_user = self.service._unify_user(user)\n self.assertIsInstance(unified_user, identity.User)\n self.assertEqual(user.id, unified_user.id)\n self.assertEqual(user.name, unified_user.name)\n self.assertEqual(\"default\", unified_user.domain_id)\n self.assertIsNone(unified_user.project_id)\n\n 
tenant_id = \"tenant_id\"\n user = KeystoneV2User(tenantId=tenant_id)\n unified_user = self.service._unify_user(user)\n self.assertIsInstance(unified_user, identity.User)\n self.assertEqual(user.id, unified_user.id)\n self.assertEqual(user.name, unified_user.name)\n self.assertEqual(\"default\", unified_user.domain_id)\n self.assertEqual(tenant_id, unified_user.project_id)\n\n @mock.patch(\"%s.UnifiedKeystoneV2Service._check_domain\" % PATH)\n @mock.patch(\"%s.UnifiedKeystoneV2Service._unify_tenant\" % PATH)\n def test_create_project(\n self, mock_unified_keystone_v2_service__unify_tenant,\n mock_unified_keystone_v2_service__check_domain):\n mock_unify_tenant = mock_unified_keystone_v2_service__unify_tenant\n mock_check_domain = mock_unified_keystone_v2_service__check_domain\n name = \"name\"\n\n self.assertEqual(mock_unify_tenant.return_value,\n self.service.create_project(name))\n mock_check_domain.assert_called_once_with(\"Default\")\n mock_unify_tenant.assert_called_once_with(\n self.service._impl.create_tenant.return_value)\n self.service._impl.create_tenant.assert_called_once_with(name)\n\n def test_update_project(self):\n tenant_id = \"fake_id\"\n name = \"name\"\n description = \"descr\"\n enabled = False\n\n self.service.update_project(project_id=tenant_id, name=name,\n description=description, enabled=enabled)\n self.service._impl.update_tenant.assert_called_once_with(\n tenant_id=tenant_id, name=name, description=description,\n enabled=enabled)\n\n def test_delete_project(self):\n tenant_id = \"fake_id\"\n self.service.delete_project(tenant_id)\n self.service._impl.delete_tenant.assert_called_once_with(tenant_id)\n\n @mock.patch(\"%s.UnifiedKeystoneV2Service._unify_tenant\" % PATH)\n def test_get_project(self,\n mock_unified_keystone_v2_service__unify_tenant):\n mock_unify_tenant = mock_unified_keystone_v2_service__unify_tenant\n tenant_id = \"id\"\n\n self.assertEqual(mock_unify_tenant.return_value,\n self.service.get_project(tenant_id))\n 
mock_unify_tenant.assert_called_once_with(\n self.service._impl.get_tenant.return_value)\n self.service._impl.get_tenant.assert_called_once_with(tenant_id)\n\n @mock.patch(\"%s.UnifiedKeystoneV2Service._unify_tenant\" % PATH)\n def test_list_projects(self,\n mock_unified_keystone_v2_service__unify_tenant):\n mock_unify_tenant = mock_unified_keystone_v2_service__unify_tenant\n\n tenants = [mock.MagicMock()]\n self.service._impl.list_tenants.return_value = tenants\n\n self.assertEqual([mock_unify_tenant.return_value],\n self.service.list_projects())\n mock_unify_tenant.assert_called_once_with(tenants[0])\n\n @mock.patch(\"%s.UnifiedKeystoneV2Service._check_domain\" % PATH)\n @mock.patch(\"%s.UnifiedKeystoneV2Service._unify_user\" % PATH)\n def test_create_user(self, mock_unified_keystone_v2_service__unify_user,\n mock_unified_keystone_v2_service__check_domain):\n mock_check_domain = mock_unified_keystone_v2_service__check_domain\n mock_unify_user = mock_unified_keystone_v2_service__unify_user\n\n name = \"name\"\n password = \"passwd\"\n tenant_id = \"project\"\n\n self.assertEqual(mock_unify_user.return_value,\n self.service.create_user(name, password=password,\n project_id=tenant_id))\n mock_check_domain.assert_called_once_with(\"Default\")\n mock_unify_user.assert_called_once_with(\n self.service._impl.create_user.return_value)\n self.service._impl.create_user.assert_called_once_with(\n username=name, password=password, tenant_id=tenant_id,\n enabled=True)\n\n @mock.patch(\"%s.UnifiedKeystoneV2Service._check_domain\" % PATH)\n @mock.patch(\"%s.UnifiedKeystoneV2Service._unify_user\" % PATH)\n def test_create_users(self, mock_unified_keystone_v2_service__unify_user,\n mock_unified_keystone_v2_service__check_domain):\n mock_check_domain = mock_unified_keystone_v2_service__check_domain\n\n tenant_id = \"project\"\n n = 3\n domain_name = \"Default\"\n\n self.service.create_users(\n tenant_id, number_of_users=3,\n user_create_args={\"domain_name\": domain_name})\n 
mock_check_domain.assert_called_once_with(domain_name)\n self.service._impl.create_users.assert_called_once_with(\n tenant_id=tenant_id, number_of_users=n,\n user_create_args={\"domain_name\": domain_name})\n\n @mock.patch(\"%s.UnifiedKeystoneV2Service._unify_user\" % PATH)\n def test_list_users(self, mock_unified_keystone_v2_service__unify_user):\n mock_unify_user = mock_unified_keystone_v2_service__unify_user\n\n users = [mock.MagicMock()]\n self.service._impl.list_users.return_value = users\n\n self.assertEqual([mock_unify_user.return_value],\n self.service.list_users())\n mock_unify_user.assert_called_once_with(users[0])\n\n @ddt.data({\"user_id\": \"id\", \"enabled\": False, \"name\": \"Fake\",\n \"email\": \"[email protected]\", \"password\": \"pass\"},\n {\"user_id\": \"id\", \"enabled\": None, \"name\": None,\n \"email\": None, \"password\": None})\n @ddt.unpack\n def test_update_user(self, user_id, enabled, name, email, password):\n self.service.update_user(user_id, enabled=enabled, name=name,\n email=email, password=password)\n if password:\n self.service._impl.update_user_password.assert_called_once_with(\n user_id=user_id, password=password)\n\n args = {}\n if enabled is not None:\n args[\"enabled\"] = enabled\n if name is not None:\n args[\"name\"] = name\n if email is not None:\n args[\"email\"] = email\n\n if args:\n self.service._impl.update_user.assert_called_once_with(\n user_id, **args)\n\n @mock.patch(\"%s.UnifiedKeystoneV2Service._unify_service\" % PATH)\n def test_list_services(self,\n mock_unified_keystone_v2_service__unify_service):\n mock_unify_service = mock_unified_keystone_v2_service__unify_service\n\n services = [mock.MagicMock()]\n self.service._impl.list_services.return_value = services\n\n self.assertEqual([mock_unify_service.return_value],\n self.service.list_services())\n mock_unify_service.assert_called_once_with(services[0])\n\n @mock.patch(\"%s.UnifiedKeystoneV2Service._unify_role\" % PATH)\n def test_create_role(self, 
mock_unified_keystone_v2_service__unify_role):\n mock_unify_role = mock_unified_keystone_v2_service__unify_role\n name = \"some\"\n\n self.assertEqual(mock_unify_role.return_value,\n self.service.create_role(name))\n\n self.service._impl.create_role.assert_called_once_with(name)\n mock_unify_role.assert_called_once_with(\n self.service._impl.create_role.return_value)\n\n def test_add_role(self):\n\n role_id = \"fake_id\"\n user_id = \"user_id\"\n project_id = \"user_id\"\n\n self.service.add_role(role_id, user_id=user_id,\n project_id=project_id)\n\n self.service._impl.add_role.assert_called_once_with(\n user_id=user_id, role_id=role_id, tenant_id=project_id)\n\n def test_delete_role(self):\n role_id = \"fake_id\"\n self.service.delete_role(role_id)\n self.service._impl.delete_role.assert_called_once_with(role_id)\n\n def test_revoke_role(self):\n role_id = \"fake_id\"\n user_id = \"user_id\"\n project_id = \"user_id\"\n\n self.service.revoke_role(role_id, user_id=user_id,\n project_id=project_id)\n\n self.service._impl.revoke_role.assert_called_once_with(\n user_id=user_id, role_id=role_id, tenant_id=project_id)\n\n @mock.patch(\"%s.UnifiedKeystoneV2Service._unify_role\" % PATH)\n def test_list_roles(self, mock_unified_keystone_v2_service__unify_role):\n mock_unify_role = mock_unified_keystone_v2_service__unify_role\n\n roles = [mock.MagicMock()]\n another_roles = [mock.MagicMock()]\n self.service._impl.list_roles.return_value = roles\n self.service._impl.list_roles_for_user.return_value = another_roles\n\n # case 1\n self.assertEqual([mock_unify_role.return_value],\n self.service.list_roles())\n self.service._impl.list_roles.assert_called_once_with()\n mock_unify_role.assert_called_once_with(roles[0])\n self.assertFalse(self.service._impl.list_roles_for_user.called)\n\n self.service._impl.list_roles.reset_mock()\n mock_unify_role.reset_mock()\n\n # case 2\n user = \"user\"\n project = \"project\"\n self.assertEqual([mock_unify_role.return_value],\n 
self.service.list_roles(user_id=user,\n project_id=project))\n self.service._impl.list_roles_for_user.assert_called_once_with(\n user, tenant_id=project)\n self.assertFalse(self.service._impl.list_roles.called)\n mock_unify_role.assert_called_once_with(another_roles[0])\n\n # case 3\n self.assertRaises(NotImplementedError, self.service.list_roles,\n domain_name=\"some\")\n\n def test_create_ec2credentials(self):\n user_id = \"id\"\n tenant_id = \"tenant-id\"\n\n self.assertEqual(self.service._impl.create_ec2credentials.return_value,\n self.service.create_ec2credentials(\n user_id=user_id, project_id=tenant_id))\n\n self.service._impl.create_ec2credentials.assert_called_once_with(\n user_id=user_id, tenant_id=tenant_id)\n" }, { "alpha_fraction": 0.6830122470855713, "alphanum_fraction": 0.6870986819267273, "avg_line_length": 34.6875, "blob_id": "f02c793fc2b4f2f996c0136b01a35f11bedf9ac4", "content_id": "e3436835ad25e658753c2b9395886ca2c1394d89", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1713, "license_type": "permissive", "max_line_length": 76, "num_lines": 48, "path": "/rally_openstack/task/contexts/network/existing_network.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task import context\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"existing_network\", platform=\"openstack\", order=349)\nclass ExistingNetwork(context.OpenStackContext):\n \"\"\"This context supports using existing networks in Rally.\n\n This context should be used on a deployment with existing users.\n \"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"additionalProperties\": False\n }\n\n def setup(self):\n for user, tenant_id in self._iterate_per_tenants():\n clients = osclients.Clients(user[\"credential\"])\n self.context[\"tenants\"][tenant_id][\"networks\"] = (\n clients.neutron().list_networks()[\"networks\"]\n )\n\n self.context[\"tenants\"][tenant_id][\"subnets\"] = (\n clients.neutron().list_subnets()[\"subnets\"]\n )\n\n def cleanup(self):\n \"\"\"Networks were not created by Rally, so nothing to do.\"\"\"\n" }, { "alpha_fraction": 0.6508753895759583, "alphanum_fraction": 0.6529351472854614, "avg_line_length": 45.238094329833984, "blob_id": "4ddc392656113b64ad184e0d42f938c4d7579c73", "content_id": "a9a79ec26c990221d10633b7671f6e56570a322b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3884, "license_type": "permissive", "max_line_length": 78, "num_lines": 84, "path": "/tests/unit/task/scenarios/watcher/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016: Servionica LTD.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally.common import cfg\nfrom rally_openstack.task.scenarios.watcher import utils\nfrom tests.unit import test\n\nCONF = cfg.CONF\n\n\nclass WatcherScenarioTestCase(test.ScenarioTestCase):\n\n def test_create_audit_template(self):\n watcher_scenario = utils.WatcherScenario(self.context)\n watcher_scenario.generate_random_name = mock.MagicMock(\n return_value=\"mock_name\")\n watcher_scenario._create_audit_template(\"fake_goal\", \"fake_strategy\")\n self.admin_clients(\n \"watcher\").audit_template.create.assert_called_once_with(\n goal=\"fake_goal\", strategy=\"fake_strategy\",\n name=\"mock_name\")\n self._test_atomic_action_timer(watcher_scenario.atomic_actions(),\n \"watcher.create_audit_template\")\n\n def test_list_audit_templates(self):\n audit_templates_list = []\n watcher_scenario = utils.WatcherScenario(self.context)\n self.admin_clients(\n \"watcher\").audit_template.list.return_value = audit_templates_list\n return_audit_templates_list = watcher_scenario._list_audit_templates()\n self.assertEqual(audit_templates_list, return_audit_templates_list)\n self._test_atomic_action_timer(watcher_scenario.atomic_actions(),\n \"watcher.list_audit_templates\")\n\n def test_delete_audit_template(self):\n watcher_scenario = utils.WatcherScenario(self.context)\n watcher_scenario._delete_audit_template(\"fake_audit_template\")\n self.admin_clients(\n \"watcher\").audit_template.delete.assert_called_once_with(\n \"fake_audit_template\")\n self._test_atomic_action_timer(watcher_scenario.atomic_actions(),\n 
\"watcher.delete_audit_template\")\n\n def test_create_audit(self):\n mock_audit_template = mock.Mock()\n watcher_scenario = utils.WatcherScenario(self.context)\n audit = watcher_scenario._create_audit(mock_audit_template)\n self.mock_wait_for_status.mock.assert_called_once_with(\n audit,\n ready_statuses=[\"SUCCEEDED\"],\n failure_statuses=[\"FAILED\"],\n status_attr=\"state\",\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.watcher_audit_launch_poll_interval,\n timeout=CONF.openstack.watcher_audit_launch_timeout,\n id_attr=\"uuid\")\n self.mock_get_from_manager.mock.assert_called_once_with()\n self.admin_clients(\"watcher\").audit.create.assert_called_once_with(\n audit_template_uuid=mock_audit_template, audit_type=\"ONESHOT\")\n self._test_atomic_action_timer(watcher_scenario.atomic_actions(),\n \"watcher.create_audit\")\n\n def test_delete_audit(self):\n mock_audit = mock.Mock()\n watcher_scenario = utils.WatcherScenario(self.context)\n watcher_scenario._delete_audit(mock_audit)\n self.admin_clients(\"watcher\").audit.delete.assert_called_once_with(\n mock_audit.uuid)\n self._test_atomic_action_timer(watcher_scenario.atomic_actions(),\n \"watcher.delete_audit\")\n" }, { "alpha_fraction": 0.5695837736129761, "alphanum_fraction": 0.5703959465026855, "avg_line_length": 40.80815124511719, "blob_id": "63fc37f5bd079d09054d8516b5bd75346bf918ca", "content_id": "885386df1512dbdd473ed10fd86efee072895398", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24625, "license_type": "permissive", "max_line_length": 79, "num_lines": 589, "path": "/rally_openstack/task/scenarios/sahara/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom oslo_utils import uuidutils\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.common import utils as rutils\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import utils\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.sahara import consts as sahara_consts\n\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\n\n\nclass SaharaScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Sahara scenarios with basic atomic actions.\"\"\"\n\n # NOTE(sskripnick): Some sahara resource names are validated as hostnames.\n # Since underscores are not allowed in hostnames we should not use them.\n RESOURCE_NAME_FORMAT = \"rally-sahara-XXXXXX-XXXXXXXXXXXXXXXX\"\n\n @atomic.action_timer(\"sahara.list_node_group_templates\")\n def _list_node_group_templates(self):\n \"\"\"Return user Node Group Templates list.\"\"\"\n return self.clients(\"sahara\").node_group_templates.list()\n\n @atomic.action_timer(\"sahara.create_master_node_group_template\")\n def _create_master_node_group_template(self, flavor_id, plugin_name,\n hadoop_version,\n use_autoconfig=True):\n \"\"\"Create a master Node Group Template with a random name.\n\n :param flavor_id: The required argument for the Template\n :param plugin_name: Sahara provisioning plugin name\n :param hadoop_version: The version of Hadoop distribution supported by\n the plugin\n :param use_autoconfig: If True, instances of the node group will be\n automatically configured 
during cluster\n creation. If False, the configuration values\n should be specify manually\n :returns: The created Template\n \"\"\"\n name = self.generate_random_name()\n\n return self.clients(\"sahara\").node_group_templates.create(\n name=name,\n plugin_name=plugin_name,\n hadoop_version=hadoop_version,\n flavor_id=flavor_id,\n node_processes=sahara_consts.NODE_PROCESSES[plugin_name]\n [hadoop_version][\"master\"],\n use_autoconfig=use_autoconfig)\n\n @atomic.action_timer(\"sahara.create_worker_node_group_template\")\n def _create_worker_node_group_template(self, flavor_id, plugin_name,\n hadoop_version, use_autoconfig):\n \"\"\"Create a worker Node Group Template with a random name.\n\n :param flavor_id: The required argument for the Template\n :param plugin_name: Sahara provisioning plugin name\n :param hadoop_version: The version of Hadoop distribution supported by\n the plugin\n :param use_autoconfig: If True, instances of the node group will be\n automatically configured during cluster\n creation. 
If False, the configuration values\n should be specify manually\n :returns: The created Template\n \"\"\"\n name = self.generate_random_name()\n\n return self.clients(\"sahara\").node_group_templates.create(\n name=name,\n plugin_name=plugin_name,\n hadoop_version=hadoop_version,\n flavor_id=flavor_id,\n node_processes=sahara_consts.NODE_PROCESSES[plugin_name]\n [hadoop_version][\"worker\"],\n use_autoconfig=use_autoconfig)\n\n @atomic.action_timer(\"sahara.delete_node_group_template\")\n def _delete_node_group_template(self, node_group):\n \"\"\"Delete a Node Group Template by id.\n\n :param node_group: The Node Group Template to be deleted\n \"\"\"\n self.clients(\"sahara\").node_group_templates.delete(node_group.id)\n\n def _wait_active(self, cluster_object):\n utils.wait_for_status(\n resource=cluster_object, ready_statuses=[\"active\"],\n failure_statuses=[\"error\"], update_resource=self._update_cluster,\n timeout=CONF.openstack.sahara_cluster_create_timeout,\n check_interval=CONF.openstack.sahara_cluster_check_interval)\n\n def _setup_neutron_floating_ip_pool(self, name_or_id):\n if name_or_id:\n if uuidutils.is_uuid_like(name_or_id):\n # Looks like an id is provided Return as is.\n return name_or_id\n else:\n # It's a name. Changing to id.\n for net in self.clients(\"neutron\").list_networks()[\"networks\"]:\n if net[\"name\"] == name_or_id:\n return net[\"id\"]\n # If the name is not found in the list. Exit with error.\n raise exceptions.ContextSetupFailure(\n ctx_name=self.get_name(),\n msg=\"Could not resolve Floating IP Pool name %s to id\"\n % name_or_id)\n else:\n # Pool is not provided. 
Using the one set as GW for current router.\n\n net = self.context[\"tenant\"][\"networks\"][0]\n router_id = net[\"router_id\"]\n router = self.clients(\"neutron\").show_router(router_id)[\"router\"]\n net_id = router[\"external_gateway_info\"][\"network_id\"]\n\n return net_id\n\n def _setup_nova_floating_ip_pool(self, name):\n if name:\n # The name is provided returning it as is.\n return name\n else:\n # The name is not provided. Discovering\n LOG.debug(\"No Floating Ip Pool provided. Taking random.\")\n pools = self.clients(\"nova\").floating_ip_pools.list()\n\n if pools:\n return random.choice(pools).name\n else:\n LOG.warning(\"No Floating Ip Pools found. This may cause \"\n \"instances to be unreachable.\")\n return None\n\n def _setup_floating_ip_pool(self, node_groups, floating_ip_pool,\n enable_proxy):\n if consts.Service.NEUTRON in self.clients(\"services\").values():\n LOG.debug(\"Neutron detected as networking backend.\")\n floating_ip_pool_value = self._setup_neutron_floating_ip_pool(\n floating_ip_pool)\n else:\n LOG.debug(\"Nova Network detected as networking backend.\")\n floating_ip_pool_value = self._setup_nova_floating_ip_pool(\n floating_ip_pool)\n\n if floating_ip_pool_value:\n LOG.debug(\"Using floating ip pool %s.\" % floating_ip_pool_value)\n # If the pool is set by any means assign it to all node groups.\n # If the proxy node feature is enabled, Master Node Group and\n # Proxy Workers should have a floating ip pool set up\n\n if enable_proxy:\n proxy_groups = [x for x in node_groups\n if x[\"name\"] in (\"master-ng\", \"proxy-ng\")]\n for ng in proxy_groups:\n ng[\"is_proxy_gateway\"] = True\n ng[\"floating_ip_pool\"] = floating_ip_pool_value\n else:\n for ng in node_groups:\n ng[\"floating_ip_pool\"] = floating_ip_pool_value\n\n return node_groups\n\n def _setup_volumes(self, node_groups, volumes_per_node, volumes_size):\n if volumes_per_node:\n LOG.debug(\"Adding volumes config to Node Groups\")\n for ng in node_groups:\n ng_name = 
ng[\"name\"]\n if \"worker\" in ng_name or \"proxy\" in ng_name:\n # NOTE: Volume storage is used only by HDFS Datanode\n # process which runs on workers and proxies.\n\n ng[\"volumes_per_node\"] = volumes_per_node\n ng[\"volumes_size\"] = volumes_size\n\n return node_groups\n\n def _setup_security_groups(self, node_groups, auto_security_group,\n security_groups):\n if auto_security_group:\n LOG.debug(\"Auto security group enabled. Adding to Node Groups.\")\n if security_groups:\n LOG.debug(\"Adding provided Security Groups to Node Groups.\")\n\n for ng in node_groups:\n if auto_security_group:\n ng[\"auto_security_group\"] = auto_security_group\n if security_groups:\n ng[\"security_groups\"] = security_groups\n\n return node_groups\n\n def _setup_node_configs(self, node_groups, node_configs):\n if node_configs:\n LOG.debug(\"Adding Hadoop configs to Node Groups\")\n for ng in node_groups:\n ng[\"node_configs\"] = node_configs\n\n return node_groups\n\n def _setup_node_autoconfig(self, node_groups, node_autoconfig):\n LOG.debug(\"Adding auto-config par to Node Groups\")\n for ng in node_groups:\n ng[\"use_autoconfig\"] = node_autoconfig\n\n return node_groups\n\n def _setup_replication_config(self, hadoop_version, workers_count,\n plugin_name):\n replication_value = min(workers_count, 3)\n # 3 is a default Hadoop replication\n conf = sahara_consts.REPLICATION_CONFIGS[plugin_name][hadoop_version]\n LOG.debug(\"Using replication factor: %s\" % replication_value)\n replication_config = {\n conf[\"target\"]: {\n conf[\"config_name\"]: replication_value\n }\n }\n return replication_config\n\n @logging.log_deprecated_args(\"`flavor_id` argument is deprecated. 
Use \"\n \"`master_flavor_id` and `worker_flavor_id` \"\n \"parameters.\", rally_version=\"0.2.0\",\n deprecated_args=[\"flavor_id\"])\n @atomic.action_timer(\"sahara.launch_cluster\")\n def _launch_cluster(self, plugin_name, hadoop_version, master_flavor_id,\n worker_flavor_id, image_id, workers_count,\n flavor_id=None,\n floating_ip_pool=None, volumes_per_node=None,\n volumes_size=None, auto_security_group=None,\n security_groups=None, node_configs=None,\n cluster_configs=None, enable_anti_affinity=False,\n enable_proxy=False,\n wait_active=True,\n use_autoconfig=True):\n \"\"\"Create a cluster and wait until it becomes Active.\n\n The cluster is created with two node groups. The master Node Group is\n created with one instance. The worker node group contains\n node_count - 1 instances.\n\n :param plugin_name: provisioning plugin name\n :param hadoop_version: Hadoop version supported by the plugin\n :param master_flavor_id: flavor which will be used to create master\n instance\n :param worker_flavor_id: flavor which will be used to create workers\n :param image_id: image id that will be used to boot instances\n :param workers_count: number of worker instances. All plugins will\n also add one Master instance and some plugins\n add a Manager instance.\n :param floating_ip_pool: floating ip pool name from which Floating\n IPs will be allocated\n :param volumes_per_node: number of Cinder volumes that will be\n attached to every cluster node\n :param volumes_size: size of each Cinder volume in GB\n :param auto_security_group: boolean value. If set to True Sahara will\n create a Security Group for each Node Group\n in the Cluster automatically.\n :param security_groups: list of security groups that will be used\n while creating VMs. 
If auto_security_group is\n set to True, this list can be left empty.\n :param node_configs: configs dict that will be passed to each Node\n Group\n :param cluster_configs: configs dict that will be passed to the\n Cluster\n :param enable_anti_affinity: If set to true the vms will be scheduled\n one per compute node.\n :param enable_proxy: Use Master Node of a Cluster as a Proxy node and\n do not assign floating ips to workers.\n :param wait_active: Wait until a Cluster gets int \"Active\" state\n :param use_autoconfig: If True, instances of the node group will be\n automatically configured during cluster\n creation. If False, the configuration values\n should be specify manually\n :returns: created cluster\n \"\"\"\n\n if enable_proxy:\n proxies_count = int(\n workers_count / CONF.openstack.sahara_workers_per_proxy)\n else:\n proxies_count = 0\n\n if flavor_id:\n # Note: the deprecated argument is used. Falling back to single\n # flavor behavior.\n master_flavor_id = flavor_id\n worker_flavor_id = flavor_id\n\n node_groups = [\n {\n \"name\": \"master-ng\",\n \"flavor_id\": master_flavor_id,\n \"node_processes\": sahara_consts.NODE_PROCESSES[plugin_name]\n [hadoop_version][\"master\"],\n \"count\": 1\n }, {\n \"name\": \"worker-ng\",\n \"flavor_id\": worker_flavor_id,\n \"node_processes\": sahara_consts.NODE_PROCESSES[plugin_name]\n [hadoop_version][\"worker\"],\n \"count\": workers_count - proxies_count\n }\n ]\n\n if proxies_count:\n node_groups.append({\n \"name\": \"proxy-ng\",\n \"flavor_id\": worker_flavor_id,\n \"node_processes\": sahara_consts.NODE_PROCESSES[plugin_name]\n [hadoop_version][\"worker\"],\n \"count\": proxies_count\n })\n\n if \"manager\" in (sahara_consts.NODE_PROCESSES[plugin_name]\n [hadoop_version]):\n # Adding manager group separately as it is supported only in\n # specific configurations.\n\n node_groups.append({\n \"name\": \"manager-ng\",\n \"flavor_id\": master_flavor_id,\n \"node_processes\": 
sahara_consts.NODE_PROCESSES[plugin_name]\n [hadoop_version][\"manager\"],\n \"count\": 1\n })\n\n node_groups = self._setup_floating_ip_pool(node_groups,\n floating_ip_pool,\n enable_proxy)\n\n neutron_net_id = self._get_neutron_net_id()\n\n node_groups = self._setup_volumes(node_groups, volumes_per_node,\n volumes_size)\n\n node_groups = self._setup_security_groups(node_groups,\n auto_security_group,\n security_groups)\n\n node_groups = self._setup_node_configs(node_groups, node_configs)\n\n node_groups = self._setup_node_autoconfig(node_groups, use_autoconfig)\n\n replication_config = self._setup_replication_config(hadoop_version,\n workers_count,\n plugin_name)\n\n # The replication factor should be set for small clusters. However the\n # cluster_configs parameter can override it\n merged_cluster_configs = self._merge_configs(replication_config,\n cluster_configs)\n\n aa_processes = None\n if enable_anti_affinity:\n aa_processes = (sahara_consts.ANTI_AFFINITY_PROCESSES[plugin_name]\n [hadoop_version])\n\n name = self.generate_random_name()\n\n cluster_object = self.clients(\"sahara\").clusters.create(\n name=name,\n plugin_name=plugin_name,\n hadoop_version=hadoop_version,\n node_groups=node_groups,\n default_image_id=image_id,\n net_id=neutron_net_id,\n cluster_configs=merged_cluster_configs,\n anti_affinity=aa_processes,\n use_autoconfig=use_autoconfig\n )\n\n if wait_active:\n LOG.debug(\"Starting cluster `%s`\" % name)\n self._wait_active(cluster_object)\n\n return self.clients(\"sahara\").clusters.get(cluster_object.id)\n\n def _update_cluster(self, cluster):\n return self.clients(\"sahara\").clusters.get(cluster.id)\n\n def _scale_cluster(self, cluster, delta):\n \"\"\"The scaling helper.\n\n This method finds the worker node group in a cluster, builds a\n scale_object required by Sahara API and waits for the scaling to\n complete.\n\n NOTE: This method is not meant to be called directly in scenarios.\n There two specific scaling methods of up and down 
scaling which have\n different atomic timers.\n \"\"\"\n worker_node_group = [g for g in cluster.node_groups\n if \"worker\" in g[\"name\"]][0]\n scale_object = {\n \"resize_node_groups\": [\n {\n \"name\": worker_node_group[\"name\"],\n \"count\": worker_node_group[\"count\"] + delta\n }\n ]\n }\n self.clients(\"sahara\").clusters.scale(cluster.id, scale_object)\n\n self._wait_active(cluster)\n\n @atomic.action_timer(\"sahara.scale_up\")\n def _scale_cluster_up(self, cluster, delta):\n \"\"\"Add a given number of worker nodes to the cluster.\n\n :param cluster: The cluster to be scaled\n :param delta: The number of workers to be added. (A positive number is\n expected here)\n \"\"\"\n self._scale_cluster(cluster, delta)\n\n @atomic.action_timer(\"sahara.scale_down\")\n def _scale_cluster_down(self, cluster, delta):\n \"\"\"Remove a given number of worker nodes from the cluster.\n\n :param cluster: The cluster to be scaled\n :param delta: The number of workers to be removed. (A negative number\n is expected here)\n \"\"\"\n self._scale_cluster(cluster, delta)\n\n @atomic.action_timer(\"sahara.delete_cluster\")\n def _delete_cluster(self, cluster):\n \"\"\"Delete cluster.\n\n :param cluster: cluster to delete\n \"\"\"\n\n LOG.debug(\"Deleting cluster `%s`\" % cluster.name)\n self.clients(\"sahara\").clusters.delete(cluster.id)\n\n utils.wait_for(\n resource=cluster,\n timeout=CONF.openstack.sahara_cluster_delete_timeout,\n check_interval=CONF.openstack.sahara_cluster_check_interval,\n is_ready=self._is_cluster_deleted)\n\n def _is_cluster_deleted(self, cluster):\n from saharaclient.api import base as sahara_base\n\n LOG.debug(\"Checking cluster `%s` to be deleted. 
Status: `%s`\"\n % (cluster.name, cluster.status))\n try:\n self.clients(\"sahara\").clusters.get(cluster.id)\n return False\n except sahara_base.APIException:\n return True\n\n def _create_output_ds(self):\n \"\"\"Create an output Data Source based on EDP context\n\n :returns: The created Data Source\n \"\"\"\n ds_type = self.context[\"sahara\"][\"output_conf\"][\"output_type\"]\n url_prefix = self.context[\"sahara\"][\"output_conf\"][\"output_url_prefix\"]\n\n if ds_type == \"swift\":\n raise exceptions.RallyException(\n \"Swift Data Sources are not implemented yet\")\n\n url = url_prefix.rstrip(\"/\") + \"/%s\" % self.generate_random_name()\n\n return self.clients(\"sahara\").data_sources.create(\n name=self.generate_random_name(),\n description=\"\",\n data_source_type=ds_type,\n url=url)\n\n def _run_job_execution(self, job_id, cluster_id, input_id, output_id,\n configs, job_idx):\n \"\"\"Run a Job Execution and wait until it completes or fails.\n\n The Job Execution is accepted as successful when Oozie reports\n \"success\" or \"succeeded\" status. 
The failure statuses are \"failed\" and\n \"killed\".\n\n The timeout and the polling interval may be configured through\n \"sahara_job_execution_timeout\" and \"sahara_job_check_interval\"\n parameters under the \"benchmark\" section.\n\n :param job_id: The Job id that will be executed\n :param cluster_id: The Cluster id which will execute the Job\n :param input_id: The input Data Source id\n :param output_id: The output Data Source id\n :param configs: The config dict that will be passed as Job Execution's\n parameters.\n :param job_idx: The index of a job in a sequence\n\n \"\"\"\n @atomic.action_timer(\"sahara.job_execution_%s\" % job_idx)\n def run(self):\n job_execution = self.clients(\"sahara\").job_executions.create(\n job_id=job_id,\n cluster_id=cluster_id,\n input_id=input_id,\n output_id=output_id,\n configs=configs)\n\n utils.wait_for(\n resource=job_execution.id,\n is_ready=self._job_execution_is_finished,\n timeout=CONF.openstack.sahara_job_execution_timeout,\n check_interval=CONF.openstack.sahara_job_check_interval)\n\n run(self)\n\n def _job_execution_is_finished(self, je_id):\n status = self.clients(\"sahara\").job_executions.get(je_id).info[\n \"status\"].lower()\n\n LOG.debug(\"Checking for Job Execution %s to complete. 
Status: %s\"\n % (je_id, status))\n if status in (\"success\", \"succeeded\"):\n return True\n elif status in (\"failed\", \"killed\"):\n raise exceptions.RallyException(\n \"Job execution %s has failed\" % je_id)\n return False\n\n def _merge_configs(self, *configs):\n \"\"\"Merge configs in special format.\n\n It supports merging of configs in the following format:\n applicable_target -> config_name -> config_value\n\n \"\"\"\n result = {}\n for config_dict in configs:\n if config_dict:\n for a_target in config_dict:\n if a_target not in result or not result[a_target]:\n result[a_target] = {}\n result[a_target].update(config_dict[a_target])\n\n return result\n\n def _get_neutron_net_id(self):\n \"\"\"Get the Neutron Network id from context.\n\n If Nova Network is used as networking backend, None is returned.\n\n :returns: Network id for Neutron or None for Nova Networking.\n \"\"\"\n\n if consts.Service.NEUTRON not in self.clients(\"services\").values():\n return None\n\n # Taking net id from context.\n net = self.context[\"tenant\"][\"networks\"][0]\n neutron_net_id = net[\"id\"]\n LOG.debug(\"Using neutron network %s.\" % neutron_net_id)\n LOG.debug(\"Using neutron router %s.\" % net[\"router_id\"])\n\n return neutron_net_id\n\n\ndef init_sahara_context(context_instance):\n context_instance.context[\"sahara\"] = context_instance.context.get(\"sahara\",\n {})\n for user, tenant_id in rutils.iterate_per_tenants(\n context_instance.context[\"users\"]):\n context_instance.context[\"tenants\"][tenant_id][\"sahara\"] = (\n context_instance.context[\"tenants\"][tenant_id].get(\"sahara\", {}))\n" }, { "alpha_fraction": 0.6216468214988708, "alphanum_fraction": 0.6260788440704346, "avg_line_length": 35.64102554321289, "blob_id": "bed52f08e46cb4de2e4c9830c415979bbfcf07f0", "content_id": "4abf604e560e4c9414d7f7e1bf37b88a0b53b48d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4287, "license_type": 
"permissive", "max_line_length": 79, "num_lines": 117, "path": "/rally_openstack/task/scenarios/designate/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014 Hewlett-Packard Development Company, L.P.\n#\n# Author: Endre Karlson <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import atomic\n\nfrom rally_openstack.task import scenario\n\n\nclass DesignateScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Designate scenarios with basic atomic actions.\"\"\"\n\n # valid domain name cannot contain underscore characters\n # which are used in default autogenerated names\n RESOURCE_NAME_FORMAT = \"s-rally-XXXXXXXX-XXXXXXXX\"\n\n @atomic.action_timer(\"designate.create_zone\")\n def _create_zone(self, name=None, type_=None, email=None, description=None,\n ttl=None):\n \"\"\"Create zone.\n\n :param name: Zone name\n :param type_: Zone type, PRIMARY or SECONDARY\n :param email: Zone owner email\n :param description: Zone description\n :param ttl: Zone ttl - Time to live in seconds\n :returns: designate zone dict\n \"\"\"\n type_ = type_ or \"PRIMARY\"\n\n if type_ == \"PRIMARY\":\n email = email or \"[email protected]\"\n # Name is only useful to be random for PRIMARY\n name = name or \"%s.name.\" % self.generate_random_name()\n\n return self.clients(\"designate\", version=\"2\").zones.create(\n name=name,\n type_=type_,\n email=email,\n description=description,\n ttl=ttl\n 
)\n\n @atomic.action_timer(\"designate.list_zones\")\n def _list_zones(self, criterion=None, marker=None, limit=None):\n \"\"\"Return user zone list.\n\n :param criterion: API Criterion to filter by\n :param marker: UUID marker of the item to start the page from\n :param limit: How many items to return in the page.\n :returns: list of designate zones\n \"\"\"\n return self.clients(\"designate\", version=\"2\").zones.list()\n\n @atomic.action_timer(\"designate.delete_zone\")\n def _delete_zone(self, zone_id):\n \"\"\"Delete designate zone.\n\n :param zone_id: Zone ID\n \"\"\"\n self.clients(\"designate\", version=\"2\").zones.delete(zone_id)\n\n @atomic.action_timer(\"designate.list_recordsets\")\n def _list_recordsets(self, zone_id, criterion=None, marker=None,\n limit=None):\n \"\"\"List zone recordsets.\n\n :param zone_id: Zone ID\n :param criterion: API Criterion to filter by\n :param marker: UUID marker of the item to start the page from\n :param limit: How many items to return in the page.\n :returns: zone recordsets list\n \"\"\"\n return self.clients(\"designate\", version=\"2\").recordsets.list(\n zone_id, criterion=criterion, marker=marker, limit=limit)\n\n @atomic.action_timer(\"designate.create_recordset\")\n def _create_recordset(self, zone, recordset=None):\n \"\"\"Create a recordset in a zone.\n\n :param zone: zone dict\n :param recordset: recordset dict\n :returns: Designate recordset dict\n \"\"\"\n recordset = recordset or {}\n recordset.setdefault(\"type_\", recordset.pop(\"type\", \"A\"))\n if \"name\" not in recordset:\n recordset[\"name\"] = \"%s.%s\" % (self.generate_random_name(),\n zone[\"name\"])\n if \"records\" not in recordset:\n recordset[\"records\"] = [\"10.0.0.1\"]\n\n return self.clients(\"designate\", version=\"2\").recordsets.create(\n zone[\"id\"], **recordset)\n\n @atomic.action_timer(\"designate.delete_recordset\")\n def _delete_recordset(self, zone_id, recordset_id):\n \"\"\"Delete a zone recordset.\n\n :param zone_id: Zone 
ID\n :param recordset_id: Recordset ID\n \"\"\"\n\n self.clients(\"designate\", version=\"2\").recordsets.delete(\n zone_id, recordset_id)\n" }, { "alpha_fraction": 0.5888409614562988, "alphanum_fraction": 0.5937438607215881, "avg_line_length": 43.33913040161133, "blob_id": "a8d5dfffed625ef554880d6c58f849818a7221b8", "content_id": "58ea35ec4752ad9d25752ec652b9d0f778e71d1a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10198, "license_type": "permissive", "max_line_length": 79, "num_lines": 230, "path": "/rally_openstack/verification/tempest/config.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport configparser\nimport inspect\nimport io\nimport os\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally import exceptions\nfrom rally.verification import utils\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import credential\n\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\nclass TempestConfigfileManager(object):\n \"\"\"Class to create a Tempest config file.\"\"\"\n\n def __init__(self, env):\n openstack_platform = env.data[\"platforms\"][\"openstack\"]\n self.credential = credential.OpenStackCredential(\n permission=consts.EndpointPermission.ADMIN,\n **openstack_platform[\"platform_data\"][\"admin\"])\n\n if not self.credential:\n raise exceptions.ValidationError(\n f\"Failed to configure 'tempest' for '{env} since \"\n \"admin credentials for OpenStack platform is missed there.\"\n )\n self.clients = self.credential.clients()\n self.available_services = self.clients.services().values()\n\n self.conf = configparser.ConfigParser(allow_no_value=True)\n self.conf.optionxform = str\n\n def _get_service_type_by_service_name(self, service_name):\n for s_type, s_name in self.clients.services().items():\n if s_name == service_name:\n return s_type\n\n def _configure_auth(self, section_name=\"auth\"):\n self.conf.set(section_name, \"admin_username\",\n self.credential.username)\n self.conf.set(section_name, \"admin_password\",\n self.credential.password)\n self.conf.set(section_name, \"admin_project_name\",\n self.credential.tenant_name)\n # Keystone v3 related parameter\n self.conf.set(section_name, \"admin_domain_name\",\n self.credential.user_domain_name or \"Default\")\n\n # Sahara has two service types: 'data_processing' and 'data-processing'.\n # 'data_processing' is deprecated, but it can be used in previous OpenStack\n # releases. 
So we need to configure the 'catalog_type' option to support\n # environments where 'data_processing' is used as service type for Sahara.\n def _configure_data_processing(self, section_name=\"data-processing\"):\n if \"sahara\" in self.available_services:\n self.conf.set(section_name, \"catalog_type\",\n self._get_service_type_by_service_name(\"sahara\"))\n\n def _configure_identity(self, section_name=\"identity\"):\n self.conf.set(section_name, \"region\",\n self.credential.region_name)\n # discover keystone versions\n\n def get_versions(auth_url):\n from keystoneauth1 import discover\n from keystoneauth1 import session\n\n temp_session = session.Session(\n verify=(self.credential.https_cacert\n or not self.credential.https_insecure),\n timeout=CONF.openstack_client_http_timeout)\n data = discover.Discover(temp_session, auth_url).version_data()\n return dict([(v[\"version\"][0], v[\"url\"]) for v in data])\n\n # check the original auth_url without cropping versioning to identify\n # the default version\n\n versions = get_versions(self.credential.auth_url)\n cropped_auth_url = self.clients.keystone._remove_url_version()\n if cropped_auth_url == self.credential.auth_url:\n # the given auth_url doesn't contain version\n if set(versions.keys()) == {2, 3}:\n # ok, both versions of keystone are enabled, we can take urls\n # there\n uri = versions[2]\n uri_v3 = versions[3]\n target_version = 3\n elif set(versions.keys()) == {2} or set(versions.keys()) == {3}:\n # only one version is available while discovering\n\n # get the most recent version\n target_version = sorted(versions.keys())[-1]\n if target_version == 2:\n uri = versions[2]\n uri_v3 = os.path.join(cropped_auth_url, \"v3\")\n else:\n # keystone v2 is disabled. 
let's do it explicitly\n self.conf.set(\"identity-feature-enabled\", \"api_v2\",\n \"False\")\n uri_v3 = versions[3]\n uri = os.path.join(cropped_auth_url, \"v2.0\")\n else:\n # Does Keystone released new version of API ?!\n LOG.debug(\"Discovered keystone versions: %s\" % versions)\n raise exceptions.RallyException(\"Failed to discover keystone \"\n \"auth urls.\")\n\n else:\n if self.credential.auth_url.rstrip(\"/\").endswith(\"v2.0\"):\n uri = self.credential.auth_url\n uri_v3 = uri.replace(\"/v2.0\", \"/v3\")\n target_version = 2\n else:\n uri_v3 = self.credential.auth_url\n uri = uri_v3.replace(\"/v3\", \"/v2.0\")\n target_version = 3\n\n self.conf.set(section_name, \"auth_version\", \"v%s\" % target_version)\n self.conf.set(section_name, \"uri\", uri)\n self.conf.set(section_name, \"uri_v3\", uri_v3)\n if self.credential.endpoint_type:\n self.conf.set(section_name, \"v2_endpoint_type\",\n self.credential.endpoint_type)\n self.conf.set(section_name, \"v3_endpoint_type\",\n self.credential.endpoint_type)\n\n self.conf.set(section_name, \"disable_ssl_certificate_validation\",\n str(self.credential.https_insecure))\n self.conf.set(section_name, \"ca_certificates_file\",\n self.credential.https_cacert)\n\n # The compute section is configured in context class for Tempest resources.\n # Options which are configured there: 'image_ref', 'image_ref_alt',\n # 'flavor_ref', 'flavor_ref_alt'.\n\n def _configure_network(self, section_name=\"network\"):\n if \"neutron\" in self.available_services:\n neutronclient = self.clients.neutron()\n public_nets = [\n net for net in neutronclient.list_networks()[\"networks\"]\n if net[\"status\"] == \"ACTIVE\" and net[\"router:external\"] is True\n ]\n if public_nets:\n net_id = public_nets[0][\"id\"]\n net_name = public_nets[0][\"name\"]\n self.conf.set(section_name, \"public_network_id\", net_id)\n self.conf.set(section_name, \"floating_network_name\", net_name)\n else:\n novaclient = self.clients.nova()\n net_name = 
next(net.human_id for net in novaclient.networks.list()\n if net.human_id is not None)\n self.conf.set(\"compute\", \"fixed_network_name\", net_name)\n self.conf.set(\"validation\", \"network_for_ssh\", net_name)\n\n def _configure_network_feature_enabled(\n self, section_name=\"network-feature-enabled\"):\n if \"neutron\" in self.available_services:\n neutronclient = self.clients.neutron()\n extensions = neutronclient.list_ext(\"extensions\", \"/extensions\",\n retrieve_all=True)\n aliases = [ext[\"alias\"] for ext in extensions[\"extensions\"]]\n aliases_str = \",\".join(aliases)\n self.conf.set(section_name, \"api_extensions\", aliases_str)\n\n def _configure_object_storage(self, section_name=\"object-storage\"):\n self.conf.set(section_name, \"operator_role\",\n CONF.openstack.swift_operator_role)\n self.conf.set(section_name, \"reseller_admin_role\",\n CONF.openstack.swift_reseller_admin_role)\n\n def _configure_service_available(self, section_name=\"service_available\"):\n services = [\"cinder\", \"glance\", \"heat\", \"ironic\", \"neutron\", \"nova\",\n \"sahara\", \"swift\"]\n for service in services:\n # Convert boolean to string because ConfigParser fails\n # on attempt to get option with boolean value\n self.conf.set(section_name, service,\n str(service in self.available_services))\n\n def _configure_validation(self, section_name=\"validation\"):\n if \"neutron\" in self.available_services:\n self.conf.set(section_name, \"connect_method\", \"floating\")\n else:\n self.conf.set(section_name, \"connect_method\", \"fixed\")\n\n def _configure_orchestration(self, section_name=\"orchestration\"):\n self.conf.set(section_name, \"stack_owner_role\",\n CONF.openstack.heat_stack_owner_role)\n self.conf.set(section_name, \"stack_user_role\",\n CONF.openstack.heat_stack_user_role)\n\n def create(self, conf_path, extra_options=None):\n self.conf.read(os.path.join(os.path.dirname(__file__), \"config.ini\"))\n\n for name, method in inspect.getmembers(self, 
inspect.ismethod):\n if name.startswith(\"_configure_\"):\n method()\n\n if extra_options:\n utils.add_extra_options(extra_options, self.conf)\n\n with open(conf_path, \"w\") as configfile:\n self.conf.write(configfile)\n\n raw_conf = io.StringIO()\n raw_conf.write(\"# Some empty values of options will be replaced while \"\n \"creating required resources (images, flavors, etc).\\n\")\n self.conf.write(raw_conf)\n\n return raw_conf.getvalue()\n" }, { "alpha_fraction": 0.6165368556976318, "alphanum_fraction": 0.6193329095840454, "avg_line_length": 40.380165100097656, "blob_id": "c4a3377659f37d5d742914f0e361e48a975cf658", "content_id": "11415324e8c127db938127763dc4034f615fdd01", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5007, "license_type": "permissive", "max_line_length": 78, "num_lines": 121, "path": "/tests/unit/common/services/image/test_image.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.common.services.image import glance_v1\nfrom rally_openstack.common.services.image import glance_v2\nfrom rally_openstack.common.services.image import image\nfrom tests.unit import test\n\n\[email protected]\nclass ImageTestCase(test.TestCase):\n\n def setUp(self):\n super(ImageTestCase, self).setUp()\n self.clients = mock.MagicMock()\n\n def get_service_with_fake_impl(self):\n path = \"rally_openstack.common.services.image.image\"\n with mock.patch(\"%s.Image.discover_impl\" % path) as mock_discover:\n mock_discover.return_value = mock.MagicMock(), None\n service = image.Image(self.clients)\n return service\n\n @ddt.data((\"image_name\", \"container_format\", \"image_location\",\n \"disk_format\", \"visibility\", \"min_disk\", \"min_ram\"))\n def test_create_image(self, params):\n (image_name, container_format, image_location, disk_format,\n visibility, min_disk, min_ram) = params\n service = self.get_service_with_fake_impl()\n properties = {\"fakeprop\": \"fake\"}\n\n service.create_image(image_name=image_name,\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram,\n properties=properties)\n\n service._impl.create_image.assert_called_once_with(\n image_name=image_name, container_format=container_format,\n image_location=image_location, disk_format=disk_format,\n visibility=visibility, min_disk=min_disk, min_ram=min_ram,\n properties=properties)\n\n @ddt.data((\"image_id\", \"image_name\", \"min_disk\", \"min_ram\",\n \"remove_props\"))\n def test_update_image(self, params):\n (image_id, image_name, min_disk, min_ram, remove_props) = params\n service = self.get_service_with_fake_impl()\n service.update_image(image_id,\n image_name=image_name,\n min_disk=min_disk,\n min_ram=min_ram,\n 
remove_props=remove_props)\n service._impl.update_image.assert_called_once_with(\n image_id, image_name=image_name, min_disk=min_disk,\n min_ram=min_ram, remove_props=remove_props)\n\n @ddt.data(\"image_id\")\n def test_get_image(self, param):\n image_id = param\n service = self.get_service_with_fake_impl()\n service.get_image(image=image_id)\n service._impl.get_image.assert_called_once_with(image_id)\n\n @ddt.data((\"status\", \"visibility\", \"owner\"))\n def test_list_images(self, params):\n status, visibility, owner = params\n service = self.get_service_with_fake_impl()\n service.list_images(status=status, visibility=visibility, owner=owner)\n service._impl.list_images.assert_called_once_with(\n status=status, visibility=visibility, owner=owner)\n\n @ddt.data((\"image_id\", \"visibility\"))\n def test_set_visibility(self, params):\n image_id, visibility = params\n service = self.get_service_with_fake_impl()\n service.set_visibility(image_id=image_id, visibility=visibility)\n service._impl.set_visibility.assert_called_once_with(\n image_id, visibility=visibility)\n\n def test_delete_image(self):\n image_id = \"image_id\"\n service = self.get_service_with_fake_impl()\n service.delete_image(image_id=image_id)\n service._impl.delete_image.assert_called_once_with(image_id)\n\n def test_download_image(self):\n image_id = \"image_id\"\n service = self.get_service_with_fake_impl()\n service.download_image(image=image_id, do_checksum=True)\n service._impl.download_image.assert_called_once_with(image_id,\n do_checksum=True)\n\n def test_is_applicable(self):\n clients = mock.Mock()\n\n clients.glance().version = \"1.0\"\n self.assertTrue(\n glance_v1.UnifiedGlanceV1Service.is_applicable(clients))\n\n clients.glance().version = \"2.0\"\n self.assertTrue(\n glance_v2.UnifiedGlanceV2Service.is_applicable(clients))\n" }, { "alpha_fraction": 0.6655359268188477, "alphanum_fraction": 0.6662144064903259, "avg_line_length": 25.799999237060547, "blob_id": 
"99c81d15b03b34b957c4cb85acaa4bd326799d0f", "content_id": "e3579de14a3a12b57fe9de7cb10a8c87cdf49bf7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1474, "license_type": "permissive", "max_line_length": 131, "num_lines": 55, "path": "/README.rst", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "===============\nrally-openstack\n===============\n\nRally plugins for `OpenStack platform <https://openstack.org>`_\n\n\nUsage\n-----\n\n.. code-block:: bash\n\n # Install this package (will install rally if not installed)\n pip install rally-openstack\n\n # List all installed plugins\n rally plugin list --platform openstack\n\n # Create OpenStack Env\n\n cat <<EOT >> env.yaml\n ---\n openstack:\n auth_url: \"https://keystone.net/identity\"\n region_name: RegionOne\n https_insecure: False\n users:\n - username: user_that_runs_commands\n password: his password\n project_name: project_that_users_belong_to\n EOT\n\n rally env create --name my_openstack --spec env.yaml\n\n # Check that you provide correct credentials\n rally env check\n\n # Collect key Open Stack metrics\n rally task start ./tasks/openstack_metrics/task.yaml --task-args {\"image_name\": \"image_to_use\", \"flavor_name\": \"flavor_to_use\"}\n\n # Generate Report\n rally task report --out report.html\n\n\nLinks\n----------------------\n\n* Free software: Apache license\n* Documentation: https://rally.readthedocs.org/en/latest/\n* Source: https://opendev.org/openstack/rally-openstack/\n* Bugs: https://bugs.launchpad.net/rally\n* Step-by-step tutorial: https://rally.readthedocs.io/en/latest/quick_start/tutorial.html\n* Launchpad page: https://launchpad.net/rally\n* Gitter chat: https://gitter.im/rally-dev/Lobby\n* Trello board: https://trello.com/b/DoD8aeZy/rally\n" }, { "alpha_fraction": 0.6203066110610962, "alphanum_fraction": 0.6240835189819336, "avg_line_length": 42.69902801513672, "blob_id": 
"5fb7c37c69df7a0808fb55fb3ee53f7ff5cbefbd", "content_id": "614f5cc674589d5ac836f58d1c9398a5239d6585", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4501, "license_type": "permissive", "max_line_length": 78, "num_lines": 103, "path": "/tests/unit/task/contexts/test_api_versions.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally.common import utils\nfrom rally import exceptions\nfrom rally.task import context\n\nfrom rally_openstack.task.contexts import api_versions\nfrom tests.unit import test\n\n\[email protected]\nclass OpenStackServicesTestCase(test.TestCase):\n\n def setUp(self):\n super(OpenStackServicesTestCase, self).setUp()\n self.mock_clients = mock.patch(\n \"rally_openstack.common.osclients.Clients\").start()\n osclient_kc = self.mock_clients.return_value.keystone\n self.mock_kc = osclient_kc.return_value\n self.service_catalog = osclient_kc.service_catalog\n self.service_catalog.get_endpoints.return_value = []\n self.mock_kc.services.list.return_value = []\n\n @ddt.data(({\"nova\": {\"service_type\": \"compute\", \"version\": 2},\n \"cinder\": {\"service_name\": \"cinderv2\", \"version\": 2},\n \"neutron\": {\"service_type\": \"network\"},\n \"glance\": {\"service_name\": \"glance\"},\n \"heat\": {\"version\": 1}}, True),\n ({\"nova\": 
{\"service_type\": \"compute\",\n \"service_name\": \"nova\"}}, False),\n ({\"keystone\": {\"service_type\": \"foo\"}}, False),\n ({\"nova\": {\"version\": \"foo\"}}, False),\n ({}, False))\n @ddt.unpack\n def test_validate(self, config, valid):\n results = context.Context.validate(\"api_versions\", None, None, config)\n if valid:\n self.assertEqual([], results)\n else:\n self.assertGreater(len(results), 0)\n\n def test_setup_with_wrong_service_name(self):\n context_obj = {\n \"config\": {api_versions.OpenStackAPIVersions.get_fullname(): {\n \"nova\": {\"service_name\": \"service_name\"}}},\n \"admin\": {\"credential\": mock.MagicMock()},\n \"users\": [{\"credential\": mock.MagicMock()}]}\n ctx = api_versions.OpenStackAPIVersions(context_obj)\n self.assertRaises(exceptions.ValidationError, ctx.setup)\n self.service_catalog.get_endpoints.assert_called_once_with()\n self.mock_kc.services.list.assert_called_once_with()\n\n def test_setup_with_wrong_service_name_and_without_admin(self):\n context_obj = {\n \"config\": {api_versions.OpenStackAPIVersions.get_fullname(): {\n \"nova\": {\"service_name\": \"service_name\"}}},\n \"users\": [{\"credential\": mock.MagicMock()}]}\n ctx = api_versions.OpenStackAPIVersions(context_obj)\n self.assertRaises(exceptions.ContextSetupFailure, ctx.setup)\n self.service_catalog.get_endpoints.assert_called_once_with()\n self.assertFalse(self.mock_kc.services.list.called)\n\n def test_setup_with_wrong_service_type(self):\n context_obj = {\n \"config\": {api_versions.OpenStackAPIVersions.get_fullname(): {\n \"nova\": {\"service_type\": \"service_type\"}}},\n \"users\": [{\"credential\": mock.MagicMock()}]}\n ctx = api_versions.OpenStackAPIVersions(context_obj)\n self.assertRaises(exceptions.ValidationError, ctx.setup)\n self.service_catalog.get_endpoints.assert_called_once_with()\n\n def test_setup_with_service_name(self):\n self.mock_kc.services.list.return_value = [\n utils.Struct(type=\"computev21\", name=\"NovaV21\")]\n name = 
api_versions.OpenStackAPIVersions.get_fullname()\n context = {\n \"config\": {name: {\"nova\": {\"service_name\": \"NovaV21\"}}},\n \"admin\": {\"credential\": mock.MagicMock()},\n \"users\": [{\"credential\": mock.MagicMock()}]}\n ctx = api_versions.OpenStackAPIVersions(context)\n ctx.setup()\n\n self.service_catalog.get_endpoints.assert_called_once_with()\n self.mock_kc.services.list.assert_called_once_with()\n\n versions = ctx.context[\"config\"][\"api_versions@openstack\"]\n self.assertEqual(\n \"computev21\",\n versions[\"nova\"][\"service_type\"])\n" }, { "alpha_fraction": 0.5559514760971069, "alphanum_fraction": 0.5595526695251465, "avg_line_length": 39.522274017333984, "blob_id": "6a361afcb9d209e8fc4f2aa6769978b797847b67", "content_id": "671d71b736e4b51706caf0a050dc1313008bbc78", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26380, "license_type": "permissive", "max_line_length": 79, "num_lines": 651, "path": "/rally_openstack/task/scenarios/vm/vmtasks.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Rackspace UK\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport os\nimport pkgutil\nimport re\n\nfrom rally.common import logging\nfrom rally.common import validation\nfrom rally import exceptions\nfrom rally.plugins.common import validators\nfrom rally.task import atomic\nfrom rally.task import types\nfrom rally.task import utils as rally_utils\nfrom rally.utils import sshutils\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common.services import heat\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.cinder import utils as cinder_utils\nfrom rally_openstack.task.scenarios.vm import utils as vm_utils\n\n\n\"\"\"Scenarios that are to be run inside VM instances.\"\"\"\n\n\nLOG = logging.getLogger(__name__)\n\n\n# TODO(andreykurilin): replace by advanced jsonschema(lollipop?!) someday\[email protected](name=\"valid_command\", platform=\"openstack\")\nclass ValidCommandValidator(validators.FileExistsValidator):\n\n def __init__(self, param_name, required=True):\n \"\"\"Checks that parameter is a proper command-specifying dictionary.\n\n Ensure that the command dictionary is a proper command-specifying\n dictionary described in 'vmtasks.VMTasks.boot_runcommand_delete'\n docstring.\n\n :param param_name: Name of parameter to validate\n :param required: Boolean indicating that the command dictionary is\n required\n \"\"\"\n super(ValidCommandValidator, self).__init__(param_name=param_name)\n\n self.required = required\n\n def check_command_dict(self, command):\n \"\"\"Check command-specifying dict `command'\n\n :raises ValueError: on error\n \"\"\"\n\n if not isinstance(command, dict):\n self.fail(\"Command must be a dictionary\")\n\n # NOTE(pboldin): Here we check for the values not for presence of the\n # keys due to template-driven configuration generation that can leave\n # keys defined but values empty.\n if command.get(\"interpreter\"):\n script_file = 
command.get(\"script_file\")\n if script_file:\n if \"script_inline\" in command:\n self.fail(\n \"Exactly one of script_inline or script_file with \"\n \"interpreter is expected: %r\" % command)\n # User tries to upload a shell? Make sure it is same as interpreter\n interpreter = command.get(\"interpreter\")\n interpreter = (interpreter[-1]\n if isinstance(interpreter, (tuple, list))\n else interpreter)\n if (command.get(\"local_path\")\n and command.get(\"remote_path\") != interpreter):\n self.fail(\n \"When uploading an interpreter its path should be as well\"\n \" specified as the `remote_path' string: %r\" % command)\n elif not command.get(\"remote_path\"):\n # No interpreter and no remote command to execute is given\n self.fail(\n \"Supplied dict specifies no command to execute, either \"\n \"interpreter or remote_path is required: %r\" % command)\n\n unexpected_keys = set(command) - {\"script_file\", \"script_inline\",\n \"interpreter\", \"remote_path\",\n \"local_path\", \"command_args\"}\n if unexpected_keys:\n self.fail(\n \"Unexpected command parameters: %s\" % \", \".join(\n unexpected_keys))\n\n def validate(self, context, config, plugin_cls, plugin_cfg):\n command = config.get(\"args\", {}).get(self.param_name)\n if command is None and not self.required:\n return\n\n try:\n self.check_command_dict(command)\n except ValueError as e:\n return self.fail(str(e))\n\n for key in \"script_file\", \"local_path\":\n if command.get(key):\n self._file_access_ok(\n filename=command[key], mode=os.R_OK,\n param_name=self.param_name, required=self.required)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\", fail_on_404_image=False)\[email protected](\"valid_command\", param_name=\"command\")\[email protected](\"number\", param_name=\"port\", minval=1, maxval=65535,\n nullable=True, integer_only=True)\[email 
protected](\"external_network_exists\", param_name=\"floating_network\")\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_param_or_context\",\n param_name=\"image\", ctx_name=\"image_command_customizer\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\", \"cinder\"],\n \"keypair@openstack\": {},\n \"allow_ssh@openstack\": None},\n name=\"VMTasks.boot_runcommand_delete\",\n platform=\"openstack\")\nclass BootRuncommandDelete(vm_utils.VMScenario, cinder_utils.CinderBasic):\n\n def run(self, flavor, username, password=None,\n image=None,\n command=None,\n volume_args=None, floating_network=None, port=22,\n use_floating_ip=True, force_delete=False, wait_for_ping=True,\n max_log_length=None, **kwargs):\n \"\"\"Boot a server, run script specified in command and delete server.\n\n :param image: glance image name to use for the vm. Optional\n in case of specified \"image_command_customizer\" context\n :param flavor: VM flavor name\n :param username: ssh username on server, str\n :param password: Password on SSH authentication\n :param command: Command-specifying dictionary that either specifies\n remote command path via `remote_path' (can be uploaded from a\n local file specified by `local_path`), an inline script via\n `script_inline' or a local script file path using `script_file'.\n Both `script_file' and `local_path' are checked to be accessible\n by the `file_exists' validator code.\n\n The `script_inline' and `script_file' both require an `interpreter'\n value to specify the interpreter script should be run with.\n\n Note that any of `interpreter' and `remote_path' can be an array\n prefixed with environment variables and suffixed with args for\n the `interpreter' command. `remote_path's last component must be\n a path to a command to execute (also upload destination if a\n `local_path' is given). 
Uploading an interpreter is possible\n but requires that `remote_path' and `interpreter' path do match.\n\n Examples:\n\n .. code-block:: python\n\n # Run a `local_script.pl' file sending it to a remote\n # Perl interpreter\n command = {\n \"script_file\": \"local_script.pl\",\n \"interpreter\": \"/usr/bin/perl\"\n }\n\n # Run an inline script sending it to a remote interpreter\n command = {\n \"script_inline\": \"echo 'Hello, World!'\",\n \"interpreter\": \"/bin/sh\"\n }\n\n # Run a remote command\n command = {\n \"remote_path\": \"/bin/false\"\n }\n\n # Copy a local command and run it\n command = {\n \"remote_path\": \"/usr/local/bin/fio\",\n \"local_path\": \"/home/foobar/myfiodir/bin/fio\"\n }\n\n # Copy a local command and run it with environment variable\n command = {\n \"remote_path\": [\"HOME=/root\", \"/usr/local/bin/fio\"],\n \"local_path\": \"/home/foobar/myfiodir/bin/fio\"\n }\n\n # Run an inline script sending it to a remote interpreter\n command = {\n \"script_inline\": \"echo \\\"Hello, ${NAME:-World}\\\"\",\n \"interpreter\": [\"NAME=Earth\", \"/bin/sh\"]\n }\n\n # Run an inline script sending it to an uploaded remote\n # interpreter\n command = {\n \"script_inline\": \"echo \\\"Hello, ${NAME:-World}\\\"\",\n \"interpreter\": [\"NAME=Earth\", \"/tmp/sh\"],\n \"remote_path\": \"/tmp/sh\",\n \"local_path\": \"/home/user/work/cve/sh-1.0/bin/sh\"\n }\n\n\n :param volume_args: volume args for booting server from volume\n :param floating_network: external network name, for floating ip\n :param port: ssh port for SSH connection\n :param use_floating_ip: bool, floating or fixed IP for SSH connection\n :param force_delete: whether to use force_delete for servers\n :param wait_for_ping: whether to check connectivity on server creation\n :param max_log_length: The number of tail nova console-log lines user\n would like to retrieve\n :param kwargs: extra arguments for booting the server\n \"\"\"\n if volume_args:\n volume = 
self.cinder.create_volume(volume_args[\"size\"],\n imageRef=None)\n kwargs[\"block_device_mapping\"] = {\"vdrally\": \"%s:::1\" % volume.id}\n\n if not image:\n image = self.context[\"tenant\"][\"custom_image\"][\"id\"]\n\n server, fip = self._boot_server_with_fip(\n image, flavor, use_floating_ip=use_floating_ip,\n floating_network=floating_network,\n key_name=self.context[\"user\"][\"keypair\"][\"name\"],\n **kwargs)\n try:\n if wait_for_ping:\n self._wait_for_ping(fip[\"ip\"])\n\n code, out, err = self._run_command(\n fip[\"ip\"], port, username, password, command=command)\n text_area_output = [\"StdErr: %s\" % (err or \"(none)\"),\n \"StdOut:\"]\n if code:\n raise exceptions.ScriptError(\n \"Error running command %(command)s. \"\n \"Error %(code)s: %(error)s\" % {\n \"command\": command, \"code\": code, \"error\": err})\n # Let's try to load output data\n try:\n data = json.loads(out)\n # 'echo 42' produces very json-compatible result\n # - check it here\n if not isinstance(data, dict):\n raise ValueError\n except ValueError:\n # It's not a JSON, probably it's 'script_inline' result\n data = []\n except (exceptions.TimeoutException,\n exceptions.SSHTimeout):\n console_logs = self._get_server_console_output(server,\n max_log_length)\n LOG.debug(\"VM console logs:\\n%s\" % console_logs)\n raise\n\n finally:\n self._delete_server_with_fip(server, fip,\n force_delete=force_delete)\n\n if isinstance(data, dict) and set(data) == {\"additive\", \"complete\"}:\n for chart_type, charts in data.items():\n for chart in charts:\n self.add_output(**{chart_type: chart})\n else:\n # it's a dict with several unknown lines\n text_area_output.extend(out.split(\"\\n\"))\n self.add_output(complete={\"title\": \"Script Output\",\n \"chart_plugin\": \"TextArea\",\n \"data\": text_area_output})\n\n\[email protected](context={\"cleanup@openstack\": [\"nova\", \"heat\"],\n \"keypair@openstack\": {}, \"network@openstack\": {}},\n name=\"VMTasks.runcommand_heat\")\nclass 
RuncommandHeat(vm_utils.VMScenario):\n\n def run(self, workload, template, files, parameters):\n \"\"\"Run workload on stack deployed by heat.\n\n Workload can be either file or resource:\n\n .. code-block:: json\n\n {\"file\": \"/path/to/file.sh\"}\n {\"resource\": [\"package.module\", \"workload.py\"]}\n\n\n Also it should contain \"username\" key.\n\n Given file will be uploaded to `gate_node` and started. This script\n should print `key` `value` pairs separated by colon. These pairs will\n be presented in results.\n\n Gate node should be accessible via ssh with keypair `key_name`, so\n heat template should accept parameter `key_name`.\n\n :param workload: workload to run\n :param template: path to heat template file\n :param files: additional template files\n :param parameters: parameters for heat template\n \"\"\"\n keypair = self.context[\"user\"][\"keypair\"]\n parameters[\"key_name\"] = keypair[\"name\"]\n network = self.context[\"tenant\"][\"networks\"][0]\n parameters[\"router_id\"] = network[\"router_id\"]\n self.stack = heat.main.Stack(self, self.task,\n template, files=files,\n parameters=parameters)\n self.stack.create()\n for output in self.stack.stack.outputs:\n if output[\"output_key\"] == \"gate_node\":\n ip = output[\"output_value\"]\n break\n ssh = sshutils.SSH(workload[\"username\"], ip, pkey=keypair[\"private\"])\n ssh.wait()\n script = workload.get(\"resource\")\n if script:\n script = pkgutil.get_data(*script)\n else:\n script = open(workload[\"file\"]).read()\n ssh.execute(\"cat > /tmp/.rally-workload\", stdin=script)\n ssh.execute(\"chmod +x /tmp/.rally-workload\")\n with atomic.ActionTimer(self, \"runcommand_heat.workload\"):\n status, out, err = ssh.execute(\n \"/tmp/.rally-workload\",\n stdin=json.dumps(self.stack.stack.outputs))\n rows = []\n for line in out.splitlines():\n row = line.split(\":\")\n if len(row) != 2:\n raise exceptions.ScriptError(\"Invalid data '%s'\" % line)\n rows.append(row)\n if not rows:\n raise 
exceptions.ScriptError(\"No data returned. Original error \"\n \"message is %s\" % err)\n self.add_output(\n complete={\"title\": \"Workload summary\",\n \"description\": \"Data generated by workload\",\n \"chart_plugin\": \"Table\",\n \"data\": {\n \"cols\": [\"key\", \"value\"],\n \"rows\": rows}}\n )\n\n\nBASH_DD_LOAD_TEST = \"\"\"\n#!/bin/sh\n# Load server and output JSON results ready to be processed\n# by Rally scenario\n\nfor ex in awk top grep free tr df dc dd gzip\ndo\n if ! type ${ex} >/dev/null\n then\n echo \"Executable is required by script but not available\\\n on a server: ${ex}\" >&2\n return 1\n fi\ndone\n\nget_used_cpu_percent() {\n echo 100\\\n $(top -b -n 1 | grep -i CPU | head -n 1 | awk '{print $8}' | tr -d %)\\\n - p | dc\n}\n\nget_used_ram_percent() {\n local total=$(free | grep Mem: | awk '{print $2}')\n local used=$(free | grep -- -/+\\\\ buffers | awk '{print $3}')\n echo ${used} 100 \\\\* ${total} / p | dc\n}\n\nget_used_disk_percent() {\n df -P / | grep -v Filesystem | awk '{print $5}' | tr -d %\n}\n\nget_seconds() {\n (time -p ${1}) 2>&1 | awk '/real/{print $2}'\n}\n\ncomplete_load() {\n local script_file=${LOAD_SCRIPT_FILE:-/tmp/load.sh}\n local stop_file=${LOAD_STOP_FILE:-/tmp/load.stop}\n local processes_num=${LOAD_PROCESSES_COUNT:-20}\n local size=${LOAD_SIZE_MB:-5}\n\n cat << EOF > ${script_file}\nuntil test -e ${stop_file}\ndo dd if=/dev/urandom bs=1M count=${size} 2>/dev/null | gzip >/dev/null ; done\nEOF\n\n local sep\n local cpu\n local ram\n local dis\n rm -f ${stop_file}\n for i in $(seq ${processes_num})\n do\n i=$((i-1))\n sh ${script_file} &\n cpu=\"${cpu}${sep}[${i}, $(get_used_cpu_percent)]\"\n ram=\"${ram}${sep}[${i}, $(get_used_ram_percent)]\"\n dis=\"${dis}${sep}[${i}, $(get_used_disk_percent)]\"\n sep=\", \"\n done\n > ${stop_file}\n cat << EOF\n {\n \"title\": \"Generate load by spawning processes\",\n \"description\": \"Each process runs gzip for ${size}M urandom data\\\n in a loop\",\n \"chart_plugin\": 
\"Lines\",\n \"axis_label\": \"Number of processes\",\n \"label\": \"Usage, %\",\n \"data\": [\n [\"CPU\", [${cpu}]],\n [\"Memory\", [${ram}]],\n [\"Disk\", [${dis}]]]\n }\nEOF\n}\n\nadditive_dd() {\n local c=${1:-50} # Megabytes\n local file=/tmp/dd_test.img\n local write=$(get_seconds \"dd if=/dev/zero of=${file} bs=1M count=${c}\")\n local read=$(get_seconds \"dd if=${file} of=/dev/null bs=1M count=${c}\")\n local gzip=$(get_seconds \"gzip ${file}\")\n rm ${file}.gz\n cat << EOF\n {\n \"title\": \"Write, read and gzip file\",\n \"description\": \"Using file '${file}', size ${c}Mb.\",\n \"chart_plugin\": \"StackedArea\",\n \"data\": [\n [\"write_${c}M\", ${write}],\n [\"read_${c}M\", ${read}],\n [\"gzip_${c}M\", ${gzip}]]\n },\n {\n \"title\": \"Statistics for write/read/gzip\",\n \"chart_plugin\": \"StatsTable\",\n \"data\": [\n [\"write_${c}M\", ${write}],\n [\"read_${c}M\", ${read}],\n [\"gzip_${c}M\", ${gzip}]]\n }\n\nEOF\n}\n\ncat << EOF\n{\n \"additive\": [$(additive_dd)],\n \"complete\": [$(complete_load)]\n}\nEOF\n\"\"\"\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"number\", param_name=\"port\", minval=1, maxval=65535,\n nullable=True, integer_only=True)\[email protected](\"external_network_exists\", param_name=\"floating_network\")\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\", \"cinder\"],\n \"keypair@openstack\": {},\n \"allow_ssh@openstack\": None},\n name=\"VMTasks.dd_load_test\",\n platform=\"openstack\")\nclass DDLoadTest(BootRuncommandDelete):\n @logging.log_deprecated_args(\n \"Use 'interpreter' to specify the interpreter to execute script from.\",\n \"0.10.0\", [\"command\"], once=True)\n def 
run(self, flavor, username, password=None,\n image=None, command=None, interpreter=\"/bin/sh\",\n volume_args=None, floating_network=None, port=22,\n use_floating_ip=True, force_delete=False, wait_for_ping=True,\n max_log_length=None, **kwargs):\n \"\"\"Boot a server from a custom image and performs dd load test.\n\n .. note:: dd load test is prepared script by Rally team. It checks\n writing and reading metrics from the VM.\n\n :param image: glance image name to use for the vm. Optional\n in case of specified \"image_command_customizer\" context\n :param flavor: VM flavor name\n :param username: ssh username on server, str\n :param password: Password on SSH authentication\n :param interpreter: the interpreter to execute script with dd load test\n (defaults to /bin/sh)\n :param command: DEPRECATED. use interpreter instead.\n :param volume_args: volume args for booting server from volume\n :param floating_network: external network name, for floating ip\n :param port: ssh port for SSH connection\n :param use_floating_ip: bool, floating or fixed IP for SSH connection\n :param force_delete: whether to use force_delete for servers\n :param wait_for_ping: whether to check connectivity on server creation\n :param max_log_length: The number of tail nova console-log lines user\n would like to retrieve\n :param kwargs: extra arguments for booting the server\n \"\"\"\n cmd = {\"interpreter\": interpreter,\n \"script_inline\": BASH_DD_LOAD_TEST}\n if command and \"interpreter\" in command:\n cmd[\"interpreter\"] = command[\"interpreter\"]\n return super(DDLoadTest, self).run(\n flavor=flavor, username=username, password=password,\n image=image, command=cmd,\n volume_args=volume_args, floating_network=floating_network,\n port=port, use_floating_ip=use_floating_ip,\n force_delete=force_delete,\n wait_for_ping=wait_for_ping, max_log_length=max_log_length,\n **kwargs)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email 
protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\", fail_on_404_image=False)\[email protected](\"number\", param_name=\"port\", minval=1, maxval=65535,\n nullable=True, integer_only=True)\[email protected](\"external_network_exists\", param_name=\"floating_network\")\[email protected](\"required_services\", services=[consts.Service.DESIGNATE,\n consts.Service.NEUTRON,\n consts.Service.NOVA])\[email protected](\"required_contexts\", contexts=[\"network\", \"zones\"])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_context_config\", context_name=\"zones\",\n context_config={\"set_zone_in_network\": True})\[email protected](context={\"cleanup@openstack\": [\"designate\",\n \"nova\", \"neutron\"],\n \"keypair@openstack\": {},\n \"allow_ssh@openstack\": None},\n name=\"VMTasks.check_designate_dns_resolving\",\n platform=\"openstack\")\nclass CheckDesignateDNSResolving(vm_utils.VMScenario):\n\n def run(self, image, flavor, username, password=None,\n floating_network=None, port=22,\n use_floating_ip=True, force_delete=False, max_log_length=None,\n **kwargs):\n \"\"\"Try to resolve hostname from VM against existing designate DNS.\n\n - requires zone context with set_zone_in_network parameter\n\n > zones:\n > set_zone_in_network: True\n\n - designate IP should be in default dns_nameservers list for new\n networks or it can be specified in a network context\n\n > network:\n > dns_nameservers:\n > - 8.8.8.8\n > - 192.168.210.45\n\n :param image: glance image name to use for the vm\n :param flavor: VM flavor name\n :param username: ssh username on server\n :param password: Password on SSH authentication\n :param floating_network: external network name, for floating ip\n :param port: ssh port for SSH connection\n :param use_floating_ip: bool, floating or fixed IP for SSH connection\n :param force_delete: whether to use force_delete for servers\n :param max_log_length: The number of 
tail nova console-log lines user\n would like to retrieve\n :param kwargs: optional args\n \"\"\"\n\n zone = self.context[\"tenant\"][\"zones\"][0][\"name\"]\n\n server, fip = self._boot_server_with_fip(\n image, flavor, use_floating_ip=use_floating_ip,\n floating_network=floating_network,\n key_name=self.context[\"user\"][\"keypair\"][\"name\"],\n **kwargs)\n\n script = f\"cloud-init status -w; resolvectl status; \"\\\n f\"dig $(hostname).{zone}\"\n\n command = {\n \"script_inline\": script,\n \"interpreter\": \"/bin/bash\"\n }\n try:\n rally_utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=rally_utils.get_from_manager(),\n )\n\n code, out, err = self._run_command(\n fip[\"ip\"], port, username, password, command=command)\n if code:\n raise exceptions.ScriptError(\n \"Error running command %(command)s. \"\n \"Error %(code)s: %(error)s\" % {\n \"command\": command, \"code\": code, \"error\": err})\n else:\n if not re.findall(\".*ANSWER SECTION.*\", out, re.MULTILINE):\n raise exceptions.ScriptError(\n f\"Error running {script}. 
\"\n f\"Error: Missing ANSWER section in the output {out}\")\n\n except (exceptions.TimeoutException,\n exceptions.SSHTimeout):\n console_logs = self._get_server_console_output(server,\n max_log_length)\n LOG.debug(\"VM console logs:\\n%s\" % console_logs)\n raise\n\n finally:\n self._delete_server_with_fip(server, fip,\n force_delete=force_delete)\n\n self.add_output(complete={\n \"title\": \"Script StdOut\",\n \"chart_plugin\": \"TextArea\",\n \"data\": str(out).split(\"\\n\")\n })\n if err:\n self.add_output(complete={\n \"title\": \"Script StdErr\",\n \"chart_plugin\": \"TextArea\",\n \"data\": err.split(\"\\n\")\n })\n" }, { "alpha_fraction": 0.6872428059577942, "alphanum_fraction": 0.6927297711372375, "avg_line_length": 44.5625, "blob_id": "b9d5fdd39f8cad8f82b1be9aa487a49bd83f1bc3", "content_id": "40951d40d227fb7ea60777a7f33c10e3232c12c4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1458, "license_type": "permissive", "max_line_length": 78, "num_lines": 32, "path": "/rally_openstack/task/scenarios/barbican/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom rally_openstack.common.services.key_manager import barbican\nfrom rally_openstack.task import scenario\n\n\nclass BarbicanBase(scenario.OpenStackScenario):\n \"\"\"Base class for Barbican scenarios with basic atomic actions.\"\"\"\n\n def __init__(self, context=None, admin_context=None, clients=None):\n super(BarbicanBase, self).__init__(context, admin_context, clients)\n if hasattr(self, \"_admin_clients\"):\n self.admin_barbican = barbican.BarbicanService(\n self._admin_clients, name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n if hasattr(self, \"_clients\"):\n self.barbican = barbican.BarbicanService(\n self._clients, name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n" }, { "alpha_fraction": 0.6340418457984924, "alphanum_fraction": 0.6373079419136047, "avg_line_length": 41.935630798339844, "blob_id": "cc2f258f2880a8609d96fa8f062bdd6accbeb152", "content_id": "bb3212e9f41d03a647b7348b02c68d9675e56b33", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18677, "license_type": "permissive", "max_line_length": 79, "num_lines": 435, "path": "/rally_openstack/task/scenarios/keystone/basic.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.task import validation\n\nfrom rally_openstack.common.services.identity import identity\nfrom rally_openstack.task import scenario\n\n\nclass KeystoneBasic(scenario.OpenStackScenario):\n \"\"\"Base class for Keystone scenarios with initialized service object.\"\"\"\n\n def __init__(self, context=None, admin_clients=None, clients=None):\n super(KeystoneBasic, self).__init__(context, admin_clients, clients)\n if hasattr(self, \"_admin_clients\"):\n self.admin_keystone = identity.Identity(\n self._admin_clients, name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n if hasattr(self, \"_clients\"):\n self.keystone = identity.Identity(\n self._clients, name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_user\",\n platform=\"openstack\")\nclass CreateUser(KeystoneBasic):\n\n @logging.log_deprecated_args(\n \"The 'name_length' argument to create_user is ignored\",\n \"0.1.2\", [\"name_length\"], once=True)\n def run(self, name_length=10, **kwargs):\n \"\"\"Create a keystone user with random name.\n\n :param kwargs: Other optional parameters to create users like\n \"tenant_id\", \"enabled\".\n \"\"\"\n self.admin_keystone.create_user(**kwargs)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_delete_user\",\n platform=\"openstack\")\nclass CreateDeleteUser(KeystoneBasic):\n\n @logging.log_deprecated_args(\n \"The 'name_length' argument to create_delete_user is ignored\",\n \"0.1.2\", [\"name_length\"], once=True)\n def run(self, name_length=10, **kwargs):\n 
\"\"\"Create a keystone user with random name and then delete it.\n\n :param kwargs: Other optional parameters to create users like\n \"tenant_id\", \"enabled\".\n \"\"\"\n user = self.admin_keystone.create_user(**kwargs)\n self.admin_keystone.delete_user(user.id)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_user_set_enabled_and_delete\",\n platform=\"openstack\")\nclass CreateUserSetEnabledAndDelete(KeystoneBasic):\n\n def run(self, enabled=True, **kwargs):\n \"\"\"Create a keystone user, enable or disable it, and delete it.\n\n :param enabled: Initial state of user 'enabled' flag. The user\n will be created with 'enabled' set to this\n value, and then it will be toggled.\n :param kwargs: Other optional parameters to create user.\n \"\"\"\n user = self.admin_keystone.create_user(enabled=enabled, **kwargs)\n self.admin_keystone.update_user(user.id, enabled=(not enabled))\n self.admin_keystone.delete_user(user.id)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_tenant\",\n platform=\"openstack\")\nclass CreateTenant(KeystoneBasic):\n\n @logging.log_deprecated_args(\n \"The 'name_length' argument to create_tenant is ignored\",\n \"0.1.2\", [\"name_length\"], once=True)\n def run(self, name_length=10, **kwargs):\n \"\"\"Create a keystone tenant with random name.\n\n :param kwargs: Other optional parameters\n \"\"\"\n self.admin_keystone.create_project(**kwargs)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.authenticate_user_and_validate_token\",\n platform=\"openstack\")\nclass AuthenticateUserAndValidateToken(KeystoneBasic):\n\n def run(self):\n \"\"\"Authenticate and 
validate a keystone token.\"\"\"\n token = self.admin_keystone.fetch_token()\n self.admin_keystone.validate_token(token)\n\n\[email protected](\"number\", param_name=\"users_per_tenant\", minval=1)\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_tenant_with_users\",\n platform=\"openstack\")\nclass CreateTenantWithUsers(KeystoneBasic):\n\n @logging.log_deprecated_args(\n \"The 'name_length' argument to create_tenant_with_users is ignored\",\n \"0.1.2\", [\"name_length\"], once=True)\n def run(self, users_per_tenant, name_length=10, **kwargs):\n \"\"\"Create a keystone tenant and several users belonging to it.\n\n :param users_per_tenant: number of users to create for the tenant\n :param kwargs: Other optional parameters for tenant creation\n :returns: keystone tenant instance\n \"\"\"\n tenant = self.admin_keystone.create_project(**kwargs)\n self.admin_keystone.create_users(tenant.id,\n number_of_users=users_per_tenant)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_and_list_users\",\n platform=\"openstack\")\nclass CreateAndListUsers(KeystoneBasic):\n\n @logging.log_deprecated_args(\n \"The 'name_length' argument to create_and_list_users is ignored\",\n \"0.1.2\", [\"name_length\"], once=True)\n def run(self, name_length=10, **kwargs):\n \"\"\"Create a keystone user with random name and list all users.\n\n :param kwargs: Other optional parameters to create users like\n \"tenant_id\", \"enabled\".\n \"\"\"\n\n kwargs.pop(\"name\", None)\n self.admin_keystone.create_user(**kwargs)\n self.admin_keystone.list_users()\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n 
name=\"KeystoneBasic.create_and_list_tenants\",\n platform=\"openstack\")\nclass CreateAndListTenants(KeystoneBasic):\n\n @logging.log_deprecated_args(\n \"The 'name_length' argument to create_and_list_tenants is ignored\",\n \"0.1.2\", [\"name_length\"], once=True)\n def run(self, name_length=10, **kwargs):\n \"\"\"Create a keystone tenant with random name and list all tenants.\n\n :param kwargs: Other optional parameters\n \"\"\"\n self.admin_keystone.create_project(**kwargs)\n self.admin_keystone.list_projects()\n\n\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.add_and_remove_user_role\",\n platform=\"openstack\")\nclass AddAndRemoveUserRole(KeystoneBasic):\n\n def run(self):\n \"\"\"Create a user role add to a user and disassociate.\"\"\"\n tenant_id = self.context[\"tenant\"][\"id\"]\n user_id = self.context[\"user\"][\"id\"]\n role = self.admin_keystone.create_role()\n self.admin_keystone.add_role(role_id=role.id, user_id=user_id,\n project_id=tenant_id)\n self.admin_keystone.revoke_role(role.id, user_id=user_id,\n project_id=tenant_id)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_and_delete_role\",\n platform=\"openstack\")\nclass CreateAndDeleteRole(KeystoneBasic):\n\n def run(self):\n \"\"\"Create a user role and delete it.\"\"\"\n role = self.admin_keystone.create_role()\n self.admin_keystone.delete_role(role.id)\n\n\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_add_and_list_user_roles\",\n platform=\"openstack\")\nclass CreateAddAndListUserRoles(KeystoneBasic):\n\n def run(self):\n \"\"\"Create user role, add it and list user roles 
for given user.\"\"\"\n tenant_id = self.context[\"tenant\"][\"id\"]\n user_id = self.context[\"user\"][\"id\"]\n role = self.admin_keystone.create_role()\n self.admin_keystone.add_role(user_id=user_id, role_id=role.id,\n project_id=tenant_id)\n self.admin_keystone.list_roles(user_id=user_id, project_id=tenant_id)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.get_entities\",\n platform=\"openstack\")\nclass GetEntities(KeystoneBasic):\n\n def run(self, service_name=\"keystone\"):\n \"\"\"Get instance of a tenant, user, role and service by id's.\n\n An ephemeral tenant, user, and role are each created. By\n default, fetches the 'keystone' service. This can be\n overridden (for instance, to get the 'Identity Service'\n service on older OpenStack), or None can be passed explicitly\n to service_name to create a new service and then query it by\n ID.\n\n :param service_name: The name of the service to get by ID; or\n None, to create an ephemeral service and\n get it by ID.\n \"\"\"\n project = self.admin_keystone.create_project()\n user = self.admin_keystone.create_user(project_id=project.id)\n role = self.admin_keystone.create_role()\n self.admin_keystone.get_project(project.id)\n self.admin_keystone.get_user(user.id)\n self.admin_keystone.get_role(role.id)\n if service_name is None:\n service = self.admin_keystone.create_service()\n else:\n service = self.admin_keystone.get_service_by_name(service_name)\n self.admin_keystone.get_service(service.id)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_and_delete_service\",\n platform=\"openstack\")\nclass CreateAndDeleteService(KeystoneBasic):\n\n @logging.log_deprecated_args(\n \"The 'name' argument to create_and_delete_service will be ignored\",\n \"0.0.5\", 
[\"name\"])\n def run(self, name=None, service_type=None, description=None):\n \"\"\"Create and delete service.\n\n :param service_type: type of the service\n :param description: description of the service\n \"\"\"\n service = self.admin_keystone.create_service(service_type=service_type,\n description=description)\n self.admin_keystone.delete_service(service.id)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_update_and_delete_tenant\",\n platform=\"openstack\")\nclass CreateUpdateAndDeleteTenant(KeystoneBasic):\n\n @logging.log_deprecated_args(\n \"The 'name_length' argument to create_update_and_delete_tenant is \"\n \"ignored\", \"0.1.2\", [\"name_length\"], once=True)\n def run(self, name_length=None, **kwargs):\n \"\"\"Create, update and delete tenant.\n\n :param kwargs: Other optional parameters for tenant creation\n \"\"\"\n project = self.admin_keystone.create_project(**kwargs)\n new_name = self.generate_random_name()\n new_description = self.generate_random_name()\n self.admin_keystone.update_project(project.id, name=new_name,\n description=new_description)\n self.admin_keystone.delete_project(project.id)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_user_update_password\",\n platform=\"openstack\")\nclass CreateUserUpdatePassword(KeystoneBasic):\n\n @logging.log_deprecated_args(\n \"The 'name_length' and 'password_length' arguments to \"\n \"create_user_update_password are ignored\",\n \"0.1.2\", [\"name_length\", \"password_length\"], once=True)\n def run(self, name_length=None, password_length=None):\n \"\"\"Create user and update password for that user.\"\"\"\n user = self.admin_keystone.create_user()\n password = self.generate_random_name()\n self.admin_keystone.update_user(user.id, 
password=password)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_and_list_services\",\n platform=\"openstack\")\nclass CreateAndListServices(KeystoneBasic):\n\n @logging.log_deprecated_args(\n \"The 'name' argument to create_and_list_services will be ignored\",\n \"0.0.5\", [\"name\"])\n def run(self, name=None, service_type=None, description=None):\n \"\"\"Create and list services.\n\n :param service_type: type of the service\n :param description: description of the service\n \"\"\"\n self.admin_keystone.create_service(service_type=service_type,\n description=description)\n self.admin_keystone.list_services()\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_and_list_ec2credentials\",\n platform=\"openstack\")\nclass CreateAndListEc2Credentials(KeystoneBasic):\n\n def run(self):\n \"\"\"Create and List all keystone ec2-credentials.\"\"\"\n self.keystone.create_ec2credentials(\n self.context[\"user\"][\"id\"],\n project_id=self.context[\"tenant\"][\"id\"])\n self.keystone.list_ec2credentials(self.context[\"user\"][\"id\"])\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_and_delete_ec2credential\",\n platform=\"openstack\")\nclass CreateAndDeleteEc2Credential(KeystoneBasic):\n\n def run(self):\n \"\"\"Create and delete keystone ec2-credential.\"\"\"\n creds = self.keystone.create_ec2credentials(\n self.context[\"user\"][\"id\"],\n project_id=self.context[\"tenant\"][\"id\"])\n self.keystone.delete_ec2credential(\n self.context[\"user\"][\"id\"], access=creds.access)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email 
protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_and_get_role\",\n platform=\"openstack\")\nclass CreateAndGetRole(KeystoneBasic):\n\n def run(self, **kwargs):\n \"\"\"Create a user role and get it detailed information.\n\n :param kwargs: Optional additional arguments for roles creation\n \"\"\"\n role = self.admin_keystone.create_role(**kwargs)\n self.admin_keystone.get_role(role.id)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_and_list_roles\",\n platform=\"openstack\")\nclass CreateAddListRoles(KeystoneBasic):\n\n def run(self, create_role_kwargs=None, list_role_kwargs=None):\n \"\"\"Create a role, then list all roles.\n\n :param create_role_kwargs: Optional additional arguments for\n roles create\n :param list_role_kwargs: Optional additional arguments for roles list\n \"\"\"\n create_role_kwargs = create_role_kwargs or {}\n list_role_kwargs = list_role_kwargs or {}\n\n role = self.admin_keystone.create_role(**create_role_kwargs)\n msg = \"Role isn't created\"\n self.assertTrue(role, err_msg=msg)\n all_roles = self.admin_keystone.list_roles(**list_role_kwargs)\n msg = (\"Created role is not in the\"\n \" list of all available roles\")\n self.assertIn(role, all_roles, err_msg=msg)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"keystone\"]},\n name=\"KeystoneBasic.create_and_update_user\",\n platform=\"openstack\")\nclass CreateAndUpdateUser(KeystoneBasic):\n\n def run(self, create_user_kwargs=None, update_user_kwargs=None):\n \"\"\"Create user and update the user.\n\n :param create_user_kwargs: Optional additional arguments for user\n creation\n :param update_user_kwargs: Optional additional arguments for user\n updation\n \"\"\"\n create_user_kwargs = create_user_kwargs or {}\n\n user = 
self.admin_keystone.create_user(**create_user_kwargs)\n self.admin_keystone.update_user(user.id, **update_user_kwargs)\n user_data = self.admin_clients(\"keystone\").users.get(user.id)\n\n for args in update_user_kwargs:\n msg = (\"%s isn't updated\" % args)\n self.assertEqual(getattr(user_data, str(args)),\n update_user_kwargs[args], err_msg=msg)\n" }, { "alpha_fraction": 0.5041402578353882, "alphanum_fraction": 0.5064134001731873, "avg_line_length": 36.785274505615234, "blob_id": "891c44576b8c1894ad3909eed057c47a733341e0", "content_id": "b4dae2158a8c209a5c65b056ba524838c22e5236", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6159, "license_type": "permissive", "max_line_length": 78, "num_lines": 163, "path": "/tests/unit/rally_jobs/test_zuul_jobs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport re\n\nimport yaml\n\nimport rally_openstack\nfrom tests.unit import test\n\n\nclass RallyJobsTestCase(test.TestCase):\n root_dir = os.path.dirname(os.path.dirname(rally_openstack.__file__))\n zuul_jobs_path = os.path.join(root_dir, \".zuul.d\")\n\n def setUp(self):\n super(RallyJobsTestCase, self).setUp()\n with open(os.path.join(self.zuul_jobs_path, \"zuul.yaml\")) as f:\n self.zuul_cfg = yaml.safe_load(f)\n\n self.project_cfg = None\n for item in self.zuul_cfg:\n if \"project\" in item:\n self.project_cfg = item[\"project\"]\n break\n if self.project_cfg is None:\n self.fail(\"Cannot detect project section from zuul config.\")\n\n @staticmethod\n def _parse_job(job):\n if isinstance(job, dict):\n job_name = list(job)[0]\n job_cfg = job[job_name]\n return job_name, job_cfg\n return job, None\n\n @staticmethod\n def _tox_job_sorter(job_name):\n python_maj_version = 0\n python_min_version = 0\n _rally, _tox, job_name = job_name.split(\"-\", 3)\n if job_name.startswith(\"py\"):\n python_maj_version = int(job_name[2])\n python_min_version = int(job_name[3:])\n job_name = \"py\"\n return job_name, python_maj_version, python_min_version\n\n def _check_order_of_jobs(self, pipeline):\n jobs = self.project_cfg[pipeline][\"jobs\"]\n\n specific_jobs = [\"rally-dsvm-tox-functional\",\n \"rally-openstack-docker-build\",\n \"rally-task-basic-with-existing-users\",\n \"rally-task-simple-job\"]\n error_message = (\n f\"[{pipeline} pipeline] We are trying to display jobs in a \"\n f\"specific order to simplify search and reading. Tox jobs should \"\n f\"go first in alphabetic order. Next several specific jobs are \"\n f\"expected ({', '.join(specific_jobs)}). 
\"\n f\"Next - all other jobs in alphabetic order.\"\n )\n error_message += \"\\nPlease place '%s' at the position of '%s'.\"\n\n jobs_names = [self._parse_job(job)[0] for job in jobs]\n\n tox_jobs = sorted(\n (job for job in jobs_names if job.startswith(\"rally-tox-\")),\n key=self._tox_job_sorter\n )\n for i, job in enumerate(tox_jobs):\n if job != jobs[i]:\n self.fail(error_message % (job, jobs[i]))\n\n for job in specific_jobs:\n if job not in jobs_names:\n continue\n i += 1\n if job != jobs_names[i]:\n self.fail(error_message % (job, jobs_names[i]))\n\n i += 1\n other_jobs = sorted(jobs_names[i: len(jobs_names)])\n for j, job in enumerate(other_jobs):\n if job != jobs_names[i + j]:\n self.fail(error_message % (job, jobs_names[i + j]))\n\n def test_order_of_displaying_jobs(self):\n for pipeline in (\"check\", \"gate\"):\n self._check_order_of_jobs(pipeline=pipeline)\n\n JOB_FILES_PARAMS = {\"files\", \"irrelevant-files\"}\n\n def test_job_configs(self):\n\n file_matchers = {}\n\n for pipeline in (\"check\", \"gate\"):\n for job in self.project_cfg[pipeline][\"jobs\"]:\n job_name, job_cfg = self._parse_job(job)\n if job_cfg is None:\n continue\n\n if pipeline == \"gate\":\n params = set(job_cfg) - self.JOB_FILES_PARAMS\n if params:\n self.fail(\n f\"Invalid parameter(s) for '{job_name}' job at \"\n f\"gate pipeline: {', '.join(params)}.\")\n\n for param in self.JOB_FILES_PARAMS:\n if param in job_cfg:\n for file_matcher in job_cfg[param]:\n file_matchers.setdefault(\n file_matcher,\n {\n \"matcher\": re.compile(file_matcher),\n \"used_by\": []\n }\n )\n file_matchers[file_matcher][\"used_by\"].append(\n {\n \"pipeline\": pipeline,\n \"job\": job_name,\n \"param\": param\n }\n )\n not_matched = set(file_matchers)\n\n for dir_name, _, files in os.walk(self.root_dir):\n dir_name = os.path.relpath(dir_name, self.root_dir)\n if dir_name in (\".tox\", \".git\"):\n continue\n for f in files:\n full_path = os.path.join(dir_name, f)\n for key in list(not_matched):\n if 
file_matchers[key][\"matcher\"].match(full_path):\n not_matched.remove(key)\n if not not_matched:\n # stop iterating files if no more matchers to check\n break\n if not not_matched:\n # stop iterating files if no more matchers to check\n break\n\n for key in not_matched:\n user = file_matchers[key][\"used_by\"][0]\n self.fail(\n f\"'{user['job']}' job configuration for \"\n f\"'{user['pipeline']}' pipeline includes wrong \"\n f\"matcher '{key}' at '{user['param']}'.\"\n )\n" }, { "alpha_fraction": 0.6640827059745789, "alphanum_fraction": 0.669089138507843, "avg_line_length": 38.948387145996094, "blob_id": "4584b023cda6d81140f3ce6a94ff8e3d6499ac03", "content_id": "b58d9917594d6cf8186a13ef0db55987e939c441", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6192, "license_type": "permissive", "max_line_length": 78, "num_lines": 155, "path": "/rally_openstack/task/scenarios/swift/objects.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Cisco Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport tempfile\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.swift import utils\n\n\n\"\"\"Scenarios for Swift Objects.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.SWIFT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"swift\"]},\n name=\"SwiftObjects.create_container_and_object_then_list_objects\",\n platform=\"openstack\")\nclass CreateContainerAndObjectThenListObjects(utils.SwiftScenario):\n\n def run(self, objects_per_container=1, object_size=1024, **kwargs):\n \"\"\"Create container and objects then list all objects.\n\n :param objects_per_container: int, number of objects to upload\n :param object_size: int, temporary local object size\n :param kwargs: dict, optional parameters to create container\n \"\"\"\n\n with tempfile.TemporaryFile() as dummy_file:\n # set dummy file to specified object size\n dummy_file.truncate(object_size)\n container_name = self._create_container(**kwargs)\n for i in range(objects_per_container):\n dummy_file.seek(0)\n self._upload_object(container_name, dummy_file)\n self._list_objects(container_name)\n\n\[email protected](\"required_services\", services=[consts.Service.SWIFT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"swift\"]},\n name=\"SwiftObjects.create_container_and_object_then_delete_all\",\n platform=\"openstack\")\nclass CreateContainerAndObjectThenDeleteAll(utils.SwiftScenario):\n\n def run(self, objects_per_container=1, object_size=1024, **kwargs):\n \"\"\"Create container and objects then delete everything created.\n\n :param objects_per_container: int, number of objects to upload\n :param 
object_size: int, temporary local object size\n :param kwargs: dict, optional parameters to create container\n \"\"\"\n container_name = None\n objects_list = []\n with tempfile.TemporaryFile() as dummy_file:\n # set dummy file to specified object size\n dummy_file.truncate(object_size)\n container_name = self._create_container(**kwargs)\n for i in range(objects_per_container):\n dummy_file.seek(0)\n object_name = self._upload_object(container_name,\n dummy_file)[1]\n objects_list.append(object_name)\n\n for object_name in objects_list:\n self._delete_object(container_name, object_name)\n self._delete_container(container_name)\n\n\[email protected](\"required_services\", services=[consts.Service.SWIFT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"swift\"]},\n name=\"SwiftObjects.create_container_and_object_then_download_object\",\n platform=\"openstack\")\nclass CreateContainerAndObjectThenDownloadObject(utils.SwiftScenario):\n\n def run(self, objects_per_container=1, object_size=1024, **kwargs):\n \"\"\"Create container and objects then download all objects.\n\n :param objects_per_container: int, number of objects to upload\n :param object_size: int, temporary local object size\n :param kwargs: dict, optional parameters to create container\n \"\"\"\n container_name = None\n objects_list = []\n with tempfile.TemporaryFile() as dummy_file:\n # set dummy file to specified object size\n dummy_file.truncate(object_size)\n container_name = self._create_container(**kwargs)\n for i in range(objects_per_container):\n dummy_file.seek(0)\n object_name = self._upload_object(container_name,\n dummy_file)[1]\n objects_list.append(object_name)\n\n for object_name in objects_list:\n self._download_object(container_name, object_name)\n\n\[email protected](\"required_services\", services=[consts.Service.SWIFT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email 
protected](\n context={\"swift_objects@openstack\": {}},\n name=\"SwiftObjects.list_objects_in_containers\",\n platform=\"openstack\")\nclass ListObjectsInContainers(utils.SwiftScenario):\n\n def run(self):\n \"\"\"List objects in all containers.\"\"\"\n\n containers = self._list_containers()[1]\n\n for container in containers:\n self._list_objects(container[\"name\"])\n\n\[email protected](\"required_services\", services=[consts.Service.SWIFT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"swift_objects@openstack\": {}},\n name=\"SwiftObjects.list_and_download_objects_in_containers\",\n platform=\"openstack\")\nclass ListAndDownloadObjectsInContainers(utils.SwiftScenario):\n\n def run(self):\n \"\"\"List and download objects in all containers.\"\"\"\n\n containers = self._list_containers()[1]\n\n objects_dict = {}\n for container in containers:\n container_name = container[\"name\"]\n objects_dict[container_name] = self._list_objects(\n container_name)[1]\n\n for container_name, objects in objects_dict.items():\n for obj in objects:\n self._download_object(container_name, obj[\"name\"])\n" }, { "alpha_fraction": 0.626404345035553, "alphanum_fraction": 0.6274794340133667, "avg_line_length": 44.26277542114258, "blob_id": "b84e79858d655b93108e870934ab62e71169e9a5", "content_id": "a4b292fddc31e4c146cded46f166f361eb16a8ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18603, "license_type": "permissive", "max_line_length": 79, "num_lines": 411, "path": "/rally_openstack/task/scenarios/cinder/volume_types.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.cinder import utils as cinder_utils\n\n\nLOG = logging.getLogger(__name__)\n\n\n\"\"\"Scenarios for Cinder Volume Type.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeTypes.create_and_delete_volume_type\",\n platform=\"openstack\")\nclass CreateAndDeleteVolumeType(cinder_utils.CinderBasic):\n\n def run(self, description=None, is_public=True):\n \"\"\"Create and delete a volume Type.\n\n :param description: Description of the volume type\n :param is_public: Volume type visibility\n \"\"\"\n volume_type = self.admin_cinder.create_volume_type(\n description=description,\n is_public=is_public)\n self.admin_cinder.delete_volume_type(volume_type)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeTypes.create_and_get_volume_type\",\n platform=\"openstack\")\nclass CreateAndGetVolumeType(cinder_utils.CinderBasic):\n\n def run(self, description=None, is_public=True):\n \"\"\"Create a volume Type, then get the details of the type.\n\n :param description: Description of the 
volume type\n :param is_public: Volume type visibility\n \"\"\"\n volume_type = self.admin_cinder.create_volume_type(\n description=description,\n is_public=is_public)\n self.admin_cinder.get_volume_type(volume_type)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_api_versions\", component=\"cinder\",\n versions=[\"2\", \"3\"])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeTypes.create_and_update_volume_type\",\n platform=\"openstack\")\nclass CreateAndUpdateVolumeType(cinder_utils.CinderBasic):\n\n def run(self, description=None, is_public=True, update_name=False,\n update_description=None, update_is_public=None):\n \"\"\"create a volume type, then update the type.\n\n :param description: Description of the volume type\n :param is_public: Volume type visibility\n :param update_name: if True, can update name by generating random name.\n if False, don't update name.\n :param update_description: a description to set while update\n :param update_is_public: update Volume type visibility\n \"\"\"\n volume_type = self.admin_cinder.create_volume_type(\n description=description,\n is_public=is_public)\n\n updated_name = self.generate_random_name() if update_name else None\n if not update_name and not update_description and not update_is_public:\n LOG.warning(\"Something should be updated.\")\n # transmit at least some value to update api call\n updated_name = volume_type.name\n\n updated_is_public = not is_public if update_is_public else None\n\n self.admin_cinder.update_volume_type(\n volume_type,\n name=updated_name,\n description=update_description,\n is_public=updated_is_public)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": 
[\"cinder\"]},\n name=\"CinderVolumeTypes.create_and_list_volume_types\",\n platform=\"openstack\")\nclass CreateAndListVolumeTypes(cinder_utils.CinderBasic):\n\n def run(self, description=None, is_public=True):\n \"\"\"Create a volume Type, then list all types.\n\n :param description: Description of the volume type\n :param is_public: Volume type visibility\n \"\"\"\n volume_type = self.admin_cinder.create_volume_type(\n description=description,\n is_public=is_public)\n\n pool_list = self.admin_cinder.list_types()\n msg = (\"type not included into list of available types \"\n \"created type: {}\\n\"\n \"pool of types: {}\\n\").format(volume_type, pool_list)\n self.assertIn(volume_type.id,\n [vtype.id for vtype in pool_list],\n err_msg=msg)\n\n\[email protected](\"required_params\", params=[(\"create_specs\", \"provider\")])\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\n context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeTypes.create_volume_type_and_encryption_type\",\n platform=\"openstack\")\nclass CreateVolumeTypeAndEncryptionType(cinder_utils.CinderBasic):\n\n def run(self, create_specs=None, provider=None, cipher=None,\n key_size=None, control_location=\"front-end\", description=None,\n is_public=True):\n \"\"\"Create encryption type\n\n This scenario first creates a volume type, then creates an encryption\n type for the volume type.\n\n :param create_specs: The encryption type specifications to add.\n DEPRECATED, specify arguments explicitly.\n :param provider: The class that provides encryption support. For\n example, LuksEncryptor.\n :param cipher: The encryption algorithm or mode.\n :param key_size: Size of encryption key, in bits.\n :param control_location: Notional service where encryption is\n performed. 
Valid values are \"front-end\"\n or \"back-end.\"\n :param description: Description of the volume type\n :param is_public: Volume type visibility\n \"\"\"\n volume_type = self.admin_cinder.create_volume_type(\n description=description,\n is_public=is_public)\n if create_specs is None:\n specs = {\n \"provider\": provider,\n \"cipher\": cipher,\n \"key_size\": key_size,\n \"control_location\": control_location\n }\n else:\n LOG.warning(\"The argument `create_spec` is deprecated since\"\n \" Rally 0.10.0. Specify all arguments from it\"\n \" explicitly.\")\n specs = create_specs\n self.admin_cinder.create_encryption_type(volume_type,\n specs=specs)\n\n\[email protected](\"required_params\", params=[(\"create_specs\", \"provider\")])\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\n context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeTypes.create_and_list_encryption_type\",\n platform=\"openstack\")\nclass CreateAndListEncryptionType(cinder_utils.CinderBasic):\n\n def run(self, create_specs=None, provider=None, cipher=None,\n key_size=None, control_location=\"front-end\", search_opts=None):\n \"\"\"Create and list encryption type\n\n This scenario firstly creates a volume type, secondly creates an\n encryption type for the volume type, thirdly lists all encryption\n types.\n\n :param create_specs: The encryption type specifications to add.\n DEPRECATED, specify arguments explicitly.\n :param provider: The class that provides encryption support. For\n example, LuksEncryptor.\n :param cipher: The encryption algorithm or mode.\n :param key_size: Size of encryption key, in bits.\n :param control_location: Notional service where encryption is\n performed. 
Valid values are \"front-end\"\n or \"back-end.\"\n :param search_opts: Options used when search for encryption types\n \"\"\"\n vt_idx = self.context[\"iteration\"] % len(self.context[\"volume_types\"])\n volume_type = self.context[\"volume_types\"][vt_idx]\n if create_specs is None:\n specs = {\n \"provider\": provider,\n \"cipher\": cipher,\n \"key_size\": key_size,\n \"control_location\": control_location\n }\n else:\n LOG.warning(\"The argument `create_spec` is deprecated since\"\n \" Rally 0.10.0. Specify all arguments from it\"\n \" explicitly.\")\n specs = create_specs\n self.admin_cinder.create_encryption_type(volume_type[\"id\"],\n specs=specs)\n self.admin_cinder.list_encryption_type(search_opts)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeTypes.create_and_set_volume_type_keys\",\n platform=\"openstack\")\nclass CreateAndSetVolumeTypeKeys(cinder_utils.CinderBasic):\n\n def run(self, volume_type_key, description=None, is_public=True):\n \"\"\"Create and set a volume type's extra specs.\n\n :param volume_type_key: A dict of key/value pairs to be set\n :param description: Description of the volume type\n :param is_public: Volume type visibility\n \"\"\"\n volume_type = self.admin_cinder.create_volume_type(\n description=description,\n is_public=is_public)\n self.admin_cinder.set_volume_type_keys(volume_type,\n metadata=volume_type_key)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_contexts\", contexts=\"volume_types\")\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\n context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeTypes.create_get_and_delete_encryption_type\",\n platform=\"openstack\")\nclass 
CreateGetAndDeleteEncryptionType(cinder_utils.CinderBasic):\n\n def run(self, provider=None, cipher=None,\n key_size=None, control_location=\"front-end\"):\n \"\"\"Create get and delete an encryption type\n\n This scenario firstly creates an encryption type for a volume\n type created in the context, then gets detailed information of\n the created encryption type, finally deletes the created\n encryption type.\n\n :param provider: The class that provides encryption support. For\n example, LuksEncryptor.\n :param cipher: The encryption algorithm or mode.\n :param key_size: Size of encryption key, in bits.\n :param control_location: Notional service where encryption is\n performed. Valid values are \"front-end\"\n or \"back-end.\"\n \"\"\"\n vt_idx = self.context[\"iteration\"] % len(self.context[\"volume_types\"])\n volume_type = self.context[\"volume_types\"][vt_idx]\n specs = {\n \"provider\": provider,\n \"cipher\": cipher,\n \"key_size\": key_size,\n \"control_location\": control_location\n }\n self.admin_cinder.create_encryption_type(volume_type[\"id\"],\n specs=specs)\n self.admin_cinder.get_encryption_type(volume_type[\"id\"])\n self.admin_cinder.delete_encryption_type(volume_type[\"id\"])\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_contexts\", contexts=\"volume_types\")\[email protected](\"required_params\", params=[(\"create_specs\", \"provider\")])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\n context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeTypes.create_and_delete_encryption_type\",\n platform=\"openstack\")\nclass CreateAndDeleteEncryptionType(cinder_utils.CinderBasic):\n\n def run(self, create_specs=None, provider=None, cipher=None,\n key_size=None, control_location=\"front-end\"):\n \"\"\"Create and delete encryption type\n\n This scenario firstly creates an encryption type for a given\n volume type, then deletes 
the created encryption type.\n\n :param create_specs: the encryption type specifications to add\n :param provider: The class that provides encryption support. For\n example, LuksEncryptor.\n :param cipher: The encryption algorithm or mode.\n :param key_size: Size of encryption key, in bits.\n :param control_location: Notional service where encryption is\n performed. Valid values are \"front-end\"\n or \"back-end.\"\n \"\"\"\n vt_idx = self.context[\"iteration\"] % len(self.context[\"volume_types\"])\n volume_type = self.context[\"volume_types\"][vt_idx]\n if create_specs is None:\n specs = {\n \"provider\": provider,\n \"cipher\": cipher,\n \"key_size\": key_size,\n \"control_location\": control_location\n }\n else:\n LOG.warning(\"The argument `create_spec` is deprecated since\"\n \" Rally 0.10.0. Specify all arguments from it\"\n \" explicitly.\")\n specs = create_specs\n self.admin_cinder.create_encryption_type(volume_type[\"id\"],\n specs=specs)\n self.admin_cinder.delete_encryption_type(volume_type[\"id\"])\n\n\[email protected](\"required_services\", services=consts.Service.CINDER)\[email protected](\"required_contexts\", contexts=\"volume_types\")\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeTypes.create_and_update_encryption_type\",\n platform=\"openstack\")\nclass CreateAndUpdateEncryptionType(cinder_utils.CinderBasic):\n\n def run(self, create_provider=None, create_cipher=None,\n create_key_size=None, create_control_location=\"front-end\",\n update_provider=None, update_cipher=None,\n update_key_size=None, update_control_location=None):\n \"\"\"Create and update encryption type\n\n This scenario firstly creates a volume type, secondly creates an\n encryption type for the volume type, thirdly updates the encryption\n type.\n\n :param create_provider: The class that provides encryption support. 
For\n example, LuksEncryptor.\n :param create_cipher: The encryption algorithm or mode.\n :param create_key_size: Size of encryption key, in bits.\n :param create_control_location: Notional service where encryption is\n performed. Valid values are \"front-end\"\n or \"back-end.\"\n :param update_provider: The class that provides encryption support. For\n example, LuksEncryptor.\n :param update_cipher: The encryption algorithm or mode.\n :param update_key_size: Size of encryption key, in bits.\n :param update_control_location: Notional service where encryption is\n performed. Valid values are \"front-end\"\n or \"back-end.\"\n \"\"\"\n vt_idx = self.context[\"iteration\"] % len(self.context[\"volume_types\"])\n volume_type = self.context[\"volume_types\"][vt_idx]\n create_specs = {\n \"provider\": create_provider,\n \"cipher\": create_cipher,\n \"key_size\": create_key_size,\n \"control_location\": create_control_location\n }\n update_specs = {\n \"provider\": update_provider,\n \"cipher\": update_cipher,\n \"key_size\": update_key_size,\n \"control_location\": update_control_location\n }\n self.admin_cinder.create_encryption_type(volume_type[\"id\"],\n specs=create_specs)\n self.admin_cinder.update_encryption_type(volume_type[\"id\"],\n specs=update_specs)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\"required_api_versions\", component=\"cinder\",\n versions=[\"2\", \"3\"])\[email protected](\"required_services\", services=consts.Service.CINDER)\[email protected](\n context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeTypes.create_volume_type_add_and_list_type_access\",\n platform=\"openstack\")\nclass CreateVolumeTypeAddAndListTypeAccess(cinder_utils.CinderBasic):\n\n def run(self, description=None, is_public=False):\n \"\"\"Add and list volume type access for the given project.\n\n This scenario first creates a private volume type, then add project\n access and list project access to 
it.\n\n :param description: Description of the volume type\n :param is_public: Volume type visibility\n \"\"\"\n volume_type = self.admin_cinder.create_volume_type(\n description=description, is_public=is_public\n )\n self.admin_cinder.add_type_access(\n volume_type, project=self.context[\"tenant\"][\"id\"]\n )\n self.admin_cinder.list_type_access(volume_type)\n" }, { "alpha_fraction": 0.5903614163398743, "alphanum_fraction": 0.6313253045082092, "avg_line_length": 38.52381134033203, "blob_id": "16c1e41ab4133b5d40cefcfdd87ff88c7543ca17", "content_id": "86c29b83a1d59168e40eb198e3e8d7c39db692f4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1660, "license_type": "permissive", "max_line_length": 78, "num_lines": 42, "path": "/tests/unit/common/services/network/test_net_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.common.services.network import net_utils\nfrom tests.unit import test\n\n\nPATH = \"rally_openstack.common.services.network.net_utils\"\n\n\nclass FunctionsTestCase(test.TestCase):\n\n def test_generate_cidr(self):\n with mock.patch(\"%s._IPv4_CIDR_INCR\" % PATH, iter(range(1, 4))):\n self.assertEqual((4, \"10.2.1.0/24\"), net_utils.generate_cidr())\n self.assertEqual((4, \"10.2.2.0/24\"), net_utils.generate_cidr())\n self.assertEqual((4, \"10.2.3.0/24\"), net_utils.generate_cidr())\n\n with mock.patch(\"%s._IPv4_CIDR_INCR\" % PATH, iter(range(1, 4))):\n start_cidr = \"1.1.0.0/26\"\n self.assertEqual(\n (4, \"1.1.0.64/26\"),\n net_utils.generate_cidr(start_cidr=start_cidr))\n self.assertEqual(\n (4, \"1.1.0.128/26\"),\n net_utils.generate_cidr(start_cidr=start_cidr))\n self.assertEqual(\n (4, \"1.1.0.192/26\"),\n net_utils.generate_cidr(start_cidr=start_cidr))\n" }, { "alpha_fraction": 0.6170598864555359, "alphanum_fraction": 0.6297640800476074, "avg_line_length": 24.045454025268555, "blob_id": "ce5286cad4a4bc9297b4d07b55459c672abac0fb", "content_id": "eda8dd2660bfbf2cb5d5d45f94ad56c1fe04f65d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 551, "license_type": "permissive", "max_line_length": 57, "num_lines": 22, "path": "/devstack/plugin.sh", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# DevStack extras script to install Rally\n\n# Save trace setting\nXTRACE=$(set +o | grep xtrace)\nset -o xtrace\n\nDIR=$(dirname ${BASH_SOURCE[0]})\nsource $DIR/lib/rally\n\nif [[ \"$1\" == \"stack\" && \"$2\" == \"install\" ]]; then\n echo_summary \"Installing Rally-OpenStack\"\n install_rally\nelif [[ \"$1\" == \"stack\" && \"$2\" == \"post-config\" ]]; then\n echo_summary \"Configuring Rally-OpenStack\"\n 
configure_rally\nelif [[ \"$1\" == \"stack\" && \"$2\" == \"extra\" ]]; then\n echo_summary \"Initializing Rally-OpenStack\"\n init_rally\nfi\n\n# Restore xtrace\n$XTRACE\n" }, { "alpha_fraction": 0.6407857537269592, "alphanum_fraction": 0.6496725678443909, "avg_line_length": 34.04917907714844, "blob_id": "1089dfa2b64eb9b577b029a4ccf032e983b2ec92", "content_id": "5d1657af7d500e59a88bef229b038f5305aa4833", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2138, "license_type": "permissive", "max_line_length": 78, "num_lines": 61, "path": "/tests/unit/task/contexts/quotas/test_manila_quotas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.quotas import manila_quotas\nfrom tests.unit import test\n\n\nclass ManilaQuotasTestCase(test.TestCase):\n\n def test_update(self):\n clients = mock.MagicMock()\n instance = manila_quotas.ManilaQuotas(clients)\n tenant_id = mock.MagicMock()\n quotas_values = {\n \"shares\": 10,\n \"gigabytes\": 13,\n \"snapshots\": 7,\n \"snapshot_gigabytes\": 51,\n \"share_networks\": 1014,\n }\n\n instance.update(tenant_id, **quotas_values)\n\n clients.manila.return_value.quotas.update.assert_called_once_with(\n tenant_id, **quotas_values)\n\n def test_delete(self):\n clients = mock.MagicMock()\n instance = manila_quotas.ManilaQuotas(clients)\n tenant_id = mock.MagicMock()\n\n instance.delete(tenant_id)\n\n clients.manila.return_value.quotas.delete.assert_called_once_with(\n tenant_id)\n\n def test_get(self):\n tenant_id = \"tenant_id\"\n quotas = {\"gigabytes\": \"gb\", \"snapshots\": \"ss\", \"shares\": \"v\",\n \"snapshot_gigabytes\": \"sg\", \"share_networks\": \"sn\"}\n quota_set = mock.MagicMock(**quotas)\n clients = mock.MagicMock()\n clients.manila.return_value.quotas.get.return_value = quota_set\n manila_quo = manila_quotas.ManilaQuotas(clients)\n\n self.assertEqual(quotas, manila_quo.get(tenant_id))\n clients.manila().quotas.get.assert_called_once_with(tenant_id)\n" }, { "alpha_fraction": 0.5675926208496094, "alphanum_fraction": 0.5710589289665222, "avg_line_length": 43.33549880981445, "blob_id": "70a443f6225724a58f5dc091c0919a7b5625e46b", "content_id": "eda49f2d4364975ef789981a19484d81cbe2e891", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20483, "license_type": "permissive", "max_line_length": 79, "num_lines": 462, "path": "/tests/unit/common/services/loadbalancer/test_octavia.py", "repo_name": "openstack/rally-openstack", 
"src_encoding": "UTF-8", "text": "# Copyright 2018: Red Hat Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport fixtures\n\nfrom rally.common import cfg\nfrom rally import exceptions\n\nfrom rally_openstack.common.services.loadbalancer import octavia\nfrom tests.unit import test\n\nBASE_PATH = \"rally_openstack.common.services.loadbalancer\"\nCONF = cfg.CONF\n\n\nclass LoadBalancerServiceTestCase(test.TestCase):\n def setUp(self):\n super(LoadBalancerServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.name_generator = mock.MagicMock()\n self.service = octavia.Octavia(self.clients,\n name_generator=self.name_generator)\n self.mock_wait_for_status = fixtures.MockPatch(\n \"rally.task.utils.wait_for_status\")\n self.useFixture(self.mock_wait_for_status)\n\n def _get_context(self):\n context = test.get_test_context()\n context.update({\n \"user\": {\n \"id\": \"fake_user\",\n \"tenant_id\": \"fake_tenant\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake_tenant\",\n \"networks\": [{\"id\": \"fake_net\",\n \"subnets\": [\"fake_subnet\"]}]}})\n return context\n\n def atomic_actions(self):\n return self.service._atomic_actions\n\n def test_load_balancer_list(self):\n self.service.load_balancer_list(),\n self.service._clients.octavia().load_balancer_list \\\n .assert_called_once_with()\n self._test_atomic_action_timer(self.atomic_actions(),\n 
\"octavia.load_balancer_list\")\n\n def test_load_balancer_show(self):\n lb = {\"id\": \"loadbalancer-id\"}\n self.service.load_balancer_show(lb[\"id\"])\n self.service._clients.octavia().load_balancer_show \\\n .assert_called_once_with(lb[\"id\"])\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.load_balancer_show\")\n\n def test_load_balancer_show_fail_404(self):\n fake_lb = {\"id\": \"fake_lb\"}\n ex = Exception()\n ex.code = 404\n self.service._clients.octavia().load_balancer_show.side_effect = ex\n self.assertRaises(\n exceptions.GetResourceNotFound,\n self.service.load_balancer_show, fake_lb[\"id\"])\n\n def test_load_balancer_show_resource_fail(self):\n fake_lb = {\"id\": \"fake_lb\"}\n ex = Exception()\n self.service._clients.octavia().load_balancer_show.side_effect = ex\n self.assertRaises(\n exceptions.GetResourceFailure,\n self.service.load_balancer_show, fake_lb[\"id\"])\n\n def test_load_balancer_create(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"lb\")\n self.service.load_balancer_create(\"subnet_id\")\n self.service._clients.octavia().load_balancer_create \\\n .assert_called_once_with(json={\n \"loadbalancer\": {\"name\": \"lb\",\n \"admin_state_up\": True,\n \"vip_qos_policy_id\": None,\n \"listeners\": None,\n \"project_id\": None,\n \"provider\": None,\n \"vip_subnet_id\": \"subnet_id\",\n \"description\": None}})\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.load_balancer_create\")\n\n def test_load_balancer_delete(self):\n self.service.load_balancer_delete(\"lb-id\")\n self.service._clients.octavia().load_balancer_delete \\\n .assert_called_once_with(\"lb-id\", cascade=False)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.load_balancer_delete\")\n\n def test_load_balancer_set(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"new_lb\")\n lb_update_args = {\"name\": \"new_lb_name\"}\n self.service.load_balancer_set(\n 
\"lb-id\", lb_update_args=lb_update_args)\n self.service._clients.octavia().load_balancer_set \\\n .assert_called_once_with(\n \"lb-id\", json={\"loadbalancer\": {\"name\": \"new_lb_name\"}})\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.load_balancer_set\")\n\n def test_load_balancer_stats_show(self):\n lb = {\"id\": \"new_lb\"}\n self.assertEqual(\n self.service.load_balancer_stats_show(lb, kwargs={}),\n self.service._clients.octavia()\n .load_balancer_stats_show.return_value)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.load_balancer_stats_show\")\n\n def test_load_balancer_failover(self):\n lb = {\"id\": \"new_lb\"}\n self.service.load_balancer_failover(lb[\"id\"])\n self.service._clients.octavia().load_balancer_failover \\\n .assert_called_once_with(lb[\"id\"])\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.load_balancer_failover\")\n\n def test_listener_list(self):\n self.service.listener_list()\n self.service._clients.octavia().listener_list \\\n .assert_called_once_with()\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.listener_list\")\n\n def test_listener_show(self):\n self.service.listener_show(listener_id=\"listener_id\")\n self.service._clients.octavia().listener_show \\\n .assert_called_once_with(\"listener_id\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.listener_show\")\n\n def test_listener_create(self):\n self.service.listener_create()\n self.service._clients.octavia().listener_create \\\n .assert_called_once_with()\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.listener_create\")\n\n def test_listener_delete(self):\n self.service.listener_delete(listener_id=\"listener_id\")\n self.service._clients.octavia().listener_delete \\\n .assert_called_once_with(\"listener_id\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.listener_delete\")\n\n def test_listener_set(self):\n 
self.service.listener_set(listener_id=\"listener_id\")\n self.service._clients.octavia().listener_set \\\n .assert_called_once_with(\"listener_id\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.listener_set\")\n\n def test_listener_stats_show(self):\n self.service.listener_stats_show(listener_id=\"listener_id\")\n self.service._clients.octavia().listener_stats_show \\\n .assert_called_once_with(\"listener_id\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.listener_stats_show\")\n\n def test_pool_list(self):\n self.service.pool_list()\n self.service._clients.octavia().pool_list \\\n .assert_called_once_with()\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.pool_list\")\n\n def test_update_pool_resource(self):\n fake_pool = {\"id\": \"pool-id\"}\n self.service.update_pool_resource(fake_pool)\n self.service._clients.octavia().pool_show \\\n .assert_called_once_with(\"pool-id\")\n\n def test_update_pool_resource_fail_404(self):\n fake_pool = {\"id\": \"pool-id\"}\n ex = Exception()\n ex.status_code = 404\n self.service._clients.octavia().pool_show.side_effect = ex\n self.assertRaises(\n exceptions.GetResourceNotFound,\n self.service.update_pool_resource, fake_pool)\n\n def test_update_pool_resource_fail(self):\n fake_pool = {\"id\": \"pool-id\"}\n ex = Exception()\n self.service._clients.octavia().pool_show.side_effect = ex\n self.assertRaises(\n exceptions.GetResourceFailure,\n self.service.update_pool_resource, fake_pool)\n\n def test_pool_create(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"pool\")\n self.service.pool_create(\n lb_id=\"loadbalancer-id\",\n protocol=\"HTTP\",\n lb_algorithm=\"ROUND_ROBIN\")\n self.service._clients.octavia().pool_create \\\n .assert_called_once_with(\n json={\"pool\": {\n \"lb_algorithm\": \"ROUND_ROBIN\",\n \"project_id\": None,\n \"protocol\": \"HTTP\",\n \"listener_id\": None,\n \"description\": None,\n \"admin_state_up\": True,\n 
\"session_persistence\": None,\n \"loadbalancer_id\": \"loadbalancer-id\",\n \"name\": \"pool\"}})\n\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.pool_create\")\n\n def test_pool_delete(self):\n self.service.pool_delete(pool_id=\"fake_pool\")\n self.service._clients.octavia().pool_delete \\\n .assert_called_once_with(\"fake_pool\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.pool_delete\")\n\n def test_pool_show(self):\n self.service.pool_show(pool_id=\"fake_pool\")\n self.service._clients.octavia().pool_show \\\n .assert_called_once_with(\"fake_pool\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.pool_show\")\n\n def test_pool_set(self):\n pool_update_args = {\"name\": \"new-pool-name\"}\n self.service.pool_set(\n pool_id=\"fake_pool\",\n pool_update_args=pool_update_args)\n self.service._clients.octavia().pool_set \\\n .assert_called_once_with(\n \"fake_pool\",\n json={\"pool\": {\"name\": \"new-pool-name\"}})\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.pool_set\")\n\n def test_member_list(self):\n self.service.member_list(pool_id=\"fake_pool\")\n self.service._clients.octavia().member_list \\\n .assert_called_once_with(\"fake_pool\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.member_list\")\n\n def test_member_show(self):\n self.service.member_show(pool_id=\"fake_pool\", member_id=\"fake_member\")\n self.service._clients.octavia().member_show \\\n .assert_called_once_with(\"fake_pool\", \"fake_member\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.member_show\")\n\n def test_member_create(self):\n self.service.member_create(pool_id=\"fake_pool\")\n self.service._clients.octavia().member_create \\\n .assert_called_once_with(\"fake_pool\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.member_create\")\n\n def test_member_delete(self):\n self.service.member_delete(\n pool_id=\"fake_pool\", 
member_id=\"fake_member\")\n self.service._clients.octavia().member_delete \\\n .assert_called_once_with(\"fake_pool\", \"fake_member\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.member_delete\")\n\n def test_member_set(self):\n self.service.member_set(pool_id=\"fake_pool\", member_id=\"fake_member\")\n self.service._clients.octavia().member_set \\\n .assert_called_once_with(\"fake_pool\", \"fake_member\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.member_set\")\n\n def test_l7policy_list(self):\n self.service.l7policy_list()\n self.service._clients.octavia().l7policy_list \\\n .assert_called_once_with()\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.l7policy_list\")\n\n def test_l7policy_create(self):\n self.service.l7policy_create()\n self.service._clients.octavia().l7policy_create \\\n .assert_called_once_with()\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.l7policy_create\")\n\n def test_l7policy_delete(self):\n self.service.l7policy_delete(l7policy_id=\"fake_policy\")\n self.service._clients.octavia().l7policy_delete \\\n .assert_called_once_with(\"fake_policy\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.l7policy_delete\")\n\n def test_l7policy_show(self):\n self.service.l7policy_show(l7policy_id=\"fake_policy\")\n self.service._clients.octavia().l7policy_show \\\n .assert_called_once_with(\"fake_policy\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.l7policy_show\")\n\n def test_l7policy_set(self):\n self.service.l7policy_set(l7policy_id=\"fake_policy\")\n self.service._clients.octavia().l7policy_set \\\n .assert_called_once_with(\"fake_policy\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.l7policy_set\")\n\n def test_l7rule_list(self):\n self.service.l7rule_list(l7policy_id=\"fake_policy\")\n self.service._clients.octavia().l7rule_list \\\n .assert_called_once_with(\"fake_policy\")\n 
self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.l7rule_list\")\n\n def test_l7rule_create(self):\n self.service.l7rule_create(l7policy_id=\"fake_policy\")\n self.service._clients.octavia().l7rule_create \\\n .assert_called_once_with(\"fake_policy\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.l7rule_create\")\n\n def test_l7rule_delete(self):\n self.service.l7rule_delete(\n l7rule_id=\"fake_id\", l7policy_id=\"fake_policy\")\n self.service._clients.octavia().l7rule_delete \\\n .assert_called_once_with(\"fake_id\", \"fake_policy\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.l7rule_delete\")\n\n def test_l7rule_show(self):\n self.service.l7rule_show(\n l7rule_id=\"fake_id\", l7policy_id=\"fake_policy\")\n self.service._clients.octavia().l7rule_show \\\n .assert_called_once_with(\"fake_id\", \"fake_policy\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.l7rule_show\")\n\n def test_l7rule_set(self):\n self.service.l7rule_set(l7rule_id=\"fake_id\", l7policy_id=\"fake_policy\")\n self.service._clients.octavia().l7rule_set \\\n .assert_called_once_with(\"fake_id\", \"fake_policy\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.l7rule_set\")\n\n def test_health_monitor_list(self):\n self.service.health_monitor_list()\n self.service._clients.octavia().health_monitor_list \\\n .assert_called_once_with()\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.health_monitor_list\")\n\n def test_health_monitor_create(self):\n self.service.health_monitor_create()\n self.service._clients.octavia().health_monitor_create \\\n .assert_called_once_with()\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.health_monitor_create\")\n\n def test_health_monitor_delete(self):\n self.service.health_monitor_delete(health_monitor_id=\"fake_monitor_id\")\n self.service._clients.octavia().health_monitor_delete \\\n 
.assert_called_once_with(\"fake_monitor_id\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.health_monitor_delete\")\n\n def test_health_monitor_show(self):\n self.service.health_monitor_show(health_monitor_id=\"fake_monitor_id\")\n self.service._clients.octavia().health_monitor_show \\\n .assert_called_once_with(\"fake_monitor_id\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.health_monitor_show\")\n\n def test_health_monitor_set(self):\n self.service.health_monitor_set(health_monitor_id=\"fake_monitor_id\")\n self.service._clients.octavia().health_monitor_set \\\n .assert_called_once_with(\"fake_monitor_id\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.health_monitor_set\")\n\n def test_quota_list(self):\n self.service.quota_list(params=\"fake_params\")\n self.service._clients.octavia().quota_list \\\n .assert_called_once_with(\"fake_params\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.quota_list\")\n\n def test_quota_show(self):\n self.service.quota_show(project_id=\"fake_project\")\n self.service._clients.octavia().quota_show \\\n .assert_called_once_with(\"fake_project\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.quota_show\")\n\n def test_quota_reset(self):\n self.service.quota_reset(project_id=\"fake_project\")\n self.service._clients.octavia().quota_reset \\\n .assert_called_once_with(\"fake_project\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.quota_reset\")\n\n def test_quota_set(self):\n self.service.quota_set(project_id=\"fake_project\",\n params=\"fake_params\")\n self.service._clients.octavia().quota_set \\\n .assert_called_once_with(\"fake_project\", \"fake_params\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.quota_set\")\n\n def test_quota_defaults_show(self):\n self.service.quota_defaults_show()\n self.service._clients.octavia().quota_defaults_show \\\n 
.assert_called_once_with()\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.quota_defaults_show\")\n\n def test_amphora_show(self):\n self.service.amphora_show(amphora_id=\"fake_amphora\")\n self.service._clients.octavia().amphora_show \\\n .assert_called_once_with(\"fake_amphora\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.amphora_show\")\n\n def test_amphora_list(self):\n self.service.amphora_list()\n self.service._clients.octavia().amphora_list \\\n .assert_called_once_with()\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.amphora_list\")\n\n @mock.patch(\"%s.Ocvita.wait_for_loadbalancer_prov_status\" % BASE_PATH)\n def wait_for_loadbalancer_prov_status(self, mock_wait_for_status):\n fake_lb = {}\n self.service.wait_for_loadbalancer_prov_status(lb=fake_lb)\n self.assertTrue(mock_wait_for_status.called)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"octavia.wait_for_loadbalancers\")\n" }, { "alpha_fraction": 0.5734419822692871, "alphanum_fraction": 0.5754796862602234, "avg_line_length": 40.181819915771484, "blob_id": "5bf6967088404957f9d22e8df6f790258f432d16", "content_id": "d29fdc1f74884cb9283eefa53472d3db105aa7bf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5889, "license_type": "permissive", "max_line_length": 78, "num_lines": 143, "path": "/rally_openstack/task/scenarios/sahara/jobs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.sahara import utils\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_services\", services=[consts.Service.SAHARA])\[email protected](\"required_contexts\", contexts=[\"users\", \"sahara_image\",\n \"sahara_job_binaries\",\n \"sahara_cluster\"])\[email protected](context={\"cleanup@openstack\": [\"sahara\"]},\n name=\"SaharaJob.create_launch_job\",\n platform=\"openstack\")\nclass CreateLaunchJob(utils.SaharaScenario):\n\n def run(self, job_type, configs, job_idx=0):\n \"\"\"Create and execute a Sahara EDP Job.\n\n This scenario Creates a Job entity and launches an execution on a\n Cluster.\n\n :param job_type: type of the Data Processing Job\n :param configs: config dict that will be passed to a Job Execution\n :param job_idx: index of a job in a sequence. 
This index will be\n used to create different atomic actions for each job\n in a sequence\n \"\"\"\n\n mains = self.context[\"tenant\"][\"sahara\"][\"mains\"]\n libs = self.context[\"tenant\"][\"sahara\"][\"libs\"]\n\n name = self.generate_random_name()\n job = self.clients(\"sahara\").jobs.create(name=name,\n type=job_type,\n description=\"\",\n mains=mains,\n libs=libs)\n\n cluster_id = self.context[\"tenant\"][\"sahara\"][\"cluster\"]\n\n if job_type.lower() == \"java\":\n input_id = None\n output_id = None\n else:\n input_id = self.context[\"tenant\"][\"sahara\"][\"input\"]\n output_id = self._create_output_ds().id\n\n self._run_job_execution(job_id=job.id,\n cluster_id=cluster_id,\n input_id=input_id,\n output_id=output_id,\n configs=configs,\n job_idx=job_idx)\n\n\[email protected](\"required_services\", services=[consts.Service.SAHARA])\[email protected](\"required_contexts\", contexts=[\"users\", \"sahara_image\",\n \"sahara_job_binaries\",\n \"sahara_cluster\"])\[email protected](context={\"cleanup@openstack\": [\"sahara\"]},\n name=\"SaharaJob.create_launch_job_sequence\",\n platform=\"openstack\")\nclass CreateLaunchJobSequence(utils.SaharaScenario):\n\n def run(self, jobs):\n \"\"\"Create and execute a sequence of the Sahara EDP Jobs.\n\n This scenario Creates a Job entity and launches an execution on a\n Cluster for every job object provided.\n\n :param jobs: list of jobs that should be executed in one context\n \"\"\"\n\n launch_job = CreateLaunchJob(self.context)\n\n for idx, job in enumerate(jobs):\n LOG.debug(\"Launching Job. 
Sequence #%d\" % idx)\n launch_job.run(job[\"job_type\"], job[\"configs\"], idx)\n\n\[email protected](\"required_services\", services=[consts.Service.SAHARA])\[email protected](\"required_contexts\", contexts=[\"users\", \"sahara_image\",\n \"sahara_job_binaries\",\n \"sahara_cluster\"])\[email protected](context={\"cleanup@openstack\": [\"sahara\"]},\n name=\"SaharaJob.create_launch_job_sequence_with_scaling\",\n platform=\"openstack\")\nclass CreateLaunchJobSequenceWithScaling(utils.SaharaScenario,):\n\n def run(self, jobs, deltas):\n \"\"\"Create and execute Sahara EDP Jobs on a scaling Cluster.\n\n This scenario Creates a Job entity and launches an execution on a\n Cluster for every job object provided. The Cluster is scaled according\n to the deltas values and the sequence is launched again.\n\n :param jobs: list of jobs that should be executed in one context\n :param deltas: list of integers which will be used to add or\n remove worker nodes from the cluster\n \"\"\"\n\n cluster_id = self.context[\"tenant\"][\"sahara\"][\"cluster\"]\n\n launch_job_sequence = CreateLaunchJobSequence(self.context)\n launch_job_sequence.run(jobs)\n\n for delta in deltas:\n # The Cluster is fetched every time so that its node groups have\n # correct 'count' values.\n cluster = self.clients(\"sahara\").clusters.get(cluster_id)\n\n LOG.debug(\"Scaling cluster %s with delta %d\"\n % (cluster.name, delta))\n if delta == 0:\n # Zero scaling makes no sense.\n continue\n elif delta > 0:\n self._scale_cluster_up(cluster, delta)\n elif delta < 0:\n self._scale_cluster_down(cluster, delta)\n\n LOG.debug(\"Starting Job sequence\")\n launch_job_sequence.run(jobs)\n" }, { "alpha_fraction": 0.6736030578613281, "alphanum_fraction": 0.6774566769599915, "avg_line_length": 34.06756591796875, "blob_id": "1dc66076efa0d5159d95fc13be6af804cb49a6c5", "content_id": "961893c339c9c36299c44329b32e38179245222a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 2595, "license_type": "permissive", "max_line_length": 78, "num_lines": 74, "path": "/rally_openstack/task/scenarios/magnum/k8s_pods.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport yaml\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.magnum import utils\n\n\n\"\"\"Scenarios for Kubernetes pods and rcs.\"\"\"\n\n\[email protected](\"required_services\", services=consts.Service.MAGNUM)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"K8sPods.list_pods\", platform=\"openstack\")\nclass ListPods(utils.MagnumScenario):\n\n def run(self):\n \"\"\"List all pods.\n\n \"\"\"\n self._list_v1pods()\n\n\[email protected](\"required_services\", services=consts.Service.MAGNUM)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"K8sPods.create_pods\", platform=\"openstack\")\nclass CreatePods(utils.MagnumScenario):\n\n def run(self, manifests):\n \"\"\"create pods and wait for them to be ready.\n\n :param manifests: manifest files used to create the pods\n \"\"\"\n for manifest in manifests:\n with open(manifest, \"r\") as f:\n manifest_str = f.read()\n manifest = yaml.safe_load(manifest_str)\n pod = 
self._create_v1pod(manifest)\n msg = (\"Pod isn't created\")\n self.assertTrue(pod, err_msg=msg)\n\n\[email protected](\"required_services\", services=consts.Service.MAGNUM)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"K8sPods.create_rcs\", platform=\"openstack\")\nclass CreateRcs(utils.MagnumScenario):\n\n def run(self, manifests):\n \"\"\"create rcs and wait for them to be ready.\n\n :param manifests: manifest files use to create the rcs\n \"\"\"\n for manifest in manifests:\n with open(manifest, \"r\") as f:\n manifest_str = f.read()\n manifest = yaml.safe_load(manifest_str)\n rc = self._create_v1rc(manifest)\n msg = (\"RC isn't created\")\n self.assertTrue(rc, err_msg=msg)\n" }, { "alpha_fraction": 0.6077514886856079, "alphanum_fraction": 0.6143485307693481, "avg_line_length": 39.42222213745117, "blob_id": "8c6b0d3cb80e6d6bf0e820a8bda2dc7022d1857c", "content_id": "3c3ede93f33fe9ad0da498f20ea93d616e932dbc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3638, "license_type": "permissive", "max_line_length": 79, "num_lines": 90, "path": "/rally_openstack/task/scenarios/quotas/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Kylin Cloud\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.task import atomic\n\nfrom rally_openstack.task import scenario\n\n\nclass QuotasScenario(scenario.OpenStackScenario):\n \"\"\"Base class for quotas scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"quotas.update_quotas\")\n def _update_quotas(self, component, tenant_id, max_quota=1024,\n quota_update_fn=None):\n \"\"\"Updates quotas.\n\n :param component: Component for the quotas.\n :param tenant_id: The project_id for the quotas to be updated.\n :param max_quota: Max value to be updated for quota.\n :param quota_update_fn: Client quota update function.\n\n Standard OpenStack clients use quotas.update().\n Use `quota_update_fn` to override for non-standard clients.\n\n :returns: Updated quotas dictionary.\n \"\"\"\n quotas = self._generate_quota_values(max_quota, component)\n if quota_update_fn:\n return quota_update_fn(tenant_id, **quotas)\n return self.admin_clients(component).quotas.update(tenant_id, **quotas)\n\n @atomic.action_timer(\"quotas.delete_quotas\")\n def _delete_quotas(self, component, tenant_id):\n \"\"\"Delete quotas.\n\n :param component: Component for the quotas.\n :param tenant_id: The project_id for the quotas to be updated.\n \"\"\"\n self.admin_clients(component).quotas.delete(tenant_id)\n\n def _generate_quota_values(self, max_quota, component):\n quotas = {}\n if component == \"nova\":\n quotas = {\n \"metadata_items\": random.randint(-1, max_quota),\n \"key_pairs\": random.randint(-1, max_quota),\n \"injected_file_content_bytes\": random.randint(-1, max_quota),\n \"injected_file_path_bytes\": random.randint(-1, max_quota),\n \"ram\": random.randint(-1, max_quota),\n \"instances\": random.randint(-1, max_quota),\n \"injected_files\": random.randint(-1, max_quota),\n \"cores\": random.randint(-1, max_quota)\n }\n elif component == \"cinder\":\n quotas = {\n \"volumes\": 
random.randint(-1, max_quota),\n \"snapshots\": random.randint(-1, max_quota),\n \"gigabytes\": random.randint(-1, max_quota),\n }\n elif component == \"neutron\":\n quota = {}\n for key in [\"network\", \"subnet\", \"port\", \"router\", \"floatingip\",\n \"security_group\", \"security_group_rule\"]:\n quota[key] = random.randint(-1, max_quota)\n quotas = {\"body\": {\"quota\": quota}}\n return quotas\n\n @atomic.action_timer(\"quotas.get_quotas\")\n def _get_quotas(self, component, tenant_id):\n \"\"\"Get quotas for a project.\n\n :param component: Openstack component for the quotas.\n :param tenant_id: The project_id for the quotas to show.\n :return: Get quotas for a project.\n \"\"\"\n return self.admin_clients(component).quotas.get(tenant_id)\n" }, { "alpha_fraction": 0.5965293049812317, "alphanum_fraction": 0.5995661616325378, "avg_line_length": 34.46154022216797, "blob_id": "c85dc4ee458c38a2bd3a8826419bc0d5d8c3bd57", "content_id": "cebd31c3d8025e4584d41b2a4b4cac1d1174efa3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2305, "license_type": "permissive", "max_line_length": 78, "num_lines": 65, "path": "/rally_openstack/task/contexts/senlin/profiles.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.senlin import utils as senlin_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"profiles\", platform=\"openstack\", order=190)\nclass ProfilesGenerator(context.OpenStackContext):\n \"\"\"Context creates a temporary profile for Senlin test.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"type\": {\n \"type\": \"string\",\n },\n \"version\": {\n \"type\": \"string\",\n },\n \"properties\": {\n \"type\": \"object\",\n \"additionalProperties\": True,\n }\n },\n \"additionalProperties\": False,\n \"required\": [\"type\", \"version\", \"properties\"]\n }\n\n def setup(self):\n \"\"\"Create test profiles.\"\"\"\n for user, tenant_id in self._iterate_per_tenants():\n\n senlin_scenario = senlin_utils.SenlinScenario({\n \"user\": user,\n \"task\": self.context[\"task\"]\n })\n profile = senlin_scenario._create_profile(self.config)\n\n self.context[\"tenants\"][tenant_id][\"profile\"] = profile.id\n\n def cleanup(self):\n \"\"\"Delete created test profiles.\"\"\"\n for user, tenant_id in self._iterate_per_tenants():\n\n senlin_scenario = senlin_utils.SenlinScenario({\n \"user\": user,\n \"task\": self.context[\"task\"]\n })\n senlin_scenario._delete_profile(\n self.context[\"tenants\"][tenant_id][\"profile\"])\n" }, { "alpha_fraction": 0.6182771921157837, "alphanum_fraction": 0.6239615082740784, "avg_line_length": 42.980770111083984, "blob_id": "1861e091740e4402d085d35d5dda72dddba59d52", "content_id": "599eb362500949cec3d6f37163ba91c8abcda3ff", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4574, "license_type": 
"permissive", "max_line_length": 78, "num_lines": 104, "path": "/tests/unit/task/scenarios/authenticate/test_authenticate.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.authenticate import authenticate\nfrom tests.unit import test\n\n\nclass AuthenticateTestCase(test.ScenarioTestCase):\n\n def test_keystone(self):\n scenario_inst = authenticate.Keystone()\n scenario_inst.run()\n self.assertTrue(self.client_created(\"keystone\"))\n self._test_atomic_action_timer(scenario_inst.atomic_actions(),\n \"authenticate.keystone\")\n\n def test_validate_glance(self):\n scenario_inst = authenticate.ValidateGlance()\n scenario_inst.run(5)\n\n # NOTE(stpierre): We can't use assert_has_calls() here because\n # that includes calls on the return values of the mock object\n # as well. 
Glance (and Heat and Monasca, tested below) returns\n # an iterator that the scenario wraps in list() in order to\n # force glanceclient to actually make the API call, and this\n # results in a bunch of call().__iter__() and call().__len__()\n # calls that aren't matched if we use assert_has_calls().\n self.assertCountEqual(\n self.clients(\"glance\").images.list.call_args_list,\n [mock.call(name=mock.ANY)] * 5)\n self._test_atomic_action_timer(scenario_inst.atomic_actions(),\n \"authenticate.validate_glance\")\n\n def test_validate_nova(self):\n scenario_inst = authenticate.ValidateNova()\n scenario_inst.run(5)\n self.clients(\"nova\").flavors.list.assert_has_calls([mock.call()] * 5)\n self._test_atomic_action_timer(scenario_inst.atomic_actions(),\n \"authenticate.validate_nova\")\n\n def test_validate_ceilometer(self):\n scenario_inst = authenticate.ValidateCeilometer()\n scenario_inst.run(5)\n self.clients(\"ceilometer\").meters.list.assert_has_calls(\n [mock.call()] * 5)\n self._test_atomic_action_timer(\n scenario_inst.atomic_actions(),\n \"authenticate.validate_ceilometer\")\n\n def test_validate_cinder(self):\n scenario_inst = authenticate.ValidateCinder()\n scenario_inst.run(5)\n self.clients(\"cinder\").volume_types.list.assert_has_calls(\n [mock.call()] * 5)\n self._test_atomic_action_timer(scenario_inst.atomic_actions(),\n \"authenticate.validate_cinder\")\n\n def test_validate_neutron(self):\n scenario_inst = authenticate.ValidateNeutron()\n scenario_inst.run(5)\n self.clients(\"neutron\").list_networks.assert_has_calls(\n [mock.call()] * 5)\n self._test_atomic_action_timer(scenario_inst.atomic_actions(),\n \"authenticate.validate_neutron\")\n\n def test_validate_octavia(self):\n scenario_inst = authenticate.ValidateOctavia()\n scenario_inst.run(5)\n self.clients(\"octavia\").load_balancer_list.assert_has_calls(\n [mock.call()] * 5)\n self._test_atomic_action_timer(scenario_inst.atomic_actions(),\n \"authenticate.validate_octavia\")\n\n def 
test_validate_heat(self):\n scenario_inst = authenticate.ValidateHeat()\n scenario_inst.run(5)\n self.assertCountEqual(\n self.clients(\"heat\").stacks.list.call_args_list,\n [mock.call(limit=0)] * 5)\n self._test_atomic_action_timer(scenario_inst.atomic_actions(),\n \"authenticate.validate_heat\")\n\n def test_validate_monasca(self):\n scenario_inst = authenticate.ValidateMonasca()\n scenario_inst.run(5)\n self.assertCountEqual(\n self.clients(\"monasca\").metrics.list.call_args_list,\n [mock.call(limit=0)] * 5)\n self._test_atomic_action_timer(scenario_inst.atomic_actions(),\n \"authenticate.validate_monasca\")\n" }, { "alpha_fraction": 0.564388632774353, "alphanum_fraction": 0.5689374804496765, "avg_line_length": 39.31877899169922, "blob_id": "f8e0504bc5f9888a3cf61d7f614703d936c34737", "content_id": "ca22cc90cecfcc01b805567a4a16bfbd2db5b7d9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9233, "license_type": "permissive", "max_line_length": 78, "num_lines": 229, "path": "/tests/unit/common/services/image/test_glance_v2.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\nimport fixtures\n\nfrom rally_openstack.common.services.image import glance_v2\nfrom tests.unit import test\n\n\nPATH = \"rally_openstack.common.services.image\"\n\n\[email protected]\nclass GlanceV2ServiceTestCase(test.TestCase):\n def setUp(self):\n super(GlanceV2ServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.gc = self.clients.glance.return_value\n self.name_generator = mock.MagicMock()\n self.service = glance_v2.GlanceV2Service(\n self.clients, name_generator=self.name_generator)\n self.mock_wait_for_status = fixtures.MockPatch(\n \"rally.task.utils.wait_for_status\")\n self.useFixture(self.mock_wait_for_status)\n\n def _get_temp_file_name(self):\n # return a temp file that will be cleaned automatically\n temp_dir = self.useFixture(fixtures.TempDir())\n return temp_dir.join(\"temp-file-name\")\n\n @ddt.data({\"location\": \"image_location\", \"temp\": False},\n {\"location\": \"image location\", \"temp\": True})\n @ddt.unpack\n @mock.patch(\"requests.get\")\n def test_upload(self, mock_requests_get, location, temp):\n image_id = \"foo\"\n\n # override the location with a private temp file\n if temp:\n location = self._get_temp_file_name()\n\n self.service.upload_data(image_id, image_location=location)\n\n mock_requests_get.assert_called_once_with(location, stream=True,\n verify=False)\n self.gc.images.upload.assert_called_once_with(\n image_id, mock_requests_get.return_value.raw)\n\n @mock.patch(\"%s.glance_v2.GlanceV2Service.upload_data\" % PATH)\n def test_create_image(self, mock_upload_data):\n image_name = \"image_name\"\n container_format = \"container_format\"\n disk_format = \"disk_format\"\n visibility = \"public\"\n properties = {\"fakeprop\": \"fake\"}\n location = \"location\"\n\n image = self.service.create_image(\n image_name=image_name,\n container_format=container_format,\n 
image_location=location,\n disk_format=disk_format,\n visibility=visibility,\n properties=properties)\n\n call_args = {\"container_format\": container_format,\n \"disk_format\": disk_format,\n \"name\": image_name,\n \"visibility\": visibility,\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"fakeprop\": \"fake\"}\n self.gc.images.create.assert_called_once_with(**call_args)\n self.assertEqual(image, self.mock_wait_for_status.mock.return_value)\n mock_upload_data.assert_called_once_with(\n self.mock_wait_for_status.mock.return_value.id,\n image_location=location)\n\n def test_update_image(self):\n image_id = \"image_id\"\n image_name1 = self.name_generator.return_value\n image_name2 = \"image_name\"\n min_disk = 0\n min_ram = 0\n remove_props = None\n\n # case: image_name is None:\n call_args1 = {\"image_id\": image_id,\n \"name\": image_name1,\n \"min_disk\": min_disk,\n \"min_ram\": min_ram,\n \"remove_props\": remove_props}\n image1 = self.service.update_image(image_id=image_id,\n image_name=None,\n min_disk=min_disk,\n min_ram=min_ram,\n remove_props=remove_props)\n self.assertEqual(self.gc.images.update.return_value, image1)\n self.gc.images.update.assert_called_once_with(**call_args1)\n\n # case: image_name is not None:\n call_args2 = {\"image_id\": image_id,\n \"name\": image_name2,\n \"min_disk\": min_disk,\n \"min_ram\": min_ram,\n \"remove_props\": remove_props}\n image2 = self.service.update_image(image_id=image_id,\n image_name=image_name2,\n min_disk=min_disk,\n min_ram=min_ram,\n remove_props=remove_props)\n self.assertEqual(self.gc.images.update.return_value, image2)\n self.gc.images.update.assert_called_with(**call_args2)\n\n def test_list_images(self):\n status = \"active\"\n kwargs = {\"status\": status}\n filters = {\"filters\": kwargs}\n self.gc.images.list.return_value = iter([1, 2, 3])\n\n self.assertEqual([1, 2, 3], self.service.list_images())\n self.gc.images.list.assert_called_once_with(**filters)\n\n def test_set_visibility(self):\n image_id = 
\"image_id\"\n visibility = \"shared\"\n self.service.set_visibility(image_id=image_id)\n self.gc.images.update.assert_called_once_with(\n image_id,\n visibility=visibility)\n\n def test_deactivate_image(self):\n image_id = \"image_id\"\n self.service.deactivate_image(image_id)\n self.gc.images.deactivate.assert_called_once_with(image_id)\n\n def test_reactivate_image(self):\n image_id = \"image_id\"\n self.service.reactivate_image(image_id)\n self.gc.images.reactivate.assert_called_once_with(image_id)\n\n\[email protected]\nclass UnifiedGlanceV2ServiceTestCase(test.TestCase):\n def setUp(self):\n super(UnifiedGlanceV2ServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.service = glance_v2.UnifiedGlanceV2Service(self.clients)\n self.service._impl = mock.create_autospec(self.service._impl)\n\n @mock.patch(\"%s.glance_common.UnifiedGlanceMixin._unify_image\" % PATH)\n def test_create_image(self, mock_image__unify_image):\n image_name = \"image_name\"\n container_format = \"container_format\"\n image_location = \"image_location\"\n disk_format = \"disk_format\"\n visibility = \"public\"\n properties = {\"fakeprop\": \"fake\"}\n callargs = {\"image_name\": image_name,\n \"container_format\": container_format,\n \"image_location\": image_location,\n \"disk_format\": disk_format,\n \"visibility\": visibility,\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"properties\": properties}\n\n image = self.service.create_image(image_name=image_name,\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n properties=properties)\n\n self.assertEqual(mock_image__unify_image.return_value, image)\n self.service._impl.create_image.assert_called_once_with(**callargs)\n\n @mock.patch(\"%s.glance_common.UnifiedGlanceMixin._unify_image\" % PATH)\n def test_update_image(self, mock_image__unify_image):\n image_id = \"image_id\"\n image_name = \"image_name\"\n callargs = {\"image_id\": image_id,\n \"image_name\": 
image_name,\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"remove_props\": None}\n\n image = self.service.update_image(image_id,\n image_name=image_name)\n\n self.assertEqual(mock_image__unify_image.return_value, image)\n self.service._impl.update_image.assert_called_once_with(**callargs)\n\n @mock.patch(\"%s.glance_common.UnifiedGlanceMixin._unify_image\" % PATH)\n def test_list_images(self, mock_image__unify_image):\n images = [mock.MagicMock()]\n self.service._impl.list_images.return_value = images\n\n status = \"active\"\n self.assertEqual([mock_image__unify_image.return_value],\n self.service.list_images(owner=\"foo\",\n visibility=\"shared\"))\n self.service._impl.list_images.assert_called_once_with(\n status=status,\n visibility=\"shared\",\n owner=\"foo\"\n )\n\n def test_set_visibility(self):\n image_id = \"image_id\"\n visibility = \"private\"\n\n self.service.set_visibility(image_id=image_id, visibility=visibility)\n self.service._impl.set_visibility.assert_called_once_with(\n image_id=image_id, visibility=visibility)\n" }, { "alpha_fraction": 0.5894308686256409, "alphanum_fraction": 0.5974298715591431, "avg_line_length": 41.36666488647461, "blob_id": "1c0a8d71ea6b0e278fc76726442b8a566dfc794b", "content_id": "aebf5654b72e13e677aa2e8a9b9170cde445d1fd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7626, "license_type": "permissive", "max_line_length": 79, "num_lines": 180, "path": "/tests/unit/task/ui/charts/test_osprofilerchart.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport datetime as dt\nimport os\nfrom unittest import mock\n\nfrom rally_openstack.task.ui.charts import osprofilerchart as osp_chart\nfrom tests.unit import test\n\n\nPATH = \"rally_openstack.task.ui.charts.osprofilerchart\"\nCHART_PATH = \"%s.OSProfilerChart\" % PATH\n\n\nclass OSProfilerChartTestCase(test.TestCase):\n\n def test__datetime_json_serialize(self):\n ts = dt.datetime(year=2018, month=7, day=3, hour=2)\n self.assertEqual(\"2018-07-03T02:00:00\",\n osp_chart._datetime_json_serialize(ts))\n self.assertEqual(\"A\", osp_chart._datetime_json_serialize(\"A\"))\n\n def test__return_raw_response_for_complete_data(self):\n title = \"TITLE\"\n trace_id = \"trace-id\"\n r = osp_chart.OSProfilerChart._return_raw_response_for_complete_data(\n {\"title\": title, \"data\": {\"trace_id\": trace_id}}\n )\n self.assertEqual(\n {\"title\": title, \"widget\": \"TextArea\", \"data\": [trace_id]},\n r\n )\n\n def test__generate_osprofiler_report(self):\n data = {\"ts\": dt.datetime(year=2018, month=7, day=3, hour=2)}\n\n mock_open = mock.mock_open(read_data=\"local=$LOCAL | data=$DATA\")\n with mock.patch.object(osp_chart, \"open\", mock_open):\n r = osp_chart.OSProfilerChart._generate_osprofiler_report(data)\n self.assertEqual(\n \"local=false | data={\\n \\\"ts\\\": \\\"2018-07-03T02:00:00\\\"\\n}\",\n r\n )\n self.assertEqual(1, mock_open.call_count)\n m_args, _m_kwargs = mock_open.call_args_list[0]\n self.assertTrue(os.path.exists(m_args[0]))\n\n def test__fetch_osprofiler_data(self):\n connection_str = \"https://example.com\"\n trace_id = 
\"trace-id\"\n\n mock_osp_drivers = mock.Mock()\n mock_osp_driver = mock_osp_drivers.base\n with mock.patch.dict(\n \"sys.modules\", {\"osprofiler.drivers\": mock_osp_drivers}):\n r = osp_chart.OSProfilerChart._fetch_osprofiler_data(\n connection_str, trace_id)\n self.assertIsNotNone(r)\n\n mock_osp_driver.get_driver.assert_called_once_with(connection_str)\n engine = mock_osp_driver.get_driver.return_value\n engine.get_report.assert_called_once_with(trace_id)\n self.assertEqual(engine.get_report.return_value, r)\n\n mock_osp_driver.get_driver.side_effect = Exception(\"Something\")\n with mock.patch.dict(\n \"sys.modules\", {\"osprofiler.drivers\": mock_osp_drivers}):\n r = osp_chart.OSProfilerChart._fetch_osprofiler_data(\n connection_str, trace_id)\n self.assertIsNone(r)\n\n @mock.patch(\"%s.charts.OutputEmbeddedExternalChart\" % PATH)\n @mock.patch(\"%s.charts.OutputEmbeddedChart\" % PATH)\n @mock.patch(\"%s._return_raw_response_for_complete_data\" % CHART_PATH)\n @mock.patch(\"%s._fetch_osprofiler_data\" % CHART_PATH)\n @mock.patch(\"%s._generate_osprofiler_report\" % CHART_PATH)\n def test_render_complete_data(\n self, mock__generate_osprofiler_report,\n mock__fetch_osprofiler_data,\n mock__return_raw_response_for_complete_data,\n mock_output_embedded_chart,\n mock_output_embedded_external_chart\n ):\n trace_id = \"trace-id\"\n title = \"TITLE\"\n\n # case 1: no connection-id, so data fpr text chart should be returned\n pdata = {\"data\": {\"trace_id\": trace_id}, \"title\": title}\n self.assertEqual(\n mock__return_raw_response_for_complete_data.return_value,\n osp_chart.OSProfilerChart.render_complete_data(\n copy.deepcopy(pdata))\n )\n mock__return_raw_response_for_complete_data.assert_called_once_with(\n pdata\n )\n mock__return_raw_response_for_complete_data.reset_mock()\n\n # case 2: check support for an old format when `trace_id` key is a list\n pdata = {\"data\": {\"trace_id\": [trace_id]}, \"title\": title}\n self.assertEqual(\n 
mock__return_raw_response_for_complete_data.return_value,\n osp_chart.OSProfilerChart.render_complete_data(\n copy.deepcopy(pdata))\n )\n pdata[\"data\"][\"trace_id\"] = pdata[\"data\"][\"trace_id\"][0]\n mock__return_raw_response_for_complete_data.assert_called_once_with(\n pdata\n )\n mock__return_raw_response_for_complete_data.reset_mock()\n\n # case 3: connection-id is provided, but osp backed is not available\n mock__fetch_osprofiler_data.return_value = None\n pdata = {\"data\": {\"trace_id\": trace_id, \"conn_str\": \"conn\"},\n \"title\": title}\n self.assertEqual(\n mock__return_raw_response_for_complete_data.return_value,\n osp_chart.OSProfilerChart.render_complete_data(\n copy.deepcopy(pdata))\n )\n mock__return_raw_response_for_complete_data.assert_called_once_with(\n pdata\n )\n mock__return_raw_response_for_complete_data.reset_mock()\n\n # case 4: connection-id is provided\n mock__fetch_osprofiler_data.return_value = \"OSP_DATA\"\n mock__generate_osprofiler_report.return_value = \"DD\"\n pdata = {\"data\": {\"trace_id\": trace_id, \"conn_str\": \"conn\"},\n \"title\": title}\n self.assertEqual(\n mock_output_embedded_chart.render_complete_data.return_value,\n osp_chart.OSProfilerChart.render_complete_data(\n copy.deepcopy(pdata))\n )\n mock_output_embedded_chart.render_complete_data.\\\n assert_called_once_with({\"title\": \"%s : %s\" % (title, trace_id),\n \"widget\": \"EmbeddedChart\",\n \"data\": \"DD\"})\n self.assertFalse(mock__return_raw_response_for_complete_data.called)\n mock_output_embedded_chart.render_complete_data.reset_mock()\n\n # case 5: connection-id is provided with workload-id an\n pdata = {\"data\": {\"trace_id\": trace_id,\n \"conn_str\": \"conn\",\n \"workload_uuid\": \"W_ID\",\n \"iteration\": 777},\n \"title\": title}\n\n mock_open = mock.mock_open()\n with mock.patch.object(osp_chart, \"open\", mock_open):\n with mock.patch(\"%s.CONF.openstack\" % PATH) as mock_cfg_os:\n mock_cfg_os.osprofiler_chart_mode = \"/path\"\n\n r = 
osp_chart.OSProfilerChart.render_complete_data(\n copy.deepcopy(pdata))\n\n mock_external_chat = mock_output_embedded_external_chart\n self.assertEqual(\n mock_external_chat.render_complete_data.return_value,\n r\n )\n mock_external_chat.render_complete_data.\\\n assert_called_once_with({\"title\": \"%s : %s\" % (title, trace_id),\n \"widget\": \"EmbeddedChart\",\n \"data\": \"/path/w_W_ID-777.html\"})\n self.assertFalse(mock__return_raw_response_for_complete_data.called)\n" }, { "alpha_fraction": 0.6408056616783142, "alphanum_fraction": 0.650130569934845, "avg_line_length": 39.621212005615234, "blob_id": "54be1933619fda15987d280a9a967382cbadc379", "content_id": "3d08e4329c9b18e1672452243eb7f00250097b42", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2681, "license_type": "permissive", "max_line_length": 78, "num_lines": 66, "path": "/tests/unit/task/scenarios/gnocchi/test_metric.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.gnocchi import metric\nfrom tests.unit import test\n\n\nclass GnocchiMetricTestCase(test.ScenarioTestCase):\n\n def get_test_context(self):\n context = super(GnocchiMetricTestCase, self).get_test_context()\n context.update({\n \"admin\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake\"}\n })\n return context\n\n def setUp(self):\n super(GnocchiMetricTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.gnocchi.metric.GnocchiService\")\n self.addCleanup(patch.stop)\n self.mock_metric = patch.start()\n\n def test_list_metric(self):\n metric_service = self.mock_metric.return_value\n scenario = metric.ListMetric(self.context)\n scenario.run(limit=42)\n metric_service.list_metric.assert_called_once_with(limit=42)\n\n def test_create_metric(self):\n metric_service = self.mock_metric.return_value\n scenario = metric.CreateMetric(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario.run(archive_policy_name=\"foo\", resource_id=\"123\", unit=\"u\")\n metric_service.create_metric.assert_called_once_with(\n \"name\", archive_policy_name=\"foo\", resource_id=\"123\", unit=\"u\")\n\n def test_create_delete_metric(self):\n metric_service = self.mock_metric.return_value\n scenario = metric.CreateDeleteMetric(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario.run(archive_policy_name=\"bar\", resource_id=\"123\", unit=\"v\")\n metric_service.create_metric.assert_called_once_with(\n \"name\", archive_policy_name=\"bar\", resource_id=\"123\", unit=\"v\")\n self.assertEqual(1, metric_service.delete_metric.call_count)\n" }, { "alpha_fraction": 0.5783658027648926, 
"alphanum_fraction": 0.580110490322113, "avg_line_length": 37.21111297607422, "blob_id": "9c742c9e885546c70594cef50f1afeaa22691913", "content_id": "a0c30b649726f47472d72900f62b5d4d81bf31ae", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3439, "license_type": "permissive", "max_line_length": 78, "num_lines": 90, "path": "/tests/unit/rally_jobs/test_jobs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport shutil\nimport tempfile\nimport traceback\nfrom unittest import mock\n\nfrom rally import api\nfrom rally.cli import yamlutils as yaml\nfrom rally.common.plugin import discover\nfrom rally.task import engine\nfrom rally.task import task_cfg\n\nimport rally_openstack\nfrom tests.unit import fakes\nfrom tests.unit import test\n\n\nclass RallyJobsTestCase(test.TestCase):\n rally_jobs_path = os.path.join(\n os.path.dirname(rally_openstack.__file__), \"..\", \"rally-jobs\")\n\n def setUp(self):\n super(RallyJobsTestCase, self).setUp()\n self.tmp_dir = tempfile.mkdtemp()\n os.makedirs(os.path.join(self.tmp_dir, \".rally\"))\n shutil.copytree(os.path.join(self.rally_jobs_path, \"extra\"),\n os.path.join(self.tmp_dir, \".rally\", \"extra\"))\n\n self.original_home = os.environ[\"HOME\"]\n os.environ[\"HOME\"] = self.tmp_dir\n\n def return_home():\n os.environ[\"HOME\"] = 
self.original_home\n self.addCleanup(shutil.rmtree, self.tmp_dir)\n\n self.addCleanup(return_home)\n\n def test_schema_is_valid(self):\n discover.load_plugins(os.path.join(self.rally_jobs_path, \"plugins\"))\n\n files = {f for f in os.listdir(self.rally_jobs_path)\n if (os.path.isfile(os.path.join(self.rally_jobs_path, f))\n and f.endswith(\".yaml\")\n and not f.endswith(\"_args.yaml\"))}\n\n # TODO(andreykurilin): figure out why it fails\n files -= {\"rally-mos.yaml\", \"sahara-clusters.yaml\"}\n\n for filename in files:\n full_path = os.path.join(self.rally_jobs_path, filename)\n\n with open(full_path) as task_file:\n try:\n args_file = os.path.join(\n self.rally_jobs_path,\n filename.rsplit(\".\", 1)[0] + \"_args.yaml\")\n\n args = {}\n if os.path.exists(args_file):\n args = yaml.safe_load(open(args_file).read())\n if not isinstance(args, dict):\n raise TypeError(\n \"args file %s must be dict in yaml or json \"\n \"presentation\" % args_file)\n\n task_inst = api._Task(api.API(skip_db_check=True))\n task = task_inst.render_template(\n task_template=task_file.read(), **args)\n task = task_cfg.TaskConfig(yaml.safe_load(task))\n task_obj = fakes.FakeTask({\"uuid\": full_path})\n\n eng = engine.TaskEngine(task, task_obj, mock.Mock())\n eng.validate(only_syntax=True)\n except Exception:\n print(traceback.format_exc())\n self.fail(\"Wrong task input file: %s\" % full_path)\n" }, { "alpha_fraction": 0.671819806098938, "alphanum_fraction": 0.6753533482551575, "avg_line_length": 41.71697998046875, "blob_id": "c7a2a1506f6df8dcc4c9a462bfa0ef9806a6263b", "content_id": "6bee5595eb603a990eae44b4e66349b93fccd452", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2264, "license_type": "permissive", "max_line_length": 78, "num_lines": 53, "path": "/tests/unit/task/scenarios/watcher/test_basic.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016: Servionica LTD.\n# 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.watcher import basic\nfrom tests.unit import test\n\n\nclass WatcherTestCase(test.ScenarioTestCase):\n\n def test_create_audit_template_and_delete(self):\n scenario = basic.CreateAuditTemplateAndDelete(self.context)\n audit_template = mock.Mock()\n scenario._create_audit_template = mock.MagicMock(\n return_value=audit_template)\n scenario._delete_audit_template = mock.MagicMock()\n scenario.run(\"goal\", \"strategy\")\n scenario._create_audit_template.assert_called_once_with(\"goal\",\n \"strategy\")\n scenario._delete_audit_template.assert_called_once_with(\n audit_template.uuid)\n\n def test_list_audit_template(self):\n scenario = basic.ListAuditTemplates(self.context)\n scenario._list_audit_templates = mock.MagicMock()\n scenario.run()\n scenario._list_audit_templates.assert_called_once_with(\n detail=False, goal=None, limit=None, name=None, sort_dir=None,\n sort_key=None, strategy=None)\n\n def test_create_audit_and_delete(self):\n mock_audit = mock.MagicMock()\n scenario = basic.CreateAuditAndDelete(self.context)\n scenario.context = mock.MagicMock()\n scenario._create_audit = mock.MagicMock(return_value=mock_audit)\n scenario.sleep_between = mock.MagicMock()\n scenario._delete_audit = mock.MagicMock()\n scenario.run()\n scenario._create_audit.assert_called_once_with(mock.ANY)\n 
scenario._delete_audit.assert_called_once_with(mock_audit)\n" }, { "alpha_fraction": 0.6144356727600098, "alphanum_fraction": 0.6215223073959351, "avg_line_length": 35.28571319580078, "blob_id": "84d0f572e6e575338060edcf3a0011070b79e4e5", "content_id": "fbe4ec084872286eac52c8d837fb82c379ee5144", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3810, "license_type": "permissive", "max_line_length": 79, "num_lines": 105, "path": "/tests/unit/task/scenarios/magnum/test_k8s_pods.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally import exceptions\nfrom rally_openstack.task.scenarios.magnum import k8s_pods\nfrom tests.unit import test\n\n\[email protected]\nclass K8sPodsTestCase(test.ScenarioTestCase):\n\n def test_list_pods(self):\n scenario = k8s_pods.ListPods()\n scenario._list_v1pods = mock.Mock()\n\n scenario.run()\n\n scenario._list_v1pods.assert_called_once_with()\n\n @ddt.data([\"manifest.json\"], [\"manifest.yaml\"])\n def test_create_pods(self, manifests):\n manifest = manifests[0]\n scenario = k8s_pods.CreatePods()\n file_content = \"data: fake_content\"\n if manifest == \"manifest.json\":\n file_content = \"{\\\"data\\\": \\\"fake_content\\\"}\"\n file_mock = mock.mock_open(read_data=file_content)\n fake_pod = mock.Mock()\n scenario._create_v1pod = mock.MagicMock(return_value=fake_pod)\n\n with mock.patch(\n \"rally_openstack.task.scenarios.magnum.k8s_pods.open\",\n file_mock, create=True) as m:\n scenario.run(manifests)\n\n m.assert_called_once_with(manifest, \"r\")\n m.return_value.read.assert_called_once_with()\n scenario._create_v1pod.assert_called_once_with(\n {\"data\": \"fake_content\"})\n\n # test error cases:\n # 1. 
pod not created\n scenario._create_v1pod = mock.MagicMock(return_value=None)\n\n with mock.patch(\n \"rally_openstack.task.scenarios.magnum.k8s_pods.open\",\n file_mock, create=True) as m:\n self.assertRaises(\n exceptions.RallyAssertionError,\n scenario.run, manifests)\n\n m.assert_called_with(manifest, \"r\")\n m.return_value.read.assert_called_with()\n scenario._create_v1pod.assert_called_with(\n {\"data\": \"fake_content\"})\n\n @ddt.data([\"manifest.json\"], [\"manifest.yaml\"])\n def test_create_rcs(self, manifests):\n manifest = manifests[0]\n scenario = k8s_pods.CreateRcs()\n file_content = \"data: fake_content\"\n if manifest == \"manifest.json\":\n file_content = \"{\\\"data\\\": \\\"fake_content\\\"}\"\n file_mock = mock.mock_open(read_data=file_content)\n fake_rc = mock.Mock()\n scenario._create_v1rc = mock.MagicMock(return_value=fake_rc)\n\n with mock.patch(\n \"rally_openstack.task.scenarios.magnum.k8s_pods.open\",\n file_mock, create=True) as m:\n scenario.run(manifests)\n\n m.assert_called_once_with(manifest, \"r\")\n m.return_value.read.assert_called_once_with()\n scenario._create_v1rc.assert_called_once_with({\"data\": \"fake_content\"})\n\n # test error cases:\n # 1. 
rc not created\n scenario._create_v1rc = mock.MagicMock(return_value=None)\n\n with mock.patch(\n \"rally_openstack.task.scenarios.magnum.k8s_pods.open\",\n file_mock, create=True) as m:\n self.assertRaises(\n exceptions.RallyAssertionError,\n scenario.run, manifests)\n\n m.assert_called_with(manifest, \"r\")\n m.return_value.read.assert_called_with()\n scenario._create_v1rc.assert_called_with({\"data\": \"fake_content\"})\n" }, { "alpha_fraction": 0.6240966320037842, "alphanum_fraction": 0.6250114440917969, "avg_line_length": 43.98353958129883, "blob_id": "c49f650473adcde73327d859216ff747f1d9d3a8", "content_id": "30fcf52a124d5ac3f48e04016ab72cc89fce5493", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10931, "license_type": "permissive", "max_line_length": 79, "num_lines": 243, "path": "/tests/unit/verification/tempest/test_manager.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport os\nimport subprocess\nfrom unittest import mock\n\nfrom rally import exceptions\n\nfrom rally_openstack.verification.tempest import manager\nfrom tests.unit import test\n\n\nPATH = \"rally_openstack.verification.tempest.manager\"\n\n\nclass TempestManagerTestCase(test.TestCase):\n\n def test_run_environ_property(self):\n mock.patch(\"%s.testr.TestrLauncher.run_environ\" % PATH,\n new={\"some\": \"key\"}).start()\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n env = {\"some\": \"key\",\n \"OS_TEST_PATH\": os.path.join(tempest.repo_dir,\n \"tempest/test_discover\"),\n \"TEMPEST_CONFIG\": \"tempest.conf\",\n \"TEMPEST_CONFIG_DIR\": os.path.dirname(tempest.configfile)}\n\n self.assertEqual(env, tempest.run_environ)\n\n def test_configfile_property(self):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n self.assertEqual(os.path.join(tempest.home_dir, \"tempest.conf\"),\n tempest.configfile)\n\n @mock.patch(\"%s.open\" % PATH, side_effect=mock.mock_open())\n def test_get_configuration(self, mock_open):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n tempest.get_configuration()\n\n mock_open.assert_called_once_with(tempest.configfile)\n mock_open.side_effect().read.assert_called_once_with()\n\n @mock.patch(\"%s.config.TempestConfigfileManager\" % PATH)\n def test_configure(self, mock_tempest_configfile_manager):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n cm = mock_tempest_configfile_manager.return_value\n extra_options = mock.Mock()\n\n self.assertEqual(cm.create.return_value,\n tempest.configure(extra_options))\n mock_tempest_configfile_manager.assert_called_once_with(\n tempest.verifier.env)\n cm.create.assert_called_once_with(tempest.configfile, extra_options)\n\n @mock.patch(\"%s.config.os.path.exists\" % PATH)\n def 
test_is_configured(self, mock_exists):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n self.assertTrue(tempest.is_configured())\n\n @mock.patch(\"rally.verification.utils.extend_configfile\")\n def test_extend_configuration(self, mock_extend_configfile):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n extra_options = mock.Mock()\n self.assertEqual(mock_extend_configfile.return_value,\n tempest.extend_configuration(extra_options))\n mock_extend_configfile.assert_called_once_with(extra_options,\n tempest.configfile)\n\n @mock.patch(\"%s.open\" % PATH, side_effect=mock.mock_open())\n def test_override_configuration(self, mock_open):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n new_content = mock.Mock()\n\n tempest.override_configuration(new_content)\n\n mock_open.assert_called_once_with(tempest.configfile, \"w\")\n mock_open.side_effect().write.assert_called_once_with(new_content)\n\n @mock.patch(\"%s.os.path.exists\" % PATH)\n @mock.patch(\"%s.utils.check_output\" % PATH)\n @mock.patch(\"%s.TempestManager.check_system_wide\" % PATH)\n def test_install_extension(self, mock_check_system_wide, mock_check_output,\n mock_exists):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\",\n system_wide=True))\n e = self.assertRaises(NotImplementedError, tempest.install_extension,\n None, None, {\"key\": \"value\"})\n self.assertIn(\"verifiers don't support extra installation settings\",\n \"%s\" % e)\n\n test_reqs_path = os.path.join(tempest.base_dir, \"extensions\",\n \"example\", \"test-requirements.txt\")\n\n # case #1 system-wide installation\n source = \"https://github.com/example/example\"\n tempest.install_extension(source)\n\n path = os.path.join(tempest.base_dir, \"extensions\")\n mock_check_output.assert_called_once_with(\n [\"pip\", \"install\", \"--no-deps\", \"--src\", path, \"-e\",\n \"git+https://github.com/example/example@master#egg=example\"],\n cwd=tempest.base_dir, 
env=tempest.environ)\n mock_check_system_wide.assert_called_once_with(\n reqs_file_path=test_reqs_path)\n\n mock_check_output.reset_mock()\n\n # case #2 virtual env with specified version\n tempest.verifier.system_wide = False\n version = \"some\"\n tempest.install_extension(source, version=version)\n\n self.assertEqual([\n mock.call([\n \"pip\", \"install\", \"--src\", path, \"-e\",\n \"git+https://github.com/example/example@some#egg=example\"],\n cwd=tempest.base_dir, env=tempest.environ),\n mock.call([\"pip\", \"install\", \"-r\", test_reqs_path],\n cwd=tempest.base_dir, env=tempest.environ)],\n mock_check_output.call_args_list)\n\n @mock.patch(\"%s.utils.check_output\" % PATH)\n def test_list_extensions(self, mock_check_output):\n plugins_list = [\n {\"name\": \"some\", \"entry_point\": \"foo.bar\", \"location\": \"/tmp\"},\n {\"name\": \"another\", \"entry_point\": \"bar.foo\", \"location\": \"/tmp\"}\n ]\n mock_check_output.return_value = json.dumps(plugins_list)\n\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n\n self.assertEqual(plugins_list, tempest.list_extensions())\n self.assertEqual(1, mock_check_output.call_count)\n mock_check_output.reset_mock()\n\n mock_check_output.side_effect = subprocess.CalledProcessError(\"\", \"\")\n self.assertRaises(exceptions.RallyException, tempest.list_extensions)\n self.assertEqual(1, mock_check_output.call_count)\n\n @mock.patch(\"%s.TempestManager.list_extensions\" % PATH)\n @mock.patch(\"%s.os.path.exists\" % PATH)\n @mock.patch(\"%s.shutil.rmtree\" % PATH)\n def test_uninstall_extension(self, mock_rmtree, mock_exists,\n mock_list_extensions):\n plugins_list = [\n {\"name\": \"some\", \"entry_point\": \"foo.bar\", \"location\": \"/tmp\"},\n {\"name\": \"another\", \"entry_point\": \"bar.foo\", \"location\": \"/tmp\"}\n ]\n mock_list_extensions.return_value = plugins_list\n\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n\n tempest.uninstall_extension(\"some\")\n 
mock_rmtree.assert_called_once_with(plugins_list[0][\"location\"])\n mock_list_extensions.assert_called_once_with()\n\n mock_rmtree.reset_mock()\n mock_list_extensions.reset_mock()\n\n self.assertRaises(exceptions.RallyException,\n tempest.uninstall_extension, \"unexist\")\n\n mock_list_extensions.assert_called_once_with()\n self.assertFalse(mock_rmtree.called)\n\n @mock.patch(\"%s.TempestManager._transform_pattern\" % PATH)\n @mock.patch(\"%s.testr.TestrLauncher.list_tests\" % PATH)\n def test_list_tests(self, mock_testr_launcher_list_tests,\n mock__transform_pattern):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n\n self.assertEqual(mock_testr_launcher_list_tests.return_value,\n tempest.list_tests())\n mock_testr_launcher_list_tests.assert_called_once_with(\"\")\n self.assertFalse(mock__transform_pattern.called)\n mock_testr_launcher_list_tests.reset_mock()\n\n pattern = mock.Mock()\n\n self.assertEqual(mock_testr_launcher_list_tests.return_value,\n tempest.list_tests(pattern))\n mock_testr_launcher_list_tests.assert_called_once_with(\n mock__transform_pattern.return_value)\n mock__transform_pattern.assert_called_once_with(pattern)\n\n @mock.patch(\"%s.testr.TestrLauncher.validate_args\" % PATH)\n def test_validate_args(self, mock_testr_launcher_validate_args):\n tm = manager.TempestManager(mock.Mock())\n tm.validate_args({})\n tm.validate_args({\"pattern\": \"some.test\"})\n tm.validate_args({\"pattern\": \"set=smoke\"})\n tm.validate_args({\"pattern\": \"set=compute\"})\n tm.validate_args({\"pattern\": \"set=full\"})\n\n e = self.assertRaises(exceptions.ValidationError, tm.validate_args,\n {\"pattern\": \"foo=bar\"})\n self.assertEqual(\"Validation error: 'pattern' argument should be a \"\n \"regexp or set name (format: 'tempest.api.identity.\"\n \"v3', 'set=smoke').\", \"%s\" % e)\n\n e = self.assertRaises(exceptions.ValidationError, tm.validate_args,\n {\"pattern\": \"set=foo\"})\n self.assertIn(\"Test set 'foo' not found in 
available Tempest test \"\n \"sets. Available sets are \", \"%s\" % e)\n\n def test__transform_pattern(self):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n\n self.assertEqual(\"foo\", tempest._transform_pattern(\"foo\"))\n self.assertEqual(\"foo=bar\", tempest._transform_pattern(\"foo=bar\"))\n self.assertEqual(\"\", tempest._transform_pattern(\"set=full\"))\n self.assertEqual(\"smoke\", tempest._transform_pattern(\"set=smoke\"))\n self.assertEqual(\"tempest.bar\", tempest._transform_pattern(\"set=bar\"))\n self.assertEqual(\"tempest.api.compute\",\n tempest._transform_pattern(\"set=compute\"))\n\n @mock.patch(\"%s.TempestManager._transform_pattern\" % PATH)\n def test_prepare_run_args(self, mock__transform_pattern):\n tempest = manager.TempestManager(mock.MagicMock(uuid=\"uuuiiiddd\"))\n\n self.assertEqual({}, tempest.prepare_run_args({}))\n self.assertFalse(mock__transform_pattern.called)\n\n self.assertEqual({\"foo\": \"bar\"},\n tempest.prepare_run_args({\"foo\": \"bar\"}))\n self.assertFalse(mock__transform_pattern.called)\n\n pattern = mock.Mock()\n self.assertEqual({\"pattern\": mock__transform_pattern.return_value},\n tempest.prepare_run_args({\"pattern\": pattern}))\n mock__transform_pattern.assert_called_once_with(pattern)\n" }, { "alpha_fraction": 0.6124644875526428, "alphanum_fraction": 0.614900529384613, "avg_line_length": 38.725807189941406, "blob_id": "8305d72002243058c8165c7d9fe6424414eda575", "content_id": "9ec57d1625cab86ac44eaf547ad07ca828354765", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4926, "license_type": "permissive", "max_line_length": 78, "num_lines": 124, "path": "/tests/unit/task/scenarios/loadbalancer/test_loadbalancers.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018: Red Hat Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you 
may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.octavia import loadbalancers\nfrom tests.unit import test\n\n\nclass LoadBalancersTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(LoadBalancersTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.loadbalancer.octavia.Octavia\")\n self.addCleanup(patch.stop)\n self.mock_loadbalancers = patch.start()\n\n def _get_context(self):\n context = super(LoadBalancersTestCase, self).get_test_context()\n context.update({\n \"user\": {\n \"id\": \"fake_user\",\n \"tenant_id\": \"fake_tenant\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake_tenant\",\n \"networks\": [{\"id\": \"fake_net\",\n \"subnets\": [\"fake_subnet\"]}]}})\n return context\n\n def test_create_and_list_loadbalancers(self):\n loadbalancer_service = self.mock_loadbalancers.return_value\n scenario = loadbalancers.CreateAndListLoadbalancers(\n self._get_context())\n scenario.run()\n\n loadbalancer_service.load_balancer_list.assert_called_once_with()\n\n def test_create_and_delete_loadbalancers(self):\n loadbalancer_service = self.mock_loadbalancers.return_value\n scenario = loadbalancers.CreateAndDeleteLoadbalancers(\n self._get_context())\n scenario.run()\n lb = [{\n \"loadbalancer\": {\n \"id\": \"loadbalancer-id\"\n }\n }]\n loadbalancer_service.load_balancer_create.return_value = lb\n loadbalancer_service.load_balancer_create(\n admin_state=True, description=None, flavor_id=None,\n listeners=None, 
provider=None,\n subnet_id=\"fake_subnet\", vip_qos_policy_id=None)\n self.assertEqual(1,\n loadbalancer_service.load_balancer_delete.call_count)\n\n def test_create_and_update_loadbalancers(self):\n loadbalancer_service = self.mock_loadbalancers.return_value\n scenario = loadbalancers.CreateAndUpdateLoadBalancers(\n self._get_context())\n scenario.run()\n lb = [{\n \"loadbalancer\": {\n \"id\": \"loadbalancer-id\"\n }\n }]\n loadbalancer_service.load_balancer_create.return_value = lb\n loadbalancer_service.load_balancer_create(\n admin_state=True, description=None, flavor_id=None,\n listeners=None, provider=None,\n subnet_id=\"fake_subnet\", vip_qos_policy_id=None)\n self.assertEqual(1,\n loadbalancer_service.load_balancer_set.call_count)\n\n def test_create_and_show_stats(self):\n loadbalancer_service = self.mock_loadbalancers.return_value\n scenario = loadbalancers.CreateAndShowStatsLoadBalancers(\n self._get_context())\n scenario.run()\n lb = [{\n \"loadbalancer\": {\n \"id\": \"loadbalancer-id\"\n }\n }]\n loadbalancer_service.load_balancer_create.return_value = lb\n loadbalancer_service.load_balancer_create(\n admin_state=True, description=None, flavor_id=None,\n listeners=None, provider=None,\n subnet_id=\"fake_subnet\", vip_qos_policy_id=None)\n self.assertEqual(\n 1, loadbalancer_service.load_balancer_stats_show.call_count)\n\n def test_create_and_show_loadbalancers(self):\n loadbalancer_service = self.mock_loadbalancers.return_value\n scenario = loadbalancers.CreateAndShowLoadBalancers(\n self._get_context())\n scenario.run()\n lb = [{\n \"loadbalancer\": {\n \"id\": \"loadbalancer-id\"\n }\n }]\n lb_show = {\"id\": \"loadbalancer-id\"}\n loadbalancer_service.load_balancer_create.return_value = lb\n loadbalancer_service.load_balancer_show.return_value = lb_show\n loadbalancer_service.load_balancer_create(\n admin_state=True, description=None, flavor_id=None,\n listeners=None, provider=None,\n subnet_id=\"fake_subnet\", vip_qos_policy_id=None)\n 
self.assertEqual(1,\n loadbalancer_service.load_balancer_show.call_count)\n" }, { "alpha_fraction": 0.669413685798645, "alphanum_fraction": 0.6712973713874817, "avg_line_length": 41.470001220703125, "blob_id": "dc6c74ffa27de698176a7956735f5c0af8221826", "content_id": "ff109b9ab8e7e8f96d4574a7d538a15dfeff20e2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4247, "license_type": "permissive", "max_line_length": 78, "num_lines": 100, "path": "/tests/unit/task/scenarios/barbican/test_containers.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat Inc\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.barbican import containers\nfrom tests.unit import test\n\n\nclass BarbicanContainersTestCase(test.ScenarioTestCase):\n\n def get_test_context(self):\n context = super(BarbicanContainersTestCase, self).get_test_context()\n context.update({\n \"admin\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake\"}\n })\n return context\n\n def setUp(self):\n super(BarbicanContainersTestCase, self).setUp()\n m = \"rally_openstack.common.services.key_manager.barbican\"\n patch = mock.patch(\"%s.BarbicanService\" % m)\n self.addCleanup(patch.stop)\n self.mock_secrets = patch.start()\n\n def test_list_containers(self):\n secrets_service = self.mock_secrets.return_value\n scenario = containers.BarbicanContainersList(self.context)\n scenario.run()\n secrets_service.list_container.assert_called_once_with()\n\n def test_generic_container_create_and_delete(self):\n secrets_service = self.mock_secrets.return_value\n fake_container = {\"container_ref\": \"fake_container_ref\"}\n fake_container = secrets_service.container_create.return_value\n scenario = containers.BarbicanContainersGenericCreateAndDelete(\n self.context)\n scenario.run()\n secrets_service.container_create.assert_called_once_with()\n secrets_service.container_delete.assert_called_once_with(\n fake_container.container_ref)\n\n def test_generic_container_create_and_add_secret(self):\n secrets_service = self.mock_secrets.return_value\n fake_container = {\"container_ref\": \"fake_container_ref\"}\n fake_secrets = {\"secret_ref\": \"fake_secret_ref\"}\n fake_container = secrets_service.container_create.return_value\n fake_secrets = secrets_service.create_secret.return_value\n scenario = 
containers.BarbicanContainersGenericCreateAndAddSecret(\n self.context)\n scenario.run()\n\n secrets_service.create_secret.assert_called_once_with()\n secrets_service.container_create.assert_called_once_with(\n secrets={\"secret\": fake_secrets})\n secrets_service.container_delete.assert_called_once_with(\n fake_container.container_ref)\n\n def test_certificate_coentaineri_create_and_delete(self):\n secrets_service = self.mock_secrets.return_value\n fake_container = {\"container_ref\": \"fake_container_ref\"}\n fake_container = secrets_service.create_certificate_container \\\n .return_value\n scenario = containers.BarbicanContainersCertificateCreateAndDelete(\n self.context)\n scenario.run()\n secrets_service.create_certificate_container.assert_called_once_with()\n secrets_service.container_delete.assert_called_once_with(\n fake_container.container_ref)\n\n def test_rsa_container_create_and_delete(self):\n secrets_service = self.mock_secrets.return_value\n fake_container = {\"container_ref\": \"fake_container_ref\"}\n fake_container = secrets_service.create_rsa_container.return_value\n scenario = containers.BarbicanContainersRSACreateAndDelete(\n self.context)\n scenario.run()\n secrets_service.create_rsa_container.assert_called_once_with()\n secrets_service.container_delete.assert_called_once_with(\n fake_container.container_ref)\n" }, { "alpha_fraction": 0.5603469610214233, "alphanum_fraction": 0.5629122853279114, "avg_line_length": 39.93000030517578, "blob_id": "d256fff294b36a79b96270516e52657116408486", "content_id": "295ca66b95aaa7368d4474e95fcdaa3d462e953a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8186, "license_type": "permissive", "max_line_length": 79, "num_lines": 200, "path": "/tests/unit/common/services/image/test_glance_v1.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 
2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\nimport fixtures\n\nfrom rally_openstack.common.services.image import glance_v1\nfrom rally_openstack.common.services.image import image\nfrom tests.unit import test\n\n\nPATH = (\"rally_openstack.common.services.image.glance_common.\"\n \"UnifiedGlanceMixin._unify_image\")\n\n\[email protected]\nclass GlanceV1ServiceTestCase(test.TestCase):\n def setUp(self):\n super(GlanceV1ServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.gc = self.clients.glance.return_value\n self.name_generator = mock.MagicMock()\n self.service = glance_v1.GlanceV1Service(\n self.clients, name_generator=self.name_generator)\n self.mock_wait_for_status = fixtures.MockPatch(\n \"rally.task.utils.wait_for_status\")\n self.useFixture(self.mock_wait_for_status)\n\n def _get_temp_file_name(self):\n # return a temp file that will be cleaned automatically\n temp_dir = self.useFixture(fixtures.TempDir())\n return temp_dir.join(\"temp-file-name\")\n\n @ddt.data({\"location\": \"image_location\", \"is_public\": True, \"temp\": False},\n {\"location\": \"image_location\", \"is_public\": False, \"temp\": True})\n @ddt.unpack\n def test_create_image(self, location, is_public, temp):\n image_name = \"image_name\"\n container_format = \"container_format\"\n disk_format = \"disk_format\"\n properties = {\"fakeprop\": \"fake\"}\n\n # override the location with a private temp file\n if temp:\n location = self._get_temp_file_name()\n\n image = 
self.service.create_image(\n image_name=image_name,\n container_format=container_format,\n image_location=location,\n disk_format=disk_format,\n is_public=is_public,\n properties=properties)\n\n call_args = {\"container_format\": container_format,\n \"disk_format\": disk_format,\n \"is_public\": is_public,\n \"name\": image_name,\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"properties\": properties,\n \"copy_from\": location}\n\n self.gc.images.create.assert_called_once_with(**call_args)\n self.assertEqual(image, self.mock_wait_for_status.mock.return_value)\n\n @ddt.data({\"image_name\": None},\n {\"image_name\": \"test_image_name\"})\n @ddt.unpack\n def test_update_image(self, image_name):\n image_id = \"image_id\"\n min_disk = 0\n min_ram = 0\n expected_image_name = image_name or self.name_generator.return_value\n\n image = self.service.update_image(image_id=image_id,\n image_name=image_name,\n min_disk=min_disk,\n min_ram=min_ram)\n self.assertEqual(self.gc.images.update.return_value, image)\n self.gc.images.update.assert_called_once_with(image_id,\n name=expected_image_name,\n min_disk=min_disk,\n min_ram=min_ram)\n\n @ddt.data({\"status\": \"activate\", \"is_public\": True, \"owner\": \"owner\"},\n {\"status\": \"activate\", \"is_public\": False, \"owner\": \"owner\"},\n {\"status\": \"activate\", \"is_public\": None, \"owner\": \"owner\"})\n @ddt.unpack\n def test_list_images(self, status, is_public, owner):\n self.service.list_images(is_public=is_public, status=status,\n owner=owner)\n self.gc.images.list.assert_called_once_with(status=status,\n owner=owner,\n is_public=is_public)\n\n def test_set_visibility(self):\n image_id = \"image_id\"\n is_public = True\n self.service.set_visibility(image_id=image_id)\n self.gc.images.update.assert_called_once_with(\n image_id, is_public=is_public)\n\n\[email protected]\nclass UnifiedGlanceV1ServiceTestCase(test.TestCase):\n def setUp(self):\n super(UnifiedGlanceV1ServiceTestCase, self).setUp()\n self.clients = 
mock.MagicMock()\n self.service = glance_v1.UnifiedGlanceV1Service(self.clients)\n self.service._impl = mock.create_autospec(self.service._impl)\n\n @ddt.data({\"visibility\": \"public\"},\n {\"visibility\": \"private\"})\n @ddt.unpack\n @mock.patch(PATH)\n def test_create_image(self, mock_image__unify_image, visibility):\n image_name = \"image_name\"\n container_format = \"container_format\"\n image_location = \"image_location\"\n disk_format = \"disk_format\"\n properties = {\"fakeprop\": \"fake\"}\n\n image = self.service.create_image(image_name=image_name,\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n properties=properties)\n\n is_public = visibility == \"public\"\n callargs = {\"image_name\": image_name,\n \"container_format\": container_format,\n \"image_location\": image_location,\n \"disk_format\": disk_format,\n \"is_public\": is_public,\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"properties\": properties}\n self.service._impl.create_image.assert_called_once_with(**callargs)\n self.assertEqual(mock_image__unify_image.return_value, image)\n\n @mock.patch(PATH)\n def test_update_image(self, mock_image__unify_image):\n image_id = \"image_id\"\n image_name = \"image_name\"\n callargs = {\"image_id\": image_id,\n \"image_name\": image_name,\n \"min_disk\": 0,\n \"min_ram\": 0}\n\n image = self.service.update_image(image_id,\n image_name=image_name)\n\n self.assertEqual(mock_image__unify_image.return_value, image)\n self.service._impl.update_image.assert_called_once_with(**callargs)\n\n @mock.patch(PATH)\n def test_list_images(self, mock_image__unify_image):\n images = [mock.MagicMock()]\n self.service._impl.list_images.return_value = images\n\n status = \"active\"\n visibility = \"public\"\n is_public = visibility == \"public\"\n self.assertEqual([mock_image__unify_image.return_value],\n self.service.list_images(status,\n visibility=visibility))\n 
self.service._impl.list_images.assert_called_once_with(\n status=status,\n is_public=is_public)\n\n def test_set_visibility(self):\n image_id = \"image_id\"\n visibility = \"private\"\n is_public = visibility == \"public\"\n self.service.set_visibility(image_id=image_id, visibility=visibility)\n self.service._impl.set_visibility.assert_called_once_with(\n image_id=image_id, is_public=is_public)\n\n def test_set_visibility_failure(self):\n image_id = \"image_id\"\n visibility = \"error\"\n self.assertRaises(image.VisibilityException,\n self.service.set_visibility,\n image_id=image_id,\n visibility=visibility)\n" }, { "alpha_fraction": 0.6530289053916931, "alphanum_fraction": 0.6534984707832336, "avg_line_length": 42.129112243652344, "blob_id": "5f2addb3c5383b3fdd54040b3b6d65b7766a78da", "content_id": "1dccefd9a8e13280801e9d179427111d61727b0f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17036, "license_type": "permissive", "max_line_length": 78, "num_lines": 395, "path": "/rally_openstack/task/scenarios/heat/stacks.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import atomic\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.heat import utils\n\n\n\"\"\"Scenarios for Heat stacks.\"\"\"\n\n\[email protected](template_path={\"type\": \"file\"}, files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"validate_heat_template\", params=\"template_path\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n name=\"HeatStacks.create_and_list_stack\",\n platform=\"openstack\")\nclass CreateAndListStack(utils.HeatScenario):\n\n def run(self, template_path, parameters=None,\n files=None, environment=None):\n \"\"\"Create a stack and then list all stacks.\n\n Measure the \"heat stack-create\" and \"heat stack-list\" commands\n performance.\n\n :param template_path: path to stack template file\n :param parameters: parameters to use in heat template\n :param files: files used in template\n :param environment: stack environment definition\n \"\"\"\n stack = self._create_stack(template_path, parameters,\n files, environment)\n self.assertTrue(stack)\n list_stacks = self._list_stacks()\n self.assertIn(stack.id, [i.id for i in list_stacks])\n\n\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"HeatStacks.list_stacks_and_resources\",\n platform=\"openstack\")\nclass ListStacksAndResources(utils.HeatScenario):\n\n def run(self):\n \"\"\"List all resources from tenant stacks.\"\"\"\n stacks = self._list_stacks()\n for stack in stacks:\n with atomic.ActionTimer(self, \"heat.list_resources\"):\n 
self.clients(\"heat\").resources.list(stack.id)\n\n\[email protected](template_path={\"type\": \"file\"}, files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"validate_heat_template\", params=\"template_path\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n name=\"HeatStacks.create_and_delete_stack\",\n platform=\"openstack\")\nclass CreateAndDeleteStack(utils.HeatScenario):\n\n def run(self, template_path, parameters=None,\n files=None, environment=None):\n \"\"\"Create and then delete a stack.\n\n Measure the \"heat stack-create\" and \"heat stack-delete\" commands\n performance.\n\n :param template_path: path to stack template file\n :param parameters: parameters to use in heat template\n :param files: files used in template\n :param environment: stack environment definition\n \"\"\"\n\n stack = self._create_stack(template_path, parameters,\n files, environment)\n self._delete_stack(stack)\n\n\[email protected](template_path={\"type\": \"file\"}, files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"validate_heat_template\", params=\"template_path\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n name=\"HeatStacks.create_check_delete_stack\",\n platform=\"openstack\")\nclass CreateCheckDeleteStack(utils.HeatScenario):\n\n def run(self, template_path, parameters=None,\n files=None, environment=None):\n \"\"\"Create, check and delete a stack.\n\n Measure the performance of the following commands:\n - heat stack-create\n - heat action-check\n - heat stack-delete\n\n :param template_path: path to stack template file\n :param parameters: parameters to use in heat template\n :param files: files used in template\n :param environment: stack 
environment definition\n \"\"\"\n\n stack = self._create_stack(template_path, parameters,\n files, environment)\n self._check_stack(stack)\n self._delete_stack(stack)\n\n\[email protected](template_path={\"type\": \"file\"},\n updated_template_path={\"type\": \"file\"},\n files={\"type\": \"file_dict\"},\n updated_files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"validate_heat_template\", params=\"template_path\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n name=\"HeatStacks.create_update_delete_stack\",\n platform=\"openstack\")\nclass CreateUpdateDeleteStack(utils.HeatScenario):\n\n def run(self, template_path, updated_template_path,\n parameters=None, updated_parameters=None,\n files=None, updated_files=None,\n environment=None, updated_environment=None):\n \"\"\"Create, update and then delete a stack.\n\n Measure the \"heat stack-create\", \"heat stack-update\"\n and \"heat stack-delete\" commands performance.\n\n :param template_path: path to stack template file\n :param updated_template_path: path to updated stack template file\n :param parameters: parameters to use in heat template\n :param updated_parameters: parameters to use in updated heat template\n If not specified then parameters will be\n used instead\n :param files: files used in template\n :param updated_files: files used in updated template. 
If not specified\n files value will be used instead\n :param environment: stack environment definition\n :param updated_environment: environment definition for updated stack\n \"\"\"\n\n stack = self._create_stack(template_path, parameters,\n files, environment)\n self._update_stack(stack, updated_template_path,\n updated_parameters or parameters,\n updated_files or files,\n updated_environment or environment)\n self._delete_stack(stack)\n\n\[email protected](template_path={\"type\": \"file\"}, files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"validate_heat_template\", params=\"template_path\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n name=\"HeatStacks.create_stack_and_scale\",\n platform=\"openstack\")\nclass CreateStackAndScale(utils.HeatScenario):\n\n def run(self, template_path, output_key, delta,\n parameters=None, files=None,\n environment=None):\n \"\"\"Create an autoscaling stack and invoke a scaling policy.\n\n Measure the performance of autoscaling webhooks.\n\n :param template_path: path to template file that includes an\n OS::Heat::AutoScalingGroup resource\n :param output_key: the stack output key that corresponds to\n the scaling webhook\n :param delta: the number of instances the stack is expected to\n change by.\n :param parameters: parameters to use in heat template\n :param files: files used in template (dict of file name to\n file path)\n :param environment: stack environment definition (dict)\n \"\"\"\n # TODO(stpierre): Kilo Heat is *much* better than Juno for the\n # requirements of this scenario, so once Juno goes out of\n # support we should update this scenario to suck less. 
Namely:\n #\n # * Kilo Heat can supply alarm_url attributes without needing\n # an output key, so instead of getting the output key from\n # the user, just get the name of the ScalingPolicy to apply.\n # * Kilo Heat changes the status of a stack while scaling it,\n # so _scale_stack() can check for the stack to have changed\n # size and for it to be in UPDATE_COMPLETE state, so the\n # user no longer needs to specify the expected delta.\n stack = self._create_stack(template_path, parameters, files,\n environment)\n self._scale_stack(stack, output_key, delta)\n\n\[email protected](template_path={\"type\": \"file\"}, files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"validate_heat_template\", params=\"template_path\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n name=\"HeatStacks.create_suspend_resume_delete_stack\",\n platform=\"openstack\")\nclass CreateSuspendResumeDeleteStack(utils.HeatScenario):\n\n def run(self, template_path, parameters=None,\n files=None, environment=None):\n \"\"\"Create, suspend-resume and then delete a stack.\n\n Measure performance of the following commands:\n heat stack-create\n heat action-suspend\n heat action-resume\n heat stack-delete\n\n :param template_path: path to stack template file\n :param parameters: parameters to use in heat template\n :param files: files used in template\n :param environment: stack environment definition\n \"\"\"\n\n s = self._create_stack(template_path, parameters, files, environment)\n self._suspend_stack(s)\n self._resume_stack(s)\n self._delete_stack(s)\n\n\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"HeatStacks.list_stacks_and_events\",\n platform=\"openstack\")\nclass 
ListStacksAndEvents(utils.HeatScenario):\n\n def run(self):\n \"\"\"List events from tenant stacks.\"\"\"\n stacks = self._list_stacks()\n for stack in stacks:\n with atomic.ActionTimer(self, \"heat.list_events\"):\n self.clients(\"heat\").events.list(stack.id)\n\n\[email protected](template_path={\"type\": \"file\"}, files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"validate_heat_template\", params=\"template_path\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n name=\"HeatStacks.create_snapshot_restore_delete_stack\",\n platform=\"openstack\")\nclass CreateSnapshotRestoreDeleteStack(utils.HeatScenario):\n\n def run(self, template_path, parameters=None,\n files=None, environment=None):\n \"\"\"Create, snapshot-restore and then delete a stack.\n\n Measure performance of the following commands:\n heat stack-create\n heat stack-snapshot\n heat stack-restore\n heat stack-delete\n\n :param template_path: path to stack template file\n :param parameters: parameters to use in heat template\n :param files: files used in template\n :param environment: stack environment definition\n \"\"\"\n\n stack = self._create_stack(\n template_path, parameters, files, environment)\n snapshot = self._snapshot_stack(stack)\n self._restore_stack(stack, snapshot[\"id\"])\n self._delete_stack(stack)\n\n\[email protected](template_path={\"type\": \"file\"}, files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n name=\"HeatStacks.create_stack_and_show_output_via_API\",\n platform=\"openstack\")\nclass CreateStackAndShowOutputViaAPI(utils.HeatScenario):\n\n def run(self, template_path, output_key,\n parameters=None, files=None, 
environment=None):\n \"\"\"Create stack and show output by using old algorithm.\n\n Measure performance of the following commands:\n heat stack-create\n heat output-show\n\n :param template_path: path to stack template file\n :param output_key: the stack output key that corresponds to\n the scaling webhook\n :param parameters: parameters to use in heat template\n :param files: files used in template\n :param environment: stack environment definition\n \"\"\"\n stack = self._create_stack(\n template_path, parameters, files, environment)\n self._stack_show_output_via_API(stack, output_key)\n\n\[email protected](template_path={\"type\": \"file\"}, files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n name=\"HeatStacks.create_stack_and_show_output\",\n platform=\"openstack\")\nclass CreateStackAndShowOutput(utils.HeatScenario):\n\n def run(self, template_path, output_key,\n parameters=None, files=None, environment=None):\n \"\"\"Create stack and show output by using new algorithm.\n\n Measure performance of the following commands:\n heat stack-create\n heat output-show\n\n :param template_path: path to stack template file\n :param output_key: the stack output key that corresponds to\n the scaling webhook\n :param parameters: parameters to use in heat template\n :param files: files used in template\n :param environment: stack environment definition\n \"\"\"\n stack = self._create_stack(\n template_path, parameters, files, environment)\n self._stack_show_output(stack, output_key)\n\n\[email protected](template_path={\"type\": \"file\"}, files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n 
name=\"HeatStacks.create_stack_and_list_output_via_API\",\n platform=\"openstack\")\nclass CreateStackAndListOutputViaAPI(utils.HeatScenario):\n\n def run(self, template_path, parameters=None,\n files=None, environment=None):\n \"\"\"Create stack and list outputs by using old algorithm.\n\n Measure performance of the following commands:\n heat stack-create\n heat output-list\n\n :param template_path: path to stack template file\n :param parameters: parameters to use in heat template\n :param files: files used in template\n :param environment: stack environment definition\n \"\"\"\n stack = self._create_stack(\n template_path, parameters, files, environment)\n self._stack_list_output_via_API(stack)\n\n\[email protected](template_path={\"type\": \"file\"}, files={\"type\": \"file_dict\"})\[email protected](\"required_services\", services=[consts.Service.HEAT])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"heat\"]},\n name=\"HeatStacks.create_stack_and_list_output\",\n platform=\"openstack\")\nclass CreateStackAndListOutput(utils.HeatScenario):\n\n def run(self, template_path, parameters=None,\n files=None, environment=None):\n \"\"\"Create stack and list outputs by using new algorithm.\n\n Measure performance of the following commands:\n heat stack-create\n heat output-list\n\n :param template_path: path to stack template file\n :param parameters: parameters to use in heat template\n :param files: files used in template\n :param environment: stack environment definition\n \"\"\"\n stack = self._create_stack(\n template_path, parameters, files, environment)\n self._stack_list_output(stack)\n" }, { "alpha_fraction": 0.6044577956199646, "alphanum_fraction": 0.6071097254753113, "avg_line_length": 43.66933059692383, "blob_id": "4ddb6e4e2d90294ea405d8399fbeb3e03f69c0aa", "content_id": "6350c8153636627288739fbb9b6a538c2fde5780", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 61471, "license_type": "permissive", "max_line_length": 100, "num_lines": 1376, "path": "/rally_openstack/common/services/network/neutron.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport itertools\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import service\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common.services.network import net_utils\n\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\ndef _args_adapter(arguments_map):\n def wrapper(func):\n def decorator(*args, **kwargs):\n for source, dest in arguments_map.items():\n if source in kwargs:\n if dest in kwargs:\n raise TypeError(\n f\"{func.__name__}() accepts either {dest} keyword \"\n f\"argument or {source} but both were specified.\")\n kwargs[dest] = kwargs.pop(source)\n return func(*args, **kwargs)\n return decorator\n return wrapper\n\n\n_NETWORK_ARGS_MAP = {\n \"provider:network_type\": \"provider_network_type\",\n \"provider:physical_network\": \"provider_physical_network\",\n \"provider:segmentation_id\": \"provider_segmentation_id\",\n \"router:external\": \"router_external\"\n}\n\n\ndef _create_network_arg_adapter():\n \"\"\"A decorator for converting neutron's create kwargs to look pythonic.\"\"\"\n 
return _args_adapter(_NETWORK_ARGS_MAP)\n\n\nclass _NoneObj(object):\n def __len__(self):\n return 0\n\n\n_NONE = _NoneObj()\n\n\ndef _clean_dict(**kwargs):\n \"\"\"Builds a dict object from keyword arguments ignoring nullable values.\"\"\"\n return dict((k, v) for k, v in kwargs.items() if v != _NONE)\n\n\[email protected](service_name=\"neutron\", service_type=\"network\", version=\"2.0\")\nclass NeutronService(service.Service):\n \"\"\"A helper class for Neutron API\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(NeutronService, self).__init__(*args, **kwargs)\n self._cached_supported_extensions = None\n self._client = None\n\n @property\n def client(self):\n if self._client is None:\n self._client = self._clients.neutron()\n return self._client\n\n def create_network_topology(\n self, network_create_args=None,\n router_create_args=None, router_per_subnet=False,\n subnet_create_args=None, subnets_count=1, subnets_dualstack=False\n ):\n \"\"\"Create net infrastructure(network, router, subnets).\n\n :param network_create_args: A dict with creation arguments for a\n network. The format is equal to the create_network method\n :param router_create_args: A dict with creation arguments for an\n external router that will add an interface to each created subnet.\n The format is equal to the create_subnet method\n In case of None value (default behaviour), no router is created.\n :param router_per_subnet: whether or not to create router per subnet\n or use one router for all subnets.\n :param subnet_create_args: A dict with creation arguments for\n subnets. The format is equal to the create_subnet method.\n :param subnets_count: Number of subnets to create per network.\n Defaults to 1\n :param subnets_dualstack: Whether subnets should be of both IPv4 and\n IPv6 (i.e first subnet will be created for IPv4, the second for\n IPv6, the third for IPv4,..). 
If subnet_create_args includes one of\n ('cidr', 'start_cidr', 'ip_version') keys, subnets_dualstack\n parameter will be ignored.\n \"\"\"\n subnet_create_args = dict(subnet_create_args or {})\n\n network = self.create_network(**(network_create_args or {}))\n subnet_create_args[\"network_id\"] = network[\"id\"]\n\n routers = []\n if router_create_args is not None:\n for i in range(subnets_count if router_per_subnet else 1):\n routers.append(self.create_router(**router_create_args))\n\n subnets = []\n ip_versions = itertools.cycle([4, 6] if subnets_dualstack else [4])\n use_subnets_dualstack = (\n \"cidr\" not in subnet_create_args\n and \"start_cidr\" not in subnet_create_args\n and \"ip_version\" not in subnet_create_args\n )\n\n for i in range(subnets_count):\n if use_subnets_dualstack:\n subnet_create_args[\"ip_version\"] = next(ip_versions)\n if routers:\n if router_per_subnet:\n router = routers[i]\n else:\n router = routers[0]\n subnet_create_args[\"router_id\"] = router[\"id\"]\n\n subnets.append(self.create_subnet(**subnet_create_args))\n\n network[\"subnets\"] = [s[\"id\"] for s in subnets]\n\n return {\n \"network\": network,\n \"subnets\": subnets,\n \"routers\": routers\n }\n\n def delete_network_topology(self, topo):\n \"\"\"Delete network topology\n\n This method was developed to provide a backward compatibility with old\n neutron helpers. 
It is not recommended way and we suggest to use\n cleanup manager instead.\n\n :param topo: Network topology as create_network_topology returned\n \"\"\"\n for router in topo[\"routers\"]:\n self.remove_gateway_from_router(router[\"id\"])\n\n network_id = topo[\"network\"][\"id\"]\n\n for port in self.list_ports(network_id=network_id):\n self.delete_port(port)\n\n for subnet in self.list_subnets(network_id=network_id):\n self.delete_subnet(subnet[\"id\"])\n\n self.delete_network(network_id)\n\n for router in topo[\"routers\"]:\n self.delete_router(router[\"id\"])\n\n @atomic.action_timer(\"neutron.create_network\")\n @_create_network_arg_adapter()\n def create_network(self,\n project_id=_NONE,\n admin_state_up=_NONE,\n dns_domain=_NONE,\n mtu=_NONE,\n port_security_enabled=_NONE,\n provider_network_type=_NONE,\n provider_physical_network=_NONE,\n provider_segmentation_id=_NONE,\n qos_policy_id=_NONE,\n router_external=_NONE,\n segments=_NONE,\n shared=_NONE,\n vlan_transparent=_NONE,\n description=_NONE,\n availability_zone_hints=_NONE):\n \"\"\"Create neutron network.\n\n :param project_id: The ID of the project that owns the resource. Only\n administrative and users with advsvc role can specify a project ID\n other than their own. You cannot change this value through\n authorization policies.\n :param admin_state_up: The administrative state of the network,\n which is up (true) or down (false).\n :param dns_domain: A valid DNS domain.\n :param mtu: The maximum transmission unit (MTU) value to address\n fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6.\n :param port_security_enabled: The port security status of the network.\n Valid values are enabled (true) and disabled (false). This value is\n used as the default value of port_security_enabled field of a\n newly created port.\n :param provider_network_type: The type of physical network that this\n network should be mapped to. For example, flat, vlan, vxlan,\n or gre. 
Valid values depend on a networking back-end.\n :param provider_physical_network: The physical network where this\n network should be implemented. The Networking API v2.0 does not\n provide a way to list available physical networks.\n For example, the Open vSwitch plug-in configuration file defines\n a symbolic name that maps to specific bridges on each compute host.\n :param provider_segmentation_id: The ID of the isolated segment on the\n physical network. The network_type attribute defines the\n segmentation model. For example, if the network_type value is vlan,\n this ID is a vlan identifier. If the network_type value is gre,\n this ID is a gre key.\n :param qos_policy_id: The ID of the QoS policy associated with the\n network.\n :param router_external: Indicates whether the network has an external\n routing facility that’s not managed by the networking service.\n :param segments: A list of provider segment objects.\n :param shared: Indicates whether this resource is shared across all\n projects. 
By default, only administrative users can change\n this value.\n :param vlan_transparent: Indicates the VLAN transparency mode of the\n network, which is VLAN transparent (true) or not VLAN\n transparent (false).\n :param description: A human-readable description for the resource.\n Default is an empty string.\n :param availability_zone_hints: The availability zone candidate for\n the network.\n :returns: neutron network dict\n \"\"\"\n body = _clean_dict(\n name=self.generate_random_name(),\n tenant_id=project_id,\n admin_state_up=admin_state_up,\n dns_domain=dns_domain,\n mtu=mtu,\n port_security_enabled=port_security_enabled,\n qos_policy_id=qos_policy_id,\n segments=segments,\n shared=shared,\n vlan_transparent=vlan_transparent,\n description=description,\n availability_zone_hints=availability_zone_hints,\n **{\n \"provider:network_type\": provider_network_type,\n \"provider:physical_network\": provider_physical_network,\n \"provider:segmentation_id\": provider_segmentation_id,\n \"router:external\": router_external\n }\n )\n resp = self.client.create_network({\"network\": body})\n return resp[\"network\"]\n\n @atomic.action_timer(\"neutron.show_network\")\n def get_network(self, network_id, fields=_NONE):\n \"\"\"Get network by ID\n\n :param network_id: Network ID to fetch data for\n :param fields: The fields that you want the server to return. If no\n fields list is specified, the networking API returns all\n attributes allowed by the policy settings. 
By using fields\n parameter, the API returns only the requested set of attributes.\n \"\"\"\n body = _clean_dict(fields=fields)\n resp = self.client.show_network(network_id, **body)\n return resp[\"network\"]\n\n def find_network(self, network_id_or_name, external=_NONE):\n \"\"\"Find network by identifier (id or name)\n\n :param network_id_or_name: Network ID or name\n :param external: check target network is external or not\n \"\"\"\n network = None\n for net in self.list_networks():\n if network_id_or_name in (net[\"name\"], net[\"id\"]):\n network = net\n break\n if network is None:\n raise exceptions.GetResourceFailure(\n resource=\"network\",\n err=f\"no name or id matches {network_id_or_name}\")\n if external:\n if not network.get(\"router:external\", False):\n raise exceptions.NotFoundException(\n f\"Network '{network['name']} (id={network['id']})' is not \"\n f\"external.\")\n return network\n\n @atomic.action_timer(\"neutron.update_network\")\n @_create_network_arg_adapter()\n def update_network(self,\n network_id,\n name=_NONE,\n admin_state_up=_NONE,\n dns_domain=_NONE,\n mtu=_NONE,\n port_security_enabled=_NONE,\n provider_network_type=_NONE,\n provider_physical_network=_NONE,\n provider_segmentation_id=_NONE,\n qos_policy_id=_NONE,\n router_external=_NONE,\n segments=_NONE,\n shared=_NONE,\n description=_NONE,\n is_default=_NONE):\n \"\"\"Update neutron network.\n\n :param network_id: ID of the network to update\n :param name: Human-readable name of the network.\n :param admin_state_up: The administrative state of the network,\n which is up (true) or down (false).\n :param dns_domain: A valid DNS domain.\n :param mtu: The maximum transmission unit (MTU) value to address\n fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6.\n :param port_security_enabled: The port security status of the network.\n Valid values are enabled (true) and disabled (false). 
This value is\n used as the default value of port_security_enabled field of a\n newly created port.\n :param provider_network_type: The type of physical network that this\n network should be mapped to. For example, flat, vlan, vxlan,\n or gre. Valid values depend on a networking back-end.\n :param provider_physical_network: The physical network where this\n network should be implemented. The Networking API v2.0 does not\n provide a way to list available physical networks.\n For example, the Open vSwitch plug-in configuration file defines\n a symbolic name that maps to specific bridges on each compute host.\n :param provider_segmentation_id: The ID of the isolated segment on the\n physical network. The network_type attribute defines the\n segmentation model. For example, if the network_type value is vlan,\n this ID is a vlan identifier. If the network_type value is gre,\n this ID is a gre key.\n :param qos_policy_id: The ID of the QoS policy associated with the\n network.\n :param router_external: Indicates whether the network has an external\n routing facility that’s not managed by the networking service.\n :param segments: A list of provider segment objects.\n :param shared: Indicates whether this resource is shared across all\n projects. 
By default, only administrative users can change\n this value.\n :param description: A human-readable description for the resource.\n Default is an empty string.\n :param is_default: The network is default or not.\n :returns: neutron network dict\n \"\"\"\n body = _clean_dict(\n name=name,\n admin_state_up=admin_state_up,\n dns_domain=dns_domain,\n mtu=mtu,\n port_security_enabled=port_security_enabled,\n qos_policy_id=qos_policy_id,\n segments=segments,\n shared=shared,\n description=description,\n is_default=is_default,\n **{\n \"provider:network_type\": provider_network_type,\n \"provider:physical_network\": provider_physical_network,\n \"provider:segmentation_id\": provider_segmentation_id,\n \"router:external\": router_external\n }\n )\n if not body:\n raise TypeError(\"No updates for a network.\")\n resp = self.client.update_network(network_id, {\"network\": body})\n return resp[\"network\"]\n\n @atomic.action_timer(\"neutron.delete_network\")\n def delete_network(self, network_id):\n \"\"\"Delete network\n\n :param network_id: Network ID\n \"\"\"\n self.client.delete_network(network_id)\n\n @atomic.action_timer(\"neutron.list_networks\")\n def list_networks(self, name=_NONE, router_external=_NONE, status=_NONE,\n **kwargs):\n \"\"\"List networks.\n\n :param name: Filter the list result by the human-readable name of the\n resource.\n :param router_external: Filter the network list result based on whether\n the network has an external routing facility that’s not managed by\n the networking service.\n :param status: Filter the network list result by network status.\n Values are ACTIVE, DOWN, BUILD or ERROR.\n :param kwargs: additional network list filters\n \"\"\"\n kwargs[\"router:external\"] = router_external\n filters = _clean_dict(name=name, status=status, **kwargs)\n return self.client.list_networks(**filters)[\"networks\"]\n\n IPv4_DEFAULT_DNS_NAMESERVERS = [\"8.8.8.8\", \"8.8.4.4\"]\n IPv6_DEFAULT_DNS_NAMESERVERS = [\"dead:beaf::1\", 
\"dead:beaf::2\"]\n\n @atomic.action_timer(\"neutron.create_subnet\")\n def create_subnet(self, network_id, router_id=_NONE, project_id=_NONE,\n enable_dhcp=_NONE,\n dns_nameservers=_NONE, allocation_pools=_NONE,\n host_routes=_NONE, ip_version=_NONE, gateway_ip=_NONE,\n cidr=_NONE, start_cidr=_NONE, prefixlen=_NONE,\n ipv6_address_mode=_NONE, ipv6_ra_mode=_NONE,\n segment_id=_NONE, subnetpool_id=_NONE,\n use_default_subnetpool=_NONE, service_types=_NONE,\n dns_publish_fixed_ip=_NONE):\n \"\"\"Create neutron subnet.\n\n :param network_id: The ID of the network to which the subnet belongs.\n :param router_id: An external router and add as an interface to subnet.\n :param project_id: The ID of the project that owns the resource.\n Only administrative and users with advsvc role can specify a\n project ID other than their own. You cannot change this value\n through authorization policies.\n :param enable_dhcp: Indicates whether dhcp is enabled or disabled for\n the subnet. Default is true.\n :param dns_nameservers: List of dns name servers associated with the\n subnet. Default is a list of Google DNS\n :param allocation_pools: Allocation pools with start and end IP\n addresses for this subnet. If allocation_pools are not specified,\n OpenStack Networking automatically allocates pools for covering\n all IP addresses in the CIDR, excluding the address reserved for\n the subnet gateway by default.\n :param host_routes: Additional routes for the subnet. A list of\n dictionaries with destination and nexthop parameters. Default\n value is an empty list.\n :param gateway_ip: Gateway IP of this subnet. If the value is null that\n implies no gateway is associated with the subnet. If the gateway_ip\n is not specified, OpenStack Networking allocates an address from\n the CIDR for the gateway for the subnet by default.\n :param ip_version: The IP protocol version. Value is 4 or 6. 
If CIDR\n is specified, the value automatically can be detected from it,\n otherwise defaults to 4.\n Also, check start_cidr param description.\n :param cidr: The CIDR of the subnet. If not specified, it will be\n auto-generated based on start_cidr and ip_version parameters.\n :param start_cidr:\n :param prefixlen: he prefix length to use for subnet allocation from a\n subnet pool. If not specified, the default_prefixlen value of the\n subnet pool will be used.\n :param ipv6_address_mode: The IPv6 address modes specifies mechanisms\n for assigning IP addresses. Value is slaac, dhcpv6-stateful,\n dhcpv6-stateless.\n :param ipv6_ra_mode: The IPv6 router advertisement specifies whether\n the networking service should transmit ICMPv6 packets, for a\n subnet. Value is slaac, dhcpv6-stateful, dhcpv6-stateless.\n :param segment_id: The ID of a network segment the subnet is\n associated with. It is available when segment extension is enabled.\n :param subnetpool_id: The ID of the subnet pool associated with the\n subnet.\n :param use_default_subnetpool: Whether to allocate this subnet from\n the default subnet pool.\n :param service_types: The service types associated with the subnet.\n :param dns_publish_fixed_ip: Whether to publish DNS records for IPs\n from this subnet. 
Default is false.\n \"\"\"\n\n if cidr == _NONE:\n ip_version, cidr = net_utils.generate_cidr(\n ip_version=ip_version, start_cidr=(start_cidr or None))\n if ip_version == _NONE:\n ip_version = net_utils.get_ip_version(cidr)\n\n if dns_nameservers == _NONE:\n if ip_version == 4:\n dns_nameservers = self.IPv4_DEFAULT_DNS_NAMESERVERS\n else:\n dns_nameservers = self.IPv6_DEFAULT_DNS_NAMESERVERS\n\n body = _clean_dict(\n name=self.generate_random_name(),\n network_id=network_id,\n tenant_id=project_id,\n enable_dhcp=enable_dhcp,\n dns_nameservers=dns_nameservers,\n allocation_pools=allocation_pools,\n host_routes=host_routes,\n ip_version=ip_version,\n gateway_ip=gateway_ip,\n cidr=cidr,\n prefixlen=prefixlen,\n ipv6_address_mode=ipv6_address_mode,\n ipv6_ra_mode=ipv6_ra_mode,\n segment_id=segment_id,\n subnetpool_id=subnetpool_id,\n use_default_subnetpool=use_default_subnetpool,\n service_types=service_types,\n dns_publish_fixed_ip=dns_publish_fixed_ip\n )\n\n subnet = self.client.create_subnet({\"subnet\": body})[\"subnet\"]\n if router_id:\n self.add_interface_to_router(router_id=router_id,\n subnet_id=subnet[\"id\"])\n return subnet\n\n @atomic.action_timer(\"neutron.show_subnet\")\n def get_subnet(self, subnet_id):\n \"\"\"Get subnet\n\n :param subnet_id: Subnet ID\n \"\"\"\n return self.client.show_subnet(subnet_id)[\"subnet\"]\n\n @atomic.action_timer(\"neutron.update_subnet\")\n def update_subnet(self, subnet_id, name=_NONE, enable_dhcp=_NONE,\n dns_nameservers=_NONE, allocation_pools=_NONE,\n host_routes=_NONE, gateway_ip=_NONE, description=_NONE,\n service_types=_NONE, segment_id=_NONE,\n dns_publish_fixed_ip=_NONE):\n \"\"\"Update neutron subnet.\n\n :param subnet_id: The ID of the subnet to update.\n :param name: Human-readable name of the resource.\n :param description: A human-readable description for the resource.\n Default is an empty string.\n :param enable_dhcp: Indicates whether dhcp is enabled or disabled for\n the subnet. 
Default is true.\n :param dns_nameservers: List of dns name servers associated with the\n subnet. Default is a list of Google DNS\n :param allocation_pools: Allocation pools with start and end IP\n addresses for this subnet. If allocation_pools are not specified,\n OpenStack Networking automatically allocates pools for covering\n all IP addresses in the CIDR, excluding the address reserved for\n the subnet gateway by default.\n :param host_routes: Additional routes for the subnet. A list of\n dictionaries with destination and nexthop parameters. Default\n value is an empty list.\n :param gateway_ip: Gateway IP of this subnet. If the value is null that\n implies no gateway is associated with the subnet. If the gateway_ip\n is not specified, OpenStack Networking allocates an address from\n the CIDR for the gateway for the subnet by default.\n :param segment_id: The ID of a network segment the subnet is\n associated with. It is available when segment extension is enabled.\n :param service_types: The service types associated with the subnet.\n :param dns_publish_fixed_ip: Whether to publish DNS records for IPs\n from this subnet. 
Default is false.\n \"\"\"\n\n body = _clean_dict(\n name=name,\n enable_dhcp=enable_dhcp,\n dns_nameservers=dns_nameservers,\n allocation_pools=allocation_pools,\n host_routes=host_routes,\n gateway_ip=gateway_ip,\n segment_id=segment_id,\n service_types=service_types,\n dns_publish_fixed_ip=dns_publish_fixed_ip,\n description=description\n )\n\n if not body:\n raise TypeError(\"No updates for a subnet.\")\n\n resp = self.client.update_subnet(subnet_id, {\"subnet\": body})[\"subnet\"]\n return resp\n\n @atomic.action_timer(\"neutron.delete_subnet\")\n def delete_subnet(self, subnet_id):\n \"\"\"Delete subnet\n\n :param subnet_id: Subnet ID\n \"\"\"\n self.client.delete_subnet(subnet_id)\n\n @atomic.action_timer(\"neutron.list_subnets\")\n def list_subnets(self, network_id=_NONE, **filters):\n \"\"\"List subnets.\n\n :param network_id: Filter the subnet list result by the ID of the\n network to which the subnet belongs.\n :param filters: additional subnet list filters\n \"\"\"\n if network_id:\n filters[\"network_id\"] = network_id\n return self.client.list_subnets(**filters)[\"subnets\"]\n\n @atomic.action_timer(\"neutron.create_router\")\n def create_router(self, project_id=_NONE, admin_state_up=_NONE,\n description=_NONE, discover_external_gw=False,\n external_gateway_info=_NONE, distributed=_NONE, ha=_NONE,\n availability_zone_hints=_NONE, service_type_id=_NONE,\n flavor_id=_NONE, enable_snat=_NONE):\n \"\"\"Create router.\n\n :param project_id: The ID of the project that owns the resource. Only\n administrative and users with advsvc role can specify a project ID\n other than their own. You cannot change this value through\n authorization policies.\n :param admin_state_up: The administrative state of the resource, which\n is up (true) or down (false). Default is true.\n :param description: A human-readable description for the resource.\n :param discover_external_gw: Take one of available external networks\n and use it as external gateway. 
The parameter can not be used in\n combination of external_gateway_info parameter.\n :param external_gateway_info: The external gateway information of\n the router. If the router has an external gateway, this would be\n a dict with network_id, enable_snat and external_fixed_ips.\n :param distributed: true indicates a distributed router. It is\n available when dvr extension is enabled.\n :param ha: true indicates a highly-available router. It is available\n when l3-ha extension is enabled.\n :param availability_zone_hints: The availability zone candidates for\n the router. It is available when router_availability_zone extension\n is enabled.\n :param service_type_id: The ID of the service type associated with\n the router.\n :param flavor_id: The ID of the flavor associated with the router.\n :param enable_snat: Whether to include `enable_snat: True` to\n external_gateway_info or not. By default, it is enabled if a user\n is admin and \"ext-gw-mode\" extension presents\n \"\"\"\n\n if external_gateway_info is _NONE and discover_external_gw:\n for external_network in self.list_networks(router_external=True):\n external_gateway_info = {\"network_id\": external_network[\"id\"]}\n if enable_snat is _NONE:\n permission = self._clients.credential.permission\n is_admin = permission == consts.EndpointPermission.ADMIN\n if (self.supports_extension(\"ext-gw-mode\", silent=True)\n and is_admin):\n external_gateway_info[\"enable_snat\"] = True\n elif enable_snat:\n external_gateway_info[\"enable_snat\"] = True\n break\n\n body = _clean_dict(\n name=self.generate_random_name(),\n # tenant_id should work for both new and old neutron instances\n tenant_id=project_id,\n external_gateway_info=external_gateway_info,\n description=description,\n distributed=distributed,\n ha=ha,\n availability_zone_hints=availability_zone_hints,\n service_type_id=service_type_id,\n flavor_id=flavor_id,\n admin_state_up=admin_state_up\n )\n\n resp = self.client.create_router({\"router\": body})\n 
return resp[\"router\"]\n\n @atomic.action_timer(\"neutron.show_router\")\n def get_router(self, router_id, fields=_NONE):\n \"\"\"Get router details\n\n :param router_id: Router ID\n :param fields: The fields that you want the server to return. If no\n fields list is specified, the networking API returns all\n attributes allowed by the policy settings. By using fields\n parameter, the API returns only the requested set of attributes.\n \"\"\"\n body = _clean_dict(fields=fields)\n return self.client.show_router(router_id, **body)[\"router\"]\n\n @atomic.action_timer(\"neutron.add_interface_router\")\n def add_interface_to_router(self, router_id, subnet_id=_NONE,\n port_id=_NONE):\n \"\"\"Add interface to router.\n\n :param router_id: The ID of the router.\n :param subnet_id: The ID of the subnet. One of subnet_id or port_id\n must be specified.\n :param port_id: The ID of the port. One of subnet_id or port_id must\n be specified.\n \"\"\"\n if (subnet_id and port_id) or (not subnet_id and not port_id):\n raise TypeError(\"One of subnet_id or port_id must be specified \"\n \"while adding interface to router.\")\n body = _clean_dict(subnet_id=subnet_id, port_id=port_id)\n return self.client.add_interface_router(router_id, body)\n\n @atomic.action_timer(\"neutron.remove_interface_router\")\n def remove_interface_from_router(self, router_id, subnet_id=_NONE,\n port_id=_NONE):\n \"\"\"Remove interface from router\n\n :param router_id: The ID of the router.\n :param subnet_id: The ID of the subnet. One of subnet_id or port_id\n must be specified.\n :param port_id: The ID of the port. 
One of subnet_id or port_id must\n be specified.\n \"\"\"\n from neutronclient.common import exceptions as neutron_exceptions\n\n if (subnet_id and port_id) or (not subnet_id and not port_id):\n raise TypeError(\"One of subnet_id or port_id must be specified \"\n \"to remove interface from router.\")\n\n body = _clean_dict(subnet_id=subnet_id, port_id=port_id)\n\n try:\n self.client.remove_interface_router(router_id, body)\n except (neutron_exceptions.BadRequest,\n neutron_exceptions.NotFound):\n # Some neutron plugins don't use router as\n # the device ID. Also, some plugin doesn't allow\n # to update the ha router interface as there is\n # an internal logic to update the interface/data model\n # instead.\n LOG.exception(\"Failed to remove an interface from a router.\")\n\n @atomic.action_timer(\"neutron.add_gateway_router\")\n def add_gateway_to_router(self, router_id, network_id, enable_snat=None,\n external_fixed_ips=None):\n \"\"\"Adds an external network gateway to the specified router.\n\n :param router_id: Router ID\n :param enable_snat: whether SNAT should occur on the external gateway\n or not\n \"\"\"\n gw_info = {\"network_id\": network_id}\n if enable_snat is not None:\n if self.supports_extension(\"ext-gw-mode\", silent=True):\n gw_info[\"enable_snat\"] = enable_snat\n if external_fixed_ips is not None:\n gw_info[\"external_fixed_ips\"] = external_fixed_ips\n self.client.add_gateway_router(router_id, gw_info)\n\n @atomic.action_timer(\"neutron.remove_gateway_router\")\n def remove_gateway_from_router(self, router_id):\n \"\"\"Removes an external network gateway from the specified router.\n\n :param router_id: Router ID\n \"\"\"\n self.client.remove_gateway_router(router_id)\n\n @atomic.action_timer(\"neutron.update_router\")\n def update_router(self, router_id, name=_NONE, admin_state_up=_NONE,\n description=_NONE, external_gateway_info=_NONE,\n distributed=_NONE, ha=_NONE):\n \"\"\"Update router.\n\n :param router_id: The ID of the router to 
update.\n :param name: Human-readable name of the resource.\n :param admin_state_up: The administrative state of the resource, which\n is up (true) or down (false). Default is true.\n :param description: A human-readable description for the resource.\n :param external_gateway_info: The external gateway information of\n the router. If the router has an external gateway, this would be\n a dict with network_id, enable_snat and external_fixed_ips.\n :param distributed: true indicates a distributed router. It is\n available when dvr extension is enabled.\n :param ha: true indicates a highly-available router. It is available\n when l3-ha extension is enabled.\n \"\"\"\n body = _clean_dict(\n name=name,\n external_gateway_info=external_gateway_info,\n description=description,\n distributed=distributed,\n ha=ha,\n admin_state_up=admin_state_up\n )\n\n if not body:\n raise TypeError(\"No updates for a router.\")\n\n return self.client.update_router(router_id, {\"router\": body})[\"router\"]\n\n @atomic.action_timer(\"neutron.delete_router\")\n def delete_router(self, router_id):\n \"\"\"Delete router\n\n :param router_id: Router ID\n \"\"\"\n self.client.delete_router(router_id)\n\n @staticmethod\n def _filter_routers(routers, subnet_ids):\n for router in routers:\n gtw_info = router[\"external_gateway_info\"]\n if gtw_info is None:\n continue\n if any(fixed_ip[\"subnet_id\"] in subnet_ids\n for fixed_ip in gtw_info[\"external_fixed_ips\"]):\n yield router\n\n @atomic.action_timer(\"neutron.list_routers\")\n def list_routers(self, subnet_ids=_NONE, **kwargs):\n \"\"\"List routers.\n\n :param subnet_ids: Filter routers by attached subnet(s). 
Can be a\n string or and an array with strings.\n :param kwargs: additional router list filters\n \"\"\"\n routers = self.client.list_routers(**kwargs)[\"routers\"]\n if subnet_ids != _NONE:\n routers = list(self._filter_routers(routers,\n subnet_ids=subnet_ids))\n return routers\n\n @atomic.action_timer(\"neutron.create_port\")\n def create_port(self, network_id, **kwargs):\n \"\"\"Create neutron port.\n\n :param network_id: neutron network dict\n :param kwargs: other optional neutron port creation params\n (name is restricted param)\n :returns: neutron port dict\n \"\"\"\n kwargs[\"name\"] = self.generate_random_name()\n body = _clean_dict(\n network_id=network_id,\n **kwargs\n )\n return self.client.create_port({\"port\": body})[\"port\"]\n\n @atomic.action_timer(\"neutron.show_port\")\n def get_port(self, port_id, fields=_NONE):\n \"\"\"Get port details\n\n :param port_id: Port ID\n :param fields: The fields that you want the server to return. If no\n fields list is specified, the networking API returns all\n attributes allowed by the policy settings. 
By using fields\n parameter, the API returns only the requested set of attributes.\n \"\"\"\n body = _clean_dict(fields=fields)\n return self.client.show_port(port_id, **body)[\"port\"]\n\n @atomic.action_timer(\"neutron.update_port\")\n def update_port(self, port_id, **kwargs):\n \"\"\"Update neutron port.\n\n :param port_id: The ID of the port to update.\n :param kwargs: other optional neutron port creation params\n (name is restricted param)\n :returns: neutron port dict\n \"\"\"\n body = _clean_dict(**kwargs)\n if not body:\n raise TypeError(\"No updates for a port.\")\n return self.client.update_port(port_id, {\"port\": body})[\"port\"]\n\n ROUTER_INTERFACE_OWNERS = (\"network:router_interface\",\n \"network:router_interface_distributed\",\n \"network:ha_router_replicated_interface\")\n\n ROUTER_GATEWAY_OWNER = \"network:router_gateway\"\n\n @atomic.action_timer(\"neutron.delete_port\")\n def delete_port(self, port):\n \"\"\"Delete port.\n\n :param port: Port ID or object\n :returns bool: False if neutron returns NotFound error on port delete\n \"\"\"\n\n from neutronclient.common import exceptions as neutron_exceptions\n\n if not isinstance(port, dict):\n port = {\"id\": port, \"device_owner\": False}\n\n if (port[\"device_owner\"] in self.ROUTER_INTERFACE_OWNERS\n or port[\"device_owner\"] == self.ROUTER_GATEWAY_OWNER):\n\n if port[\"device_owner\"] == self.ROUTER_GATEWAY_OWNER:\n self.remove_gateway_from_router(port[\"device_id\"])\n\n self.remove_interface_from_router(\n router_id=port[\"device_id\"], port_id=port[\"id\"])\n else:\n try:\n self.client.delete_port(port[\"id\"])\n except neutron_exceptions.PortNotFoundClient:\n # port is auto-removed\n return False\n return True\n\n @atomic.action_timer(\"neutron.list_ports\")\n def list_ports(self, network_id=_NONE, device_id=_NONE, device_owner=_NONE,\n status=_NONE, **kwargs):\n \"\"\"List ports.\n\n :param network_id: Filter the list result by the ID of the attached\n network.\n :param device_id: Filter 
the port list result by the ID of the device\n that uses this port. For example, a server instance or a logical\n router.\n :param device_owner: Filter the port result list by the entity type\n that uses this port. For example, compute:nova (server instance),\n network:dhcp (DHCP agent) or network:router_interface\n (router interface).\n :param status: Filter the port list result by the port status.\n Values are ACTIVE, DOWN, BUILD and ERROR.\n :param kwargs: additional port list filters\n \"\"\"\n filters = _clean_dict(\n network_id=network_id,\n device_id=device_id,\n device_owner=device_owner,\n status=status,\n **kwargs\n )\n return self.client.list_ports(**filters)[\"ports\"]\n\n @atomic.action_timer(\"neutron.create_floating_ip\")\n def create_floatingip(self, floating_network=None, project_id=_NONE,\n fixed_ip_address=_NONE, floating_ip_address=_NONE,\n port_id=_NONE, subnet_id=_NONE, dns_domain=_NONE,\n dns_name=_NONE):\n \"\"\"Create floating IP with floating_network.\n\n :param floating_network: external network associated with floating IP.\n :param project_id: The ID of the project.\n :param fixed_ip_address: The fixed IP address that is associated with\n the floating IP. If an internal port has multiple associated IP\n addresses, the service chooses the first IP address unless you\n explicitly define a fixed IP address in the fixed_ip_address\n parameter.\n :param floating_ip_address: The floating IP address. Default policy\n settings enable only administrative users to set floating IP\n addresses and some non-administrative users might require a\n floating IP address. 
If you do not specify a floating IP address\n in the request, the operation automatically allocates one.\n :param port_id: The ID of a port associated with the floating IP.\n To associate the floating IP with a fixed IP at creation time,\n you must specify the identifier of the internal port.\n :param subnet_id: The subnet ID on which you want to create the\n floating IP.\n :param dns_domain: A valid DNS domain.\n :param dns_name: A valid DNS name.\n \"\"\"\n\n from neutronclient.common import exceptions as neutron_exceptions\n\n if isinstance(floating_network, dict):\n net_id = floating_network[\"id\"]\n elif floating_network:\n net_id = self.find_network(floating_network, external=True)[\"id\"]\n else:\n ext_networks = self.list_networks(router_external=True)\n if not ext_networks:\n raise exceptions.NotFoundException(\n \"Failed to allocate floating IP since no external \"\n \"networks found.\")\n net_id = ext_networks[0][\"id\"]\n\n description = _NONE\n api_info = self._clients.credential.api_info.get(\"neutron\", {})\n if (not api_info.get(\"pre_newton\", False)\n and not CONF.openstack.pre_newton_neutron):\n description = self.generate_random_name()\n\n body = _clean_dict(\n tenant_id=project_id,\n description=description,\n floating_network_id=net_id,\n fixed_ip_address=fixed_ip_address,\n floating_ip_address=floating_ip_address,\n port_id=port_id,\n subnet_id=subnet_id,\n dns_domain=dns_domain,\n dns_name=dns_name\n )\n\n try:\n resp = self.client.create_floatingip({\"floatingip\": body})\n return resp[\"floatingip\"]\n except neutron_exceptions.BadRequest as e:\n error = \"%s\" % e\n if \"Unrecognized attribute\" in error and \"'description'\" in error:\n LOG.info(\"It looks like you have Neutron API of pre-Newton \"\n \"OpenStack release. 
Setting \"\n \"openstack.pre_newton_neutron option via Rally \"\n \"configuration should fix an issue.\")\n raise\n\n @atomic.action_timer(\"neutron.show_floating_ip\")\n def get_floatingip(self, floatingip_id, fields=_NONE):\n \"\"\"Get floating IP details\n\n :param floatingip_id: Floating IP ID\n :param fields: The fields that you want the server to return. If no\n fields list is specified, the networking API returns all\n attributes allowed by the policy settings. By using fields\n parameter, the API returns only the requested set of attributes.\n \"\"\"\n body = _clean_dict(fields=fields)\n resp = self.client.show_floatingip(floatingip_id, **body)\n return resp[\"floatingip\"]\n\n @atomic.action_timer(\"neutron.update_floating_ip\")\n def update_floatingip(self, floating_ip_id, fixed_ip_address=_NONE,\n port_id=_NONE, description=_NONE):\n \"\"\"Update floating IP.\n\n :param floating_ip_id: The ID of the floating IP to update.\n :param fixed_ip_address: The fixed IP address that is associated with\n the floating IP. 
If an internal port has multiple associated IP\n addresses, the service chooses the first IP address unless you\n explicitly define a fixed IP address in the fixed_ip_address\n parameter.\n :param port_id: The ID of a port associated with the floating IP.\n To associate the floating IP with a fixed IP at creation time,\n you must specify the identifier of the internal port.\n :param description: A human-readable description for the resource.\n Default is an empty string.\n \"\"\"\n\n body = _clean_dict(\n description=description,\n fixed_ip_address=fixed_ip_address,\n port_id=port_id\n )\n\n if not body:\n raise TypeError(\"No updates for a floating ip.\")\n\n return self.client.update_floatingip(\n floating_ip_id, {\"floatingip\": body})[\"floatingip\"]\n\n @atomic.action_timer(\"neutron.delete_floating_ip\")\n def delete_floatingip(self, floatingip_id):\n \"\"\"Delete floating IP.\n\n :param floatingip_id: floating IP id\n \"\"\"\n self.client.delete_floatingip(floatingip_id)\n\n @atomic.action_timer(\"neutron.associate_floating_ip\")\n def associate_floatingip(self, port_id=None, device_id=None,\n floatingip_id=None, floating_ip_address=None,\n fixed_ip_address=None):\n \"\"\"Add floating IP to an instance\n\n :param port_id: ID of the port to associate floating IP with\n :param device_id: ID of the device to find port to use\n :param floatingip_id: ID of the floating IP\n :param floating_ip_address: IP address to find floating IP to use\n :param fixed_ip_address: The fixed IP address to associate with the\n floating ip\n \"\"\"\n if (device_id is None and port_id is None) or (device_id and port_id):\n raise TypeError(\"One of device_id or port_id must be specified.\")\n\n if ((floating_ip_address is None and floatingip_id is None)\n or (floating_ip_address and floatingip_id)):\n raise TypeError(\"One of floating_ip_address or floatingip_id \"\n \"must be specified.\")\n\n if port_id is None:\n ports = self.list_ports(device_id=device_id)\n if not ports:\n raise 
exceptions.GetResourceFailure(\n resource=\"port\",\n err=f\"device '{device_id}' have no ports associated.\")\n port_id = ports[0][\"id\"]\n\n if floatingip_id is None:\n filtered_fips = self.list_floatingips(\n floating_ip_address=floating_ip_address)\n if not filtered_fips:\n raise exceptions.GetResourceFailure(\n resource=\"floating ip\",\n err=f\"There is no floating ip with '{floating_ip_address}'\"\n f\" address.\")\n\n floatingip_id = filtered_fips[0][\"id\"]\n\n additional = {}\n if fixed_ip_address:\n additional[\"fixed_ip_address\"] = fixed_ip_address\n return self.update_floatingip(floatingip_id, port_id=port_id,\n **additional)\n\n @atomic.action_timer(\"neutron.dissociate_floating_ip\")\n def dissociate_floatingip(self, floatingip_id=None,\n floating_ip_address=None):\n \"\"\"Remove floating IP from an instance\n\n :param floatingip_id: ID of the floating IP\n :param floating_ip_address: IP address to find floating IP to use\n \"\"\"\n if ((floating_ip_address is None and floatingip_id is None)\n or (floating_ip_address and floatingip_id)):\n raise TypeError(\"One of floating_ip_address or floatingip_id \"\n \"must be specified.\")\n\n if floatingip_id is None:\n filtered_fips = self.list_floatingips(\n floating_ip_address=floating_ip_address)\n if not filtered_fips:\n raise exceptions.GetResourceFailure(\n resource=\"floating ip\",\n err=f\"There is no floating ip with '{floating_ip_address}'\"\n f\" address.\")\n\n floatingip_id = filtered_fips[0][\"id\"]\n\n return self.update_floatingip(floatingip_id, port_id=None)\n\n @atomic.action_timer(\"neutron.list_floating_ips\")\n def list_floatingips(self, router_id=_NONE, port_id=_NONE, status=_NONE,\n description=_NONE, floating_network_id=_NONE,\n floating_ip_address=_NONE, fixed_ip_address=_NONE,\n **kwargs):\n \"\"\"List floating IPs.\n\n :param router_id: Filter the floating IP list result by the ID of the\n router for the floating IP.\n :param port_id: Filter the floating IP list result by the ID 
of a port\n associated with the floating IP.\n :param status: Filter the floating IP list result by the status of the\n floating IP. Values are ACTIVE, DOWN and ERROR.\n :param description: Filter the list result by the human-readable\n description of the resource. (available only for OpenStack Newton+)\n :param floating_network_id: Filter the floating IP list result by the\n ID of the network associated with the floating IP.\n :param fixed_ip_address: Filter the floating IP list result by the\n fixed IP address that is associated with the floating IP address.\n :param floating_ip_address: Filter the floating IP list result by the\n floating IP address.\n :param kwargs: additional floating IP list filters\n \"\"\"\n filters = _clean_dict(\n router_id=router_id,\n port_id=port_id,\n status=status,\n description=description,\n floating_network_id=floating_network_id,\n floating_ip_address=floating_ip_address,\n fixed_ip_address=fixed_ip_address,\n **kwargs\n )\n resp = self.client.list_floatingips(**filters)\n return resp[\"floatingips\"]\n\n @atomic.action_timer(\"neutron.create_security_group\")\n def create_security_group(self, name=None, project_id=_NONE,\n description=_NONE, stateful=_NONE):\n \"\"\"Create a security group\n\n :param name: Human-readable name of the resource.\n :param project_id: The ID of the project.\n :param description: A human-readable description for the resource.\n Default is an empty string.\n :param stateful: Indicates if the security group is stateful or\n stateless.\n \"\"\"\n body = _clean_dict(\n name=name or self.generate_random_name(),\n tenant_id=project_id,\n description=description,\n stateful=stateful\n )\n resp = self.client.create_security_group({\"security_group\": body})\n return resp[\"security_group\"]\n\n @atomic.action_timer(\"neutron.show_security_group\")\n def get_security_group(self, security_group_id, fields=_NONE):\n \"\"\"Get security group\n\n :param security_group_id: Security group ID\n :param fields: The 
fields that you want the server to return. If no\n fields list is specified, the networking API returns all\n attributes allowed by the policy settings. By using fields\n parameter, the API returns only the requested set of attributes.\n \"\"\"\n body = _clean_dict(fields=fields)\n resp = self.client.show_security_group(security_group_id, **body)\n return resp[\"security_group\"]\n\n @atomic.action_timer(\"neutron.update_security_group\")\n def update_security_group(self, security_group_id, name=_NONE,\n description=_NONE, stateful=_NONE):\n \"\"\"Update a security group\n\n :param security_group_id: Security group ID\n :param name: Human-readable name of the resource.\n :param description: A human-readable description for the resource.\n Default is an empty string.\n :param stateful: Indicates if the security group is stateful or\n stateless.\n \"\"\"\n body = _clean_dict(\n name=name,\n description=description,\n stateful=stateful\n )\n if not body:\n raise TypeError(\"No updates for a security group.\")\n\n resp = self.client.update_security_group(security_group_id,\n {\"security_group\": body})\n return resp[\"security_group\"]\n\n @atomic.action_timer(\"neutron.delete_security_group\")\n def delete_security_group(self, security_group_id):\n \"\"\"Delete security group.\n\n :param security_group_id: Security group ID\n \"\"\"\n return self.client.delete_security_group(security_group_id)\n\n @atomic.action_timer(\"neutron.list_security_groups\")\n def list_security_groups(self, name=_NONE, **kwargs):\n \"\"\"List security groups.\n\n :param name: Filter the list result by the human-readable name of the\n resource.\n :param kwargs: additional security group list filters\n \"\"\"\n if name:\n kwargs[\"name\"] = name\n resp = self.client.list_security_groups(**kwargs)\n return resp[\"security_groups\"]\n\n @atomic.action_timer(\"neutron.create_security_group_rule\")\n def create_security_group_rule(self,\n security_group_id,\n direction=\"ingress\",\n 
protocol=\"tcp\",\n ethertype=_NONE,\n port_range_min=_NONE,\n port_range_max=_NONE,\n remote_ip_prefix=_NONE,\n description=_NONE):\n \"\"\"Create security group rule.\n\n :param security_group_id: The security group ID to associate with this\n security group rule.\n :param direction: Ingress or egress, which is the direction in which\n the security group rule is applied.\n :param protocol: The IP protocol can be represented by a string, an\n integer, or null. Valid string or integer values are any or 0, ah\n or 51, dccp or 33, egp or 8, esp or 50, gre or 47, icmp or 1,\n icmpv6 or 58, igmp or 2, ipip or 4, ipv6-encap or 41,\n ipv6-frag or 44, ipv6-icmp or 58, ipv6-nonxt or 59,\n ipv6-opts or 60, ipv6-route or 43, ospf or 89, pgm or 113,\n rsvp or 46, sctp or 132, tcp or 6, udp or 17, udplite or 136,\n vrrp or 112. Additionally, any integer value between [0-255] is\n also valid. The string any (or integer 0) means all IP protocols.\n See the constants in neutron_lib.constants for the most\n up-to-date list of supported strings.\n :param ethertype: Must be IPv4 or IPv6, and addresses represented in\n CIDR must match the ingress or egress rules.\n :param port_range_min: The minimum port number in the range that is\n matched by the security group rule. If the protocol is TCP, UDP,\n DCCP, SCTP or UDP-Lite this value must be less than or equal to\n the port_range_max attribute value. If the protocol is ICMP, this\n value must be an ICMP type.\n :param port_range_max: The maximum port number in the range that is\n matched by the security group rule. If the protocol is TCP, UDP,\n DCCP, SCTP or UDP-Lite this value must be greater than or equal to\n the port_range_min attribute value. 
If the protocol is ICMP, this\n value must be an ICMP code.\n :param remote_ip_prefix: The remote IP prefix that is matched by this\n security group rule.\n :param description: A human-readable description for the resource.\n Default is an empty string.\n \"\"\"\n body = _clean_dict(\n security_group_id=security_group_id,\n direction=direction,\n protocol=protocol,\n ethertype=ethertype,\n port_range_min=port_range_min,\n port_range_max=port_range_max,\n remote_ip_prefix=remote_ip_prefix,\n description=description\n )\n return self.client.create_security_group_rule(\n {\"security_group_rule\": body})[\"security_group_rule\"]\n\n @atomic.action_timer(\"neutron.show_security_group_rule\")\n def get_security_group_rule(self, security_group_rule_id, verbose=_NONE,\n fields=_NONE):\n \"\"\"Get security group details\n\n :param security_group_rule_id: Security group rule ID\n :param verbose: Show detailed information.\n :param fields: The fields that you want the server to return. If no\n fields list is specified, the networking API returns all\n attributes allowed by the policy settings. 
By using fields\n parameter, the API returns only the requested set of attributes.\n \"\"\"\n body = _clean_dict(verbose=verbose, fields=fields)\n resp = self.client.show_security_group_rule(\n security_group_rule_id, **body)\n return resp[\"security_group_rule\"]\n\n @atomic.action_timer(\"neutron.delete_security_group_rule\")\n def delete_security_group_rule(self, security_group_rule_id):\n \"\"\"Delete a given security group rule.\n\n :param security_group_rule_id: Security group rule ID\n \"\"\"\n self.client.delete_security_group_rule(\n security_group_rule_id)\n\n @atomic.action_timer(\"neutron.list_security_group_rules\")\n def list_security_group_rules(\n self, security_group_id=_NONE, protocol=_NONE, direction=_NONE,\n port_range_min=_NONE, port_range_max=_NONE, description=_NONE,\n **kwargs):\n \"\"\"List all security group rules.\n\n :param security_group_id: Filter the security group rule list result\n by the ID of the security group that associates with this security\n group rule.\n :param protocol: Filter the security group rule list result by the IP\n protocol.\n :param direction: Filter the security group rule list result by the\n direction in which the security group rule is applied, which is\n ingress or egress.\n :param port_range_min: Filter the security group rule list result by\n the minimum port number in the range that is matched by the\n security group rule.\n :param port_range_max: Filter the security group rule list result by\n the maximum port number in the range that is matched by the\n security group rule.\n :param description: Filter the list result by the human-readable\n description of the resource.\n :param kwargs: additional security group rule list filters\n :return: list of security group rules\n \"\"\"\n filters = _clean_dict(\n security_group_id=security_group_id,\n protocol=protocol,\n direction=direction,\n port_range_min=port_range_min,\n port_range_max=port_range_max,\n description=description,\n **kwargs\n )\n resp = 
self.client.list_security_group_rules(**filters)\n return resp[\"security_group_rules\"]\n\n @atomic.action_timer(\"neutron.list_agents\")\n def list_agents(self, **kwargs):\n \"\"\"Fetches agents.\n\n :param kwargs: filters\n :returns: user agents list\n \"\"\"\n return self.client.list_agents(**kwargs)[\"agents\"]\n\n @atomic.action_timer(\"neutron.list_extension\")\n def list_extensions(self):\n \"\"\"List neutron extensions.\"\"\"\n return self.client.list_extensions()[\"extensions\"]\n\n @property\n def cached_supported_extensions(self):\n \"\"\"Return cached list of extension if exist or fetch it if is missed\"\"\"\n if self._cached_supported_extensions is None:\n self._cached_supported_extensions = self.list_extensions()\n return self._cached_supported_extensions\n\n def supports_extension(self, extension, silent=False):\n \"\"\"Check whether a neutron extension is supported.\n\n :param extension: Extension to check\n :param silent: Return boolean result of the search instead of raising\n an exception\n \"\"\"\n exist = any(ext.get(\"alias\") == extension\n for ext in self.cached_supported_extensions)\n if not silent and not exist:\n raise exceptions.NotFoundException(\n message=f\"Neutron driver does not support {extension}\")\n\n return exist\n" }, { "alpha_fraction": 0.5513821244239807, "alphanum_fraction": 0.5544715523719788, "avg_line_length": 38.93506622314453, "blob_id": "95b8aa54378f7ead3c2f696fb02be0d7004bb5e4", "content_id": "e94e7b3d4fbced15fe1a962a47c7eb7c769d14dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6150, "license_type": "permissive", "max_line_length": 78, "num_lines": 154, "path": "/rally_openstack/task/contexts/dataplane/heat.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file 
except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport pkgutil\n\nfrom rally.common import utils as rutils\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.heat import utils as heat_utils\n\n\ndef get_data(filename_or_resource):\n if isinstance(filename_or_resource, list):\n return pkgutil.get_data(*filename_or_resource)\n return open(filename_or_resource).read()\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"heat_dataplane\", platform=\"openstack\", order=435)\nclass HeatDataplane(context.OpenStackContext):\n \"\"\"Context class for create stack by given template.\n\n This context will create stacks by given template for each tenant and\n add details to context. Following details will be added:\n\n * id of stack;\n * template file contents;\n * files dictionary;\n * stack parameters;\n\n Heat template should define a \"gate\" node which will interact with Rally\n by ssh and workload nodes by any protocol. 
To make this possible heat\n template should accept the following parameters:\n\n * network_id: id of public network\n * router_id: id of external router to connect \"gate\" node\n * key_name: name of nova ssh keypair to use for \"gate\" node\n \"\"\"\n FILE_SCHEMA = {\n \"description\": \"\",\n \"type\": \"string\",\n }\n RESOURCE_SCHEMA = {\n \"description\": \"\",\n \"type\": \"array\",\n \"minItems\": 2,\n \"maxItems\": 2,\n \"items\": {\"type\": \"string\"}\n }\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"stacks_per_tenant\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"template\": {\n \"oneOf\": [FILE_SCHEMA, RESOURCE_SCHEMA],\n },\n \"files\": {\n \"type\": \"object\",\n \"additionalProperties\": True\n },\n \"parameters\": {\n \"type\": \"object\",\n \"additionalProperties\": True\n },\n \"context_parameters\": {\n \"type\": \"object\",\n \"additionalProperties\": True\n },\n },\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\n \"stacks_per_tenant\": 1,\n }\n\n def _get_context_parameter(self, user, tenant_id, path):\n value = {\"user\": user, \"tenant\": self.context[\"tenants\"][tenant_id]}\n for key in path.split(\".\"):\n try:\n # try to cast string to int in order to support integer keys\n # e.g 'spam.1.eggs' will be translated to [\"spam\"][1][\"eggs\"]\n key = int(key)\n except ValueError:\n pass\n try:\n value = value[key]\n except KeyError:\n raise exceptions.RallyException(\n \"There is no key %s in context\" % path)\n return value\n\n def _get_public_network_id(self):\n nc = osclients.Clients(self.context[\"admin\"][\"credential\"]).neutron()\n networks = nc.list_networks(**{\"router:external\": True})[\"networks\"]\n return networks[0][\"id\"]\n\n def setup(self):\n template = get_data(self.config[\"template\"])\n files = {}\n for key, filename in self.config.get(\"files\", {}).items():\n files[key] = get_data(filename)\n parameters = self.config.get(\"parameters\", 
rutils.LockedDict())\n with parameters.unlocked():\n if \"network_id\" not in parameters:\n parameters[\"network_id\"] = self._get_public_network_id()\n for user, tenant_id in self._iterate_per_tenants():\n for name, path in self.config.get(\"context_parameters\",\n {}).items():\n parameters[name] = self._get_context_parameter(user,\n tenant_id,\n path)\n if \"router_id\" not in parameters:\n networks = self.context[\"tenants\"][tenant_id][\"networks\"]\n parameters[\"router_id\"] = networks[0][\"router_id\"]\n if \"key_name\" not in parameters:\n parameters[\"key_name\"] = user[\"keypair\"][\"name\"]\n heat_scenario = heat_utils.HeatScenario(\n {\"user\": user, \"task\": self.context[\"task\"],\n \"owner_id\": self.context[\"owner_id\"]})\n self.context[\"tenants\"][tenant_id][\"stack_dataplane\"] = []\n for i in range(self.config[\"stacks_per_tenant\"]):\n stack = heat_scenario._create_stack(template, files=files,\n parameters=parameters)\n tenant_data = self.context[\"tenants\"][tenant_id]\n tenant_data[\"stack_dataplane\"].append([stack.id, template,\n files, parameters])\n\n def cleanup(self):\n resource_manager.cleanup(names=[\"heat.stacks\"],\n users=self.context.get(\"users\", []),\n superclass=heat_utils.HeatScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.5942954421043396, "alphanum_fraction": 0.6130267977714539, "avg_line_length": 35.13846206665039, "blob_id": "08120f6b103535910eaac323cd863dedb1dbbbf5", "content_id": "68eeab72fb39e9f8d25870e0f9cebab2614fb45a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2349, "license_type": "permissive", "max_line_length": 78, "num_lines": 65, "path": "/tests/unit/task/contexts/quotas/test_nova_quotas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance 
with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.quotas import nova_quotas\nfrom tests.unit import test\n\n\nclass NovaQuotasTestCase(test.TestCase):\n\n def setUp(self):\n super(NovaQuotasTestCase, self).setUp()\n self.quotas = {\n \"instances\": 10,\n \"cores\": 100,\n \"ram\": 100000,\n \"floating_ips\": 100,\n \"fixed_ips\": 10000,\n \"metadata_items\": 5,\n \"injected_files\": 5,\n \"injected_file_content_bytes\": 2048,\n \"injected_file_path_bytes\": 1024,\n \"key_pairs\": 50,\n \"security_groups\": 50,\n \"security_group_rules\": 50,\n \"server_group_members\": 777,\n \"server_groups\": 33\n }\n\n def test_update(self):\n clients = mock.MagicMock()\n nova_quo = nova_quotas.NovaQuotas(clients)\n tenant_id = mock.MagicMock()\n nova_quo.update(tenant_id, **self.quotas)\n clients.nova().quotas.update.assert_called_once_with(tenant_id,\n **self.quotas)\n\n def test_delete(self):\n clients = mock.MagicMock()\n nova_quo = nova_quotas.NovaQuotas(clients)\n tenant_id = mock.MagicMock()\n nova_quo.delete(tenant_id)\n clients.nova().quotas.delete.assert_called_once_with(tenant_id)\n\n def test_get(self):\n tenant_id = \"tenant_id\"\n quota_set = mock.MagicMock(**self.quotas)\n clients = mock.MagicMock()\n clients.nova.return_value.quotas.get.return_value = quota_set\n nova_quo = nova_quotas.NovaQuotas(clients)\n\n self.assertEqual(self.quotas, nova_quo.get(tenant_id))\n clients.nova().quotas.get.assert_called_once_with(tenant_id)\n" }, { "alpha_fraction": 0.5007262229919434, "alphanum_fraction": 0.5027233362197876, 
"avg_line_length": 38.62590026855469, "blob_id": "4a44224b1e2d0964ac82288093fcc68606cee9a2", "content_id": "2a40fc77f4ee2e1a1dcde149ed32d8d9136402f1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5508, "license_type": "permissive", "max_line_length": 79, "num_lines": 139, "path": "/rally_openstack/task/contexts/nova/servers.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.common import validation\n\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.nova import utils as nova_utils\nfrom rally_openstack.task import types\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"servers\", platform=\"openstack\", order=430)\nclass ServerGenerator(context.OpenStackContext):\n \"\"\"Creates specified amount of Nova Servers per each tenant.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"image\": {\n \"description\": \"Name of image to boot server(s) from.\",\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"}\n },\n \"additionalProperties\": False\n },\n \"flavor\": {\n \"description\": \"Name of flavor to boot 
server(s) with.\",\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"}\n },\n \"additionalProperties\": False\n },\n \"servers_per_tenant\": {\n \"description\": \"Number of servers to boot in each Tenant.\",\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"auto_assign_nic\": {\n \"description\": \"True if NICs should be assigned.\",\n \"type\": \"boolean\",\n },\n \"nics\": {\n \"type\": \"array\",\n \"description\": \"List of networks to attach to server.\",\n \"items\": {\"oneOf\": [\n {\n \"type\": \"object\",\n \"properties\": {\"net-id\": {\"type\": \"string\"}},\n \"description\": \"Network ID in a format like OpenStack \"\n \"API expects to see.\",\n \"additionalProperties\": False\n },\n {\n \"type\": \"string\",\n \"description\": \"Network ID.\"\n }\n ]},\n \"minItems\": 1\n }\n },\n \"required\": [\"image\", \"flavor\"],\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\n \"servers_per_tenant\": 5,\n \"auto_assign_nic\": False\n }\n\n def setup(self):\n image = self.config[\"image\"]\n flavor = self.config[\"flavor\"]\n auto_nic = self.config[\"auto_assign_nic\"]\n servers_per_tenant = self.config[\"servers_per_tenant\"]\n kwargs = {}\n if self.config.get(\"nics\"):\n if isinstance(self.config[\"nics\"][0], dict):\n # it is a format that Nova API expects\n kwargs[\"nics\"] = list(self.config[\"nics\"])\n else:\n kwargs[\"nics\"] = [{\"net-id\": nic}\n for nic in self.config[\"nics\"]]\n\n image_id = types.GlanceImage(self.context).pre_process(\n resource_spec=image, config={})\n flavor_id = types.Flavor(self.context).pre_process(\n resource_spec=flavor, config={})\n\n for iter_, (user, tenant_id) in enumerate(self._iterate_per_tenants()):\n LOG.debug(\"Booting servers for user tenant %s\" % user[\"tenant_id\"])\n tmp_context = {\"user\": user,\n \"tenant\": self.context[\"tenants\"][tenant_id],\n \"task\": self.context[\"task\"],\n \"owner_id\": self.context[\"owner_id\"],\n \"iteration\": iter_}\n nova_scenario = 
nova_utils.NovaScenario(tmp_context)\n\n LOG.debug(\"Calling _boot_servers with image_id=%(image_id)s \"\n \"flavor_id=%(flavor_id)s \"\n \"servers_per_tenant=%(servers_per_tenant)s\"\n % {\"image_id\": image_id,\n \"flavor_id\": flavor_id,\n \"servers_per_tenant\": servers_per_tenant})\n\n servers = nova_scenario._boot_servers(image_id, flavor_id,\n requests=servers_per_tenant,\n auto_assign_nic=auto_nic,\n **kwargs)\n\n current_servers = [server.id for server in servers]\n\n LOG.debug(\"Adding booted servers %s to context\" % current_servers)\n\n self.context[\"tenants\"][tenant_id][\n \"servers\"] = current_servers\n\n def cleanup(self):\n resource_manager.cleanup(names=[\"nova.servers\"],\n users=self.context.get(\"users\", []),\n superclass=nova_utils.NovaScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.6414273977279663, "alphanum_fraction": 0.6440557837486267, "avg_line_length": 41.27350616455078, "blob_id": "925e8709d675794d517771cc46a3989d89505ecf", "content_id": "5235bf3f3fd6c3378d7528c186db78f6ebc9a1dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9892, "license_type": "permissive", "max_line_length": 79, "num_lines": 234, "path": "/rally_openstack/common/services/key_manager/barbican.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import atomic\nfrom rally.task import service\n\n\nclass BarbicanService(service.Service):\n\n @atomic.action_timer(\"barbican.list_secrets\")\n def list_secrets(self):\n \"\"\"List Secret\"\"\"\n return self._clients.barbican().secrets.list()\n\n @atomic.action_timer(\"barbican.create_secret\")\n def create_secret(self, name=None, payload=None,\n payload_content_type=None, payload_content_encoding=None,\n algorithm=None, bit_length=None, secret_type=None,\n mode=None, expiration=None):\n \"\"\"Create Secret\n\n :param name: A friendly name for the secret\n :param payload: The unecrypted secret data\n :param payload_content_type: the format/type of the secret data\n :param payload_content_encoding: the encoding of the secret data\n :param algorithm: the algorithm associated with this secret key\n :param bit_length: The bit length of this secret key\n :param mode: the algorigthm mode used with this secret key\n :param secret_type: The secret type for this secret key\n :param exipration: the expiration time of the secret in ISO8601\n format\n :returns: a new secret object\n \"\"\"\n name = name or self.generate_random_name()\n val = self._clients.barbican().secrets.create(\n name=name, payload=payload,\n payload_content_type=payload_content_type,\n payload_content_encoding=payload_content_encoding,\n algorithm=algorithm, bit_length=bit_length, mode=mode,\n secret_type=secret_type, expiration=expiration)\n val.store()\n return val\n\n @atomic.action_timer(\"barbican.get_secret\")\n def get_secret(self, secret_ref):\n \"\"\"Get the secret.\n\n :param secret_name: The name of the secret.\n \"\"\"\n secret = self._clients.barbican().secrets.get(secret_ref)\n # secret is lazy, its properties would be filled with real\n # values while getting some property.\n try:\n secret.status\n except Exception as e:\n from rally import exceptions\n raise 
exceptions.GetResourceFailure(resource=secret, err=e)\n return secret\n\n @atomic.action_timer(\"barbican.delete_secret\")\n def delete_secret(self, secret_name):\n \"\"\"Delete the secret\n\n :param secret_name: The name of the secret to delete\n \"\"\"\n return self._clients.barbican().secrets.delete(secret_name)\n\n @atomic.action_timer(\"barbican.list_container\")\n def list_container(self):\n \"\"\"List containers\"\"\"\n return self._clients.barbican().containers.list()\n\n @atomic.action_timer(\"barbican.container_delete\")\n def container_delete(self, container_href):\n \"\"\"Delete the container\n\n :param container_href: the container reference\n \"\"\"\n return self._clients.barbican().containers.delete(container_href)\n\n @atomic.action_timer(\"barbican.container_create\")\n def container_create(self, name=None, secrets=None):\n \"\"\"Create a generic container\n\n :param name: the name of the container\n :param secrets: secrets to populate when creating a container\n \"\"\"\n name = name or self.generate_random_name()\n val = self._clients.barbican().containers.create(\n name=name, secrets=secrets)\n val.store()\n return val\n\n @atomic.action_timer(\"barbican.create_rsa_container\")\n def create_rsa_container(self, name=None, public_key=None,\n private_key=None, private_key_passphrase=None):\n \"\"\"Create a RSA container\n\n :param name: a friendly name for the container\n :param public_key: Secret object containing a Public Key\n :param private_key: Secret object containing a Private Key\n :param private_key_passphrase: Secret object containing\n a passphrase\n :returns: RSAContainer\n \"\"\"\n name = name or self.generate_random_name()\n val = self._clients.barbican().containers.create_rsa(\n name=name, public_key=public_key, private_key=private_key,\n private_key_passphrase=private_key_passphrase)\n val.store()\n return val\n\n @atomic.action_timer(\"barbican.create_certificate_container\")\n def create_certificate_container(self, name=None, 
certificate=None,\n intermediates=None, private_key=None,\n private_key_passphrase=None):\n \"\"\"Create a certificate container\n\n :param name: A friendly name for the CertificateContainer\n :param certificate: Secret object containing a Certificate\n :param intermediates: Secret object containing\n Intermediate Certs\n :param private_key: Secret object containing a Private Key\n :param private_key_passphrase: Secret object containing a passphrase\n :returns: CertificateContainer\n \"\"\"\n name = name or self.generate_random_name()\n val = self._clients.barbican().containers.create_certificate(\n name=name, certificate=certificate, intermediates=intermediates,\n private_key=private_key, private_key_passphrase=None)\n val.store()\n return val\n\n @atomic.action_timer(\"barbican.orders_list\")\n def orders_list(self):\n \"\"\"list orders\"\"\"\n return self._clients.barbican().orders.list()\n\n @atomic.action_timer(\"barbican.orders_delete\")\n def orders_delete(self, order_ref):\n \"\"\"Delete the order\n\n :param order_ref: The order reference\n \"\"\"\n return self._clients.barbican().orders.delete(order_ref)\n\n @atomic.action_timer(\"barbican.orders_get\")\n def orders_get(self, order_ref):\n \"\"\"Get the order\n\n :param order_ref: The order reference\n \"\"\"\n return self._clients.barbican().orders.get(order_ref)\n\n @atomic.action_timer(\"barbican.create_key\")\n def create_key(self, name=None, algorithm=\"aes\", bit_length=256, mode=None,\n payload_content_type=None, expiration=None):\n \"\"\"Create a key order object\n\n :param name: A friendly name for the secret to be created\n :param algorithm: The algorithm associated with this secret key\n :param bit_length: The bit length of this secret key\n :param mode: The algorithm mode used with this secret key\n :param payload_content_type: The format/type of the secret data\n :param expiration: The expiration time of the secret\n in ISO 8601 format\n :returns: KeyOrder\n \"\"\"\n name = name or 
self.generate_random_name()\n order = self._clients.barbican().orders.create_key(\n name=name, algorithm=algorithm, bit_length=bit_length,\n mode=mode, payload_content_type=payload_content_type,\n expiration=expiration)\n order.submit()\n return order\n\n @atomic.action_timer(\"barbican.create_asymmetric\")\n def create_asymmetric(self, name=None, algorithm=\"aes\", bit_length=256,\n pass_phrase=None, payload_content_type=None,\n expiration=None):\n \"\"\"Create an asymmetric order object\n\n :param name: A friendly name for the container to be created\n :param algorithm: The algorithm associated with this secret key\n :param bit_length: The bit length of this secret key\n :param pass_phrase: Optional passphrase\n :param payload_content_type: The format/type of the secret data\n :param expiration: The expiration time of the secret\n in ISO 8601 format\n :returns: AsymmetricOrder\n \"\"\"\n name = name or self.generate_random_name()\n order = self._clients.barbican().orders.create_asymmetric(\n name=name, algorithm=algorithm, bit_length=bit_length,\n pass_phrase=pass_phrase, payload_content_type=payload_content_type,\n expiration=expiration)\n order.submit()\n return order\n\n @atomic.action_timer(\"barbican.create_certificate\")\n def create_certificate(self, name=None, request_type=None, subject_dn=None,\n source_container_ref=None, ca_id=None, profile=None,\n request_data=None):\n \"\"\"Create a certificate order object\n\n :param name: A friendly name for the container to be created\n :param request_type: The type of the certificate request\n :param subject_dn: A subject for the certificate\n :param source_container_ref: A container with a\n public/private key pair to use as source for stored-key\n requests\n :param ca_id: The identifier of the CA to use\n :param profile: The profile of certificate to use\n :param request_data: The CSR content\n :returns: CertificateOrder\n \"\"\"\n name = name or self.generate_random_name()\n order = 
self._clients.barbican().orders.create_certificate(\n name=name, request_type=request_type, subject_dn=subject_dn,\n source_container_ref=source_container_ref, ca_id=ca_id,\n profile=profile, request_data=request_data)\n order.submit()\n return order\n" }, { "alpha_fraction": 0.44257405400276184, "alphanum_fraction": 0.4467102587223053, "avg_line_length": 36.80363464355469, "blob_id": "5472e237f88ad55b13252ffba6e251a17559df04", "content_id": "33ad9aa87e7a86f0aba74e161c105386f37b8350", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10396, "license_type": "permissive", "max_line_length": 79, "num_lines": 275, "path": "/rally_openstack/task/contexts/api_versions.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task import context\n\n\[email protected](\"check_api_versions\")\nclass CheckOpenStackAPIVersionsValidator(validation.Validator):\n \"\"\"Additional validation for api_versions context\"\"\"\n\n def validate(self, context, config, plugin_cls, plugin_cfg):\n for client in plugin_cfg:\n client_cls = osclients.OSClient.get(client)\n try:\n if (\"service_type\" in plugin_cfg[client]\n or \"service_name\" in plugin_cfg[client]):\n client_cls.is_service_type_configurable()\n\n if \"version\" in plugin_cfg[client]:\n client_cls.validate_version(plugin_cfg[client][\"version\"])\n\n except exceptions.RallyException as e:\n return self.fail(\n \"Invalid settings for '%(client)s': %(error)s\" % {\n \"client\": client,\n \"error\": e.format_message()})\n\n\[email protected](\"check_api_versions\")\[email protected](name=\"api_versions\", platform=\"openstack\", order=150)\nclass OpenStackAPIVersions(context.OpenStackContext):\n \"\"\"Context for specifying OpenStack clients versions and service types.\n\n Some OpenStack services support several API versions. To recognize\n the endpoints of each version, separate service types are provided in\n Keystone service catalog.\n\n Rally has the map of default service names - service types. But since\n service type is an entity, which can be configured manually by admin(\n via keystone api) without relation to service name, such map can be\n insufficient.\n\n Also, Keystone service catalog does not provide a map types to name\n (this statement is true for keystone < 3.3 ).\n\n This context was designed for not-default service types and not-default\n API versions usage.\n\n An example of specifying API version:\n\n .. 
code-block:: json\n\n # In this example we will launch NovaKeypair.create_and_list_keypairs\n # scenario on 2.2 api version.\n {\n \"NovaKeypair.create_and_list_keypairs\": [\n {\n \"args\": {\n \"key_type\": \"x509\"\n },\n \"runner\": {\n \"type\": \"constant\",\n \"times\": 10,\n \"concurrency\": 2\n },\n \"context\": {\n \"users\": {\n \"tenants\": 3,\n \"users_per_tenant\": 2\n },\n \"api_versions\": {\n \"nova\": {\n \"version\": 2.2\n }\n }\n }\n }\n ]\n }\n\n An example of specifying API version along with service type:\n\n .. code-block:: json\n\n # In this example we will launch CinderVolumes.create_and_attach_volume\n # scenario on Cinder V2\n {\n \"CinderVolumes.create_and_attach_volume\": [\n {\n \"args\": {\n \"size\": 10,\n \"image\": {\n \"name\": \"^cirros.*-disk$\"\n },\n \"flavor\": {\n \"name\": \"m1.tiny\"\n },\n \"create_volume_params\": {\n \"availability_zone\": \"nova\"\n }\n },\n \"runner\": {\n \"type\": \"constant\",\n \"times\": 5,\n \"concurrency\": 1\n },\n \"context\": {\n \"users\": {\n \"tenants\": 2,\n \"users_per_tenant\": 2\n },\n \"api_versions\": {\n \"cinder\": {\n \"version\": 2,\n \"service_type\": \"volumev2\"\n }\n }\n }\n }\n ]\n }\n\n Also, it possible to use service name as an identifier of service endpoint,\n but an admin user is required (Keystone can return map of service\n names - types, but such API is permitted only for admin). An example:\n\n .. 
code-block:: json\n\n # Similar to the previous example, but `service_name` argument is used\n # instead of `service_type`\n {\n \"CinderVolumes.create_and_attach_volume\": [\n {\n \"args\": {\n \"size\": 10,\n \"image\": {\n \"name\": \"^cirros.*-disk$\"\n },\n \"flavor\": {\n \"name\": \"m1.tiny\"\n },\n \"create_volume_params\": {\n \"availability_zone\": \"nova\"\n }\n },\n \"runner\": {\n \"type\": \"constant\",\n \"times\": 5,\n \"concurrency\": 1\n },\n \"context\": {\n \"users\": {\n \"tenants\": 2,\n \"users_per_tenant\": 2\n },\n \"api_versions\": {\n \"cinder\": {\n \"version\": 2,\n \"service_name\": \"cinderv2\"\n }\n }\n }\n }\n ]\n }\n\n \"\"\"\n VERSION_SCHEMA = {\n \"anyOf\": [\n {\"type\": \"string\", \"description\": \"a string-like version.\"},\n {\"type\": \"number\", \"description\": \"a number-like version.\"}\n ]\n }\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"patternProperties\": {\n \"^[a-z]+$\": {\n \"type\": \"object\",\n \"oneOf\": [\n {\n \"description\": \"version only\",\n \"properties\": {\n \"version\": VERSION_SCHEMA,\n },\n \"required\": [\"version\"],\n \"additionalProperties\": False\n },\n {\n \"description\": \"version and service_name\",\n \"properties\": {\n \"version\": VERSION_SCHEMA,\n \"service_name\": {\"type\": \"string\"}\n },\n \"required\": [\"service_name\"],\n \"additionalProperties\": False\n },\n {\n \"description\": \"version and service_type\",\n \"properties\": {\n \"version\": VERSION_SCHEMA,\n \"service_type\": {\"type\": \"string\"}\n },\n \"required\": [\"service_type\"],\n \"additionalProperties\": False\n }\n ],\n }\n },\n \"minProperties\": 1,\n \"additionalProperties\": False\n }\n\n def setup(self):\n # FIXME(andreykurilin): move all checks to validate method.\n\n # use admin only when `service_name` is presented\n admin_clients = osclients.Clients(\n self.context.get(\"admin\", {}).get(\"credential\"))\n clients = osclients.Clients(random.choice(\n 
self.context[\"users\"])[\"credential\"])\n services = clients.keystone.service_catalog.get_endpoints()\n services_from_admin = None\n for client_name, conf in self.config.items():\n if \"service_type\" in conf and conf[\"service_type\"] not in services:\n raise exceptions.ValidationError(\n \"There is no service with '%s' type in your environment.\"\n % conf[\"service_type\"])\n elif \"service_name\" in conf:\n if not self.context.get(\"admin\", {}).get(\"credential\"):\n raise exceptions.ContextSetupFailure(\n ctx_name=self.get_name(),\n msg=\"Setting 'service_name' is admin only operation.\")\n if not services_from_admin:\n services_from_admin = dict(\n [(s.name, s.type)\n for s in admin_clients.keystone().services.list()])\n if conf[\"service_name\"] not in services_from_admin:\n raise exceptions.ValidationError(\n \"There is no '%s' service in your environment\"\n % conf[\"service_name\"])\n\n # TODO(boris-42): Use separate key [\"openstack\"][\"versions\"]\n self.context[\"config\"][\"api_versions@openstack\"][client_name][\n \"service_type\"] = services_from_admin[conf[\"service_name\"]]\n\n admin_cred = self.context.get(\"admin\", {}).get(\"credential\")\n if admin_cred:\n admin_cred[\"api_info\"].update(\n self.context[\"config\"][\"api_versions@openstack\"]\n )\n for user in self.context[\"users\"]:\n user[\"credential\"][\"api_info\"].update(\n self.context[\"config\"][\"api_versions@openstack\"]\n )\n\n def cleanup(self):\n # nothing to do here\n pass\n" }, { "alpha_fraction": 0.6758474707603455, "alphanum_fraction": 0.6832627058029175, "avg_line_length": 39.17021179199219, "blob_id": "c5c7bcd1118f546e189e68f0736f6538b1a453b5", "content_id": "8dd240273d7de44f604255d5d3ed739354850081", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1888, "license_type": "permissive", "max_line_length": 78, "num_lines": 47, "path": "/tests/functional/extra/fake_dir/fake_plugin.py", "repo_name": 
"openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task import scenario\n\n\[email protected](\"fakedummy\", default_version=\"1\",\n default_service_type=\"dummy\",\n supported_versions=[\"1\", \"2\"])\nclass FakeDummy(osclients.OSClient):\n def create_client(self, version=None, service_type=None):\n version = self.choose_version(version)\n service_type = self.choose_service_type(service_type)\n return {\"version\": version, \"service_type\": service_type}\n\n\[email protected](\"faileddummy\", default_version=\"1\",\n default_service_type=\"faileddummy\",\n supported_versions=[\"1\", \"2\"])\nclass FailedDummy(osclients.OSClient):\n def create_client(self, version=None, service_type=None):\n raise Exception(\"Failed Dummy\")\n\n\[email protected](name=\"FakeDummy.openstack_api\")\nclass FakeDummyOpenstackAPI(scenario.OpenStackScenario):\n\n def run(self):\n admin_client = self.admin_clients(\"fakedummy\")\n self.assertEqual(\"dummyv2\", admin_client[\"service_type\"])\n self.assertEqual(\"2\", admin_client[\"version\"])\n\n client = self.clients(\"fakedummy\")\n self.assertEqual(\"dummyv2\", client[\"service_type\"])\n self.assertEqual(\"2\", client[\"version\"])\n" }, { "alpha_fraction": 0.7259933948516846, "alphanum_fraction": 0.7293046116828918, "avg_line_length": 37.967742919921875, "blob_id": 
"04e9f356fe6fd89d3dac457c6ce66c2ea02b5fd7", "content_id": "a6a173f66ae3fdeb788b8a9a197be55b43e45bdd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1208, "license_type": "permissive", "max_line_length": 78, "num_lines": 31, "path": "/rally_openstack/__init__.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport pbr.version\nfrom rally.common import version as __rally_version__\n\nfrom rally_openstack import _compat\n\n__rally_version__ = __rally_version__.version_info.semantic_version()\n__rally_version__ = __rally_version__.version_tuple()\n\n__version_info__ = pbr.version.VersionInfo(\"rally-openstack\")\n__version__ = __version_info__.version_string()\n__version_tuple__ = __version_info__.semantic_version().version_tuple()\n\n\n# WARNING: IF YOU ARE LOOKING FOR SOME PHYSICALLY UNEXISTING MODULES THAT CAN\n# BE IMPORTED (FOR BACKWARD COMPATIBILITY), PLEASE CHECK THE NEXT FUNCTION\n# HAPPY DEBUGGING!!\n_compat.init()\n" }, { "alpha_fraction": 0.6250767111778259, "alphanum_fraction": 0.6267130374908447, "avg_line_length": 40.78632354736328, "blob_id": "95a6d8e3f4ea2cf78d4b49d30880ba468a71029e", "content_id": "ecf51f2ee74038beba66e92482937459f12256ee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4889, 
"license_type": "permissive", "max_line_length": 79, "num_lines": 117, "path": "/rally_openstack/task/scenarios/swift/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Cisco Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import atomic\n\nfrom rally_openstack.task import scenario\n\n\nclass SwiftScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Swift scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"swift.list_containers\")\n def _list_containers(self, full_listing=True, **kwargs):\n \"\"\"Return list of containers.\n\n :param full_listing: bool, enable unlimit number of listing returned\n :param kwargs: dict, other optional parameters to get_account\n\n :returns: tuple, (dict of response headers, a list of containers)\n \"\"\"\n return self.clients(\"swift\").get_account(full_listing=full_listing,\n **kwargs)\n\n @atomic.action_timer(\"swift.create_container\")\n def _create_container(self, public=False, **kwargs):\n \"\"\"Create a new container.\n\n :param public: bool, set container as public\n :param kwargs: dict, other optional parameters to put_container\n\n :returns: container name\n \"\"\"\n if public:\n kwargs.setdefault(\"headers\", {})\n kwargs[\"headers\"].setdefault(\"X-Container-Read\", \".r:*,.rlistings\")\n\n container_name = self.generate_random_name()\n\n 
self.clients(\"swift\").put_container(container_name, **kwargs)\n return container_name\n\n @atomic.action_timer(\"swift.delete_container\")\n def _delete_container(self, container_name, **kwargs):\n \"\"\"Delete a container with given name.\n\n :param container_name: str, name of the container to delete\n :param kwargs: dict, other optional parameters to delete_container\n \"\"\"\n self.clients(\"swift\").delete_container(container_name, **kwargs)\n\n @atomic.action_timer(\"swift.list_objects\")\n def _list_objects(self, container_name, full_listing=True, **kwargs):\n \"\"\"Return objects inside container.\n\n :param container_name: str, name of the container to make the list\n objects operation against\n :param full_listing: bool, enable unlimit number of listing returned\n :param kwargs: dict, other optional parameters to get_container\n\n :returns: tuple, (dict of response headers, a list of objects)\n \"\"\"\n return self.clients(\"swift\").get_container(container_name,\n full_listing=full_listing,\n **kwargs)\n\n @atomic.action_timer(\"swift.upload_object\")\n def _upload_object(self, container_name, content, **kwargs):\n \"\"\"Upload content to a given container.\n\n :param container_name: str, name of the container to upload object to\n :param content: file stream, content to upload\n :param kwargs: dict, other optional parameters to put_object\n\n :returns: tuple, (etag and object name)\n \"\"\"\n object_name = self.generate_random_name()\n\n return (self.clients(\"swift\").put_object(container_name, object_name,\n content, **kwargs),\n object_name)\n\n @atomic.action_timer(\"swift.download_object\")\n def _download_object(self, container_name, object_name, **kwargs):\n \"\"\"Download object from container.\n\n :param container_name: str, name of the container to download object\n from\n :param object_name: str, name of the object to download\n :param kwargs: dict, other optional parameters to get_object\n\n :returns: tuple, (dict of response headers, the 
object's contents)\n \"\"\"\n return self.clients(\"swift\").get_object(container_name, object_name,\n **kwargs)\n\n @atomic.action_timer(\"swift.delete_object\")\n def _delete_object(self, container_name, object_name, **kwargs):\n \"\"\"Delete object from container.\n\n :param container_name: str, name of the container to delete object from\n :param object_name: str, name of the object to delete\n :param kwargs: dict, other optional parameters to delete_object\n \"\"\"\n self.clients(\"swift\").delete_object(container_name, object_name,\n **kwargs)\n" }, { "alpha_fraction": 0.5494348406791687, "alphanum_fraction": 0.5516183972358704, "avg_line_length": 41.283775329589844, "blob_id": "706aa51998a610c501ae0e44afb4fe561177d9c2", "content_id": "7d8b84d96c1c5c850bb8f83df7187fb900e95427", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62284, "license_type": "permissive", "max_line_length": 79, "num_lines": 1473, "path": "/tests/unit/task/scenarios/neutron/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Intel Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\nimport netaddr\n\nfrom rally import exceptions\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import credential\nfrom rally_openstack.task.scenarios.neutron import utils\nfrom tests.unit import test\n\nNETWORK_SERVICE = \"rally_openstack.common.services.network\"\nNET_UTILS = \"%s.net_utils\" % NETWORK_SERVICE\nNEUTRON_UTILS = \"rally_openstack.task.scenarios.neutron.utils\"\n\n\[email protected]\nclass NeutronScenarioTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(NeutronScenarioTestCase, self).setUp()\n self.network = mock.Mock()\n self._clients = mock.MagicMock(\n credential=credential.OpenStackCredential(\n auth_url=\"example.com\",\n username=\"root\",\n password=\"changeme\",\n permission=consts.EndpointPermission.ADMIN\n )\n )\n self._nc = self._clients.neutron.return_value\n self.scenario = utils.NeutronScenario(self.context,\n clients=self._clients)\n\n self.random_name = \"random_name\"\n name_generator = mock.Mock(return_value=self.random_name)\n self.scenario.generate_random_name = name_generator\n self.scenario.neutron._name_generator = name_generator\n\n def test__get_network_id(self):\n networks = [{\"id\": \"foo-id\", \"name\": \"foo-network\"},\n {\"id\": \"bar-id\", \"name\": \"bar-network\"}]\n network_id = \"foo-id\"\n\n # Valid network-name\n network = \"foo-network\"\n self._nc.list_networks = mock.Mock(return_value={\"networks\": networks})\n resultant_network_id = self.scenario._get_network_id(network)\n self.assertEqual(network_id, resultant_network_id)\n self._nc.list_networks.assert_called_once_with()\n\n self._nc.list_networks.reset_mock()\n\n # Valid network-id\n network = \"foo-id\"\n resultant_network_id = self.scenario._get_network_id(network)\n self.assertEqual(network_id, resultant_network_id)\n self._nc.list_networks.assert_called_once_with()\n 
self._nc.list_networks.reset_mock()\n\n # Invalid network-name\n network = \"absent-network\"\n self.assertRaises(exceptions.NotFoundException,\n self.scenario._get_network_id, network)\n self._nc.list_networks.assert_called_once_with()\n\n def test_create_network(self):\n network = {\"network\": mock.Mock()}\n self._nc.create_network.return_value = network\n\n network_data = {\"admin_state_up\": False}\n\n self.assertEqual(network, self.scenario._create_network(network_data))\n\n expected_network_data = {\"network\": network_data}\n network_data[\"name\"] = self.scenario.generate_random_name.return_value\n self._nc.create_network.assert_called_once_with(expected_network_data)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_network\")\n\n def test_list_networks(self):\n networks_list = []\n networks_dict = {\"networks\": networks_list}\n self._nc.list_networks.return_value = networks_dict\n\n return_networks_list = self.scenario._list_networks()\n self.assertEqual(networks_list, return_networks_list)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_networks\", count=1)\n\n def test_show_network(self):\n network = {\n \"network\": {\n \"id\": \"fake-id\",\n \"name\": \"fake-name\",\n \"admin_state_up\": False\n }\n }\n\n return_network = self.scenario._show_network(network)\n self.assertEqual(\n {\"network\": self._nc.show_network.return_value[\"network\"]},\n return_network)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.show_network\")\n\n def test_show_router(self):\n router = {\n \"router\": {\n \"id\": \"fake-id\",\n \"name\": \"fake-name\",\n \"admin_state_up\": False\n }\n }\n\n return_router = self.scenario._show_router(router)\n self.assertEqual(\n {\"router\": self._nc.show_router.return_value[\"router\"]},\n return_router)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.show_router\")\n\n def test_update_network(self):\n 
expected_network = {\n \"network\": {\n \"name\": self.scenario.generate_random_name.return_value,\n \"admin_state_up\": False\n }\n }\n self._nc.update_network.return_value = expected_network\n\n network = {\"network\": {\"name\": \"network-name\", \"id\": \"network-id\"}}\n network_update_args = {\"name\": \"foo\", \"admin_state_up\": False}\n\n result_network = self.scenario._update_network(network,\n network_update_args)\n self._nc.update_network.assert_called_once_with(\n network[\"network\"][\"id\"], expected_network)\n self.assertEqual(expected_network, result_network)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.update_network\")\n\n def test_delete_network(self):\n net_id = \"foo\"\n network = {\"id\": net_id}\n self.scenario._delete_network(network)\n self._nc.delete_network.assert_called_once_with(net_id)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_network\")\n\n @mock.patch(\"%s.generate_cidr\" % NET_UTILS)\n def test_create_subnet(self, mock_generate_cidr):\n network_id = \"fake-id\"\n start_cidr = \"192.168.0.0/24\"\n mock_generate_cidr.return_value = (4, \"192.168.0.0/24\")\n\n network = {\"network\": {\"id\": network_id}}\n expected_subnet_data = {\n \"subnet\": {\n \"network_id\": network_id,\n \"cidr\": start_cidr,\n \"ip_version\": netaddr.IPNetwork(start_cidr).version,\n \"name\": self.scenario.generate_random_name.return_value,\n \"dns_nameservers\": mock.ANY\n }\n }\n\n # Default options\n subnet_data = {\"network_id\": network_id}\n self.scenario._create_subnet(network, subnet_data, start_cidr)\n self._nc.create_subnet.assert_called_once_with(\n expected_subnet_data)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_subnet\")\n\n self._nc.create_subnet.reset_mock()\n\n # Custom options\n extras = {\"cidr\": \"2001::/64\", \"allocation_pools\": []}\n extras[\"ip_version\"] = netaddr.IPNetwork(extras[\"cidr\"]).version\n 
mock_generate_cidr.return_value = (6, \"2001::/64\")\n subnet_data.update(extras)\n expected_subnet_data[\"subnet\"].update(extras)\n self.scenario._create_subnet(network, subnet_data)\n self._nc.create_subnet.assert_called_once_with(expected_subnet_data)\n\n def test_list_subnets(self):\n subnets = [{\"name\": \"fake1\"}, {\"name\": \"fake2\"}]\n self._nc.list_subnets.return_value = {\"subnets\": subnets}\n result = self.scenario._list_subnets()\n self.assertEqual(subnets, result)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_subnets\")\n\n def test_show_subnet(self):\n subnet = {\"subnet\": {\"name\": \"fake-name\", \"id\": \"fake-id\"}}\n\n result_subnet = self.scenario._show_subnet(subnet)\n self.assertEqual(\n {\"subnet\": self._nc.show_subnet.return_value[\"subnet\"]},\n result_subnet)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.show_subnet\")\n\n def test_update_subnet(self):\n expected_subnet = {\n \"subnet\": {\n \"name\": self.scenario.generate_random_name.return_value,\n \"enable_dhcp\": False\n }\n }\n self._nc.update_subnet.return_value = expected_subnet\n\n subnet = {\"subnet\": {\"name\": \"subnet-name\", \"id\": \"subnet-id\"}}\n subnet_update_args = {\"name\": \"foo\", \"enable_dhcp\": False}\n\n result_subnet = self.scenario._update_subnet(subnet,\n subnet_update_args)\n self._nc.update_subnet.assert_called_once_with(\n subnet[\"subnet\"][\"id\"], expected_subnet)\n self.assertEqual(expected_subnet, result_subnet)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.update_subnet\")\n\n def test_delete_subnet(self):\n network = self.scenario._create_network({})\n subnet = self.scenario._create_subnet(network, {})\n self.scenario._delete_subnet(subnet)\n\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_subnet\")\n\n def test_create_router(self):\n router = self._nc.create_router.return_value\n\n # Default options\n 
result_router = self.scenario._create_router({})\n self._nc.create_router.assert_called_once_with({\n \"router\": {\n \"name\": self.scenario.generate_random_name.return_value\n }\n })\n self.assertEqual({\"router\": router[\"router\"]}, result_router)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_router\")\n\n def test_create_router_with_ext_gw(self):\n self._clients.credential.permission = consts.EndpointPermission.ADMIN\n net_id = \"ext-net\"\n self._nc.list_networks.return_value = {\n \"networks\": [{\"id\": net_id, \"router:external\": True}]\n }\n self._nc.list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"ext-gw-mode\"}]}\n\n # External_gw options\n gw_info = {\"network_id\": net_id, \"enable_snat\": True}\n router_data = {\n \"name\": self.scenario.generate_random_name.return_value,\n \"external_gateway_info\": gw_info\n }\n result_router = self.scenario._create_router({}, external_gw=True)\n self._nc.create_router.assert_called_once_with(\n {\"router\": router_data})\n self.assertEqual(\n {\"router\": self._nc.create_router.return_value[\"router\"]},\n result_router\n )\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"neutron.create_router\")\n\n def test_create_router_with_ext_gw_but_no_ext_net(self):\n self._nc.list_networks.return_value = {\"networks\": []}\n self._nc.list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"ext-gw-mode\"}]\n }\n\n # External_gw options with no external networks in list_networks()\n result_router = self.scenario._create_router({}, external_gw=True)\n self._nc.create_router.assert_called_once_with({\n \"router\": {\"name\": self.scenario.generate_random_name.return_value}\n })\n self.assertEqual(\n {\"router\": self._nc.create_router.return_value[\"router\"]},\n result_router\n )\n self._nc.list_networks.assert_called_once_with(\n **{\"router:external\": True}\n )\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n 
\"neutron.create_router\")\n\n def test_create_router_with_ext_gw_but_no_ext_gw_mode_extension(self):\n net_id = \"ext-net\"\n self._nc.list_networks.return_value = {\n \"networks\": [{\"id\": net_id, \"router:external\": True}]\n }\n self._nc.list_extensions.return_value = {\"extensions\": []}\n\n result_router = self.scenario._create_router({}, external_gw=True)\n\n router_data = {\n \"name\": self.scenario.generate_random_name.return_value,\n \"external_gateway_info\": {\"network_id\": net_id}\n }\n\n self._nc.create_router.assert_called_once_with({\"router\": router_data})\n self.assertEqual(\n {\"router\": self._nc.create_router.return_value[\"router\"]},\n result_router\n )\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"neutron.create_router\")\n\n def test_list_routers(self):\n routers = [mock.Mock()]\n self._nc.list_routers.return_value = {\"routers\": routers}\n self.assertEqual(routers, self.scenario._list_routers())\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_routers\")\n\n def test_list_agents(self):\n agents = [mock.Mock()]\n self._nc.list_agents.return_value = {\"agents\": agents}\n self.assertEqual(agents, self.scenario._list_agents())\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_agents\")\n\n def test_update_router(self):\n expected_router = {\n \"router\": {\n \"name\": self.scenario.generate_random_name.return_value,\n \"admin_state_up\": False\n }\n }\n self._nc.update_router.return_value = expected_router\n\n router = {\n \"router\": {\n \"id\": \"router-id\",\n \"name\": \"router-name\",\n \"admin_state_up\": True\n }\n }\n router_update_args = {\"name\": \"foo\", \"admin_state_up\": False}\n\n result_router = self.scenario._update_router(router,\n router_update_args)\n self._nc.update_router.assert_called_once_with(\n router[\"router\"][\"id\"], expected_router)\n self.assertEqual(expected_router, result_router)\n 
self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.update_router\")\n\n def test_delete_router(self):\n router_id = \"foo\"\n router = {\"router\": {\"id\": router_id}}\n self.scenario._delete_router(router)\n self._nc.delete_router.assert_called_once_with(router_id)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_router\")\n\n def test_remove_interface_router(self):\n subnet = {\"name\": \"subnet-name\", \"id\": \"subnet-id\"}\n router = {\"id\": 1}\n self.scenario._add_interface_router(subnet, router)\n self.scenario._remove_interface_router(subnet, router)\n self._nc.remove_interface_router.assert_called_once_with(\n router[\"id\"], {\"subnet_id\": subnet[\"id\"]})\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.remove_interface_router\")\n\n def test_add_gateway_router(self):\n ext_net = {\n \"network\": {\n \"name\": \"extnet-name\",\n \"id\": \"extnet-id\"\n }\n }\n router = {\n \"router\": {\n \"name\": \"router-name\",\n \"id\": \"router-id\"\n }\n }\n enable_snat = \"fake_snat\"\n self._nc.list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"ext-gw-mode\"}]}\n\n self.scenario._add_gateway_router(router, ext_net, enable_snat)\n self._nc.add_gateway_router.assert_called_once_with(\n router[\"router\"][\"id\"],\n {\"network_id\": ext_net[\"network\"][\"id\"],\n \"enable_snat\": enable_snat})\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.add_gateway_router\")\n\n def test_add_gateway_router_no_snat_update(self):\n ext_net = {\n \"network\": {\n \"name\": \"extnet-name\",\n \"id\": \"extnet-id\"\n }\n }\n router = {\n \"router\": {\n \"name\": \"router-name\",\n \"id\": \"router-id\"\n }\n }\n self._nc.list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"ext-gw-mode\"}]}\n\n self.scenario._add_gateway_router(router, ext_net)\n self._nc.add_gateway_router.assert_called_once_with(\n router[\"router\"][\"id\"],\n 
{\"network_id\": ext_net[\"network\"][\"id\"]}\n )\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.add_gateway_router\")\n\n def test_add_gateway_router_without_ext_gw_mode_extension(self):\n ext_net = {\n \"network\": {\n \"name\": \"extnet-name\",\n \"id\": \"extnet-id\"\n }\n }\n router = {\n \"router\": {\n \"name\": \"router-name\",\n \"id\": \"router-id\"\n }\n }\n self._nc.list_extensions.return_value = {\n \"extensions\": {}}\n\n self.scenario._add_gateway_router(router, ext_net, enable_snat=True)\n self._nc.add_gateway_router.assert_called_once_with(\n router[\"router\"][\"id\"], {\"network_id\": ext_net[\"network\"][\"id\"]})\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.add_gateway_router\")\n\n def test_remove_gateway_router(self):\n router = {\n \"router\": {\n \"name\": \"router-name\",\n \"id\": \"router-id\"\n }\n }\n self.scenario._remove_gateway_router(router)\n self._nc.remove_gateway_router.assert_called_once_with(\n router[\"router\"][\"id\"])\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.remove_gateway_router\")\n\n def test_create_port(self):\n net_id = \"network-id\"\n net = {\"network\": {\"id\": net_id}}\n expected_port_args = {\n \"port\": {\n \"network_id\": net_id,\n \"name\": self.scenario.generate_random_name.return_value\n }\n }\n\n # Defaults\n port_create_args = {}\n self.scenario._create_port(net, port_create_args)\n self._nc.create_port.assert_called_once_with(expected_port_args)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_port\")\n\n self._nc.create_port.reset_mock()\n\n # Custom options\n port_args = {\"admin_state_up\": True}\n expected_port_args[\"port\"].update(port_args)\n self.scenario._create_port(net, port_args)\n self._nc.create_port.assert_called_once_with(expected_port_args)\n\n def test_list_ports(self):\n ports = [{\"name\": \"port1\"}, {\"name\": \"port2\"}]\n 
self._nc.list_ports.return_value = {\"ports\": ports}\n self.assertEqual(ports, self.scenario._list_ports())\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_ports\")\n\n def test_show_port(self):\n expect_port = {\n \"port\": {\n \"id\": \"port-id\",\n \"name\": \"port-name\",\n \"admin_state_up\": True\n }\n }\n self._nc.show_port.return_value = expect_port\n self.assertEqual(expect_port, self.scenario._show_port(expect_port))\n self._nc.show_port.assert_called_once_with(\n expect_port[\"port\"][\"id\"])\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.show_port\")\n\n def test_update_port(self):\n expected_port = {\n \"port\": {\n \"admin_state_up\": False,\n \"name\": self.scenario.generate_random_name.return_value\n }\n }\n self._nc.update_port.return_value = expected_port\n\n port = {\n \"port\": {\n \"id\": \"port-id\",\n \"name\": \"port-name\",\n \"admin_state_up\": True\n }\n }\n port_update_args = {\"admin_state_up\": False}\n\n result_port = self.scenario._update_port(port, port_update_args)\n self._nc.update_port.assert_called_once_with(\n port[\"port\"][\"id\"], expected_port)\n self.assertEqual(expected_port, result_port)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.update_port\")\n\n def test_delete_port(self):\n network = self.scenario._create_network({})\n port = self.scenario._create_port(network, {})\n self.scenario._delete_port(port)\n\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_port\")\n\n @ddt.data(\n {\"context\": {\"tenant\": {\"networks\":\n [mock.MagicMock(), mock.MagicMock()]}}},\n {\"network_create_args\": {\"fakearg\": \"fake\"},\n \"context\": {\"tenant\": {\"networks\":\n [mock.MagicMock(), mock.MagicMock()]}}})\n @ddt.unpack\n @mock.patch(\"random.choice\", side_effect=lambda l: l[0])\n def test_get_or_create_network(self, mock_random_choice,\n network_create_args=None, context=None):\n 
self.scenario.context = context\n self.scenario._create_network = mock.Mock(\n return_value={\"network\": mock.Mock()})\n\n network = self.scenario._get_or_create_network(network_create_args)\n\n # ensure that the return value is the proper type either way\n self.assertIn(\"network\", network)\n\n if \"networks\" in context[\"tenant\"]:\n self.assertEqual(network,\n {\"network\": context[\"tenant\"][\"networks\"][0]})\n self.assertFalse(self.scenario._create_network.called)\n else:\n self.assertEqual(network,\n self.scenario._create_network.return_value)\n self.scenario._create_network.assert_called_once_with(\n network_create_args or {})\n\n def test_create_network_and_subnets(self):\n self._nc.create_network.return_value = {\"network\": {\"id\": \"fake-id\"}}\n self._nc.create_subnet.return_value = {\n \"subnet\": {\n \"name\": \"subnet-name\",\n \"id\": \"subnet-id\",\n \"enable_dhcp\": False\n }\n }\n\n network_create_args = {}\n subnet_create_args = {}\n subnets_per_network = 4\n\n # Default options\n self.scenario._create_network_and_subnets(\n network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnets_per_network=subnets_per_network)\n\n self._nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": self.random_name}}\n )\n self.assertEqual(\n [\n mock.call(\n {\"subnet\": {\"name\": self.random_name,\n \"network_id\": \"fake-id\",\n \"dns_nameservers\": mock.ANY,\n \"ip_version\": 4, \"cidr\": mock.ANY}\n }\n )\n ] * subnets_per_network,\n self._nc.create_subnet.call_args_list\n )\n\n self._nc.create_network.reset_mock()\n self._nc.create_subnet.reset_mock()\n\n # Custom options\n self.scenario._create_network_and_subnets(\n network_create_args=network_create_args,\n subnet_create_args={\"allocation_pools\": [\"x\"]},\n subnet_cidr_start=\"10.10.10.0/24\",\n subnets_per_network=subnets_per_network)\n\n self._nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": self.random_name}}\n )\n 
self.assertEqual(\n [\n mock.call(\n {\"subnet\": {\"name\": self.random_name,\n \"network_id\": \"fake-id\",\n \"allocation_pools\": [\"x\"],\n \"dns_nameservers\": mock.ANY,\n \"ip_version\": 4, \"cidr\": mock.ANY}\n }\n )\n ] * subnets_per_network,\n self._nc.create_subnet.call_args_list\n )\n\n def test_list_floating_ips(self):\n fips_list = [{\"id\": \"floating-ip-id\"}]\n fips_dict = {\"floatingips\": fips_list}\n self._nc.list_floatingips.return_value = fips_dict\n self.assertEqual(self.scenario._list_floating_ips(),\n self._nc.list_floatingips.return_value)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_floating_ips\")\n\n def test_delete_floating_ip(self):\n fip = {\"floatingip\": {\"id\": \"fake-id\"}}\n self.scenario._delete_floating_ip(fip[\"floatingip\"])\n self._nc.delete_floatingip.assert_called_once_with(\n fip[\"floatingip\"][\"id\"])\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_floating_ip\")\n\n def test_associate_floating_ip(self):\n fip = {\"id\": \"fip-id\"}\n port = {\"id\": \"port-id\"}\n self.scenario._associate_floating_ip(fip, port)\n self._nc.update_floatingip.assert_called_once_with(\n \"fip-id\", {\"floatingip\": {\"port_id\": \"port-id\"}})\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.associate_floating_ip\")\n\n def test_dissociate_floating_ip(self):\n fip = {\"id\": \"fip-id\"}\n self.scenario._dissociate_floating_ip(fip)\n self._nc.update_floatingip.assert_called_once_with(\n \"fip-id\", {\"floatingip\": {\"port_id\": None}})\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.dissociate_floating_ip\")\n\n @ddt.data(\n {},\n {\"router_create_args\": {\"admin_state_up\": False}},\n {\"network_create_args\": {\"router:external\": True},\n \"subnet_create_args\": {\"allocation_pools\": [\"x\"]},\n \"subnets_per_network\": 3,\n \"router_create_args\": {\"admin_state_up\": False}})\n @ddt.unpack\n def 
test_create_network_structure(self, network_create_args=None,\n subnet_create_args=None,\n subnet_cidr_start=None,\n subnets_per_network=1,\n router_create_args=None):\n network_id = \"net-id\"\n network = {\"network\": {\"id\": network_id}}\n\n router_create_args = router_create_args or {}\n\n subnets = []\n subnet_create_calls = []\n routers = []\n router_create_calls = []\n for i in range(subnets_per_network):\n subnets.append({\"subnet\": mock.MagicMock()})\n routers.append({\"router\": mock.MagicMock()})\n subnet_create_calls.append(\n mock.call({\n \"subnet\": {\n \"network_id\": network_id,\n \"name\": self.random_name,\n \"dns_nameservers\": mock.ANY,\n \"ip_version\": 4,\n \"cidr\": mock.ANY,\n **(subnet_create_args or {})\n }\n }))\n router_create_calls.append(\n mock.call({\n \"router\": {\n \"name\": self.random_name,\n **(router_create_args or {})\n }\n }))\n\n self._nc.create_network.return_value = network\n self._nc.create_subnet.side_effect = subnets\n self._nc.create_router.side_effect = routers\n\n actual = self.scenario._create_network_structure(network_create_args,\n subnet_create_args,\n subnet_cidr_start,\n subnets_per_network,\n router_create_args)\n self.assertEqual((network, subnets, routers), actual)\n network_create_args = network_create_args or {}\n network_create_args[\"name\"] = self.random_name\n self._nc.create_network.assert_called_once_with(\n {\"network\": network_create_args})\n self.assertEqual(\n subnet_create_calls, self._nc.create_subnet.call_args_list\n )\n self.assertEqual(\n router_create_calls, self._nc.create_router.call_args_list\n )\n\n add_iface_calls = [\n mock.call(\n routers[i][\"router\"][\"id\"],\n {\"subnet_id\": subnets[i][\"subnet\"][\"id\"]}\n )\n for i in range(subnets_per_network or 1)]\n self.assertEqual(\n add_iface_calls,\n self._nc.add_interface_router.call_args_list\n )\n\n def test_delete_v1_pool(self):\n pool = {\"pool\": {\"id\": \"fake-id\"}}\n self.scenario._delete_v1_pool(pool[\"pool\"])\n 
self.clients(\"neutron\").delete_pool.assert_called_once_with(\n pool[\"pool\"][\"id\"])\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_pool\")\n\n def test_update_pool(self):\n expected_pool = {\n \"pool\": {\n \"name\": self.scenario.generate_random_name.return_value,\n \"admin_state_up\": False,\n \"fakearg\": \"fake\"\n }\n }\n self.clients(\"neutron\").update_pool.return_value = expected_pool\n\n pool = {\"pool\": {\"name\": \"pool-name\", \"id\": \"pool-id\"}}\n pool_update_args = {\"name\": \"foo\",\n \"admin_state_up\": False,\n \"fakearg\": \"fake\"}\n\n result_pool = self.scenario._update_v1_pool(pool, **pool_update_args)\n self.assertEqual(expected_pool, result_pool)\n self.clients(\"neutron\").update_pool.assert_called_once_with(\n pool[\"pool\"][\"id\"], expected_pool)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.update_pool\")\n\n def test_list_v1_pools(self):\n pools_list = []\n pools_dict = {\"pools\": pools_list}\n self.clients(\"neutron\").list_pools.return_value = pools_dict\n return_pools_dict = self.scenario._list_v1_pools()\n self.assertEqual(pools_dict, return_pools_dict)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_pools\")\n\n def test_list_v1_vips(self):\n vips_list = []\n vips_dict = {\"vips\": vips_list}\n self.clients(\"neutron\").list_vips.return_value = vips_dict\n return_vips_dict = self.scenario._list_v1_vips()\n self.assertEqual(vips_dict, return_vips_dict)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_vips\")\n\n def test_delete_v1_vip(self):\n vip = {\"vip\": {\"id\": \"fake-id\"}}\n self.scenario._delete_v1_vip(vip[\"vip\"])\n self.clients(\"neutron\").delete_vip.assert_called_once_with(\n vip[\"vip\"][\"id\"])\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_vip\")\n\n def test_update_v1_vip(self):\n expected_vip = {\n \"vip\": {\n \"name\": 
self.scenario.generate_random_name.return_value,\n \"admin_state_up\": False\n }\n }\n self.clients(\"neutron\").update_vip.return_value = expected_vip\n\n vip = {\"vip\": {\"name\": \"vip-name\", \"id\": \"vip-id\"}}\n vip_update_args = {\"name\": \"foo\", \"admin_state_up\": False}\n\n result_vip = self.scenario._update_v1_vip(vip, **vip_update_args)\n self.assertEqual(expected_vip, result_vip)\n self.clients(\"neutron\").update_vip.assert_called_once_with(\n vip[\"vip\"][\"id\"], expected_vip)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.update_vip\")\n\n def test_create_security_group(self):\n security_group_create_args = {\"description\": \"Fake security group\"}\n expected_security_group = {\n \"security_group\": {\n \"id\": \"fake-id\",\n \"name\": self.scenario.generate_random_name.return_value,\n \"description\": \"Fake security group\"\n }\n }\n self._nc.create_security_group.return_value = expected_security_group\n\n security_group_data = {\n \"security_group\":\n {\"name\": \"random_name\",\n \"description\": \"Fake security group\"}\n }\n resultant_security_group = self.scenario._create_security_group(\n **security_group_create_args)\n self.assertEqual(expected_security_group, resultant_security_group)\n self._nc.create_security_group.assert_called_once_with(\n security_group_data)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_security_group\")\n\n def test_list_security_groups(self):\n security_groups_list = [{\"id\": \"security-group-id\"}]\n security_groups_dict = {\"security_groups\": security_groups_list}\n self._nc.list_security_groups = mock.Mock(\n return_value=security_groups_dict)\n self.assertEqual(\n self.scenario._list_security_groups(),\n self._nc.list_security_groups.return_value)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_security_groups\")\n\n def test_show_security_group(self):\n security_group = {\"security_group\": {\"id\": 
\"fake-id\"}}\n result = self.scenario._show_security_group(security_group)\n self.assertEqual(\n {\"security_group\":\n self._nc.show_security_group.return_value[\"security_group\"]},\n result\n )\n self._nc.show_security_group.assert_called_once_with(\n security_group[\"security_group\"][\"id\"])\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.show_security_group\")\n\n def test_delete_security_group(self):\n security_group = {\"security_group\": {\"id\": \"fake-id\"}}\n self.scenario._delete_security_group(security_group)\n self._nc.delete_security_group.assert_called_once_with(\n security_group[\"security_group\"][\"id\"])\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_security_group\")\n\n def test_update_security_group(self):\n security_group = {\n \"security_group\": {\n \"id\": \"security-group-id\",\n \"description\": \"Not updated\"\n }\n }\n expected_security_group = {\n \"security_group\": {\n \"id\": \"security-group-id\",\n \"name\": self.scenario.generate_random_name.return_value,\n \"description\": \"Updated\"\n }\n }\n\n self._nc.update_security_group.return_value = expected_security_group\n\n result_security_group = self.scenario._update_security_group(\n security_group, description=\"Updated\")\n\n self._nc.update_security_group.assert_called_once_with(\n security_group[\"security_group\"][\"id\"],\n {\"security_group\": {\n \"description\": \"Updated\",\n \"name\": self.scenario.generate_random_name.return_value}}\n )\n self.assertEqual(expected_security_group, result_security_group)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.update_security_group\")\n\n def test_create_security_group_rule(self):\n security_group_rule_args = {\"description\": \"Fake Rule\"}\n expected_rules = {\n \"security_group_rule\": {\n \"id\": \"fake-id\",\n \"security_group_id\": \"security-group-id\",\n \"direction\": \"ingress\",\n \"protocol\": \"tcp\",\n 
\"description\": \"Fake Rule\"\n }\n }\n self._nc.create_security_group_rule.return_value = expected_rules\n\n security_group_rule_data = {\n \"security_group_rule\":\n {\"security_group_id\": \"security-group-id\",\n \"direction\": \"ingress\",\n \"protocol\": \"tcp\",\n \"description\": \"Fake Rule\"}\n }\n result_security_group_rule = self.scenario._create_security_group_rule(\n \"security-group-id\", **security_group_rule_args)\n self.assertEqual(expected_rules,\n result_security_group_rule)\n self._nc.create_security_group_rule.assert_called_once_with(\n security_group_rule_data)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_security_group_rule\")\n\n def test_list_security_group_rules(self):\n security_group_rules_list = [{\"id\": \"security-group-rule-id\"}]\n security_group_rules_dict = {\n \"security_group_rules\": security_group_rules_list}\n\n self._nc.list_security_group_rules = mock.Mock(\n return_value=security_group_rules_dict)\n self.assertEqual(\n self.scenario._list_security_group_rules(),\n self._nc.list_security_group_rules.return_value)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_security_group_rules\")\n\n def test_show_security_group_rule(self):\n return_rule = self.scenario._show_security_group_rule(1)\n expected = self._nc.show_security_group_rule.return_value\n expected = {\"security_group_rule\": expected[\"security_group_rule\"]}\n self.assertEqual(expected, return_rule)\n\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.show_security_group_rule\")\n\n def test_delete_security_group_rule(self):\n self.scenario._delete_security_group_rule(1)\n self._nc.delete_security_group_rule.assert_called_once_with(1)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_security_group_rule\")\n\n @ddt.data(\n {\"networks\": [{\"subnets\": \"subnet-id\"}]},\n {\"pool_create_args\": None, \"networks\": [{\"subnets\": 
[\"subnet-id\"]}]},\n {\"pool_create_args\": {}, \"networks\": [{\"subnets\": [\"subnet-id\"]}]},\n {\"pool_create_args\": {\"name\": \"given-name\"},\n \"networks\": [{\"subnets\": [\"subnet-id\"]}]},\n )\n @ddt.unpack\n def test__create_v1_pools(self, networks, pool_create_args=None):\n pool_create_args = pool_create_args or {}\n pool = {\"pool\": {\"id\": \"pool-id\"}}\n self.scenario._create_lb_pool = mock.Mock(return_value=pool)\n resultant_pools = self.scenario._create_v1_pools(\n networks=networks, **pool_create_args)\n if networks:\n subnets = []\n [subnets.extend(net[\"subnets\"]) for net in networks]\n self.scenario._create_lb_pool.assert_has_calls(\n [mock.call(subnet,\n **pool_create_args) for subnet in subnets])\n self.assertEqual([pool] * len(subnets), resultant_pools)\n\n @ddt.data(\n {\"subnet_id\": \"foo-id\"},\n {\"pool_create_args\": None, \"subnet_id\": \"foo-id\"},\n {\"pool_create_args\": {}, \"subnet_id\": \"foo-id\"},\n {\"pool_create_args\": {\"name\": \"given-name\"},\n \"subnet_id\": \"foo-id\"},\n {\"subnet_id\": \"foo-id\"}\n )\n @ddt.unpack\n def test__create_lb_pool(self, subnet_id=None,\n pool_create_args=None):\n pool = {\"pool\": {\"id\": \"pool-id\"}}\n pool_create_args = pool_create_args or {}\n if pool_create_args.get(\"name\") is None:\n self.generate_random_name = mock.Mock(return_value=\"random_name\")\n self.clients(\"neutron\").create_pool.return_value = pool\n args = {\"lb_method\": \"ROUND_ROBIN\", \"protocol\": \"HTTP\",\n \"name\": \"random_name\", \"subnet_id\": subnet_id}\n args.update(pool_create_args)\n expected_pool_data = {\"pool\": args}\n resultant_pool = self.scenario._create_lb_pool(\n subnet_id=subnet_id,\n **pool_create_args)\n self.assertEqual(pool, resultant_pool)\n self.clients(\"neutron\").create_pool.assert_called_once_with(\n expected_pool_data)\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"neutron.create_pool\")\n\n @ddt.data(\n {},\n {\"vip_create_args\": {}},\n 
{\"vip_create_args\": {\"name\": \"given-name\"}},\n )\n @ddt.unpack\n def test__create_v1_vip(self, vip_create_args=None):\n vip = {\"vip\": {\"id\": \"vip-id\"}}\n pool = {\"pool\": {\"id\": \"pool-id\", \"subnet_id\": \"subnet-id\"}}\n vip_create_args = vip_create_args or {}\n if vip_create_args.get(\"name\") is None:\n self.scenario.generate_random_name = mock.Mock(\n return_value=\"random_name\")\n self.clients(\"neutron\").create_vip.return_value = vip\n args = {\"protocol_port\": 80, \"protocol\": \"HTTP\", \"name\": \"random_name\",\n \"subnet_id\": pool[\"pool\"][\"subnet_id\"],\n \"pool_id\": pool[\"pool\"][\"id\"]}\n args.update(vip_create_args)\n expected_vip_data = {\"vip\": args}\n resultant_vip = self.scenario._create_v1_vip(pool, **vip_create_args)\n self.assertEqual(vip, resultant_vip)\n self.clients(\"neutron\").create_vip.assert_called_once_with(\n expected_vip_data)\n\n @ddt.data(\n {\"floating_ip_args\": {}},\n {\"floating_ip_args\": {\"floating_ip_address\": \"1.0.0.1\"}},\n )\n @ddt.unpack\n def test__create_floating_ip(self, floating_ip_args):\n floating_network = \"floating\"\n fip = {\"floatingip\": {\"id\": \"fip-id\"}}\n network_id = \"net-id\"\n self._nc.create_floatingip.return_value = fip\n self._nc.list_networks.return_value = {\n \"networks\": [\n {\"id\": \"id-1\", \"name\": \"xxx\",\n \"router:external\": True},\n {\"id\": network_id, \"name\": floating_network,\n \"router:external\": True}\n ]\n }\n expected_fip_data = {\n \"floatingip\": {\n \"floating_network_id\": network_id,\n \"description\": \"random_name\",\n **floating_ip_args\n }\n }\n\n resultant_fip = self.scenario._create_floatingip(\n floating_network, **floating_ip_args)\n\n self.assertEqual(fip, resultant_fip)\n self._nc.create_floatingip.assert_called_once_with(\n expected_fip_data)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_floating_ip\")\n\n @mock.patch(\"%s.neutron.LOG.info\" % NETWORK_SERVICE)\n def 
test__create_floating_ip_in_pre_newton_openstack(self, mock_log_info):\n from neutronclient.common import exceptions as n_exceptions\n\n floating_network = \"floating\"\n fip = {\"floatingip\": {\"id\": \"fip-id\"}}\n network_id = \"net-id\"\n self._nc.create_floatingip.return_value = fip\n self._nc.list_networks.return_value = {\n \"networks\": [\n {\"id\": \"id-1\", \"name\": \"xxx\",\n \"router:external\": True},\n {\"id\": network_id, \"name\": floating_network,\n \"router:external\": True}\n ]\n }\n\n e = n_exceptions.BadRequest(\"Unrecognized attribute(s) 'description'\")\n self._nc.create_floatingip.side_effect = e\n\n a_e = self.assertRaises(n_exceptions.BadRequest,\n self.scenario._create_floatingip,\n floating_network)\n\n self.assertEqual(e, a_e)\n self.assertTrue(mock_log_info.called)\n\n expected_fip_data = {\"floatingip\": {\"floating_network_id\": network_id,\n \"description\": \"random_name\"}}\n self._nc.create_floatingip.assert_called_once_with(expected_fip_data)\n self._nc.list_networks.assert_called_once_with()\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_floating_ip\")\n\n @ddt.data(\n {},\n {\"healthmonitor_create_args\": {}},\n {\"healthmonitor_create_args\": {\"type\": \"TCP\"}},\n )\n @ddt.unpack\n def test__create_v1_healthmonitor(self,\n healthmonitor_create_args=None):\n hm = {\"health_monitor\": {\"id\": \"hm-id\"}}\n healthmonitor_create_args = healthmonitor_create_args or {}\n self.clients(\"neutron\").create_health_monitor.return_value = hm\n args = {\"type\": \"PING\", \"delay\": 20,\n \"timeout\": 10, \"max_retries\": 3}\n args.update(healthmonitor_create_args)\n expected_hm_data = {\"health_monitor\": args}\n resultant_hm = self.scenario._create_v1_healthmonitor(\n **healthmonitor_create_args)\n self.assertEqual(hm, resultant_hm)\n self.clients(\"neutron\").create_health_monitor.assert_called_once_with(\n expected_hm_data)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n 
\"neutron.create_healthmonitor\")\n\n def test_list_v1_healthmonitors(self):\n hm_list = []\n hm_dict = {\"health_monitors\": hm_list}\n self.clients(\"neutron\").list_health_monitors.return_value = hm_dict\n return_hm_dict = self.scenario._list_v1_healthmonitors()\n self.assertEqual(hm_dict, return_hm_dict)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_healthmonitors\")\n\n def test_delete_v1_healthmonitor(self):\n healthmonitor = {\"health_monitor\": {\"id\": \"fake-id\"}}\n self.scenario._delete_v1_healthmonitor(healthmonitor[\"health_monitor\"])\n self.clients(\"neutron\").delete_health_monitor.assert_called_once_with(\n healthmonitor[\"health_monitor\"][\"id\"])\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_healthmonitor\")\n\n def test_update_healthmonitor(self):\n expected_hm = {\"health_monitor\": {\"admin_state_up\": False}}\n mock_update = self.clients(\"neutron\").update_health_monitor\n mock_update.return_value = expected_hm\n hm = {\"health_monitor\": {\"id\": \"pool-id\"}}\n healthmonitor_update_args = {\"admin_state_up\": False}\n result_hm = self.scenario._update_v1_healthmonitor(\n hm, **healthmonitor_update_args)\n self.assertEqual(expected_hm, result_hm)\n mock_update.assert_called_once_with(\n hm[\"health_monitor\"][\"id\"], expected_hm)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.update_healthmonitor\")\n\n def test_update_loadbalancer_resource(self):\n lb = {\"id\": \"1\", \"provisioning_status\": \"READY\"}\n new_lb = {\"id\": \"1\", \"provisioning_status\": \"ACTIVE\"}\n self.clients(\"neutron\").show_loadbalancer.return_value = {\n \"loadbalancer\": new_lb}\n\n return_lb = self.scenario.update_loadbalancer_resource(lb)\n\n self.clients(\"neutron\").show_loadbalancer.assert_called_once_with(\n lb[\"id\"])\n self.assertEqual(new_lb, return_lb)\n\n def test_update_loadbalancer_resource_not_found(self):\n from neutronclient.common import 
exceptions as n_exceptions\n lb = {\"id\": \"1\", \"provisioning_status\": \"READY\"}\n self.clients(\"neutron\").show_loadbalancer.side_effect = (\n n_exceptions.NotFound)\n\n self.assertRaises(exceptions.GetResourceNotFound,\n self.scenario.update_loadbalancer_resource,\n lb)\n self.clients(\"neutron\").show_loadbalancer.assert_called_once_with(\n lb[\"id\"])\n\n def test_update_loadbalancer_resource_failure(self):\n from neutronclient.common import exceptions as n_exceptions\n lb = {\"id\": \"1\", \"provisioning_status\": \"READY\"}\n self.clients(\"neutron\").show_loadbalancer.side_effect = (\n n_exceptions.Forbidden)\n\n self.assertRaises(exceptions.GetResourceFailure,\n self.scenario.update_loadbalancer_resource,\n lb)\n self.clients(\"neutron\").show_loadbalancer.assert_called_once_with(\n lb[\"id\"])\n\n def test__create_lbaasv2_loadbalancer(self):\n neutronclient = self.clients(\"neutron\")\n create_args = {\"name\": \"s_rally\", \"vip_subnet_id\": \"1\",\n \"fake\": \"fake\"}\n new_lb = {\"id\": \"1\", \"provisioning_status\": \"ACTIVE\"}\n\n self.scenario.generate_random_name = mock.Mock(\n return_value=\"s_rally\")\n self.mock_wait_for_status.mock.return_value = new_lb\n\n return_lb = self.scenario._create_lbaasv2_loadbalancer(\n \"1\", fake=\"fake\")\n\n neutronclient.create_loadbalancer.assert_called_once_with(\n {\"loadbalancer\": create_args})\n self.assertEqual(new_lb, return_lb)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_lbaasv2_loadbalancer\")\n\n def test__list_lbaasv2_loadbalancers(self):\n value = {\"loadbalancer\": [{\"id\": \"1\", \"name\": \"s_rally\"}]}\n self.clients(\"neutron\").list_loadbalancers.return_value = value\n\n return_value = self.scenario._list_lbaasv2_loadbalancers(\n True, fake=\"fake\")\n\n (self.clients(\"neutron\").list_loadbalancers\n .assert_called_once_with(True, fake=\"fake\"))\n self.assertEqual(value, return_value)\n 
self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_lbaasv2_loadbalancers\")\n\n def test__create_bgpvpn(self, atomic_action=True):\n bv = {\"bgpvpn\": {\"id\": \"bgpvpn-id\"}}\n self.admin_clients(\"neutron\").create_bgpvpn.return_value = bv\n self.scenario.generate_random_name = mock.Mock(\n return_value=\"random_name\")\n expected_bv_data = {\"bgpvpn\": {\"name\": \"random_name\"}}\n resultant_bv = self.scenario._create_bgpvpn()\n self.assertEqual(bv, resultant_bv)\n self.admin_clients(\"neutron\").create_bgpvpn.assert_called_once_with(\n expected_bv_data)\n if atomic_action:\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_bgpvpn\")\n\n def test_delete_bgpvpn(self):\n bgpvpn_create_args = {}\n bgpvpn = self.scenario._create_bgpvpn(**bgpvpn_create_args)\n self.scenario._delete_bgpvpn(bgpvpn)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_bgpvpn\")\n\n def test__list_bgpvpns(self):\n bgpvpns_list = []\n bgpvpns_dict = {\"bgpvpns\": bgpvpns_list}\n self.admin_clients(\"neutron\").list_bgpvpns.return_value = bgpvpns_dict\n return_bgpvpns_list = self.scenario._list_bgpvpns()\n self.assertEqual(bgpvpns_list, return_bgpvpns_list)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_bgpvpns\")\n\n @ddt.data(\n {},\n {\"bgpvpn_update_args\": {\"update_name\": True}},\n {\"bgpvpn_update_args\": {\"update_name\": False}},\n )\n @ddt.unpack\n def test__update_bgpvpn(self, bgpvpn_update_args=None):\n expected_bgpvpn = {\"bgpvpn\": {}}\n bgpvpn_update_data = bgpvpn_update_args or {}\n if bgpvpn_update_data.get(\"update_name\"):\n expected_bgpvpn = {\"bgpvpn\": {\"name\": \"updated_name\"}}\n self.admin_clients(\n \"neutron\").update_bgpvpn.return_value = expected_bgpvpn\n self.scenario.generate_random_name = mock.Mock(\n return_value=\"updated_name\")\n bgpvpn = {\"bgpvpn\": {\"name\": \"bgpvpn-name\", \"id\": \"bgpvpn-id\"}}\n result_bgpvpn = 
self.scenario._update_bgpvpn(bgpvpn,\n **bgpvpn_update_data)\n self.admin_clients(\"neutron\").update_bgpvpn.assert_called_once_with(\n bgpvpn[\"bgpvpn\"][\"id\"], expected_bgpvpn)\n self.assertEqual(expected_bgpvpn, result_bgpvpn)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.update_bgpvpn\")\n\n def test__create_bgpvpn_network_assoc(self):\n network_id = \"network_id\"\n bgpvpn_id = \"bgpvpn_id\"\n value = {\"network_association\": {\n \"network_id\": network_id,\n \"id\": bgpvpn_id}}\n self.clients(\n \"neutron\").create_bgpvpn_network_assoc.return_value = value\n network = {\"id\": network_id}\n bgpvpn = {\"bgpvpn\": {\"id\": bgpvpn_id}}\n return_value = self.scenario._create_bgpvpn_network_assoc(bgpvpn,\n network)\n netassoc = {\"network_id\": network[\"id\"]}\n self.clients(\n \"neutron\").create_bgpvpn_network_assoc.assert_called_once_with(\n bgpvpn_id, {\"network_association\": netassoc})\n self.assertEqual(return_value, value)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_bgpvpn_network_assoc\")\n\n def test__create_router_network_assoc(self):\n router_id = \"router_id\"\n bgpvpn_id = \"bgpvpn_id\"\n value = {\"router_association\": {\n \"router_id\": router_id,\n \"id\": \"asso_id\"}}\n self.clients(\"neutron\").create_bgpvpn_router_assoc.return_value = value\n router = {\"id\": router_id}\n bgpvpn = {\"bgpvpn\": {\"id\": bgpvpn_id}}\n return_value = self.scenario._create_bgpvpn_router_assoc(bgpvpn,\n router)\n router_assoc = {\"router_id\": router[\"id\"]}\n self.clients(\n \"neutron\").create_bgpvpn_router_assoc.assert_called_once_with(\n bgpvpn_id, {\"router_association\": router_assoc})\n self.assertEqual(return_value, value)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_bgpvpn_router_assoc\")\n\n def test__delete_bgpvpn_network_assoc(self):\n bgpvpn_assoc_args = {}\n asso_id = \"aaaa-bbbb\"\n network_assoc = {\"network_association\": {\"id\": 
asso_id}}\n bgpvpn = self.scenario._create_bgpvpn(**bgpvpn_assoc_args)\n self.scenario._delete_bgpvpn_network_assoc(bgpvpn, network_assoc)\n self.clients(\n \"neutron\").delete_bgpvpn_network_assoc.assert_called_once_with(\n bgpvpn[\"bgpvpn\"][\"id\"], asso_id)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_bgpvpn_network_assoc\")\n\n def test__delete_bgpvpn_router_assoc(self):\n bgpvpn_assoc_args = {}\n asso_id = \"aaaa-bbbb\"\n router_assoc = {\"router_association\": {\"id\": asso_id}}\n bgpvpn = self.scenario._create_bgpvpn(**bgpvpn_assoc_args)\n self.scenario._delete_bgpvpn_router_assoc(bgpvpn, router_assoc)\n self.clients(\n \"neutron\").delete_bgpvpn_router_assoc.assert_called_once_with(\n bgpvpn[\"bgpvpn\"][\"id\"], asso_id)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_bgpvpn_router_assoc\")\n\n def test__list_bgpvpn_network_assocs(self):\n value = {\"network_associations\": []}\n bgpvpn_id = \"bgpvpn-id\"\n bgpvpn = {\"bgpvpn\": {\"id\": bgpvpn_id}}\n self.clients(\"neutron\").list_bgpvpn_network_assocs.return_value = value\n return_asso_list = self.scenario._list_bgpvpn_network_assocs(bgpvpn)\n self.clients(\n \"neutron\").list_bgpvpn_network_assocs.assert_called_once_with(\n bgpvpn_id)\n self.assertEqual(value, return_asso_list)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_bgpvpn_network_assocs\")\n\n def test__list_bgpvpn_router_assocs(self):\n value = {\"router_associations\": []}\n bgpvpn_id = \"bgpvpn-id\"\n bgpvpn = {\"bgpvpn\": {\"id\": bgpvpn_id}}\n self.clients(\"neutron\").list_bgpvpn_router_assocs.return_value = value\n return_asso_list = self.scenario._list_bgpvpn_router_assocs(bgpvpn)\n self.clients(\n \"neutron\").list_bgpvpn_router_assocs.assert_called_once_with(\n bgpvpn_id)\n self.assertEqual(value, return_asso_list)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_bgpvpn_router_assocs\")\n\n def 
test__delete_trunk(self):\n trunk_port = {\"trunk\": {\"port_id\": \"fake-id\"}}\n self.scenario._delete_trunk(trunk_port[\"trunk\"])\n self.clients(\"neutron\").delete_trunk.assert_called_once_with(\n trunk_port[\"trunk\"][\"port_id\"])\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.delete_trunk\")\n\n def test__create_trunk(self):\n port_id = \"port-id\"\n subport_payload = [{\"port_id\": \"subport-port-id\",\n \"segmentation_type\": \"vlan\",\n \"segmentation_id\": 1}]\n trunk_payload = {\n \"port_id\": port_id,\n \"name\": self.scenario.generate_random_name.return_value,\n \"sub_ports\": subport_payload\n }\n expected_trunk_args = {\n \"trunk\": trunk_payload\n }\n\n self.scenario._create_trunk(trunk_payload)\n self.clients(\"neutron\").create_trunk.assert_called_once_with(\n expected_trunk_args)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.create_trunk\")\n\n def test__list_trunks(self):\n trunks = [{\"name\": \"trunk1\"}, {\"name\": \"trunk2\"}]\n self.clients(\"neutron\").list_trunks.return_value = {\"trunks\": trunks}\n self.assertEqual(trunks, self.scenario._list_trunks())\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_trunks\")\n\n def test__list_ports_by_device_id(self):\n device_id = \"device-id\"\n self.scenario._list_ports_by_device_id(device_id)\n self._nc.list_ports.assert_called_once_with(device_id=device_id)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_ports\")\n\n def test__list_subports_by_trunk(self):\n trunk_id = \"trunk-id\"\n self.scenario._list_subports_by_trunk(trunk_id)\n self.clients(\"neutron\").trunk_get_subports.assert_called_once_with(\n trunk_id)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron.list_subports_by_trunk\")\n\n def test__add_subports_to_trunk(self):\n trunk_id = \"trunk-id\"\n port_id = \"port-id\"\n subport_payload = [{\"port_id\": port_id}]\n 
expected_subport_payload = {\n \"sub_ports\": subport_payload\n }\n self.scenario._add_subports_to_trunk(trunk_id, subport_payload)\n self.clients(\"neutron\").trunk_add_subports.assert_called_once_with(\n trunk_id, expected_subport_payload)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"neutron._add_subports_to_trunk\")\n\n\nclass NeutronScenarioFunctionalTestCase(test.ScenarioTestCase):\n\n @mock.patch(\"%s.generate_cidr\" % NET_UTILS)\n def test_functional_create_network_and_subnets(self, mock_generate_cidr):\n clients = mock.MagicMock()\n scenario = utils.NeutronScenario(context=self.context,\n clients=clients)\n network_create_args = {}\n subnet_create_args = {}\n subnets_per_network = 5\n subnet_cidr_start = \"1.1.1.0/24\"\n\n cidrs = [(4, \"1.1.%d.0/24\" % i) for i in range(subnets_per_network)]\n cidrs_ = iter(cidrs)\n mock_generate_cidr.side_effect = lambda **kw: next(cidrs_)\n\n scenario._create_network_and_subnets(\n network_create_args,\n subnet_create_args,\n subnets_per_network,\n subnet_cidr_start)\n\n # This checks both data (cidrs seem to be enough) and subnets number\n nc = clients.neutron.return_value\n result_cidrs = sorted(\n (4, arg[0][\"subnet\"][\"cidr\"])\n for arg, _kwarg in nc.create_subnet.call_args_list\n )\n self.assertEqual(cidrs, result_cidrs)\n" }, { "alpha_fraction": 0.47637245059013367, "alphanum_fraction": 0.478028267621994, "avg_line_length": 38.157108306884766, "blob_id": "54e0dddf59e670f99adf124488a0a1c0a9e32a9e", "content_id": "4285cf684c3eff193f75d84a5ed7b0ce9f12c1af", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15702, "license_type": "permissive", "max_line_length": 79, "num_lines": 401, "path": "/rally_openstack/environment/platforms/existing.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# 
not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport json\nimport traceback\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.env import platform\nfrom rally_openstack.common import osclients\n\n\nLOG = logging.getLogger(__name__)\n\nCONF = cfg.CONF\n\n\[email protected](name=\"existing\", platform=\"openstack\")\nclass OpenStack(platform.Platform):\n \"\"\"Default plugin for OpenStack platform\n\n It may be used to test any existing OpenStack API compatible cloud.\n \"\"\"\n VERSION_SCHEMA = {\n \"anyOf\": [\n {\"type\": \"string\", \"description\": \"a string-like version.\"},\n {\"type\": \"number\", \"description\": \"a number-like version.\"}\n ]\n }\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"definitions\": {\n \"user\": {\n \"type\": \"object\",\n \"properties\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"project_name\": {\"type\": \"string\"},\n \"tenant_name\": {\"type\": \"string\"},\n \"domain_name\": {\"type\": \"string\"},\n \"user_domain_name\": {\"type\": \"string\"},\n \"project_domain_name\": {\"type\": \"string\"},\n },\n \"additionalProperties\": False,\n \"anyOf\": [\n {\n \"description\": \"Keystone V2.0 (old-style)\",\n \"required\": [\"username\", \"password\", \"tenant_name\"]\n },\n {\n \"description\": \"Keystone V3.0 (modern terms)\",\n \"required\": [\"username\", \"password\", \"project_name\"]\n }\n ]\n },\n \"api_info\": {\n \"type\": \"object\",\n \"patternProperties\": {\n \"^(?!neutron)([a-z]+)$\": {\n \"type\": 
\"object\",\n \"properties\": {\n \"version\": VERSION_SCHEMA,\n \"service_type\": {\"type\": \"string\"}\n },\n \"minProperties\": 1,\n \"additionalProperties\": False\n },\n \"^neutron$\": {\n \"type\": \"object\",\n \"properties\": {\n \"version\": VERSION_SCHEMA,\n \"service_type\": {\"type\": \"string\"},\n \"pre_newton\": {\n \"type\": \"boolean\",\n \"description\": \"Whether Neutron API is older \"\n \"then OpenStack Newton or not. \"\n \"Based on this option, some \"\n \"external fields for \"\n \"identifying resources can be \"\n \"applied.\"\n }\n },\n \"minProperties\": 1,\n \"additionalProperties\": False\n }\n },\n \"additionalProperties\": False\n }\n },\n \"properties\": {\n \"auth_url\": {\"type\": \"string\"},\n \"region_name\": {\"type\": \"string\"},\n \"endpoint\": {\"type\": [\"string\", \"null\"]},\n \"endpoint_type\": {\"enum\": [\"public\", \"internal\", \"admin\", None]},\n \"https_insecure\": {\"type\": \"boolean\"},\n \"https_cacert\": {\"type\": \"string\"},\n \"https_cert\": {\"type\": \"string\"},\n \"https_key\": {\"type\": \"string\"},\n \"profiler_hmac_key\": {\"type\": [\"string\", \"null\"]},\n \"profiler_conn_str\": {\"type\": [\"string\", \"null\"]},\n \"admin\": {\"$ref\": \"#/definitions/user\"},\n \"users\": {\n \"type\": \"array\",\n \"items\": {\"$ref\": \"#/definitions/user\"},\n \"minItems\": 1\n },\n \"api_info\": {\"$ref\": \"#/definitions/api_info\"}\n },\n \"anyOf\": [\n {\n \"description\": \"The case when the admin is specified and the \"\n \"users can be created via 'users@openstack' \"\n \"context or 'existing_users' will be used.\",\n \"required\": [\"admin\", \"auth_url\"]},\n {\n \"description\": \"The case when the only existing users are \"\n \"specified.\",\n \"required\": [\"users\", \"auth_url\"]}\n ],\n \"additionalProperties\": False\n }\n\n def create(self):\n defaults = {\n \"region_name\": None,\n \"endpoint_type\": None,\n \"domain_name\": None,\n \"user_domain_name\": 
cfg.CONF.openstack.user_domain,\n \"project_domain_name\": cfg.CONF.openstack.project_domain,\n \"https_insecure\": False,\n \"https_cacert\": None\n }\n\n \"\"\"Converts creds of real OpenStack to internal presentation.\"\"\"\n new_data = copy.deepcopy(self.spec)\n if \"endpoint\" in new_data:\n LOG.warning(\"endpoint is deprecated and not used.\")\n del new_data[\"endpoint\"]\n admin = new_data.pop(\"admin\", None)\n users = new_data.pop(\"users\", [])\n api_info = new_data.pop(\"api_info\", None)\n\n if admin:\n if \"project_name\" in admin:\n admin[\"tenant_name\"] = admin.pop(\"project_name\")\n admin.update(new_data)\n for k, v in defaults.items():\n admin.setdefault(k, v)\n for user in users:\n if \"project_name\" in user:\n user[\"tenant_name\"] = user.pop(\"project_name\")\n user.update(new_data)\n for k, v in defaults.items():\n user.setdefault(k, v)\n platform_data = {\"admin\": admin, \"users\": users}\n if api_info:\n platform_data[\"api_info\"] = api_info\n return platform_data, {}\n\n def destroy(self):\n # NOTE(boris-42): No action need to be performed.\n pass\n\n def cleanup(self, task_uuid=None):\n return {\n \"message\": \"Coming soon!\",\n \"discovered\": 0,\n \"deleted\": 0,\n \"failed\": 0,\n \"resources\": {},\n \"errors\": []\n }\n\n def check_health(self):\n \"\"\"Check whatever platform is alive.\"\"\"\n\n users_to_check = self.platform_data[\"users\"]\n if self.platform_data[\"admin\"]:\n users_to_check.append(self.platform_data[\"admin\"])\n clients = None\n for user in users_to_check:\n user[\"api_info\"] = self.platform_data.get(\"api_info\", {})\n try:\n clients = osclients.Clients(user)\n if self.platform_data[\"admin\"] == user:\n clients.verified_keystone()\n else:\n clients.keystone()\n except osclients.exceptions.RallyException as e:\n # all rally native exceptions should provide user-friendly\n # messages\n return {\"available\": False,\n \"message\": e.format_message(),\n \"traceback\": traceback.format_exc()}\n except 
Exception:\n d = copy.deepcopy(user)\n d[\"password\"] = \"***\"\n if logging.is_debug():\n LOG.exception(\"Something unexpected had happened while \"\n \"validating OpenStack credentials.\")\n if self.platform_data[\"admin\"] == user:\n user_role = \"admin\"\n else:\n user_role = \"user\"\n return {\n \"available\": False,\n \"message\": (\n \"Bad %s creds: \\n%s\"\n % (user_role,\n json.dumps(d, indent=2, sort_keys=True))),\n \"traceback\": traceback.format_exc()\n }\n\n for name in self.platform_data.get(\"api_info\", {}):\n if name == \"keystone\":\n continue\n if not hasattr(clients, name):\n return {\n \"available\": False,\n \"message\": (\"There is no OSClient plugin '%s' for\"\n \" communicating with OpenStack API.\"\n % name)}\n client = getattr(clients, name)\n try:\n client.validate_version(client.choose_version())\n client.create_client()\n except osclients.exceptions.RallyException as e:\n return {\n \"available\": False,\n \"message\": (\"Invalid setting for '%(client)s':\"\n \" %(error)s\") % {\n \"client\": name, \"error\": e.format_message()}\n }\n except Exception:\n return {\n \"available\": False,\n \"message\": (\"Can not create '%(client)s' with\"\n \" %(version)s version.\") % {\n \"client\": name,\n \"version\": client.choose_version()},\n \"traceback\": traceback.format_exc()\n }\n\n return {\"available\": True}\n\n def info(self):\n \"\"\"Return information about cloud as dict.\"\"\"\n active_user = (self.platform_data[\"admin\"]\n or self.platform_data[\"users\"][0])\n services = []\n for stype, name in osclients.Clients(active_user).services().items():\n if name == \"__unknown__\":\n # `__unknown__` name misleads, let's just not include it...\n services.append({\"type\": stype})\n else:\n services.append({\"type\": stype, \"name\": name})\n\n return {\n \"info\": {\n \"services\": sorted(services, key=lambda x: x[\"type\"])\n }\n }\n\n def _get_validation_context(self):\n return {\"users@openstack\": {}}\n\n @classmethod\n def 
create_spec_from_sys_environ(cls, sys_environ):\n \"\"\"Create a spec based on system environment.\n\n * OS_AUTH_URL - The auth url for OpenStack cluster. Supported both\n versioned and unversioned urls.\n\n * OS_USERNAME - A user name with admin role to use.\n\n * OS_PASSWORD - A password for selected user.\n\n * OS_PROJECT_NAME - Project name to scope to\n\n * OS_TENANT_NAME - Project name to scope to (an alternative for\n $OS_PROJECT_NAME)\n\n * OS_USER_DOMAIN_NAME - User domain name (in case of Keystone V3)\n\n * OS_PROJECT_DOMAIN_NAME - Domain name containing project (in case of\n Keystone V3)\n\n * OS_ENDPOINT_TYPE - Type of endpoint. Valid endpoint types: admin,\n public, internal\n\n * OS_INTERFACE - Type of endpoint (an alternative for OS_ENDPOINT_TYPE)\n\n * OS_REGION_NAME - Authentication region name\n\n * OS_CACERT - A path to CA certificate bundle file\n\n * OS_CERT - A path to Client certificate bundle file\n\n * OS_KEY - A path to Client certificate key file\n\n * OS_INSECURE - Disable server certificate verification\n\n * OSPROFILER_HMAC_KEY - HMAC key to use for encrypting context while\n using osprofiler\n\n * OSPROFILER_CONN_STR - A connection string for OSProfiler collector\n to grep profiling results while building html task reports\n\n \"\"\"\n\n from oslo_utils import strutils\n\n required_env_vars = [\"OS_AUTH_URL\", \"OS_USERNAME\", \"OS_PASSWORD\"]\n missing_env_vars = [v for v in required_env_vars if\n v not in sys_environ]\n if missing_env_vars:\n return {\"available\": False,\n \"message\": \"The following variable(s) are missed: %s\" %\n missing_env_vars}\n tenant_name = sys_environ.get(\"OS_PROJECT_NAME\",\n sys_environ.get(\"OS_TENANT_NAME\"))\n if tenant_name is None:\n return {\"available\": False,\n \"message\": \"One of OS_PROJECT_NAME or OS_TENANT_NAME \"\n \"should be specified.\"}\n\n endpoint_type = sys_environ.get(\"OS_ENDPOINT_TYPE\",\n sys_environ.get(\"OS_INTERFACE\"))\n if endpoint_type and \"URL\" in endpoint_type:\n 
endpoint_type = endpoint_type.replace(\"URL\", \"\")\n\n spec = {\n \"auth_url\": sys_environ[\"OS_AUTH_URL\"],\n \"admin\": {\n \"username\": sys_environ[\"OS_USERNAME\"],\n \"password\": sys_environ[\"OS_PASSWORD\"],\n \"tenant_name\": tenant_name\n },\n \"endpoint_type\": endpoint_type,\n \"region_name\": sys_environ.get(\"OS_REGION_NAME\", \"\"),\n \"https_cacert\": sys_environ.get(\"OS_CACERT\", \"\"),\n \"https_cert\": sys_environ.get(\"OS_CERT\", \"\"),\n \"https_key\": sys_environ.get(\"OS_KEY\", \"\"),\n \"https_insecure\": strutils.bool_from_string(\n sys_environ.get(\"OS_INSECURE\")),\n \"profiler_hmac_key\": sys_environ.get(\"OSPROFILER_HMAC_KEY\"),\n \"profiler_conn_str\": sys_environ.get(\"OSPROFILER_CONN_STR\"),\n \"api_info\": {\n \"keystone\": {\n \"version\": 2,\n \"service_type\": \"identity\"\n }\n }\n }\n\n user_domain_name = sys_environ.get(\"OS_USER_DOMAIN_NAME\")\n project_domain_name = sys_environ.get(\"OS_PROJECT_DOMAIN_NAME\")\n identity_api_version = sys_environ.get(\n \"OS_IDENTITY_API_VERSION\", sys_environ.get(\"IDENTITY_API_VERSION\"))\n if (identity_api_version == \"3\"\n or (identity_api_version is None\n and (user_domain_name or project_domain_name))):\n # it is Keystone v3 and it has another config scheme\n spec[\"admin\"][\"project_name\"] = spec[\"admin\"].pop(\"tenant_name\")\n spec[\"admin\"][\"user_domain_name\"] = user_domain_name or \"Default\"\n project_domain_name = project_domain_name or \"Default\"\n spec[\"admin\"][\"project_domain_name\"] = project_domain_name\n spec[\"api_info\"] = {\n \"keystone\": {\n \"version\": 3,\n \"service_type\": \"identityv3\"\n }\n }\n\n return {\"spec\": spec, \"available\": True, \"message\": \"Available\"}\n\n @classmethod\n def _get_doc(cls):\n doc = cls.__doc__.strip()\n\n env_vars_docs = cls.create_spec_from_sys_environ.__doc__\n env_vars_description = \"\\n\".join(\n line for line in env_vars_docs.split(\"\\n\")[1:]\n )\n doc += (f\"\\n **The following environment variables are 
expected for \"\n f\"creation a Rally environment using sustem environment \"\n f\"variables**\\n{env_vars_description}\")\n return doc\n" }, { "alpha_fraction": 0.6020546555519104, "alphanum_fraction": 0.6057178974151611, "avg_line_length": 43.52836990356445, "blob_id": "7f8638f9223171a216a5575ea856f53bbf1e2aeb", "content_id": "37c84029a33a292c4816f88ccbbba15d34e1adf7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12557, "license_type": "permissive", "max_line_length": 79, "num_lines": 282, "path": "/tests/unit/common/services/storage/test_block.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.common.services.storage import block\nfrom tests.unit import test\n\n\nclass BlockTestCase(test.TestCase):\n def setUp(self):\n super(BlockTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.service = self._get_service_with_fake_impl()\n\n def _get_service_with_fake_impl(self):\n path = \"rally_openstack.common.services.storage.block\"\n path = \"%s.BlockStorage.discover_impl\" % path\n with mock.patch(path) as mock_discover:\n mock_discover.return_value = mock.MagicMock(), None\n service = block.BlockStorage(self.clients)\n return service\n\n def test_create_volume(self):\n self.assertEqual(self.service._impl.create_volume.return_value,\n self.service.create_volume(\"fake_volume\"))\n self.service._impl.create_volume.assert_called_once_with(\n \"fake_volume\", availability_zone=None, consistencygroup_id=None,\n description=None, group_id=None, imageRef=None, metadata=None,\n name=None, project_id=None,\n scheduler_hints=None, snapshot_id=None,\n source_volid=None, user_id=None, volume_type=None, backup_id=None)\n\n def test_list_volumes(self):\n self.assertEqual(self.service._impl.list_volumes.return_value,\n self.service.list_volumes(detailed=True))\n self.service._impl.list_volumes.assert_called_once_with(\n detailed=True, limit=None, marker=None, search_opts=None,\n sort=None)\n\n def test_get_volume(self):\n self.assertTrue(self.service._impl.get_volume.return_value,\n self.service.get_volume(1))\n self.service._impl.get_volume.assert_called_once_with(1)\n\n def test_update_volume(self):\n self.assertTrue(self.service._impl.update_volume.return_value,\n self.service.update_volume(1, name=\"name\",\n description=\"desp\"))\n self.service._impl.update_volume.assert_called_once_with(\n 1, name=\"name\", description=\"desp\")\n\n def test_delete_volume(self):\n 
self.service.delete_volume(\"volume\")\n self.service._impl.delete_volume.assert_called_once_with(\"volume\")\n\n def test_extend_volume(self):\n self.assertEqual(self.service._impl.extend_volume.return_value,\n self.service.extend_volume(\"volume\", new_size=1))\n self.service._impl.extend_volume.assert_called_once_with(\"volume\",\n new_size=1)\n\n def test_list_snapshots(self):\n self.assertEqual(self.service._impl.list_snapshots.return_value,\n self.service.list_snapshots(detailed=True))\n self.service._impl.list_snapshots.assert_called_once_with(\n detailed=True)\n\n def test_list_types(self):\n self.assertEqual(\n self.service._impl.list_types.return_value,\n self.service.list_types(search_opts=None, is_public=None))\n self.service._impl.list_types.assert_called_once_with(is_public=None,\n search_opts=None)\n\n def test_set_metadata(self):\n self.assertEqual(\n self.service._impl.set_metadata.return_value,\n self.service.set_metadata(\"volume\", sets=10, set_size=3))\n self.service._impl.set_metadata.assert_called_once_with(\n \"volume\", set_size=3, sets=10)\n\n def test_delete_metadata(self):\n keys = [\"a\", \"b\"]\n self.service.delete_metadata(\"volume\", keys=keys, deletes=10,\n delete_size=3)\n self.service._impl.delete_metadata.assert_called_once_with(\n \"volume\", keys, delete_size=3, deletes=10)\n\n def test_update_readonly_flag(self):\n self.assertEqual(\n self.service._impl.update_readonly_flag.return_value,\n self.service.update_readonly_flag(\"volume\", read_only=True))\n self.service._impl.update_readonly_flag.assert_called_once_with(\n \"volume\", read_only=True)\n\n def test_upload_volume_to_image(self):\n self.assertEqual(\n self.service._impl.upload_volume_to_image.return_value,\n self.service.upload_volume_to_image(\"volume\",\n force=False,\n container_format=\"bare\",\n disk_format=\"raw\"))\n self.service._impl.upload_volume_to_image.assert_called_once_with(\n \"volume\", container_format=\"bare\", disk_format=\"raw\", force=False)\n\n 
def test_create_qos(self):\n spaces = {\"consumer\": \"both\",\n \"write_iops_sec\": \"10\",\n \"read_iops_sec\": \"1000\"}\n\n self.assertEqual(\n self.service._impl.create_qos.return_value,\n self.service.create_qos(spaces)\n )\n self.service._impl.create_qos.assert_called_once_with(spaces)\n\n def test_list_qos(self):\n self.assertEqual(\n self.service._impl.list_qos.return_value,\n self.service.list_qos(True)\n )\n self.service._impl.list_qos.assert_called_once_with(True)\n\n def test_get_qos(self):\n self.assertEqual(\n self.service._impl.get_qos.return_value,\n self.service.get_qos(\"qos\"))\n self.service._impl.get_qos.assert_called_once_with(\"qos\")\n\n def test_set_qos(self):\n set_specs_args = {\"test\": \"foo\"}\n self.assertEqual(\n self.service._impl.set_qos.return_value,\n self.service.set_qos(qos=\"qos\", set_specs_args=set_specs_args))\n self.service._impl.set_qos.assert_called_once_with(\n qos=\"qos\", set_specs_args=set_specs_args)\n\n def test_qos_associate_type(self):\n self.assertEqual(\n self.service._impl.qos_associate_type.return_value,\n self.service.qos_associate_type(qos_specs=\"fake_qos\",\n volume_type=\"fake_type\"))\n self.service._impl.qos_associate_type.assert_called_once_with(\n \"fake_qos\", \"fake_type\")\n\n def test_qos_disassociate_type(self):\n self.assertEqual(\n self.service._impl.qos_disassociate_type.return_value,\n self.service.qos_disassociate_type(qos_specs=\"fake_qos\",\n volume_type=\"fake_type\"))\n self.service._impl.qos_disassociate_type.assert_called_once_with(\n \"fake_qos\", \"fake_type\")\n\n def test_create_snapshot(self):\n self.assertEqual(\n self.service._impl.create_snapshot.return_value,\n self.service.create_snapshot(1, force=False, name=None,\n description=None, metadata=None))\n self.service._impl.create_snapshot.assert_called_once_with(\n 1, force=False, name=None, description=None, metadata=None)\n\n def test_delete_snapshot(self):\n self.service.delete_snapshot(\"snapshot\")\n 
self.service._impl.delete_snapshot.assert_called_once_with(\"snapshot\")\n\n def test_create_backup(self):\n self.assertEqual(\n self.service._impl.create_backup.return_value,\n self.service.create_backup(1, container=None,\n name=None, description=None,\n incremental=False, force=False,\n snapshot_id=None))\n self.service._impl.create_backup.assert_called_once_with(\n 1, container=None, name=None, description=None, incremental=False,\n force=False, snapshot_id=None)\n\n def test_delete_backup(self):\n self.service.delete_backup(\"backup\")\n self.service._impl.delete_backup.assert_called_once_with(\"backup\")\n\n def test_restore_backup(self):\n self.assertEqual(self.service._impl.restore_backup.return_value,\n self.service.restore_backup(1, volume_id=1))\n self.service._impl.restore_backup.assert_called_once_with(\n 1, volume_id=1)\n\n def test_list_backups(self):\n self.assertEqual(self.service._impl.list_backups.return_value,\n self.service.list_backups(detailed=True))\n self.service._impl.list_backups.assert_called_once_with(detailed=True)\n\n def test_list_transfers(self):\n self.assertEqual(\n self.service._impl.list_transfers.return_value,\n self.service.list_transfers(detailed=True, search_opts=None))\n self.service._impl.list_transfers.assert_called_once_with(\n detailed=True, search_opts=None)\n\n def test_create_volume_type(self):\n self.assertEqual(\n self.service._impl.create_volume_type.return_value,\n self.service.create_volume_type(name=\"type\",\n description=None,\n is_public=True))\n self.service._impl.create_volume_type.assert_called_once_with(\n name=\"type\", description=None, is_public=True)\n\n def test_get_volume_type(self):\n self.assertEqual(\n self.service._impl.get_volume_type.return_value,\n self.service.get_volume_type(\"volume_type\"))\n self.service._impl.get_volume_type.assert_called_once_with(\n \"volume_type\")\n\n def test_delete_volume_type(self):\n self.service.delete_volume_type(\"volume_type\")\n 
self.service._impl.delete_volume_type.assert_called_once_with(\n \"volume_type\")\n\n def test_set_volume_type_keys(self):\n self.assertEqual(\n self.service._impl.set_volume_type_keys.return_value,\n self.service.set_volume_type_keys(\"volume_type\",\n metadata=\"metadata\"))\n self.service._impl.set_volume_type_keys.assert_called_once_with(\n \"volume_type\", \"metadata\")\n\n def test_transfer_create(self):\n self.assertEqual(self.service._impl.transfer_create.return_value,\n self.service.transfer_create(1, name=\"t\"))\n self.service._impl.transfer_create.assert_called_once_with(\n 1, name=\"t\")\n\n def test_transfer_accept(self):\n self.assertEqual(self.service._impl.transfer_accept.return_value,\n self.service.transfer_accept(1, auth_key=2))\n self.service._impl.transfer_accept.assert_called_once_with(\n 1, auth_key=2)\n\n def test_create_encryption_type(self):\n self.assertEqual(\n self.service._impl.create_encryption_type.return_value,\n self.service.create_encryption_type(\"type\", specs=2))\n self.service._impl.create_encryption_type.assert_called_once_with(\n \"type\", specs=2)\n\n def test_get_encryption_type(self):\n self.assertEqual(\n self.service._impl.get_encryption_type.return_value,\n self.service.get_encryption_type(\"type\"))\n self.service._impl.get_encryption_type.assert_called_once_with(\n \"type\")\n\n def test_list_encryption_type(self):\n self.assertEqual(self.service._impl.list_encryption_type.return_value,\n self.service.list_encryption_type(search_opts=None))\n self.service._impl.list_encryption_type.assert_called_once_with(\n search_opts=None)\n\n def test_delete_encryption_type(self):\n self.service.delete_encryption_type(\"type\")\n self.service._impl.delete_encryption_type.assert_called_once_with(\n \"type\")\n\n def test_update_encryption_type(self):\n self.assertEqual(\n self.service._impl.update_encryption_type.return_value,\n self.service.update_encryption_type(\"type\", specs=3))\n 
self.service._impl.update_encryption_type.assert_called_once_with(\n \"type\", specs=3)\n" }, { "alpha_fraction": 0.548985481262207, "alphanum_fraction": 0.551111102104187, "avg_line_length": 39.74803161621094, "blob_id": "5247414842ea735bc1edd99b83145b3009c030ee", "content_id": "ff12f0b0c4adef10c9ac9ab09dc533dbf85557f6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5175, "license_type": "permissive", "max_line_length": 76, "num_lines": 127, "path": "/rally_openstack/task/contexts/sahara/sahara_input_data_sources.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom urllib.parse import urlparse\n\nimport requests\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.sahara import utils\nfrom rally_openstack.task.scenarios.swift import utils as swift_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"sahara_input_data_sources\", platform=\"openstack\",\n order=443)\nclass SaharaInputDataSources(context.OpenStackContext):\n \"\"\"Context class for setting up Input Data Sources for an EDP job.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"input_type\": {\n \"enum\": [\"swift\", \"hdfs\"],\n },\n \"input_url\": {\n \"type\": \"string\",\n },\n \"swift_files\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"download_url\": {\n \"type\": \"string\"\n }\n },\n \"additionalProperties\": False,\n \"required\": [\"name\", \"download_url\"]\n }\n }\n },\n \"additionalProperties\": False,\n \"required\": [\"input_type\", \"input_url\"]\n }\n\n def setup(self):\n utils.init_sahara_context(self)\n self.context[\"sahara\"][\"swift_objects\"] = []\n self.context[\"sahara\"][\"container_name\"] = None\n\n for user, tenant_id in self._iterate_per_tenants():\n clients = osclients.Clients(user[\"credential\"])\n if self.config[\"input_type\"] == \"swift\":\n self.setup_inputs_swift(clients, tenant_id,\n self.config[\"input_url\"],\n self.config[\"swift_files\"],\n user[\"credential\"].username,\n user[\"credential\"].password)\n else:\n self.setup_inputs(clients, tenant_id,\n self.config[\"input_type\"],\n 
self.config[\"input_url\"])\n\n def setup_inputs(self, clients, tenant_id, input_type, input_url):\n input_ds = clients.sahara().data_sources.create(\n name=self.generate_random_name(),\n description=\"\",\n data_source_type=input_type,\n url=input_url)\n\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"input\"] = input_ds.id\n\n def setup_inputs_swift(self, clients, tenant_id, input_url,\n swift_files, username, password):\n swift_scenario = swift_utils.SwiftScenario(clients=clients,\n context=self.context)\n # TODO(astudenov): use self.generate_random_name()\n container_name = \"rally_\" + urlparse(input_url).netloc.rstrip(\n \".sahara\")\n self.context[\"sahara\"][\"container_name\"] = (\n swift_scenario._create_container(container_name=container_name))\n for swift_file in swift_files:\n content = requests.get(swift_file[\"download_url\"]).content\n self.context[\"sahara\"][\"swift_objects\"].append(\n swift_scenario._upload_object(\n self.context[\"sahara\"][\"container_name\"], content,\n object_name=swift_file[\"name\"]))\n input_ds_swift = clients.sahara().data_sources.create(\n name=self.generate_random_name(), description=\"\",\n data_source_type=\"swift\", url=input_url,\n credential_user=username, credential_pass=password)\n\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"input\"] = (\n input_ds_swift.id)\n\n def cleanup(self):\n resource_manager.cleanup(\n names=[\"swift.object\", \"swift.container\"],\n users=self.context.get(\"users\", []),\n superclass=swift_utils.SwiftScenario,\n task_id=self.get_owner_id())\n resource_manager.cleanup(\n names=[\"sahara.data_sources\"],\n users=self.context.get(\"users\", []),\n superclass=self.__class__,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.6928281188011169, "alphanum_fraction": 0.6928281188011169, "avg_line_length": 23.633333206176758, "blob_id": "3758ec1436f6c81a3646752e8a7baed1d0292c78", "content_id": "87e5b003d02606d0cc541c8cfc165f3d4b7903c8", "detected_licenses": [ "Apache-2.0" 
], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 739, "license_type": "permissive", "max_line_length": 78, "num_lines": 30, "path": "/devstack/README.rst", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "Rally with DevStack all-in-one installation\n-------------------------------------------\n\nIt is also possible to install Rally with DevStack. First, clone the\ncorresponding repositories:\n\n.. code-block:: bash\n\n git clone https://git.openstack.org/openstack-dev/devstack\n git clone https://github.com/openstack/rally-openstack\n\nThen, configure DevStack to run Rally. First, create your ``local.conf`` file:\n\n.. code-block:: bash\n\n cd devstack\n cp samples/local.conf local.conf\n\nNext, edit local.conf: add the following line to the ``[[local|localrc]]``\nsection.\n\n.. code-block:: bash\n\n enable_plugin rally https://github.com/openstack/rally-openstack master\n\nFinally, run DevStack as usually:\n\n.. code-block:: bash\n\n ./stack.sh\n" }, { "alpha_fraction": 0.6777535676956177, "alphanum_fraction": 0.6902944445610046, "avg_line_length": 31.175437927246094, "blob_id": "ee1bff7508c66fef06ce8a75fda3a46144a2efd3", "content_id": "56fbb98effcb305229a52e1cebf826b9506da2e3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1834, "license_type": "permissive", "max_line_length": 78, "num_lines": 57, "path": "/rally_openstack/common/services/network/net_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport netaddr\n\nfrom rally.common import logging\nfrom rally.common import utils\n\n\nLOG = logging.getLogger(__name__)\n\n\n_IPv4_START_CIDR = \"10.2.0.0/24\"\n_IPv6_START_CIDR = \"dead:beaf::/64\"\n\n_IPv4_CIDR_INCR = utils.RAMInt()\n_IPv6_CIDR_INCR = utils.RAMInt()\n\n\ndef get_ip_version(ip):\n return netaddr.IPNetwork(ip).version\n\n\ndef generate_cidr(ip_version=None, start_cidr=None):\n \"\"\"Generate next CIDR for network or subnet, without IP overlapping.\n\n This is process and thread safe, because `cidr_incr' points to\n value stored directly in RAM. This guarantees that CIDRs will be\n serial and unique even under hard multiprocessing/threading load.\n\n :param ip_version: version of IP to take default value for start_cidr\n :param start_cidr: start CIDR str\n \"\"\"\n if start_cidr is None:\n if ip_version == 6:\n start_cidr = _IPv6_START_CIDR\n else:\n start_cidr = _IPv4_START_CIDR\n\n ip_version = get_ip_version(start_cidr)\n if ip_version == 4:\n cidr = str(netaddr.IPNetwork(start_cidr).next(next(_IPv4_CIDR_INCR)))\n else:\n cidr = str(netaddr.IPNetwork(start_cidr).next(next(_IPv6_CIDR_INCR)))\n LOG.debug(\"CIDR generated: %s\" % cidr)\n return ip_version, cidr\n" }, { "alpha_fraction": 0.6239625215530396, "alphanum_fraction": 0.625134289264679, "avg_line_length": 34.192440032958984, "blob_id": "6f823807b59fb62b9a484b4e0630a6fbab06b595", "content_id": "acc690831ec98f95b45fd820a2e4df5d3aaad156", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10241, "license_type": 
"permissive", "max_line_length": 79, "num_lines": 291, "path": "/rally_openstack/task/scenarios/murano/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport shutil\nimport tempfile\nimport uuid\nimport zipfile\n\nfrom rally.common import cfg\nfrom rally.common import utils as common_utils\nfrom rally.task import atomic\nfrom rally.task import utils\nimport yaml\n\nfrom rally_openstack.task import scenario\n\n\nCONF = cfg.CONF\n\n\ndef pack_dir(source_directory, zip_name=None):\n \"\"\"Archive content of the directory into .zip\n\n Zip content of the source folder excluding root directory\n into zip archive. When zip_name is specified, it would be used\n as a destination for the archive. 
Otherwise method would\n try to use temporary file as a destination for the archive.\n\n :param source_directory: root of the newly created archive.\n Directory is added recursively.\n :param zip_name: destination zip file name.\n :raises IOError: whenever there are IO issues.\n :returns: path to the newly created zip archive either specified via\n zip_name or a temporary one.\n \"\"\"\n\n if not zip_name:\n fp = tempfile.NamedTemporaryFile(delete=False)\n zip_name = fp.name\n zipf = zipfile.ZipFile(zip_name, mode=\"w\")\n try:\n for root, dirs, files in os.walk(source_directory):\n for f in files:\n abspath = os.path.join(root, f)\n relpath = os.path.relpath(abspath, source_directory)\n zipf.write(abspath, relpath)\n finally:\n zipf.close()\n return zip_name\n\n\nclass MuranoScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Murano scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"murano.list_environments\")\n def _list_environments(self):\n \"\"\"Return environments list.\"\"\"\n return self.clients(\"murano\").environments.list()\n\n @atomic.action_timer(\"murano.create_environment\")\n def _create_environment(self):\n \"\"\"Create environment.\n\n :param env_name: String used to name environment\n\n :returns: Environment instance\n \"\"\"\n env_name = self.generate_random_name()\n return self.clients(\"murano\").environments.create({\"name\": env_name})\n\n @atomic.action_timer(\"murano.delete_environment\")\n def _delete_environment(self, environment):\n \"\"\"Delete given environment.\n\n Return when the environment is actually deleted.\n\n :param environment: Environment instance\n \"\"\"\n self.clients(\"murano\").environments.delete(environment.id)\n\n @atomic.action_timer(\"murano.create_session\")\n def _create_session(self, environment_id):\n \"\"\"Create session for environment with specific id\n\n :param environment_id: Environment id\n :returns: Session instance\n \"\"\"\n return 
self.clients(\"murano\").sessions.configure(environment_id)\n\n @atomic.action_timer(\"murano.create_service\")\n def _create_service(self, environment, session, full_package_name,\n image_name=None, flavor_name=None):\n \"\"\"Create Murano service.\n\n :param environment: Environment instance\n :param session: Session instance\n :param full_package_name: full name of the Murano package\n :param image_name: Image name\n :param flavor_name: Flavor name\n :returns: Service instance\n \"\"\"\n app_id = str(uuid.uuid4())\n data = {\"?\": {\"id\": app_id,\n \"type\": full_package_name},\n \"name\": self.generate_random_name()}\n\n return self.clients(\"murano\").services.post(\n environment_id=environment.id, path=\"/\", data=data,\n session_id=session.id)\n\n @atomic.action_timer(\"murano.deploy_environment\")\n def _deploy_environment(self, environment, session):\n \"\"\"Deploy environment.\n\n :param environment: Environment instance\n :param session: Session instance\n \"\"\"\n self.clients(\"murano\").sessions.deploy(environment.id,\n session.id)\n\n config = CONF.openstack\n utils.wait_for_status(\n environment,\n ready_statuses=[\"READY\"],\n update_resource=utils.get_from_manager([\"DEPLOY FAILURE\"]),\n timeout=config.murano_deploy_environment_timeout,\n check_interval=config.murano_deploy_environment_check_interval\n )\n\n @atomic.action_timer(\"murano.list_packages\")\n def _list_packages(self, include_disabled=False):\n \"\"\"Returns packages list.\n\n :param include_disabled: if \"True\" then disabled packages will be\n included in a the result.\n Default value is False.\n :returns: list of imported packages\n \"\"\"\n return self.clients(\"murano\").packages.list(\n include_disabled=include_disabled)\n\n @atomic.action_timer(\"murano.import_package\")\n def _import_package(self, package):\n \"\"\"Import package to the Murano.\n\n :param package: path to zip archive with Murano application\n :returns: imported package\n \"\"\"\n\n package = 
self.clients(\"murano\").packages.create(\n {}, {\"file\": open(package)}\n )\n\n return package\n\n @atomic.action_timer(\"murano.delete_package\")\n def _delete_package(self, package):\n \"\"\"Delete specified package.\n\n :param package: package that will be deleted\n \"\"\"\n\n self.clients(\"murano\").packages.delete(package.id)\n\n @atomic.action_timer(\"murano.update_package\")\n def _update_package(self, package, body, operation=\"replace\"):\n \"\"\"Update specified package.\n\n :param package: package that will be updated\n :param body: dict object that defines what package property will be\n updated, e.g {\"tags\": [\"tag\"]} or {\"enabled\": \"true\"}\n :param operation: string object that defines the way of how package\n property will be updated, allowed operations are\n \"add\", \"replace\" or \"delete\".\n Default value is \"replace\".\n :returns: updated package\n \"\"\"\n\n return self.clients(\"murano\").packages.update(\n package.id, body, operation)\n\n @atomic.action_timer(\"murano.filter_applications\")\n def _filter_applications(self, filter_query):\n \"\"\"Filter list of uploaded application by specified criteria.\n\n :param filter_query: dict that contains filter criteria, it\n will be passed as **kwargs to filter method\n e.g. 
{\"category\": \"Web\"}\n :returns: filtered list of packages\n \"\"\"\n\n return self.clients(\"murano\").packages.filter(**filter_query)\n\n def _zip_package(self, package_path):\n \"\"\"Call _prepare_package method that returns path to zip archive.\"\"\"\n return MuranoPackageManager(self.task)._prepare_package(package_path)\n\n\nclass MuranoPackageManager(common_utils.RandomNameGeneratorMixin):\n RESOURCE_NAME_FORMAT = \"app.rally_XXXXXXXX_XXXXXXXX\"\n\n def __init__(self, task):\n self.task = task\n\n @staticmethod\n def _read_from_file(filename):\n with open(filename, \"r\") as f:\n read_data = f.read()\n return yaml.safe_load(read_data)\n\n @staticmethod\n def _write_to_file(data, filename):\n with open(filename, \"w\") as f:\n yaml.safe_dump(data, f)\n\n def _change_app_fullname(self, app_dir):\n \"\"\"Change application full name.\n\n To avoid name conflict error during package import (when user\n tries to import a few packages into the same tenant) need to change the\n application name. 
For doing this need to replace following parts\n in manifest.yaml\n from\n ...\n FullName: app.name\n ...\n Classes:\n app.name: app_class.yaml\n to:\n ...\n FullName: <new_name>\n ...\n Classes:\n <new_name>: app_class.yaml\n\n :param app_dir: path to directory with Murano application context\n \"\"\"\n\n new_fullname = self.generate_random_name()\n\n manifest_file = os.path.join(app_dir, \"manifest.yaml\")\n manifest = self._read_from_file(manifest_file)\n\n class_file_name = manifest[\"Classes\"][manifest[\"FullName\"]]\n\n # update manifest.yaml file\n del manifest[\"Classes\"][manifest[\"FullName\"]]\n manifest[\"FullName\"] = new_fullname\n manifest[\"Classes\"][new_fullname] = class_file_name\n self._write_to_file(manifest, manifest_file)\n\n def _prepare_package(self, package_path):\n \"\"\"Check whether the package path is path to zip archive or not.\n\n If package_path is not a path to zip archive but path to Murano\n application folder, than method prepares zip archive with Murano\n application. 
It copies directory with Murano app files to temporary\n folder, changes manifest.yaml and class file (to avoid '409 Conflict'\n errors in Murano) and prepares zip package.\n\n :param package_path: path to zip archive or directory with package\n components\n :returns: path to zip archive with Murano application\n \"\"\"\n\n if not zipfile.is_zipfile(package_path):\n tmp_dir = tempfile.mkdtemp()\n pkg_dir = os.path.join(tmp_dir, \"package/\")\n try:\n shutil.copytree(os.path.expanduser(package_path), pkg_dir)\n\n self._change_app_fullname(pkg_dir)\n package_path = pack_dir(pkg_dir)\n\n finally:\n shutil.rmtree(tmp_dir)\n\n return package_path\n" }, { "alpha_fraction": 0.6443256735801697, "alphanum_fraction": 0.6488487124443054, "avg_line_length": 40.220340728759766, "blob_id": "1731249b3c4a83f656e69fbb2e2139f69ac5f238", "content_id": "dff022256e87966f24a476c75b3ea37952ef238f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2432, "license_type": "permissive", "max_line_length": 78, "num_lines": 59, "path": "/rally_openstack/task/contexts/nova/keypairs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Rackspace UK\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"keypair\", platform=\"openstack\", order=310)\nclass Keypair(context.OpenStackContext):\n \"\"\"Create Nova KeyPair for each user.\"\"\"\n\n # NOTE(andreykurilin): \"type\" != \"null\", since we need to support backward\n # compatibility(previously empty dict was valid) and I hope in near\n # future, we will extend this context to accept keys.\n CONFIG_SCHEMA = {\"type\": \"object\",\n \"additionalProperties\": False}\n\n def _generate_keypair(self, credential):\n nova_client = osclients.Clients(credential).nova()\n # NOTE(hughsaunders): If keypair exists, it should re-generate name.\n\n keypairs = nova_client.keypairs.list()\n keypair_names = [keypair.name for keypair in keypairs]\n while True:\n keypair_name = self.generate_random_name()\n if keypair_name not in keypair_names:\n break\n\n keypair = nova_client.keypairs.create(keypair_name)\n return {\"private\": keypair.private_key,\n \"public\": keypair.public_key,\n \"name\": keypair_name,\n \"id\": keypair.id}\n\n def setup(self):\n for user in self.context[\"users\"]:\n user[\"keypair\"] = self._generate_keypair(user[\"credential\"])\n\n def cleanup(self):\n resource_manager.cleanup(names=[\"nova.keypairs\"],\n users=self.context.get(\"users\", []),\n superclass=self.__class__,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.7516040205955505, "alphanum_fraction": 0.7540482878684998, "avg_line_length": 44.45833206176758, "blob_id": "3a228358a11a575b874790403c7ac0bd38802319", "content_id": "f6b703a1f53467d811d4973136ee3db5ae5a8518", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 3273, "license_type": "permissive", "max_line_length": 78, "num_lines": 72, "path": "/rally_openstack/task/scenarios/barbican/containers.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.barbican import utils\n\n\"\"\"Scenarios for Barbican containers.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"BarbicanContainers.list\")\nclass BarbicanContainersList(utils.BarbicanBase):\n def run(self):\n \"\"\"List Containers.\"\"\"\n self.admin_barbican.list_container()\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"BarbicanContainers.create_and_delete\")\nclass BarbicanContainersGenericCreateAndDelete(utils.BarbicanBase):\n def run(self):\n \"\"\"Create and delete generic container.\"\"\"\n container = self.admin_barbican.container_create()\n self.admin_barbican.container_delete(container.container_ref)\n\n\[email protected](\"required_services\", 
services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"BarbicanContainers.create_and_add\")\nclass BarbicanContainersGenericCreateAndAddSecret(utils.BarbicanBase):\n def run(self):\n \"\"\"Create secret, create generic container, and delete container.\"\"\"\n secret = self.admin_barbican.create_secret()\n secret = {\"secret\": secret}\n container = self.admin_barbican.container_create(secrets=secret)\n self.admin_barbican.container_delete(container.container_ref)\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"BarbicanContainers.create_certificate_and_delete\")\nclass BarbicanContainersCertificateCreateAndDelete(utils.BarbicanBase):\n def run(self):\n \"\"\"Create and delete certificate container.\"\"\"\n container = self.admin_barbican.create_certificate_container()\n self.admin_barbican.container_delete(container.container_ref)\n\n\[email protected](\"required_services\", services=[consts.Service.BARBICAN])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"BarbicanContainers.create_rsa_and_delete\")\nclass BarbicanContainersRSACreateAndDelete(utils.BarbicanBase):\n def run(self):\n \"\"\"Create and delete certificate container.\"\"\"\n container = self.admin_barbican.create_rsa_container()\n self.admin_barbican.container_delete(container.container_ref)\n" }, { "alpha_fraction": 0.41186586022377014, "alphanum_fraction": 0.43115097284317017, "avg_line_length": 31.694778442382812, "blob_id": "98e8c40a0b7b6bac21ff63d6cdaada34eec9212b", "content_id": "c05c3e869e154ede4ad0b6c535e294eb57bd58d0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8141, "license_type": "permissive", "max_line_length": 78, "num_lines": 249, "path": 
"/rally_openstack/task/scenarios/sahara/consts.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nNODE_PROCESSES = {\n \"vanilla\": {\n \"1.2.1\": {\n \"master\": [\"namenode\", \"jobtracker\", \"oozie\"],\n \"worker\": [\"datanode\", \"tasktracker\"]\n },\n \"2.3.0\": {\n \"master\": [\"namenode\", \"resourcemanager\", \"historyserver\",\n \"oozie\"],\n \"worker\": [\"datanode\", \"nodemanager\"]\n },\n \"2.4.1\": {\n \"master\": [\"namenode\", \"resourcemanager\", \"historyserver\",\n \"oozie\"],\n \"worker\": [\"datanode\", \"nodemanager\"]\n },\n \"2.6.0\": {\n \"master\": [\"namenode\", \"resourcemanager\", \"historyserver\",\n \"oozie\"],\n \"worker\": [\"datanode\", \"nodemanager\"]\n },\n \"2.7.1\": {\n \"master\": [\"namenode\", \"resourcemanager\", \"historyserver\",\n \"oozie\"],\n \"worker\": [\"datanode\", \"nodemanager\"]\n }\n },\n \"hdp\": {\n \"1.3.2\": {\n \"master\": [\"JOBTRACKER\", \"NAMENODE\", \"SECONDARY_NAMENODE\",\n \"GANGLIA_SERVER\", \"NAGIOS_SERVER\",\n \"AMBARI_SERVER\", \"OOZIE_SERVER\"],\n \"worker\": [\"TASKTRACKER\", \"DATANODE\", \"HDFS_CLIENT\",\n \"MAPREDUCE_CLIENT\", \"OOZIE_CLIENT\", \"PIG\"]\n },\n \"2.0.6\": {\n \"manager\": [\"AMBARI_SERVER\", \"GANGLIA_SERVER\",\n \"NAGIOS_SERVER\"],\n \"master\": [\"NAMENODE\", \"SECONDARY_NAMENODE\",\n \"ZOOKEEPER_SERVER\", \"ZOOKEEPER_CLIENT\",\n 
\"HISTORYSERVER\", \"RESOURCEMANAGER\",\n \"OOZIE_SERVER\"],\n \"worker\": [\"DATANODE\", \"HDFS_CLIENT\", \"ZOOKEEPER_CLIENT\",\n \"PIG\", \"MAPREDUCE2_CLIENT\", \"YARN_CLIENT\",\n \"NODEMANAGER\", \"OOZIE_CLIENT\"]\n },\n \"2.2\": {\n \"manager\": [\"AMBARI_SERVER\", \"GANGLIA_SERVER\",\n \"NAGIOS_SERVER\"],\n \"master\": [\"NAMENODE\", \"SECONDARY_NAMENODE\",\n \"ZOOKEEPER_SERVER\", \"ZOOKEEPER_CLIENT\",\n \"HISTORYSERVER\", \"RESOURCEMANAGER\",\n \"OOZIE_SERVER\"],\n \"worker\": [\"DATANODE\", \"HDFS_CLIENT\", \"ZOOKEEPER_CLIENT\",\n \"PIG\", \"MAPREDUCE2_CLIENT\", \"YARN_CLIENT\",\n \"NODEMANAGER\", \"OOZIE_CLIENT\", \"TEZ_CLIENT\"]\n }\n },\n \"cdh\": {\n \"5\": {\n \"manager\": [\"CLOUDERA_MANAGER\"],\n \"master\": [\"HDFS_NAMENODE\", \"YARN_RESOURCEMANAGER\",\n \"OOZIE_SERVER\", \"YARN_JOBHISTORY\",\n \"HDFS_SECONDARYNAMENODE\", \"HIVE_METASTORE\",\n \"HIVE_SERVER2\"],\n \"worker\": [\"YARN_NODEMANAGER\", \"HDFS_DATANODE\"]\n },\n \"5.4.0\": {\n \"manager\": [\"CLOUDERA_MANAGER\"],\n \"master\": [\"HDFS_NAMENODE\", \"YARN_RESOURCEMANAGER\",\n \"OOZIE_SERVER\", \"YARN_JOBHISTORY\",\n \"HDFS_SECONDARYNAMENODE\", \"HIVE_METASTORE\",\n \"HIVE_SERVER2\"],\n \"worker\": [\"YARN_NODEMANAGER\", \"HDFS_DATANODE\"]\n },\n \"5.5.0\": {\n \"manager\": [\"CLOUDERA_MANAGER\"],\n \"master\": [\"HDFS_NAMENODE\", \"YARN_RESOURCEMANAGER\",\n \"OOZIE_SERVER\", \"YARN_JOBHISTORY\",\n \"HDFS_SECONDARYNAMENODE\", \"HIVE_METASTORE\",\n \"HIVE_SERVER2\"],\n \"worker\": [\"YARN_NODEMANAGER\", \"HDFS_DATANODE\"]\n }\n },\n \"spark\": {\n \"1.3.1\": {\n \"master\": [\"namenode\", \"master\"],\n \"worker\": [\"datanode\", \"slave\"]\n },\n \"1.6.0\": {\n \"master\": [\"namenode\", \"master\"],\n \"worker\": [\"datanode\", \"slave\"]\n }\n },\n \"ambari\": {\n \"2.3\": {\n \"master-edp\": [\"Hive Metastore\", \"HiveServer\", \"Oozie\"],\n \"master\": [\"Ambari\", \"MapReduce History Server\",\n \"Spark History Server\", \"NameNode\", \"ResourceManager\",\n \"SecondaryNameNode\", \"YARN 
Timeline Server\",\n \"ZooKeeper\"],\n \"worker\": [\"DataNode\", \"NodeManager\"]\n }\n },\n \"mapr\": {\n \"5.0.0.mrv2\": {\n \"master\": [\"Metrics\", \"Webserver\", \"Zookeeper\", \"HTTPFS\",\n \"Oozie\", \"FileServer\", \"CLDB\", \"Flume\", \"Hue\",\n \"NodeManager\", \"HistoryServer\", \"ResourseManager\",\n \"HiveServer2\", \"HiveMetastore\", \"Sqoop2-Client\",\n \"Sqoop2-Server\"],\n \"worker\": [\"NodeManager\", \"FileServer\"]\n },\n \"5.1.0.mrv2\": {\n \"master\": [\"Metrics\", \"Webserver\", \"Zookeeper\", \"HTTPFS\",\n \"Oozie\", \"FileServer\", \"CLDB\", \"Flume\", \"Hue\",\n \"NodeManager\", \"HistoryServer\", \"ResourseManager\",\n \"HiveServer2\", \"HiveMetastore\", \"Sqoop2-Client\",\n \"Sqoop2-Server\"],\n \"worker\": [\"NodeManager\", \"FileServer\"]\n }\n }\n}\n\nREPLICATION_CONFIGS = {\n \"vanilla\": {\n \"1.2.1\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n },\n \"2.3.0\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n },\n \"2.4.1\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n },\n \"2.6.0\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n },\n \"2.7.1\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n }\n },\n \"hdp\": {\n \"1.3.2\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n },\n \"2.0.6\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n },\n \"2.2\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n }\n },\n \"cdh\": {\n \"5\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs_replication\"\n },\n \"5.4.0\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs_replication\"\n },\n \"5.5.0\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs_replication\"\n }\n },\n \"spark\": {\n \"1.3.1\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs_replication\"\n },\n \"1.6.0\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs_replication\"\n }\n },\n \"ambari\": {\n \"2.3\": {\n 
\"target\": \"HDFS\",\n \"config_name\": \"dfs_replication\"\n }\n },\n \"mapr\": {\n \"5.0.0.mrv2\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n },\n \"5.1.0.mrv2\": {\n \"target\": \"HDFS\",\n \"config_name\": \"dfs.replication\"\n }\n }\n\n}\n\nANTI_AFFINITY_PROCESSES = {\n \"vanilla\": {\n \"1.2.1\": [\"datanode\"],\n \"2.3.0\": [\"datanode\"],\n \"2.4.1\": [\"datanode\"],\n \"2.6.0\": [\"datanode\"],\n \"2.7.1\": [\"datanode\"]\n },\n \"hdp\": {\n \"1.3.2\": [\"DATANODE\"],\n \"2.0.6\": [\"DATANODE\"],\n \"2.2\": [\"DATANODE\"]\n },\n \"cdh\": {\n \"5\": [\"HDFS_DATANODE\"],\n \"5.4.0\": [\"HDFS_DATANODE\"],\n \"5.5.0\": [\"HDFS_DATANODE\"]\n },\n \"spark\": {\n \"1.3.1\": [\"datanode\"],\n \"1.6.0\": [\"datanode\"]\n },\n \"ambari\": {\n \"2.3\": [\"DataNode\"],\n },\n \"mapr\": {\n \"5.0.0.mrv2\": [\"FileServer\"],\n \"5.1.0.mrv2\": [\"FileServer\"],\n }\n}\n" }, { "alpha_fraction": 0.5778141617774963, "alphanum_fraction": 0.5810905694961548, "avg_line_length": 38.564815521240234, "blob_id": "afd6b5928eface396d8e1a35a82734f812113c05", "content_id": "c0e9a1b8d9b702da31f546e9a64a7806cc6a799a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4273, "license_type": "permissive", "max_line_length": 77, "num_lines": 108, "path": "/tests/unit/task/contexts/magnum/test_cluster_templates.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.magnum import cluster_templates\nfrom rally_openstack.task.scenarios.magnum import utils as magnum_utils\nfrom tests.unit import fakes\nfrom tests.unit import test\n\n\nBASE_CTX = \"rally.task.context\"\nCTX = \"rally_openstack.task.contexts\"\nBASE_SCN = \"rally.task.scenarios\"\nSCN = \"rally_openstack.task.scenarios\"\n\n\nclass ClusterTemplatesGeneratorTestCase(test.ScenarioTestCase):\n\n \"\"\"Generate tenants.\"\"\"\n def _gen_tenants(self, count):\n tenants = {}\n for id_ in range(count):\n tenants[str(id_)] = dict(name=str(id_))\n return tenants\n\n @mock.patch(\"%s.magnum.utils.MagnumScenario.\"\n \"_create_cluster_template\" % SCN,\n return_value=fakes.FakeClusterTemplate(id=\"uuid\"))\n def test_setup(self, mock__create_cluster_template):\n tenants_count = 2\n users_per_tenant = 5\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for ten_id in tenants:\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": ten_id,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": tenants_count,\n \"users_per_tenant\": users_per_tenant,\n \"concurrent\": 10,\n },\n \"cluster_templates\": {\n \"dns_nameserver\": \"8.8.8.8\",\n \"external_network_id\": \"public\",\n \"flavor_id\": \"m1.small\",\n \"docker_volume_size\": 5,\n \"coe\": \"kubernetes\",\n \"image_id\": \"fedora-atomic-latest\",\n \"network_driver\": \"flannel\"\n }\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n ct_ctx = cluster_templates.ClusterTemplateGenerator(self.context)\n ct_ctx.setup()\n\n ct_ctx_config = self.context[\"config\"][\"cluster_templates\"]\n image_id = ct_ctx_config.get(\"image_id\")\n external_network_id = ct_ctx_config.get(\n \"external_network_id\")\n dns_nameserver = ct_ctx_config.get(\"dns_nameserver\")\n flavor_id 
= ct_ctx_config.get(\"flavor_id\")\n docker_volume_size = ct_ctx_config.get(\"docker_volume_size\")\n network_driver = ct_ctx_config.get(\"network_driver\")\n coe = ct_ctx_config.get(\"coe\")\n mock_calls = [mock.call(image_id=image_id,\n external_network_id=external_network_id,\n dns_nameserver=dns_nameserver,\n flavor_id=flavor_id,\n docker_volume_size=docker_volume_size,\n network_driver=network_driver, coe=coe)\n for i in range(tenants_count)]\n mock__create_cluster_template.assert_has_calls(mock_calls)\n\n # check that stack ids have been saved in context\n for ten_id in self.context[\"tenants\"].keys():\n self.assertIsNotNone(\n self.context[\"tenants\"][ten_id][\"cluster_template\"])\n\n @mock.patch(\"%s.magnum.cluster_templates.resource_manager.cleanup\" % CTX)\n def test_cleanup(self, mock_cleanup):\n self.context.update({\n \"users\": mock.MagicMock()\n })\n ct_ctx = cluster_templates.ClusterTemplateGenerator(self.context)\n ct_ctx.cleanup()\n mock_cleanup.assert_called_once_with(\n names=[\"magnum.cluster_templates\"],\n users=self.context[\"users\"],\n superclass=magnum_utils.MagnumScenario,\n task_id=self.context[\"owner_id\"])\n" }, { "alpha_fraction": 0.7063579559326172, "alphanum_fraction": 0.7096655368804932, "avg_line_length": 42.8870964050293, "blob_id": "b18d7594814e35e8f73c2b5598872a71aecf3fca", "content_id": "e333388d4f80b8129078ba712702a4773ee4309a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2721, "license_type": "permissive", "max_line_length": 78, "num_lines": 62, "path": "/rally_openstack/task/scenarios/cinder/volume_backups.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.cinder import utils as cinder_utils\n\n\n\"\"\"Scenarios for Cinder Volume Backup.\"\"\"\n\n\[email protected](\"number\", param_name=\"size\", minval=1, integer_only=True)\[email protected](\"restricted_parameters\", param_names=[\"name\", \"display_name\"],\n subdict=\"create_volume_kwargs\")\[email protected](\"restricted_parameters\", param_names=\"name\",\n subdict=\"create_backup_kwargs\")\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_cinder_services\", services=\"cinder-backup\")\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"cinder\"]},\n name=\"CinderVolumeBackups.create_incremental_volume_backup\",\n platform=\"openstack\")\nclass CreateIncrementalVolumeBackup(cinder_utils.CinderBasic):\n def run(self, size, do_delete=True, create_volume_kwargs=None,\n create_backup_kwargs=None):\n \"\"\"Create an incremental volume backup.\n\n The scenario first create a volume, the create a backup, the backup\n is full backup. Because Incremental backup must be based on the\n full backup. 
finally create a incremental backup.\n\n :param size: volume size in GB\n :param do_delete: deletes backup and volume after creating if True\n :param create_volume_kwargs: optional args to create a volume\n :param create_backup_kwargs: optional args to create a volume backup\n \"\"\"\n create_volume_kwargs = create_volume_kwargs or {}\n create_backup_kwargs = create_backup_kwargs or {}\n\n volume = self.cinder.create_volume(size, **create_volume_kwargs)\n backup1 = self.cinder.create_backup(volume.id, **create_backup_kwargs)\n\n backup2 = self.cinder.create_backup(volume.id, incremental=True)\n\n if do_delete:\n self.cinder.delete_backup(backup2)\n self.cinder.delete_backup(backup1)\n self.cinder.delete_volume(volume)\n" }, { "alpha_fraction": 0.5854219198226929, "alphanum_fraction": 0.5909133553504944, "avg_line_length": 32.60567855834961, "blob_id": "f7a22cac45e2c1555fe7ee28a1443d11db44e955", "content_id": "afa32108dfedbcb4b9cbd6b0d64aae2ed549ddb3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21306, "license_type": "permissive", "max_line_length": 79, "num_lines": 634, "path": "/rally_openstack/common/services/loadbalancer/octavia.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import service\nfrom rally.task import utils\n\nCONF = cfg.CONF\n\nLOG = logging.getLogger(__name__)\n\n\nclass Octavia(service.Service):\n\n @atomic.action_timer(\"octavia.load_balancer_list\")\n def load_balancer_list(self):\n \"\"\"List all load balancers\n\n :return:\n List of load balancers\n \"\"\"\n return self._clients.octavia().load_balancer_list()\n\n @atomic.action_timer(\"octavia.load_balancer_show\")\n def load_balancer_show(self, lb_id):\n \"\"\"Show a load balancer\n\n :param string lb:\n dict of the load balancer to show\n :return:\n A dict of the specified load balancer's settings\n \"\"\"\n try:\n new_lb = self._clients.octavia().load_balancer_show(lb_id)\n except Exception as e:\n if getattr(e, \"code\", 400) == 404:\n raise exceptions.GetResourceNotFound(resource=lb_id)\n raise exceptions.GetResourceFailure(resource=lb_id, err=e)\n return new_lb\n\n @atomic.action_timer(\"octavia.load_balancer_create\")\n def load_balancer_create(self, subnet_id, description=None,\n admin_state=None, project_id=None,\n listeners=None, flavor_id=None,\n provider=None, vip_qos_policy_id=None):\n \"\"\"Create a load balancer\n\n :return:\n A dict of the created load balancer's settings\n \"\"\"\n args = {\n \"name\": self.generate_random_name(),\n \"description\": description,\n \"listeners\": listeners,\n \"provider\": provider,\n \"admin_state_up\": admin_state or True,\n \"project_id\": project_id,\n \"vip_subnet_id\": subnet_id,\n \"vip_qos_policy_id\": vip_qos_policy_id,\n }\n lb = self._clients.octavia().load_balancer_create(\n json={\"loadbalancer\": args})\n return lb[\"loadbalancer\"]\n\n @atomic.action_timer(\"octavia.load_balancer_delete\")\n def load_balancer_delete(self, lb_id, cascade=False):\n \"\"\"Delete 
a load balancer\n\n :param string lb:\n The dict of the load balancer to delete\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().load_balancer_delete(\n lb_id, cascade=cascade)\n\n @atomic.action_timer(\"octavia.load_balancer_set\")\n def load_balancer_set(self, lb_id, lb_update_args):\n \"\"\"Update a load balancer's settings\n\n :param string lb_id:\n The dict of the load balancer to update\n :param lb_update_args:\n A dict of arguments to update a loadbalancer\n :return:\n Response Code from API\n \"\"\"\n return self._clients.octavia().load_balancer_set(\n lb_id, json={\"loadbalancer\": lb_update_args})\n\n @atomic.action_timer(\"octavia.load_balancer_stats_show\")\n def load_balancer_stats_show(self, lb_id, **kwargs):\n \"\"\"Shows the current statistics for a load balancer.\n\n :param string lb:\n dict of the load balancer\n :return:\n A dict of the specified load balancer's statistics\n \"\"\"\n return self._clients.octavia().load_balancer_stats_show(\n lb_id, **kwargs)\n\n @atomic.action_timer(\"octavia.load_balancer_failover\")\n def load_balancer_failover(self, lb_id):\n \"\"\"Trigger load balancer failover\n\n :param string lb:\n dict of the load balancer to failover\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().load_balancer_failover(lb_id)\n\n @atomic.action_timer(\"octavia.listener_list\")\n def listener_list(self, **kwargs):\n \"\"\"List all listeners\n\n :param kwargs:\n Parameters to filter on\n :return:\n List of listeners\n \"\"\"\n return self._clients.octavia().listener_list(**kwargs)\n\n @atomic.action_timer(\"octavia.listener_show\")\n def listener_show(self, listener_id):\n \"\"\"Show a listener\n\n :param string listener_id:\n ID of the listener to show\n :return:\n A dict of the specified listener's settings\n \"\"\"\n return self._clients.octavia().listener_show(listener_id)\n\n @atomic.action_timer(\"octavia.listener_create\")\n def listener_create(self, **kwargs):\n 
\"\"\"Create a listener\n\n :param kwargs:\n Parameters to create a listener with (expects json=)\n :return:\n A dict of the created listener's settings\n \"\"\"\n return self._clients.octavia().listener_create(**kwargs)\n\n @atomic.action_timer(\"octavia.listener_delete\")\n def listener_delete(self, listener_id):\n \"\"\"Delete a listener\n\n :param stirng listener_id:\n ID of listener to delete\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().listener_delete(listener_id)\n\n @atomic.action_timer(\"octavia.listener_set\")\n def listener_set(self, listener_id, **kwargs):\n \"\"\"Update a listener's settings\n\n :param string listener_id:\n ID of the listener to update\n :param kwargs:\n A dict of arguments to update a listener\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().listener_set(listener_id, **kwargs)\n\n @atomic.action_timer(\"octavia.listener_stats_show\")\n def listener_stats_show(self, listener_id, **kwargs):\n \"\"\"Shows the current statistics for a listener\n\n :param string listener_id:\n ID of the listener\n :return:\n A dict of the specified listener's statistics\n \"\"\"\n return self._clients.octavia().listener_stats_show(\n listener_id, **kwargs)\n\n @atomic.action_timer(\"octavia.pool_list\")\n def pool_list(self, **kwargs):\n \"\"\"List all pools\n\n :param kwargs:\n Parameters to filter on\n :return:\n List of pools\n \"\"\"\n return self._clients.octavia().pool_list(**kwargs)\n\n def update_pool_resource(self, pool):\n try:\n new_pool = self._clients.octavia().pool_show(pool[\"id\"])\n except Exception as e:\n if getattr(e, \"status_code\", 400) == 404:\n raise exceptions.GetResourceNotFound(resource=pool)\n raise exceptions.GetResourceFailure(resource=pool, err=e)\n return new_pool\n\n @atomic.action_timer(\"octavia.pool_create\")\n def pool_create(self, lb_id, protocol, lb_algorithm,\n listener_id=None, description=None,\n admin_state_up=True, project_id=None,\n 
session_persistence=None):\n \"\"\"Create a pool\n\n :param lb_id: ID of the loadbalancer\n :param protocol: protocol of the resource\n :param lb_algorithm: loadbalancing algorithm of the pool\n :param listener_id: ID of the listener\n :param description: a human readable description of the pool\n :param admin_state_up: administrative state of the resource\n :param project_id: project ID of the resource\n :param session_persistence: a json object specifiying the session\n persistence of the pool\n :return:\n A dict of the created pool's settings\n \"\"\"\n args = {\n \"name\": self.generate_random_name(),\n \"loadbalancer_id\": lb_id,\n \"protocol\": protocol,\n \"lb_algorithm\": lb_algorithm,\n \"listener_id\": listener_id,\n \"description\": description,\n \"admin_state_up\": admin_state_up,\n \"project_id\": project_id,\n \"session_persistence\": session_persistence\n }\n pool = self._clients.octavia().pool_create(\n json={\"pool\": args})\n pool = pool[\"pool\"]\n pool = utils.wait_for_status(\n pool,\n ready_statuses=[\"ACTIVE\"],\n status_attr=\"provisioning_status\",\n update_resource=self.update_pool_resource,\n timeout=CONF.openstack.octavia_create_loadbalancer_timeout,\n check_interval=(\n CONF.openstack.octavia_create_loadbalancer_poll_interval)\n )\n return pool\n\n @atomic.action_timer(\"octavia.pool_delete\")\n def pool_delete(self, pool_id):\n \"\"\"Delete a pool\n\n :param string pool_id:\n ID of pool to delete\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().pool_delete(pool_id)\n\n @atomic.action_timer(\"octavia.pool_show\")\n def pool_show(self, pool_id):\n \"\"\"Show a pool's settings\n\n :param string pool_id:\n ID of the pool to show\n :return:\n Dict of the specified pool's settings\n \"\"\"\n return self._clients.octavia().pool_show(pool_id)\n\n @atomic.action_timer(\"octavia.pool_set\")\n def pool_set(self, pool_id, pool_update_args):\n \"\"\"Update a pool's settings\n\n :param pool_id:\n ID of the pool to 
update\n :param pool_update_args:\n A dict of arguments to update a pool\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().pool_set(\n pool_id, json={\"pool\": pool_update_args})\n\n @atomic.action_timer(\"octavia.member_list\")\n def member_list(self, pool_id, **kwargs):\n \"\"\"Lists the member from a given pool id\n\n :param pool_id:\n ID of the pool\n :param kwargs:\n A dict of filter arguments\n :return:\n Response list members\n \"\"\"\n return self._clients.octavia().member_list(pool_id, **kwargs)\n\n @atomic.action_timer(\"octavia.member_show\")\n def member_show(self, pool_id, member_id):\n \"\"\"Showing a member details of a pool\n\n :param pool_id:\n ID of pool the member is added\n :param member_id:\n ID of the member\n :param kwargs:\n A dict of arguments\n :return:\n Response of member\n \"\"\"\n return self._clients.octavia().member_show(pool_id, member_id)\n\n @atomic.action_timer(\"octavia.member_create\")\n def member_create(self, pool_id, **kwargs):\n \"\"\"Creating a member for the given pool id\n\n :param pool_id:\n ID of pool to which member is added\n :param kwargs:\n A Dict of arguments\n :return:\n A member details on successful creation\n \"\"\"\n return self._clients.octavia().member_create(pool_id, **kwargs)\n\n @atomic.action_timer(\"octavia.member_delete\")\n def member_delete(self, pool_id, member_id):\n \"\"\"Removing a member from a pool and mark that member as deleted\n\n :param pool_id:\n ID of the pool\n :param member_id:\n ID of the member to be deleted\n :return:\n Response code from the API\n \"\"\"\n return self._clients.octavia().member_delete(pool_id, member_id)\n\n @atomic.action_timer(\"octavia.member_set\")\n def member_set(self, pool_id, member_id, **kwargs):\n \"\"\"Updating a member settings\n\n :param pool_id:\n ID of the pool\n :param member_id:\n ID of the member to be updated\n :param kwargs:\n A dict of the values of member to be updated\n :return:\n Response code from the API\n 
\"\"\"\n return self._clients.octavia().member_set(pool_id, member_id, **kwargs)\n\n @atomic.action_timer(\"octavia.l7policy_list\")\n def l7policy_list(self, **kwargs):\n \"\"\"List all l7policies\n\n :param kwargs:\n Parameters to filter on\n :return:\n List of l7policies\n \"\"\"\n return self._clients.octavia().l7policy_list(**kwargs)\n\n @atomic.action_timer(\"octavia.l7policy_create\")\n def l7policy_create(self, **kwargs):\n \"\"\"Create a l7policy\n\n :param kwargs:\n Parameters to create a l7policy with (expects json=)\n :return:\n A dict of the created l7policy's settings\n \"\"\"\n return self._clients.octavia().l7policy_create(**kwargs)\n\n @atomic.action_timer(\"octavia.l7policy_delete\")\n def l7policy_delete(self, l7policy_id):\n \"\"\"Delete a l7policy\n\n :param string l7policy_id:\n ID of l7policy to delete\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().l7policy_delete(l7policy_id)\n\n @atomic.action_timer(\"octavia.l7policy_show\")\n def l7policy_show(self, l7policy_id):\n \"\"\"Show a l7policy's settings\n\n :param string l7policy_id:\n ID of the l7policy to show\n :return:\n Dict of the specified l7policy's settings\n \"\"\"\n return self._clients.octavia().l7policy_show(l7policy_id)\n\n @atomic.action_timer(\"octavia.l7policy_set\")\n def l7policy_set(self, l7policy_id, **kwargs):\n \"\"\"Update a l7policy's settings\n\n :param l7policy_id:\n ID of the l7policy to update\n :param kwargs:\n A dict of arguments to update a l7policy\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().l7policy_set(l7policy_id, **kwargs)\n\n @atomic.action_timer(\"octavia.l7rule_list\")\n def l7rule_list(self, l7policy_id, **kwargs):\n \"\"\"List all l7rules for a l7policy\n\n :param kwargs:\n Parameters to filter on\n :return:\n List of l7policies\n \"\"\"\n return self._clients.octavia().l7rule_list(l7policy_id, **kwargs)\n\n @atomic.action_timer(\"octavia.l7rule_create\")\n def l7rule_create(self, 
l7policy_id, **kwargs):\n \"\"\"Create a l7rule\n\n :param string l7policy_id:\n The l7policy to create the l7rule for\n :param kwargs:\n Parameters to create a l7rule with (expects json=)\n :return:\n A dict of the created l7rule's settings\n \"\"\"\n return self._clients.octavia().l7rule_create(l7policy_id, **kwargs)\n\n @atomic.action_timer(\"octavia.l7rule_delete\")\n def l7rule_delete(self, l7rule_id, l7policy_id):\n \"\"\"Delete a l7rule\n\n :param string l7rule_id:\n ID of listener to delete\n :param string l7policy_id:\n ID of the l7policy for this l7rule\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().l7rule_delete(l7rule_id, l7policy_id)\n\n @atomic.action_timer(\"octavia.l7rule_show\")\n def l7rule_show(self, l7rule_id, l7policy_id):\n \"\"\"Show a l7rule's settings\n\n :param string l7rule_id:\n ID of the l7rule to show\n :param string l7policy_id:\n ID of the l7policy for this l7rule\n :return:\n Dict of the specified l7rule's settings\n \"\"\"\n return self._clients.octavia().l7rule_show(l7rule_id, l7policy_id)\n\n @atomic.action_timer(\"octavia.l7rule_set\")\n def l7rule_set(self, l7rule_id, l7policy_id, **kwargs):\n \"\"\"Update a l7rule's settings\n\n :param l7rule_id:\n ID of the l7rule to update\n :param string l7policy_id:\n ID of the l7policy for this l7rule\n :param kwargs:\n A dict of arguments to update a l7rule\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().l7rule_set(l7rule_id, l7policy_id,\n **kwargs)\n\n @atomic.action_timer(\"octavia.health_monitor_list\")\n def health_monitor_list(self, **kwargs):\n \"\"\"List all health monitors\n\n :param kwargs:\n Parameters to filter on\n :return:\n A dict containing a list of health monitors\n \"\"\"\n return self._clients.octavia().health_monitor_list(**kwargs)\n\n @atomic.action_timer(\"octavia.health_monitor_create\")\n def health_monitor_create(self, **kwargs):\n \"\"\"Create a health monitor\n\n :param kwargs:\n Parameters 
to create a health monitor with (expects json=)\n :return:\n A dict of the created health monitor's settings\n \"\"\"\n return self._clients.octavia().health_monitor_create(**kwargs)\n\n @atomic.action_timer(\"octavia.health_monitor_delete\")\n def health_monitor_delete(self, health_monitor_id):\n \"\"\"Delete a health_monitor\n\n :param string health_monitor_id:\n ID of health monitor to delete\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().health_monitor_delete(health_monitor_id)\n\n @atomic.action_timer(\"octavia.health_monitor_show\")\n def health_monitor_show(self, health_monitor_id):\n \"\"\"Show a health monitor's settings\n\n :param string health_monitor_id:\n ID of the health monitor to show\n :return:\n Dict of the specified health monitor's settings\n \"\"\"\n return self._clients.octavia().health_monitor_show(health_monitor_id)\n\n @atomic.action_timer(\"octavia.health_monitor_set\")\n def health_monitor_set(self, health_monitor_id, **kwargs):\n \"\"\"Update a health monitor's settings\n\n :param health_monitor_id:\n ID of the health monitor to update\n :param kwargs:\n A dict of arguments to update a l7policy\n :return:\n Response Code from the API\n \"\"\"\n return self._clients.octavia().health_monitor_set(health_monitor_id,\n **kwargs)\n\n @atomic.action_timer(\"octavia.quota_list\")\n def quota_list(self, params):\n \"\"\"List all quotas\n\n :param params:\n Parameters to filter on (not implemented)\n :return:\n A ``dict`` representing a list of quotas for the project\n \"\"\"\n return self._clients.octavia().quota_list(params)\n\n @atomic.action_timer(\"octavia.quota_show\")\n def quota_show(self, project_id):\n \"\"\"Show a quota\n\n :param string project_id:\n ID of the project to show\n :return:\n A ``dict`` representing the quota for the project\n \"\"\"\n return self._clients.octavia().quota_show(project_id)\n\n @atomic.action_timer(\"octavia.quota_reset\")\n def quota_reset(self, project_id):\n \"\"\"Reset 
a quota\n\n :param string project_id:\n The ID of the project to reset quotas\n :return:\n ``None``\n \"\"\"\n return self._clients.octavia().quota_reset(project_id)\n\n @atomic.action_timer(\"octavia.quota_set\")\n def quota_set(self, project_id, params):\n \"\"\"Update a quota's settings\n\n :param string project_id:\n The ID of the project to update\n :param params:\n A ``dict`` of arguments to update project quota\n :return:\n A ``dict`` representing the updated quota\n \"\"\"\n return self._clients.octavia().quota_set(project_id, params)\n\n @atomic.action_timer(\"octavia.quota_defaults_show\")\n def quota_defaults_show(self):\n \"\"\"Show quota defaults\n\n :return:\n A ``dict`` representing a list of quota defaults\n \"\"\"\n return self._clients.octavia().quota_defaults_show()\n\n @atomic.action_timer(\"octavia.amphora_show\")\n def amphora_show(self, amphora_id):\n \"\"\"Show an amphora\n\n :param string amphora_id:\n ID of the amphora to show\n :return:\n A ``dict`` of the specified amphora's attributes\n \"\"\"\n return self._clients.octavia().amphora_show(amphora_id)\n\n @atomic.action_timer(\"octavia.amphora_list\")\n def amphora_list(self, **kwargs):\n \"\"\"List all amphorae\n\n :param kwargs:\n Parameters to filter on\n :return:\n A ``dict`` containing a list of amphorae\n \"\"\"\n return self._clients.octavia().amphora_list(**kwargs)\n\n @atomic.action_timer(\"octavia.wait_for_loadbalancers\")\n def wait_for_loadbalancer_prov_status(self, lb, prov_status=\"ACTIVE\"):\n return utils.wait_for_status(\n lb,\n ready_statuses=[prov_status],\n status_attr=\"provisioning_status\",\n update_resource=lambda lb: self.load_balancer_show(lb[\"id\"]),\n timeout=CONF.openstack.octavia_create_loadbalancer_timeout,\n check_interval=(\n CONF.openstack.octavia_create_loadbalancer_poll_interval)\n )\n" }, { "alpha_fraction": 0.6121921539306641, "alphanum_fraction": 0.6232410073280334, "avg_line_length": 45.055694580078125, "blob_id": 
"9ffbd0cc9cd03f170a78fb18e8ddd82fcdd1e775", "content_id": "ecf6be36acd6919613bf896378e87f091bb9314a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18192, "license_type": "permissive", "max_line_length": 79, "num_lines": 395, "path": "/tests/unit/task/scenarios/magnum/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport os\n\nfrom kubernetes import client as kubernetes_client\nfrom kubernetes.client.rest import ApiException\nfrom rally import exceptions\nfrom rally_openstack.task.scenarios.magnum import utils\nfrom tests.unit import test\n\nMAGNUM_UTILS = \"rally_openstack.task.scenarios.magnum.utils\"\n\nCONF = utils.CONF\n\n\nclass MagnumScenarioTestCase(test.ScenarioTestCase):\n def setUp(self):\n super(MagnumScenarioTestCase, self).setUp()\n self.cluster_template = mock.Mock()\n self.cluster = mock.Mock()\n self.pod = mock.Mock()\n self.scenario = utils.MagnumScenario(self.context)\n\n def test_list_cluster_templates(self):\n fake_list = [self.cluster_template]\n\n self.clients(\"magnum\").cluster_templates.list.return_value = fake_list\n return_ct_list = self.scenario._list_cluster_templates()\n self.assertEqual(fake_list, return_ct_list)\n\n self.clients(\"magnum\").cluster_templates.list.assert_called_once_with()\n 
self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"magnum.list_cluster_templates\")\n\n def test_create_cluster_template(self):\n self.scenario.generate_random_name = mock.Mock(\n return_value=\"generated_name\")\n fake_ct = self.cluster_template\n self.clients(\"magnum\").cluster_templates.create.return_value = fake_ct\n\n return_cluster_template = self.scenario._create_cluster_template(\n image=\"test_image\",\n keypair=\"test_key\",\n external_network=\"public\",\n dns_nameserver=\"8.8.8.8\",\n flavor=\"m1.large\",\n docker_volume_size=50,\n network_driver=\"docker\",\n coe=\"swarm\")\n\n self.assertEqual(fake_ct, return_cluster_template)\n _, kwargs = self.clients(\"magnum\").cluster_templates.create.call_args\n self.assertEqual(\"generated_name\", kwargs[\"name\"])\n\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"magnum.create_cluster_template\")\n\n def test_get_cluster_template(self):\n client = self.clients(\"magnum\")\n client.cluster_templates.get.return_value = self.cluster_template\n return_cluster_template = self.scenario._get_cluster_template(\"uuid\")\n client.cluster_templates.get.assert_called_once_with(\"uuid\")\n self.assertEqual(self.cluster_template, return_cluster_template)\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"magnum.get_cluster_template\")\n\n def test_list_clusters(self):\n return_clusters_list = self.scenario._list_clusters(limit=\"foo1\")\n client = self.clients(\"magnum\")\n client.clusters.list.assert_called_once_with(limit=\"foo1\")\n self.assertEqual(client.clusters.list.return_value,\n return_clusters_list)\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"magnum.list_clusters\")\n\n def test_create_cluster(self):\n self.scenario.generate_random_name = mock.Mock(\n return_value=\"generated_name\")\n self.clients(\"magnum\").clusters.create.return_value = self.cluster\n return_cluster = self.scenario._create_cluster(\n 
cluster_template=\"generated_uuid\", node_count=2)\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.cluster,\n ready_statuses=[\"CREATE_COMPLETE\"],\n failure_statuses=[\"CREATE_FAILED\", \"ERROR\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.\n magnum_cluster_create_poll_interval,\n timeout=CONF.openstack.magnum_cluster_create_timeout,\n id_attr=\"uuid\")\n _, kwargs = self.clients(\"magnum\").clusters.create.call_args\n self.assertEqual(\"generated_name\", kwargs[\"name\"])\n self.assertEqual(\"generated_uuid\", kwargs[\"cluster_template_id\"])\n self.mock_get_from_manager.mock.assert_called_once_with()\n self.assertEqual(\n self.mock_wait_for_status.mock.return_value, return_cluster)\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"magnum.create_cluster\")\n\n def test_get_cluster(self):\n self.clients(\"magnum\").clusters.get.return_value = self.cluster\n return_cluster = self.scenario._get_cluster(\"uuid\")\n self.clients(\"magnum\").clusters.get.assert_called_once_with(\"uuid\")\n self.assertEqual(self.cluster, return_cluster)\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"magnum.get_cluster\")\n\n def test_get_ca_certificate(self):\n self.scenario._get_ca_certificate(self.cluster.uuid)\n self.clients(\"magnum\").certificates.get.assert_called_once_with(\n self.cluster.uuid)\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"magnum.get_ca_certificate\")\n\n def test_create_ca_certificate(self):\n csr_req = {\"cluster_uuid\": \"uuid\", \"csr\": \"csr file\"}\n self.scenario._create_ca_certificate(csr_req)\n self.clients(\"magnum\").certificates.create.assert_called_once_with(\n **csr_req)\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"magnum.create_ca_certificate\")\n\n @mock.patch(\"kubernetes.client.api_client.ApiClient\")\n @mock.patch(\"kubernetes.client.api.core_v1_api.CoreV1Api\")\n def 
test_get_k8s_api_client_using_tls(self, mock_core_v1_api,\n mock_api_client):\n\n if hasattr(kubernetes_client, \"ConfigurationObject\"):\n # it is k8s-client < 4.0.0\n m = mock.patch(\"kubernetes.client.ConfigurationObject\")\n else:\n m = mock.patch(\"kubernetes.client.Configuration\")\n\n mock_configuration_object = m.start()\n self.addCleanup(m.stop)\n\n self.context.update({\n \"ca_certs_directory\": \"/home/stack\",\n \"tenant\": {\n \"id\": \"rally_tenant_id\",\n \"cluster\": \"rally_cluster_uuid\"\n }\n })\n self.scenario = utils.MagnumScenario(self.context)\n cluster_uuid = self.context[\"tenant\"][\"cluster\"]\n client = self.clients(\"magnum\")\n client.clusters.get.return_value = self.cluster\n cluster = self.scenario._get_cluster(cluster_uuid)\n self.cluster_template.tls_disabled = False\n client.cluster_templates.get.return_value = self.cluster_template\n dir = self.context[\"ca_certs_directory\"]\n key_file = os.path.join(dir, cluster_uuid.__add__(\".key\"))\n cert_file = os.path.join(dir, cluster_uuid.__add__(\".crt\"))\n ca_certs = os.path.join(dir, cluster_uuid.__add__(\"_ca.crt\"))\n config = mock_configuration_object.return_value\n config.host = cluster.api_address\n config.ssl_ca_cert = ca_certs\n config.cert_file = cert_file\n config.key_file = key_file\n _api_client = mock_api_client.return_value\n self.scenario._get_k8s_api_client()\n mock_configuration_object.assert_called_once_with()\n if hasattr(kubernetes_client, \"ConfigurationObject\"):\n # k8s-python < 4.0.0\n mock_api_client.assert_called_once_with(config=config)\n else:\n mock_api_client.assert_called_once_with(config)\n\n mock_core_v1_api.assert_called_once_with(_api_client)\n\n @mock.patch(\"kubernetes.client.api_client.ApiClient\")\n @mock.patch(\"kubernetes.client.api.core_v1_api.CoreV1Api\")\n def test_get_k8s_api_client(self, mock_core_v1_api, mock_api_client):\n\n if hasattr(kubernetes_client, \"ConfigurationObject\"):\n # it is k8s-client < 4.0.0\n m = 
mock.patch(\"kubernetes.client.ConfigurationObject\")\n else:\n m = mock.patch(\"kubernetes.client.Configuration\")\n\n mock_configuration_object = m.start()\n self.addCleanup(m.stop)\n\n self.context.update({\n \"tenant\": {\n \"id\": \"rally_tenant_id\",\n \"cluster\": \"rally_cluster_uuid\"\n }\n })\n self.scenario = utils.MagnumScenario(self.context)\n cluster_uuid = self.context[\"tenant\"][\"cluster\"]\n client = self.clients(\"magnum\")\n client.clusters.get.return_value = self.cluster\n cluster = self.scenario._get_cluster(cluster_uuid)\n self.cluster_template.tls_disabled = True\n client.cluster_templates.get.return_value = self.cluster_template\n config = mock_configuration_object.return_value\n config.host = cluster.api_address\n config.ssl_ca_cert = None\n config.cert_file = None\n config.key_file = None\n _api_client = mock_api_client.return_value\n self.scenario._get_k8s_api_client()\n mock_configuration_object.assert_called_once_with()\n if hasattr(kubernetes_client, \"ConfigurationObject\"):\n # k8s-python < 4.0.0\n mock_api_client.assert_called_once_with(config=config)\n else:\n mock_api_client.assert_called_once_with(config)\n mock_core_v1_api.assert_called_once_with(_api_client)\n\n @mock.patch(MAGNUM_UTILS + \".MagnumScenario._get_k8s_api_client\")\n def test_list_v1pods(self, mock__get_k8s_api_client):\n k8s_api = mock__get_k8s_api_client.return_value\n self.scenario._list_v1pods()\n k8s_api.list_node.assert_called_once_with(\n namespace=\"default\")\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"magnum.k8s_list_v1pods\")\n\n @mock.patch(\"random.choice\")\n @mock.patch(MAGNUM_UTILS + \".MagnumScenario._get_k8s_api_client\")\n def test_create_v1pod(self, mock__get_k8s_api_client,\n mock_random_choice):\n k8s_api = mock__get_k8s_api_client.return_value\n manifest = (\n {\"apiVersion\": \"v1\", \"kind\": \"Pod\",\n \"metadata\": {\"name\": \"nginx\"}})\n podname = manifest[\"metadata\"][\"name\"] + \"-\"\n for i in 
range(5):\n podname = podname + mock_random_choice.return_value\n k8s_api.create_namespaced_pod = mock.MagicMock(\n side_effect=[ApiException(status=403), self.pod])\n not_ready_pod = kubernetes_client.models.V1Pod()\n not_ready_status = kubernetes_client.models.V1PodStatus()\n not_ready_status.phase = \"not_ready\"\n not_ready_pod.status = not_ready_status\n almost_ready_pod = kubernetes_client.models.V1Pod()\n almost_ready_status = kubernetes_client.models.V1PodStatus()\n almost_ready_status.phase = \"almost_ready\"\n almost_ready_pod.status = almost_ready_status\n ready_pod = kubernetes_client.models.V1Pod()\n ready_condition = kubernetes_client.models.V1PodCondition(\n status=\"True\", type=\"Ready\")\n ready_status = kubernetes_client.models.V1PodStatus()\n ready_status.phase = \"Running\"\n ready_status.conditions = [ready_condition]\n ready_pod_metadata = kubernetes_client.models.V1ObjectMeta()\n ready_pod_metadata.uid = \"123456789\"\n ready_pod_spec = kubernetes_client.models.V1PodSpec(\n node_name=\"host_abc\",\n containers=[]\n )\n ready_pod.status = ready_status\n ready_pod.metadata = ready_pod_metadata\n ready_pod.spec = ready_pod_spec\n k8s_api.read_namespaced_pod = mock.MagicMock(\n side_effect=[not_ready_pod, almost_ready_pod, ready_pod])\n self.scenario._create_v1pod(manifest)\n k8s_api.create_namespaced_pod.assert_called_with(\n body=manifest, namespace=\"default\")\n k8s_api.read_namespaced_pod.assert_called_with(\n name=podname, namespace=\"default\")\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"magnum.k8s_create_v1pod\")\n\n @mock.patch(\"time.time\")\n @mock.patch(\"random.choice\")\n @mock.patch(MAGNUM_UTILS + \".MagnumScenario._get_k8s_api_client\")\n def test_create_v1pod_timeout(self, mock__get_k8s_api_client,\n mock_random_choice, mock_time):\n k8s_api = mock__get_k8s_api_client.return_value\n manifest = (\n {\"apiVersion\": \"v1\", \"kind\": \"Pod\",\n \"metadata\": {\"name\": \"nginx\"}})\n 
k8s_api.create_namespaced_pod.return_value = self.pod\n mock_time.side_effect = [1, 2, 3, 4, 5, 1800, 1801]\n not_ready_pod = kubernetes_client.models.V1Pod()\n not_ready_status = kubernetes_client.models.V1PodStatus()\n not_ready_status.phase = \"not_ready\"\n not_ready_pod_metadata = kubernetes_client.models.V1ObjectMeta()\n not_ready_pod_metadata.uid = \"123456789\"\n not_ready_pod.status = not_ready_status\n not_ready_pod.metadata = not_ready_pod_metadata\n k8s_api.read_namespaced_pod = mock.MagicMock(\n side_effect=[not_ready_pod\n for i in range(4)])\n\n self.assertRaises(\n exceptions.TimeoutException,\n self.scenario._create_v1pod, manifest)\n\n @mock.patch(MAGNUM_UTILS + \".MagnumScenario._get_k8s_api_client\")\n def test_list_v1rcs(self, mock__get_k8s_api_client):\n k8s_api = mock__get_k8s_api_client.return_value\n self.scenario._list_v1rcs()\n (k8s_api.list_namespaced_replication_controller\n .assert_called_once_with(namespace=\"default\"))\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"magnum.k8s_list_v1rcs\")\n\n @mock.patch(\"random.choice\")\n @mock.patch(MAGNUM_UTILS + \".MagnumScenario._get_k8s_api_client\")\n def test_create_v1rc(self, mock__get_k8s_api_client,\n mock_random_choice):\n k8s_api = mock__get_k8s_api_client.return_value\n manifest = (\n {\"apiVersion\": \"v1\",\n \"kind\": \"ReplicationController\",\n \"metadata\": {\"name\": \"nginx-controller\"},\n \"spec\": {\"replicas\": 2,\n \"selector\": {\"name\": \"nginx\"},\n \"template\": {\"metadata\":\n {\"labels\":\n {\"name\": \"nginx\"}}}}})\n suffix = \"-\"\n for i in range(5):\n suffix = suffix + mock_random_choice.return_value\n rcname = manifest[\"metadata\"][\"name\"] + suffix\n rc = kubernetes_client.models.V1ReplicationController()\n rc.spec = kubernetes_client.models.V1ReplicationControllerSpec()\n rc.spec.replicas = manifest[\"spec\"][\"replicas\"]\n k8s_api.create_namespaced_replication_controller.return_value = rc\n not_ready_rc = 
kubernetes_client.models.V1ReplicationController()\n not_ready_rc_status = (\n kubernetes_client.models.V1ReplicationControllerStatus(replicas=0))\n not_ready_rc.status = not_ready_rc_status\n ready_rc = kubernetes_client.models.V1ReplicationController()\n ready_rc_status = (\n kubernetes_client.models.V1ReplicationControllerStatus(\n replicas=manifest[\"spec\"][\"replicas\"])\n )\n ready_rc_metadata = kubernetes_client.models.V1ObjectMeta()\n ready_rc_metadata.uid = \"123456789\"\n ready_rc_metadata.name = rcname\n ready_rc.status = ready_rc_status\n ready_rc.metadata = ready_rc_metadata\n k8s_api.read_namespaced_replication_controller = mock.MagicMock(\n side_effect=[not_ready_rc, ready_rc])\n self.scenario._create_v1rc(manifest)\n (k8s_api.create_namespaced_replication_controller\n .assert_called_once_with(body=manifest, namespace=\"default\"))\n (k8s_api.read_namespaced_replication_controller\n .assert_called_with(name=rcname, namespace=\"default\"))\n self._test_atomic_action_timer(\n self.scenario.atomic_actions(), \"magnum.k8s_create_v1rc\")\n\n @mock.patch(\"time.time\")\n @mock.patch(\"random.choice\")\n @mock.patch(MAGNUM_UTILS + \".MagnumScenario._get_k8s_api_client\")\n def test_create_v1rc_timeout(self, mock__get_k8s_api_client,\n mock_random_choice, mock_time):\n k8s_api = mock__get_k8s_api_client.return_value\n manifest = (\n {\"apiVersion\": \"v1\",\n \"kind\": \"ReplicationController\",\n \"metadata\": {\"name\": \"nginx-controller\"},\n \"spec\": {\"replicas\": 2,\n \"selector\": {\"app\": \"nginx\"},\n \"template\": {\"metadata\":\n {\"labels\":\n {\"name\": \"nginx\"}}}}})\n rc = kubernetes_client.models.V1ReplicationController()\n rc.spec = kubernetes_client.models.V1ReplicationControllerSpec()\n rc.spec.replicas = manifest[\"spec\"][\"replicas\"]\n mock_time.side_effect = [1, 2, 3, 4, 5, 1800, 1801]\n k8s_api.create_namespaced_replication_controller.return_value = rc\n not_ready_rc = kubernetes_client.models.V1ReplicationController()\n 
not_ready_rc_status = (\n kubernetes_client.models.V1ReplicationControllerStatus(replicas=0))\n not_ready_rc_metadata = kubernetes_client.models.V1ObjectMeta()\n not_ready_rc_metadata.uid = \"123456789\"\n not_ready_rc.status = not_ready_rc_status\n not_ready_rc.metadata = not_ready_rc_metadata\n k8s_api.read_namespaced_replication_controller = mock.MagicMock(\n side_effect=[not_ready_rc\n for i in range(4)])\n\n self.assertRaises(\n exceptions.TimeoutException,\n self.scenario._create_v1rc, manifest)\n" }, { "alpha_fraction": 0.5529100298881531, "alphanum_fraction": 0.5568783283233643, "avg_line_length": 39.64516067504883, "blob_id": "1d878b8e4f28144cf5898343dba603c13572001d", "content_id": "68fc0ad01a6082527aa5a65c9621e829a96db6c1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3780, "license_type": "permissive", "max_line_length": 77, "num_lines": 93, "path": "/rally_openstack/common/services/grafana/grafana.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport requests\n\nfrom rally.common import logging\nfrom rally.common import utils as commonutils\nfrom rally.task import atomic\nfrom rally.task import service\n\nLOG = logging.getLogger(__name__)\n\n\nclass GrafanaService(service.Service):\n\n def __init__(self, spec, name_generator=None, atomic_inst=None):\n \"\"\"Initialization of Grafana service.\n\n :param spec: param contains monitoring system info: IPs, ports, creds\n \"\"\"\n super(GrafanaService, self).__init__(None,\n name_generator=name_generator,\n atomic_inst=atomic_inst)\n self._spec = spec\n\n @atomic.action_timer(\"grafana.check_metric\")\n def check_metric(self, seed, sleep_time, retries_total):\n \"\"\"Check metric with seed name in Grafana datasource.\n\n :param seed: random metric name\n :param sleep_time: sleep time between checking metrics in seconds\n :param retries_total: total number of retries to check metric in\n Grafana\n :return: True if metric in Grafana datasource and False otherwise\n \"\"\"\n check_url = (\"http://%(vip)s:%(port)s/api/datasources/proxy/:\"\n \"%(datasource)s/api/v1/query?query=%(seed)s\" % {\n \"vip\": self._spec[\"monitor_vip\"],\n \"port\": self._spec[\"grafana\"][\"port\"],\n \"datasource\": self._spec[\"datasource_id\"],\n \"seed\": seed\n })\n i = 0\n LOG.info(\"Check metric %s in Grafana\" % seed)\n while i < retries_total:\n LOG.debug(\"Attempt number %s\" % (i + 1))\n resp = requests.get(check_url,\n auth=(self._spec[\"grafana\"][\"user\"],\n self._spec[\"grafana\"][\"password\"]))\n result = resp.json()\n LOG.debug(\"Grafana response code: %s\" % resp.status_code)\n no_result = (result.get(\"data\") is None\n or len(result[\"data\"][\"result\"]) < 1)\n if no_result and i + 1 >= retries_total:\n LOG.debug(\"No instance metrics found in Grafana\")\n return False\n elif no_result:\n i += 1\n commonutils.interruptable_sleep(sleep_time)\n else:\n 
LOG.debug(\"Metric instance found in Grafana\")\n return True\n\n @atomic.action_timer(\"grafana.push_metric\")\n def push_metric(self, seed):\n \"\"\"Push metric by GET request using pushgateway.\n\n :param seed: random name for metric to push\n \"\"\"\n push_url = \"http://%(ip)s:%(port)s/metrics/job/%(job)s\" % {\n \"ip\": self._spec[\"monitor_vip\"],\n \"port\": self._spec[\"pushgateway_port\"],\n \"job\": self._spec[\"job_name\"]\n }\n resp = requests.post(push_url,\n headers={\"Content-type\": \"text/xml\"},\n data=\"%s 12345\\n\" % seed)\n if resp.ok:\n LOG.info(\"Metric %s pushed\" % seed)\n else:\n LOG.error(\"Error during push metric %s\" % seed)\n return resp.ok\n" }, { "alpha_fraction": 0.6279977560043335, "alphanum_fraction": 0.6335750222206116, "avg_line_length": 31.600000381469727, "blob_id": "41a4f0dfb3599255fb7fd6ba8afa2945e521bd8f", "content_id": "aab872e1159d9268078caa71b088dfcdd8fba490", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1793, "license_type": "permissive", "max_line_length": 75, "num_lines": 55, "path": "/tests/unit/task/contexts/network/test_networking_agents.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2019 Ericsson Software Technology\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.network import networking_agents\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.network\"\n\n\nclass NetworkingAgentsTestCase(test.TestCase):\n\n def setUp(self):\n super(NetworkingAgentsTestCase, self).setUp()\n\n self.config = {}\n self.context = test.get_test_context()\n self.context.update({\n \"users\": [\n {\"id\": 1,\n \"tenant_id\": \"tenant1\",\n \"credential\": mock.Mock()},\n ],\n \"admin\": {\n \"credential\": mock.Mock(),\n },\n \"config\": {\n \"networking_agents\": self.config,\n },\n })\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_setup(self, mock_clients):\n context = networking_agents.NetworkingAgents(self.context)\n context.setup()\n mock_clients.assert_has_calls([\n mock.call().neutron().list_agents(),\n ])\n\n def test_cleanup(self):\n # NOTE(stpierre): Test that cleanup is not abstract\n networking_agents.NetworkingAgents(\n {\"task\": mock.MagicMock()}).cleanup()\n" }, { "alpha_fraction": 0.5698550939559937, "alphanum_fraction": 0.5744927525520325, "avg_line_length": 34.20408248901367, "blob_id": "7e28e4a4057502f9a21008947366ed587ab58fc1", "content_id": "9e002dc6c8fabd49307a7534f71815b408d67292", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3450, "license_type": "permissive", "max_line_length": 78, "num_lines": 98, "path": "/tests/unit/task/contexts/heat/test_stacks.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.heat import stacks\nfrom rally_openstack.task.scenarios.heat import utils as heat_utils\nfrom tests.unit import fakes\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts\"\nSCN = \"rally_openstack.task.scenarios\"\n\n\nclass TestStackGenerator(test.ScenarioTestCase):\n\n def _gen_tenants(self, count):\n tenants = {}\n for id_ in range(count):\n tenants[str(id_)] = dict(name=str(id_))\n return tenants\n\n def test_init(self):\n self.context.update({\n \"config\": {\n \"stacks\": {\n \"stacks_per_tenant\": 1,\n \"resources_per_stack\": 1\n }\n }\n })\n\n inst = stacks.StackGenerator(self.context)\n self.assertEqual(inst.config, self.context[\"config\"][\"stacks\"])\n\n @mock.patch(\"%s.heat.utils.HeatScenario._create_stack\" % SCN,\n return_value=fakes.FakeStack(id=\"uuid\"))\n def test_setup(self, mock_heat_scenario__create_stack):\n tenants_count = 2\n users_per_tenant = 5\n stacks_per_tenant = 1\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for ten_id in tenants:\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": ten_id,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": tenants_count,\n \"users_per_tenant\": users_per_tenant,\n \"concurrent\": 10,\n },\n \"stacks\": {\n \"stacks_per_tenant\": stacks_per_tenant,\n \"resources_per_stack\": 1\n }\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n stack_ctx = stacks.StackGenerator(self.context)\n stack_ctx.setup()\n 
self.assertEqual(tenants_count * stacks_per_tenant,\n mock_heat_scenario__create_stack.call_count)\n # check that stack ids have been saved in context\n for ten_id in self.context[\"tenants\"].keys():\n self.assertEqual(stacks_per_tenant,\n len(self.context[\"tenants\"][ten_id][\"stacks\"]))\n\n @mock.patch(\"%s.heat.stacks.resource_manager.cleanup\" % CTX)\n def test_cleanup(self, mock_cleanup):\n self.context.update({\n \"users\": mock.MagicMock()\n })\n stack_ctx = stacks.StackGenerator(self.context)\n stack_ctx.cleanup()\n mock_cleanup.assert_called_once_with(\n names=[\"heat.stacks\"],\n users=self.context[\"users\"],\n superclass=heat_utils.HeatScenario,\n task_id=self.context[\"owner_id\"])\n" }, { "alpha_fraction": 0.5566391348838806, "alphanum_fraction": 0.5643286108970642, "avg_line_length": 39.39393997192383, "blob_id": "93453c1a4b481559d4c63e2d4929b23e69b908eb", "content_id": "67d89a1e0cd7560100ebe9e27f91199ad5f8112f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15996, "license_type": "permissive", "max_line_length": 79, "num_lines": 396, "path": "/tests/unit/task/cleanup/test_manager.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally.common import utils\n\nfrom rally_openstack.task.cleanup import base\nfrom rally_openstack.task.cleanup import manager\nfrom tests.unit import test\n\n\nBASE = \"rally_openstack.task.cleanup.manager\"\n\n\nclass SeekAndDestroyTestCase(test.TestCase):\n\n def setUp(self):\n super(SeekAndDestroyTestCase, self).setUp()\n # clear out the client cache\n manager.SeekAndDestroy.cache = {}\n\n def test__get_cached_client(self):\n destroyer = manager.SeekAndDestroy(None, None, None)\n cred = mock.Mock()\n user = {\"credential\": cred}\n\n clients = destroyer._get_cached_client(user)\n self.assertIs(cred.clients.return_value, clients)\n cred.clients.assert_called_once_with()\n\n self.assertIsNone(destroyer._get_cached_client(None))\n\n @mock.patch(\"%s.LOG\" % BASE)\n def test__delete_single_resource(self, mock_log):\n mock_resource = mock.MagicMock(_max_attempts=3, _timeout=10,\n _interval=0.01)\n mock_resource.delete.side_effect = [Exception, Exception, True]\n mock_resource.is_deleted.side_effect = [False, False, True]\n\n manager.SeekAndDestroy(None, None, None)._delete_single_resource(\n mock_resource)\n\n mock_resource.delete.assert_has_calls([mock.call()] * 3)\n self.assertEqual(3, mock_resource.delete.call_count)\n mock_resource.is_deleted.assert_has_calls([mock.call()] * 3)\n self.assertEqual(3, mock_resource.is_deleted.call_count)\n\n # NOTE(boris-42): No logs and no exceptions means no bugs!\n self.assertEqual(0, mock_log.call_count)\n\n @mock.patch(\"%s.LOG\" % BASE)\n def test__delete_single_resource_timeout(self, mock_log):\n\n mock_resource = mock.MagicMock(_max_attempts=1, _timeout=0.02,\n _interval=0.025)\n\n mock_resource.delete.return_value = True\n mock_resource.is_deleted.side_effect = [False, False, True]\n\n manager.SeekAndDestroy(None, None, None)._delete_single_resource(\n mock_resource)\n\n 
mock_resource.delete.assert_called_once_with()\n mock_resource.is_deleted.assert_called_once_with()\n\n self.assertEqual(1, mock_log.warning.call_count)\n\n @mock.patch(\"%s.LOG\" % BASE)\n def test__delete_single_resource_exception_in_is_deleted(self, mock_log):\n mock_resource = mock.MagicMock(_max_attempts=3, _timeout=10,\n _interval=0)\n mock_resource.delete.return_value = True\n mock_resource.is_deleted.side_effect = [Exception] * 4\n manager.SeekAndDestroy(None, None, None)._delete_single_resource(\n mock_resource)\n\n mock_resource.delete.assert_called_once_with()\n self.assertEqual(4, mock_resource.is_deleted.call_count)\n\n self.assertEqual(1, mock_log.warning.call_count)\n self.assertEqual(4, mock_log.exception.call_count)\n\n def _manager(self, list_side_effect, **kw):\n mock_mgr = mock.MagicMock()\n mock_mgr().list.side_effect = list_side_effect\n mock_mgr.reset_mock()\n\n for k, v in kw.items():\n setattr(mock_mgr, k, v)\n\n return mock_mgr\n\n @mock.patch(\"%s.SeekAndDestroy._get_cached_client\" % BASE)\n def test__publisher_admin(self, mock__get_cached_client):\n mock_mgr = self._manager([Exception, Exception, [1, 2, 3]],\n _perform_for_admin_only=False)\n admin = mock.MagicMock()\n publish = manager.SeekAndDestroy(mock_mgr, admin, None)._publisher\n\n queue = []\n publish(queue)\n mock__get_cached_client.assert_called_once_with(admin)\n mock_mgr.assert_called_once_with(\n admin=mock__get_cached_client.return_value)\n self.assertEqual(queue, [(admin, None, x) for x in range(1, 4)])\n\n @mock.patch(\"%s.SeekAndDestroy._get_cached_client\" % BASE)\n def test__publisher_admin_only(self, mock__get_cached_client):\n mock_mgr = self._manager([Exception, Exception, [1, 2, 3]],\n _perform_for_admin_only=True)\n admin = mock.MagicMock()\n publish = manager.SeekAndDestroy(\n mock_mgr, admin, [\"u1\", \"u2\"])._publisher\n\n queue = []\n publish(queue)\n mock__get_cached_client.assert_called_once_with(admin)\n mock_mgr.assert_called_once_with(\n 
admin=mock__get_cached_client.return_value)\n self.assertEqual(queue, [(admin, None, x) for x in range(1, 4)])\n\n @mock.patch(\"%s.SeekAndDestroy._get_cached_client\" % BASE)\n def test__publisher_user_resource(self, mock__get_cached_client):\n mock_mgr = self._manager([Exception, Exception, [1, 2, 3],\n Exception, Exception, [4, 5]],\n _perform_for_admin_only=False,\n _tenant_resource=True)\n\n admin = mock.MagicMock()\n users = [{\"tenant_id\": 1, \"id\": 1}, {\"tenant_id\": 2, \"id\": 2}]\n publish = manager.SeekAndDestroy(mock_mgr, admin, users)._publisher\n\n queue = []\n publish(queue)\n\n mock_client = mock__get_cached_client.return_value\n mock_mgr.assert_has_calls([\n mock.call(admin=mock_client, user=mock_client,\n tenant_uuid=users[0][\"tenant_id\"]),\n mock.call().list(),\n mock.call().list(),\n mock.call().list(),\n mock.call(admin=mock_client, user=mock_client,\n tenant_uuid=users[1][\"tenant_id\"]),\n mock.call().list(),\n mock.call().list()\n ])\n mock__get_cached_client.assert_has_calls([\n mock.call(admin),\n mock.call(users[0]),\n mock.call(users[1])\n ])\n expected_queue = [(admin, users[0], x) for x in range(1, 4)]\n expected_queue += [(admin, users[1], x) for x in range(4, 6)]\n self.assertEqual(expected_queue, queue)\n\n @mock.patch(\"%s.LOG\" % BASE)\n @mock.patch(\"%s.SeekAndDestroy._get_cached_client\" % BASE)\n def test__gen_publisher_tenant_resource(self, mock__get_cached_client,\n mock_log):\n mock_mgr = self._manager([Exception, [1, 2, 3],\n Exception, Exception, Exception,\n [\"this shouldn't be in results\"]],\n _perform_for_admin_only=False,\n _tenant_resource=True)\n users = [{\"tenant_id\": 1, \"id\": 1},\n {\"tenant_id\": 1, \"id\": 2},\n {\"tenant_id\": 2, \"id\": 3}]\n\n publish = manager.SeekAndDestroy(\n mock_mgr, None, users)._publisher\n\n queue = []\n publish(queue)\n\n mock_client = mock__get_cached_client.return_value\n mock_mgr.assert_has_calls([\n mock.call(admin=mock_client, user=mock_client,\n 
tenant_uuid=users[0][\"tenant_id\"]),\n mock.call().list(),\n mock.call().list(),\n mock.call(admin=mock_client, user=mock_client,\n tenant_uuid=users[2][\"tenant_id\"]),\n mock.call().list(),\n mock.call().list(),\n mock.call().list()\n ])\n mock__get_cached_client.assert_has_calls([\n mock.call(None),\n mock.call(users[0]),\n mock.call(users[2])\n ])\n self.assertEqual(queue, [(None, users[0], x) for x in range(1, 4)])\n self.assertTrue(mock_log.warning.mock_called)\n self.assertTrue(mock_log.exception.mock_called)\n\n @mock.patch(\"rally.common.utils.name_matches_object\")\n @mock.patch(\"%s.SeekAndDestroy._get_cached_client\" % BASE)\n @mock.patch(\"%s.SeekAndDestroy._delete_single_resource\" % BASE)\n def test__consumer(self, mock__delete_single_resource,\n mock__get_cached_client,\n mock_name_matches_object):\n mock_mgr = mock.MagicMock(__name__=\"Test\")\n resource_classes = [mock.Mock()]\n task_id = \"task_id\"\n mock_name_matches_object.return_value = True\n\n consumer = manager.SeekAndDestroy(\n mock_mgr, None, None,\n resource_classes=resource_classes,\n task_id=task_id)._consumer\n\n admin = mock.MagicMock()\n user1 = {\"id\": \"a\", \"tenant_id\": \"uuid1\"}\n cache = {}\n\n consumer(cache, (admin, user1, \"res\"))\n mock_mgr.assert_called_once_with(\n resource=\"res\",\n admin=mock__get_cached_client.return_value,\n user=mock__get_cached_client.return_value,\n tenant_uuid=user1[\"tenant_id\"])\n mock__get_cached_client.assert_has_calls([\n mock.call(admin),\n mock.call(user1)\n ])\n mock__delete_single_resource.assert_called_once_with(\n mock_mgr.return_value)\n\n mock_mgr.reset_mock()\n mock__get_cached_client.reset_mock()\n mock__delete_single_resource.reset_mock()\n mock_name_matches_object.reset_mock()\n\n consumer(cache, (admin, None, \"res2\"))\n mock_mgr.assert_called_once_with(\n resource=\"res2\",\n admin=mock__get_cached_client.return_value,\n user=mock__get_cached_client.return_value,\n tenant_uuid=None)\n\n 
mock__get_cached_client.assert_has_calls([\n mock.call(admin),\n mock.call(None)\n ])\n mock__delete_single_resource.assert_called_once_with(\n mock_mgr.return_value)\n\n @mock.patch(\"rally.common.utils.name_matches_object\")\n @mock.patch(\"%s.SeekAndDestroy._get_cached_client\" % BASE)\n @mock.patch(\"%s.SeekAndDestroy._delete_single_resource\" % BASE)\n def test__consumer_with_noname_resource(self, mock__delete_single_resource,\n mock__get_cached_client,\n mock_name_matches_object):\n mock_mgr = mock.MagicMock(__name__=\"Test\")\n mock_mgr.return_value.name.return_value = True\n task_id = \"task_id\"\n mock_name_matches_object.return_value = False\n\n consumer = manager.SeekAndDestroy(mock_mgr, None, None,\n task_id=task_id)._consumer\n\n consumer(None, (None, None, \"res\"))\n self.assertFalse(mock__delete_single_resource.called)\n\n mock_mgr.return_value.name.return_value = base.NoName(\"foo\")\n consumer(None, (None, None, \"res\"))\n mock__delete_single_resource.assert_called_once_with(\n mock_mgr.return_value)\n\n @mock.patch(\"%s.broker.run\" % BASE)\n def test_exterminate(self, mock_broker_run):\n manager_cls = mock.MagicMock(_threads=5)\n cleaner = manager.SeekAndDestroy(manager_cls, None, None)\n cleaner._publisher = mock.Mock()\n cleaner._consumer = mock.Mock()\n cleaner.exterminate()\n\n mock_broker_run.assert_called_once_with(cleaner._publisher,\n cleaner._consumer,\n consumers_count=5)\n\n\nclass ResourceManagerTestCase(test.TestCase):\n\n def _get_res_mock(self, **kw):\n _mock = mock.MagicMock()\n for k, v in kw.items():\n setattr(_mock, k, v)\n return _mock\n\n def _list_res_names_helper(self, names, admin_required, mock_iter):\n self.assertEqual(set(names),\n manager.list_resource_names(admin_required))\n mock_iter.assert_called_once_with(base.ResourceManager)\n mock_iter.reset_mock()\n\n @mock.patch(\"%s.discover.itersubclasses\" % BASE)\n def test_list_resource_names(self, mock_itersubclasses):\n mock_itersubclasses.return_value = [\n 
self._get_res_mock(_service=\"fake\", _resource=\"1\",\n _admin_required=True),\n self._get_res_mock(_service=\"fake\", _resource=\"2\",\n _admin_required=False),\n self._get_res_mock(_service=\"other\", _resource=\"2\",\n _admin_required=False)\n ]\n\n self._list_res_names_helper(\n [\"fake\", \"other\", \"fake.1\", \"fake.2\", \"other.2\"],\n None, mock_itersubclasses)\n self._list_res_names_helper(\n [\"fake\", \"fake.1\"],\n True, mock_itersubclasses)\n self._list_res_names_helper(\n [\"fake\", \"other\", \"fake.2\", \"other.2\"],\n False, mock_itersubclasses)\n\n @mock.patch(\"%s.discover.itersubclasses\" % BASE)\n def test_find_resource_managers(self, mock_itersubclasses):\n mock_itersubclasses.return_value = [\n self._get_res_mock(_service=\"fake\", _resource=\"1\", _order=1,\n _admin_required=True),\n self._get_res_mock(_service=\"fake\", _resource=\"2\", _order=3,\n _admin_required=False),\n self._get_res_mock(_service=\"other\", _resource=\"2\", _order=2,\n _admin_required=False)\n ]\n\n self.assertEqual(mock_itersubclasses.return_value[0:2],\n manager.find_resource_managers(names=[\"fake\"]))\n\n self.assertEqual(mock_itersubclasses.return_value[0:1],\n manager.find_resource_managers(names=[\"fake.1\"]))\n\n self.assertEqual(\n [mock_itersubclasses.return_value[0],\n mock_itersubclasses.return_value[2],\n mock_itersubclasses.return_value[1]],\n manager.find_resource_managers(names=[\"fake\", \"other\"]))\n\n self.assertEqual(mock_itersubclasses.return_value[0:1],\n manager.find_resource_managers(names=[\"fake\"],\n admin_required=True))\n self.assertEqual(mock_itersubclasses.return_value[1:2],\n manager.find_resource_managers(names=[\"fake\"],\n admin_required=False))\n\n @mock.patch(\"rally.common.plugin.discover.itersubclasses\")\n @mock.patch(\"%s.SeekAndDestroy\" % BASE)\n @mock.patch(\"%s.find_resource_managers\" % BASE,\n return_value=[mock.MagicMock(), mock.MagicMock()])\n def test_cleanup(self, mock_find_resource_managers, 
mock_seek_and_destroy,\n mock_itersubclasses):\n class A(utils.RandomNameGeneratorMixin):\n pass\n\n class B(object):\n pass\n\n mock_itersubclasses.return_value = [A, B]\n\n manager.cleanup(names=[\"a\", \"b\"], admin_required=True,\n admin=\"admin\", users=[\"user\"],\n superclass=A,\n task_id=\"task_id\")\n\n mock_find_resource_managers.assert_called_once_with([\"a\", \"b\"], True)\n\n mock_seek_and_destroy.assert_has_calls([\n mock.call(mock_find_resource_managers.return_value[0],\n \"admin\",\n [\"user\"],\n resource_classes=[A],\n task_id=\"task_id\"),\n mock.call().exterminate(),\n mock.call(mock_find_resource_managers.return_value[1],\n \"admin\",\n [\"user\"],\n resource_classes=[A],\n task_id=\"task_id\"),\n mock.call().exterminate()\n ])\n" }, { "alpha_fraction": 0.644995391368866, "alphanum_fraction": 0.6457300186157227, "avg_line_length": 39.63432693481445, "blob_id": "dd4c5204449d366bb637d0ddff443ef8653a7c01", "content_id": "4e43345da1eeceb776642879ed4ae3970ea82aef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5445, "license_type": "permissive", "max_line_length": 79, "num_lines": 134, "path": "/rally_openstack/task/scenarios/cinder/qos_specs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.cinder import utils as cinder_utils\n\n\n\"\"\"Scenarios for Cinder QoS.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderQos.create_and_list_qos\", platform=\"openstack\")\nclass CreateAndListQos(cinder_utils.CinderBasic):\n def run(self, consumer, write_iops_sec, read_iops_sec):\n \"\"\"Create a qos, then list all qos.\n\n :param consumer: Consumer behavior\n :param write_iops_sec: random write limitation\n :param read_iops_sec: random read limitation\n \"\"\"\n specs = {\n \"consumer\": consumer,\n \"write_iops_sec\": write_iops_sec,\n \"read_iops_sec\": read_iops_sec\n }\n\n qos = self.admin_cinder.create_qos(specs)\n\n pool_list = self.admin_cinder.list_qos()\n msg = (\"Qos not included into list of available qos\\n\"\n \"created qos:{}\\n\"\n \"Pool of qos:{}\").format(qos, pool_list)\n self.assertIn(qos, pool_list, err_msg=msg)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderQos.create_and_get_qos\", platform=\"openstack\")\nclass CreateAndGetQos(cinder_utils.CinderBasic):\n def run(self, consumer, write_iops_sec, read_iops_sec):\n \"\"\"Create a qos, then get details of the qos.\n\n :param consumer: Consumer behavior\n :param write_iops_sec: random write limitation\n :param read_iops_sec: random read limitation\n \"\"\"\n specs = {\n \"consumer\": consumer,\n \"write_iops_sec\": write_iops_sec,\n 
\"read_iops_sec\": read_iops_sec\n }\n\n qos = self.admin_cinder.create_qos(specs)\n self.admin_cinder.get_qos(qos.id)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderQos.create_and_set_qos\", platform=\"openstack\")\nclass CreateAndSetQos(cinder_utils.CinderBasic):\n def run(self, consumer, write_iops_sec, read_iops_sec,\n set_consumer, set_write_iops_sec, set_read_iops_sec):\n \"\"\"Create a qos, then Add/Update keys in qos specs.\n\n :param consumer: Consumer behavior\n :param write_iops_sec: random write limitation\n :param read_iops_sec: random read limitation\n :param set_consumer: update Consumer behavior\n :param set_write_iops_sec: update random write limitation\n :param set_read_iops_sec: update random read limitation\n \"\"\"\n create_specs = {\n \"consumer\": consumer,\n \"write_iops_sec\": write_iops_sec,\n \"read_iops_sec\": read_iops_sec\n }\n set_specs = {\n \"consumer\": set_consumer,\n \"write_iops_sec\": set_write_iops_sec,\n \"read_iops_sec\": set_read_iops_sec\n }\n\n qos = self.admin_cinder.create_qos(create_specs)\n self.admin_cinder.set_qos(qos=qos, set_specs_args=set_specs)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\"required_contexts\", contexts=(\"volume_types\"))\[email protected](\n context={\"admin_cleanup@openstack\": [\"cinder\"]},\n name=\"CinderQos.create_qos_associate_and_disassociate_type\",\n platform=\"openstack\")\nclass CreateQosAssociateAndDisassociateType(cinder_utils.CinderBasic):\n def run(self, consumer, write_iops_sec, read_iops_sec):\n \"\"\"Create a qos, Associate and Disassociate the qos from volume type.\n\n :param consumer: Consumer behavior\n :param write_iops_sec: random write limitation\n 
:param read_iops_sec: random read limitation\n \"\"\"\n specs = {\n \"consumer\": consumer,\n \"write_iops_sec\": write_iops_sec,\n \"read_iops_sec\": read_iops_sec\n }\n\n qos = self.admin_cinder.create_qos(specs)\n\n vt_idx = self.context[\"iteration\"] % len(self.context[\"volume_types\"])\n volume_type = self.context[\"volume_types\"][vt_idx]\n\n self.admin_cinder.qos_associate_type(qos_specs=qos,\n volume_type=volume_type[\"id\"])\n\n self.admin_cinder.qos_disassociate_type(qos_specs=qos,\n volume_type=volume_type[\"id\"])\n" }, { "alpha_fraction": 0.5963993668556213, "alphanum_fraction": 0.599345326423645, "avg_line_length": 36.71604919433594, "blob_id": "7c2404b697772edb7c143ecaed5c22787c5ab24d", "content_id": "e4c1c1a63ff8123323f245c68bd92be13825f8ea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3055, "license_type": "permissive", "max_line_length": 75, "num_lines": 81, "path": "/rally_openstack/task/contexts/magnum/clusters.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.magnum import utils as magnum_utils\nfrom rally_openstack.task.scenarios.nova import utils as nova_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"clusters\", platform=\"openstack\", order=480)\nclass ClusterGenerator(context.OpenStackContext):\n \"\"\"Creates specified amount of Magnum clusters.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"cluster_template_uuid\": {\n \"type\": \"string\"\n },\n \"node_count\": {\n \"type\": \"integer\",\n \"minimum\": 1,\n },\n },\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\"node_count\": 1}\n\n def setup(self):\n for user, tenant_id in self._iterate_per_tenants():\n\n nova_scenario = nova_utils.NovaScenario({\n \"user\": user,\n \"task\": self.context[\"task\"],\n \"owner_id\": self.context[\"owner_id\"],\n \"config\": {\"api_versions\": self.context[\"config\"].get(\n \"api_versions\", [])}\n })\n keypair = nova_scenario._create_keypair()\n\n magnum_scenario = magnum_utils.MagnumScenario({\n \"user\": user,\n \"task\": self.context[\"task\"],\n \"owner_id\": self.context[\"owner_id\"],\n \"config\": {\"api_versions\": self.context[\"config\"].get(\n \"api_versions\", [])}\n })\n\n # create a cluster\n ct_uuid = self.config.get(\"cluster_template_uuid\", None)\n if ct_uuid is None:\n ctx = self.context[\"tenants\"][tenant_id]\n ct_uuid = ctx.get(\"cluster_template\")\n cluster = magnum_scenario._create_cluster(\n cluster_template=ct_uuid,\n node_count=self.config.get(\"node_count\"), keypair=keypair)\n self.context[\"tenants\"][tenant_id][\"cluster\"] = 
cluster.uuid\n\n def cleanup(self):\n resource_manager.cleanup(\n names=[\"magnum.clusters\", \"nova.keypairs\"],\n users=self.context.get(\"users\", []),\n superclass=magnum_utils.MagnumScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.5786880254745483, "alphanum_fraction": 0.5820757746696472, "avg_line_length": 39.08641815185547, "blob_id": "c7b70442e0f6df2943e7ddd4659b9ea22f5dae4a", "content_id": "2a6674932671c1904d8ae3441e1be2e31ca37810", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3247, "license_type": "permissive", "max_line_length": 75, "num_lines": 81, "path": "/rally_openstack/task/contexts/designate/zones.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.designate import utils\nfrom rally_openstack.task.scenarios.neutron import utils as neutron_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"zones\", platform=\"openstack\", order=600)\nclass ZoneGenerator(context.OpenStackContext):\n \"\"\"Context to add `zones_per_tenant` zones for each tenant.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"zones_per_tenant\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"set_zone_in_network\": {\n \"type\": \"boolean\",\n \"description\": \"Update network with created DNS zone.\"\n }\n },\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\n \"zones_per_tenant\": 1,\n \"set_zone_in_network\": False\n }\n\n def setup(self):\n for user, tenant_id in self._iterate_per_tenants(\n self.context[\"users\"]):\n self.context[\"tenants\"][tenant_id].setdefault(\"zones\", [])\n designate_util = utils.DesignateScenario(\n {\"user\": user,\n \"task\": self.context[\"task\"],\n \"owner_id\": self.context[\"owner_id\"]})\n for i in range(self.config[\"zones_per_tenant\"]):\n zone = designate_util._create_zone()\n self.context[\"tenants\"][tenant_id][\"zones\"].append(zone)\n if self.config[\"set_zone_in_network\"]:\n for user, tenant_id in self._iterate_per_tenants(\n self.context[\"users\"]):\n tenant = self.context[\"tenants\"][tenant_id]\n\n network_update_args = {\n \"dns_domain\": tenant[\"zones\"][0][\"name\"]\n }\n body = {\"network\": network_update_args}\n scenario = neutron_utils.NeutronScenario(\n context={\"user\": user, \"task\": self.context[\"task\"],\n 
\"owner_id\": self.context[\"owner_id\"]}\n )\n scenario.clients(\"neutron\").update_network(\n tenant[\"networks\"][0][\"id\"], body)\n\n def cleanup(self):\n resource_manager.cleanup(names=[\"designate.zones\"],\n users=self.context.get(\"users\", []),\n superclass=utils.DesignateScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.5546663403511047, "alphanum_fraction": 0.5572519302368164, "avg_line_length": 34.467247009277344, "blob_id": "f952f116535741743e21539b5b1905788f44ca4c", "content_id": "169e5df99cf44b14ec261d0768cca54f7a3c127e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8122, "license_type": "permissive", "max_line_length": 76, "num_lines": 229, "path": "/rally_openstack/task/contexts/vm/custom_image.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\nfrom rally.common import broker\nfrom rally.common import logging\nfrom rally.common import utils\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.common.services.image import image\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.vm import vmtasks\nfrom rally_openstack.task import types\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass BaseCustomImageGenerator(context.OpenStackContext,\n metaclass=abc.ABCMeta):\n \"\"\"Base plugin for the contexts providing customized image with.\n\n Every context plugin for the specific customization must implement\n the method `_customize_image` that is able to connect to the server\n using SSH and install applications inside it.\n\n This base context plugin provides a way to prepare an image with\n custom preinstalled applications. Basically, this code boots a VM, calls\n the `_customize_image` and then snapshots the VM disk, removing the VM\n afterwards. 
The image UUID is stored in the user[\"custom_image\"][\"id\"]\n and can be used afterwards by scenario.\n \"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"image\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n }\n },\n \"additionalProperties\": False\n },\n \"flavor\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n }\n },\n \"additionalProperties\": False\n },\n \"username\": {\n \"type\": \"string\"\n },\n \"password\": {\n \"type\": \"string\"\n },\n \"floating_network\": {\n \"type\": \"string\"\n },\n \"internal_network\": {\n \"type\": \"string\"\n },\n \"port\": {\n \"type\": \"integer\",\n \"minimum\": 1,\n \"maximum\": 65535\n },\n \"userdata\": {\n \"type\": \"string\"\n },\n \"workers\": {\n \"type\": \"integer\",\n \"minimum\": 1,\n }\n },\n \"required\": [\"image\", \"flavor\"],\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\n \"username\": \"root\",\n \"port\": 22,\n \"workers\": 1\n }\n\n def setup(self):\n \"\"\"Creates custom image(s) with preinstalled applications.\n\n When admin is present creates one public image that is usable\n from all the tenants and users. 
Otherwise create one image\n per user and tenant.\n \"\"\"\n\n if \"admin\" in self.context:\n if self.context[\"users\"]:\n # NOTE(pboldin): Create by first user and make it public by\n # the admin\n user = self.context[\"users\"][0]\n else:\n user = self.context[\"admin\"]\n tenant = self.context[\"tenants\"][user[\"tenant_id\"]]\n\n nics = None\n if \"networks\" in tenant:\n nics = [{\"net-id\": tenant[\"networks\"][0][\"id\"]}]\n\n custom_image = self.create_one_image(user, nics=nics)\n glance_service = image.Image(\n self.context[\"admin\"][\"credential\"].clients())\n glance_service.set_visibility(custom_image.id)\n\n for tenant in self.context[\"tenants\"].values():\n tenant[\"custom_image\"] = custom_image\n else:\n def publish(queue):\n for user, tenant_id in self._iterate_per_tenants():\n queue.append((user, tenant_id))\n\n def consume(cache, args):\n user, tenant_id = args\n tenant = self.context[\"tenants\"][tenant_id]\n tenant[\"custom_image\"] = self.create_one_image(user)\n\n broker.run(publish, consume, self.config[\"workers\"])\n\n def create_one_image(self, user, **kwargs):\n \"\"\"Create one image for the user.\"\"\"\n\n clients = osclients.Clients(user[\"credential\"])\n\n image_id = types.GlanceImage(self.context).pre_process(\n resource_spec=self.config[\"image\"], config={})\n flavor_id = types.Flavor(self.context).pre_process(\n resource_spec=self.config[\"flavor\"], config={})\n\n vm_scenario = vmtasks.BootRuncommandDelete(self.context,\n clients=clients)\n\n server, fip = vm_scenario._boot_server_with_fip(\n image=image_id, flavor=flavor_id,\n floating_network=self.config.get(\"floating_network\"),\n userdata=self.config.get(\"userdata\"),\n key_name=user[\"keypair\"][\"name\"],\n security_groups=[user[\"secgroup\"][\"name\"]],\n **kwargs)\n\n try:\n LOG.debug(\"Installing tools on %r %s\" % (server, fip[\"ip\"]))\n self.customize_image(server, fip, user)\n\n LOG.debug(\"Stopping server %r\" % server)\n vm_scenario._stop_server(server)\n\n 
LOG.debug(\"Creating snapshot for %r\" % server)\n custom_image = vm_scenario._create_image(server)\n finally:\n vm_scenario._delete_server_with_fip(server, fip)\n\n return custom_image\n\n def cleanup(self):\n \"\"\"Delete created custom image(s).\"\"\"\n\n if \"admin\" in self.context:\n user = self.context[\"users\"][0]\n tenant = self.context[\"tenants\"][user[\"tenant_id\"]]\n if \"custom_image\" in tenant:\n self.delete_one_image(user, tenant[\"custom_image\"])\n tenant.pop(\"custom_image\")\n else:\n def publish(queue):\n users = self.context.get(\"users\", [])\n for user, tenant_id in utils.iterate_per_tenants(users):\n queue.append((user, tenant_id))\n\n def consume(cache, args):\n user, tenant_id = args\n tenant = self.context[\"tenants\"][tenant_id]\n if \"custom_image\" in tenant:\n self.delete_one_image(user, tenant[\"custom_image\"])\n tenant.pop(\"custom_image\")\n\n broker.run(publish, consume, self.config[\"workers\"])\n\n def delete_one_image(self, user, custom_image):\n \"\"\"Delete the image created for the user and tenant.\"\"\"\n\n with logging.ExceptionLogger(\n LOG, \"Unable to delete image %s\" % custom_image.id):\n\n glance_service = image.Image(user[\"credential\"].clients())\n glance_service.delete_image(custom_image.id)\n\n @logging.log_task_wrapper(LOG.info, \"Custom image context: customizing\")\n def customize_image(self, server, ip, user):\n return self._customize_image(server, ip, user)\n\n @abc.abstractmethod\n def _customize_image(self, server, ip, user):\n \"\"\"Override this method with one that customizes image.\n\n Basically, code can simply call `VMScenario._run_command` function\n specifying an installation script and interpreter. This script will\n be then executed using SSH.\n\n :param server: nova.Server instance\n :param ip: dict with server IP details\n :param user: user who started a VM instance. 
Used to extract keypair\n \"\"\"\n pass\n" }, { "alpha_fraction": 0.6541764140129089, "alphanum_fraction": 0.6627634763717651, "avg_line_length": 36.67647171020508, "blob_id": "a50b5c33485d3fb595d2604bce0f945f80c7614e", "content_id": "ff370fe0cfa89c0003f07925d90e4ae2c8e5d28e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2562, "license_type": "permissive", "max_line_length": 78, "num_lines": 68, "path": "/rally_openstack/task/scenarios/zaqar/basic.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright (c) 2014 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.common import logging\n\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.zaqar import utils as zutils\n\n\n\"\"\"Scenarios for Zaqar.\"\"\"\n\n\[email protected](context={\"cleanup@openstack\": [\"zaqar\"]},\n name=\"ZaqarBasic.create_queue\", platform=\"openstack\")\nclass CreateQueue(zutils.ZaqarScenario):\n\n @logging.log_deprecated_args(\n \"The 'name_length' argument to create_queue is ignored\",\n \"0.1.2\", [\"name_length\"], once=True)\n def run(self, name_length=None, **kwargs):\n \"\"\"Create a Zaqar queue with a random name.\n\n :param kwargs: other optional parameters to create queues like\n \"metadata\"\n \"\"\"\n self._queue_create(**kwargs)\n\n\[email protected](context={\"cleanup@openstack\": [\"zaqar\"]},\n name=\"ZaqarBasic.producer_consumer\", platform=\"openstack\")\nclass ProducerConsumer(zutils.ZaqarScenario):\n\n @logging.log_deprecated_args(\n \"The 'name_length' argument to producer_consumer is ignored\",\n \"0.1.2\", [\"name_length\"], once=True)\n def run(self, name_length=None,\n min_msg_count=50, max_msg_count=200, **kwargs):\n \"\"\"Serial message producer/consumer.\n\n Creates a Zaqar queue with random name, sends a set of messages\n and then retrieves an iterator containing those.\n\n :param min_msg_count: min number of messages to be posted\n :param max_msg_count: max number of messages to be posted\n :param kwargs: other optional parameters to create queues like\n \"metadata\"\n \"\"\"\n\n queue = self._queue_create(**kwargs)\n msg_count = random.randint(min_msg_count, max_msg_count)\n messages = [{\"body\": {\"id\": idx}, \"ttl\": 360} for idx\n in range(msg_count)]\n self._messages_post(queue, messages, min_msg_count, max_msg_count)\n self._messages_list(queue)\n self._queue_delete(queue)\n" }, { "alpha_fraction": 0.6618133783340454, "alphanum_fraction": 
0.6635782122612, "avg_line_length": 37.092437744140625, "blob_id": "3d28b7b5e4bd9563f16ac329ef7625e69f408506", "content_id": "94885107eacab5efc2864bb056fae48cf646c7f8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4533, "license_type": "permissive", "max_line_length": 78, "num_lines": 119, "path": "/rally_openstack/task/scenarios/nova/keypairs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.nova import utils\n\n\n\"\"\"Scenarios for Nova keypairs.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaKeypair.create_and_list_keypairs\",\n platform=\"openstack\")\nclass CreateAndListKeypairs(utils.NovaScenario):\n\n def run(self, **kwargs):\n \"\"\"Create a keypair with random name and list keypairs.\n\n This scenario creates a keypair and then lists all keypairs.\n\n :param kwargs: Optional additional arguments for keypair creation\n \"\"\"\n\n keypair_name = self._create_keypair(**kwargs)\n self.assertTrue(keypair_name, \"Keypair isn't created\")\n\n list_keypairs = self._list_keypairs()\n self.assertIn(keypair_name, [i.id for i in list_keypairs])\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaKeypair.create_and_delete_keypair\",\n platform=\"openstack\")\nclass CreateAndDeleteKeypair(utils.NovaScenario):\n\n def run(self, **kwargs):\n \"\"\"Create a keypair with random name and delete keypair.\n\n This scenario creates a keypair and then delete that keypair.\n\n :param kwargs: Optional additional arguments for keypair creation\n \"\"\"\n\n keypair = self._create_keypair(**kwargs)\n self._delete_keypair(keypair)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n 
image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaKeypair.boot_and_delete_server_with_keypair\",\n platform=\"openstack\")\nclass BootAndDeleteServerWithKeypair(utils.NovaScenario):\n\n def run(self, image, flavor, boot_server_kwargs=None, **kwargs):\n \"\"\"Boot and delete server with keypair.\n\n Plan of this scenario:\n\n - create a keypair\n - boot a VM with created keypair\n - delete server\n - delete keypair\n\n :param image: ID of the image to be used for server creation\n :param flavor: ID of the flavor to be used for server creation\n :param boot_server_kwargs: Optional additional arguments for VM\n creation\n :param kwargs: Optional additional arguments for keypair creation\n \"\"\"\n\n keypair = self._create_keypair(**kwargs)\n server = self._boot_server(image, flavor,\n key_name=keypair,\n **(boot_server_kwargs) or {})\n self._delete_server(server)\n self._delete_keypair(keypair)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaKeypair.create_and_get_keypair\",\n platform=\"openstack\")\nclass CreateAndGetKeypair(utils.NovaScenario):\n\n def run(self, **kwargs):\n \"\"\"Create a keypair and get the keypair details.\n\n :param kwargs: Optional additional arguments for keypair creation\n \"\"\"\n\n keypair = self._create_keypair(**kwargs)\n\n self._get_keypair(keypair)\n" }, { "alpha_fraction": 0.6117486953735352, "alphanum_fraction": 0.617185115814209, "avg_line_length": 42.281044006347656, "blob_id": "0447fccae6fea2190b9f8034cb95a70dd7feee75", "content_id": "e77db1304a7f08c8af2e0da9786c11061d3e0ddd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 6622, "license_type": "permissive", "max_line_length": 79, "num_lines": 153, "path": "/tests/unit/task/scenarios/senlin/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally.common import cfg\nfrom rally import exceptions\n\nfrom rally_openstack.task.scenarios.senlin import utils\nfrom tests.unit import test\n\nSENLIN_UTILS = \"rally_openstack.task.scenarios.senlin.utils.\"\nCONF = cfg.CONF\n\n\nclass SenlinScenarioTestCase(test.ScenarioTestCase):\n\n def test_list_cluster(self):\n fake_cluster_list = [\"cluster1\", \"cluster2\"]\n self.admin_clients(\"senlin\").clusters.return_value = fake_cluster_list\n scenario = utils.SenlinScenario(self.context)\n result = scenario._list_clusters()\n\n self.assertEqual(list(fake_cluster_list), result)\n self.admin_clients(\"senlin\").clusters.assert_called_once_with()\n\n def test_list_cluster_with_queries(self):\n fake_cluster_list = [\"cluster1\", \"cluster2\"]\n self.admin_clients(\"senlin\").clusters.return_value = fake_cluster_list\n scenario = utils.SenlinScenario(self.context)\n result = scenario._list_clusters(status=\"ACTIVE\")\n\n self.assertEqual(list(fake_cluster_list), result)\n self.admin_clients(\"senlin\").clusters.assert_called_once_with(\n status=\"ACTIVE\")\n\n @mock.patch(SENLIN_UTILS + \"SenlinScenario.generate_random_name\",\n 
return_value=\"test_cluster\")\n def test_create_cluster(self, mock_generate_random_name):\n fake_cluster = mock.Mock(id=\"fake_cluster_id\")\n res_cluster = mock.Mock()\n self.admin_clients(\"senlin\").create_cluster.return_value = fake_cluster\n self.mock_wait_for_status.mock.return_value = res_cluster\n scenario = utils.SenlinScenario(self.context)\n result = scenario._create_cluster(\"fake_profile_id\",\n desired_capacity=1,\n min_size=0,\n max_size=3,\n metadata={\"k1\": \"v1\"},\n timeout=60)\n\n self.assertEqual(res_cluster, result)\n self.admin_clients(\"senlin\").create_cluster.assert_called_once_with(\n profile_id=\"fake_profile_id\", name=\"test_cluster\",\n desired_capacity=1, min_size=0, max_size=3, metadata={\"k1\": \"v1\"},\n timeout=60)\n self.mock_wait_for_status.mock.assert_called_once_with(\n fake_cluster, ready_statuses=[\"ACTIVE\"],\n failure_statuses=[\"ERROR\"],\n update_resource=scenario._get_cluster,\n timeout=CONF.openstack.senlin_action_timeout)\n mock_generate_random_name.assert_called_once_with()\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"senlin.create_cluster\")\n\n def test_get_cluster(self):\n fake_cluster = mock.Mock(id=\"fake_cluster_id\")\n scenario = utils.SenlinScenario(context=self.context)\n scenario._get_cluster(fake_cluster)\n\n self.admin_clients(\"senlin\").get_cluster.assert_called_once_with(\n \"fake_cluster_id\")\n\n def test_get_cluster_notfound(self):\n fake_cluster = mock.Mock(id=\"fake_cluster_id\")\n ex = Exception()\n ex.code = 404\n self.admin_clients(\"senlin\").get_cluster.side_effect = ex\n scenario = utils.SenlinScenario(context=self.context)\n\n self.assertRaises(exceptions.GetResourceNotFound,\n scenario._get_cluster,\n fake_cluster)\n self.admin_clients(\"senlin\").get_cluster.assert_called_once_with(\n \"fake_cluster_id\")\n\n def test_get_cluster_failed(self):\n fake_cluster = mock.Mock(id=\"fake_cluster_id\")\n ex = Exception()\n ex.code = 500\n 
self.admin_clients(\"senlin\").get_cluster.side_effect = ex\n scenario = utils.SenlinScenario(context=self.context)\n\n self.assertRaises(exceptions.GetResourceFailure,\n scenario._get_cluster,\n fake_cluster)\n self.admin_clients(\"senlin\").get_cluster.assert_called_once_with(\n \"fake_cluster_id\")\n\n def test_delete_cluster(self):\n fake_cluster = mock.Mock()\n scenario = utils.SenlinScenario(context=self.context)\n scenario._delete_cluster(fake_cluster)\n\n self.admin_clients(\"senlin\").delete_cluster.assert_called_once_with(\n fake_cluster)\n self.mock_wait_for_status.mock.assert_called_once_with(\n fake_cluster, ready_statuses=[\"DELETED\"],\n failure_statuses=[\"ERROR\"], check_deletion=True,\n update_resource=scenario._get_cluster,\n timeout=CONF.openstack.senlin_action_timeout)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"senlin.delete_cluster\")\n\n @mock.patch(SENLIN_UTILS + \"SenlinScenario.generate_random_name\",\n return_value=\"test_profile\")\n def test_create_profile(self, mock_generate_random_name):\n test_spec = {\n \"version\": \"1.0\",\n \"type\": \"test_type\",\n \"properties\": {\n \"key1\": \"value1\"\n }\n }\n scenario = utils.SenlinScenario(self.context)\n result = scenario._create_profile(test_spec, metadata={\"k2\": \"v2\"})\n\n self.assertEqual(\n self.clients(\"senlin\").create_profile.return_value, result)\n self.clients(\"senlin\").create_profile.assert_called_once_with(\n spec=test_spec, name=\"test_profile\", metadata={\"k2\": \"v2\"})\n mock_generate_random_name.assert_called_once_with()\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"senlin.create_profile\")\n\n def test_delete_profile(self):\n fake_profile = mock.Mock()\n scenario = utils.SenlinScenario(context=self.context)\n scenario._delete_profile(fake_profile)\n\n self.clients(\"senlin\").delete_profile.assert_called_once_with(\n fake_profile)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"senlin.delete_profile\")\n" }, 
{ "alpha_fraction": 0.5502707958221436, "alphanum_fraction": 0.5523880124092102, "avg_line_length": 44.538116455078125, "blob_id": "44949cf974bb9156387d9ac1d501e06e415bcc99", "content_id": "e4bb92fc4397f54d60535f143ebfff3cfcf0f246", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20310, "license_type": "permissive", "max_line_length": 79, "num_lines": 446, "path": "/tests/unit/common/wrappers/test_network.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\nfrom neutronclient.common import exceptions as neutron_exceptions\n\nfrom rally.common import utils\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common.wrappers import network\nfrom tests.unit import test\n\n\nSVC = \"rally_openstack.common.wrappers.network.\"\n\n\nclass Owner(utils.RandomNameGeneratorMixin):\n task = {\"uuid\": \"task-uuid\"}\n\n\[email protected]\nclass NeutronWrapperTestCase(test.TestCase):\n def setUp(self):\n super(NeutronWrapperTestCase, self).setUp()\n self.owner = Owner()\n self.owner.generate_random_name = mock.Mock()\n clients = mock.MagicMock()\n clients.credential.permission = consts.EndpointPermission.ADMIN\n self.wrapper = network.NeutronWrapper(\n clients, self.owner, config={})\n self._nc = self.wrapper.neutron.client\n\n def test_SUBNET_IP_VERSION(self):\n self.assertEqual(4, network.NeutronWrapper.SUBNET_IP_VERSION)\n\n @mock.patch(\n \"rally_openstack.common.services.network.net_utils.generate_cidr\")\n def test__generate_cidr(self, mock_generate_cidr):\n cidrs = iter(range(5))\n\n def fake_gen_cidr(ip_version=None, start_cidr=None):\n return 4, 3 + next(cidrs)\n\n mock_generate_cidr.side_effect = fake_gen_cidr\n\n self.assertEqual(3, self.wrapper._generate_cidr())\n self.assertEqual(4, self.wrapper._generate_cidr())\n self.assertEqual(5, self.wrapper._generate_cidr())\n self.assertEqual(6, self.wrapper._generate_cidr())\n self.assertEqual(7, self.wrapper._generate_cidr())\n self.assertEqual([mock.call(start_cidr=self.wrapper.start_cidr)] * 5,\n mock_generate_cidr.call_args_list)\n\n def test_external_networks(self):\n self._nc.list_networks.return_value = {\"networks\": \"foo_networks\"}\n self.assertEqual(\"foo_networks\", self.wrapper.external_networks)\n self._nc.list_networks.assert_called_once_with(\n **{\"router:external\": True})\n\n def 
test_get_network(self):\n neutron_net = {\"id\": \"foo_id\",\n \"name\": \"foo_name\",\n \"tenant_id\": \"foo_tenant\",\n \"status\": \"foo_status\",\n \"router:external\": \"foo_external\",\n \"subnets\": \"foo_subnets\"}\n expected_net = {\"id\": \"foo_id\",\n \"name\": \"foo_name\",\n \"tenant_id\": \"foo_tenant\",\n \"status\": \"foo_status\",\n \"external\": \"foo_external\",\n \"router_id\": None,\n \"subnets\": \"foo_subnets\"}\n self._nc.show_network.return_value = {\"network\": neutron_net}\n net = self.wrapper.get_network(net_id=\"foo_id\")\n self.assertEqual(expected_net, net)\n self._nc.show_network.assert_called_once_with(\"foo_id\")\n\n self._nc.show_network.side_effect = (\n neutron_exceptions.NeutronClientException)\n self.assertRaises(network.NetworkWrapperException,\n self.wrapper.get_network,\n net_id=\"foo_id\")\n\n self._nc.list_networks.return_value = {\"networks\": [neutron_net]}\n net = self.wrapper.get_network(name=\"foo_name\")\n self.assertEqual(expected_net, net)\n self._nc.list_networks.assert_called_once_with(name=\"foo_name\")\n\n self._nc.list_networks.return_value = {\"networks\": []}\n self.assertRaises(network.NetworkWrapperException,\n self.wrapper.get_network,\n name=\"foo_name\")\n\n def test_create_v1_pool(self):\n subnet = \"subnet_id\"\n tenant = \"foo_tenant\"\n expected_pool = {\"pool\": {\n \"id\": \"pool_id\",\n \"name\": self.owner.generate_random_name.return_value,\n \"subnet_id\": subnet,\n \"tenant_id\": tenant}}\n self.wrapper.client.create_pool.return_value = expected_pool\n resultant_pool = self.wrapper.create_v1_pool(tenant, subnet)\n self.wrapper.client.create_pool.assert_called_once_with({\n \"pool\": {\"lb_method\": \"ROUND_ROBIN\",\n \"subnet_id\": subnet,\n \"tenant_id\": tenant,\n \"protocol\": \"HTTP\",\n \"name\": self.owner.generate_random_name.return_value}})\n self.assertEqual(expected_pool, resultant_pool)\n\n def test_create_network(self):\n self._nc.create_network.return_value = {\n \"network\": 
{\"id\": \"foo_id\",\n \"name\": self.owner.generate_random_name.return_value,\n \"status\": \"foo_status\"}}\n net = self.wrapper.create_network(\"foo_tenant\")\n self._nc.create_network.assert_called_once_with({\n \"network\": {\"tenant_id\": \"foo_tenant\",\n \"name\": self.owner.generate_random_name.return_value}})\n self.assertEqual({\"id\": \"foo_id\",\n \"name\": self.owner.generate_random_name.return_value,\n \"status\": \"foo_status\",\n \"external\": False,\n \"tenant_id\": \"foo_tenant\",\n \"router_id\": None,\n \"subnets\": []}, net)\n\n def test_create_network_with_subnets(self):\n subnets_num = 4\n subnets_ids = iter(range(subnets_num))\n self._nc.create_subnet.side_effect = lambda i: {\n \"subnet\": {\"id\": \"subnet-%d\" % next(subnets_ids)}}\n self._nc.create_network.return_value = {\n \"network\": {\"id\": \"foo_id\",\n \"name\": self.owner.generate_random_name.return_value,\n \"status\": \"foo_status\"}}\n\n net = self.wrapper.create_network(\"foo_tenant\",\n subnets_num=subnets_num)\n\n self._nc.create_network.assert_called_once_with({\n \"network\": {\"tenant_id\": \"foo_tenant\",\n \"name\": self.owner.generate_random_name.return_value}})\n self.assertEqual({\"id\": \"foo_id\",\n \"name\": self.owner.generate_random_name.return_value,\n \"status\": \"foo_status\",\n \"external\": False,\n \"router_id\": None,\n \"tenant_id\": \"foo_tenant\",\n \"subnets\": [\"subnet-%d\" % i\n for i in range(subnets_num)]}, net)\n self.assertEqual(\n [mock.call({\"subnet\":\n {\"name\": self.owner.generate_random_name.return_value,\n \"network_id\": \"foo_id\",\n \"tenant_id\": \"foo_tenant\",\n \"ip_version\": self.wrapper.SUBNET_IP_VERSION,\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"],\n \"cidr\": mock.ANY}})\n for i in range(subnets_num)],\n self.wrapper.client.create_subnet.call_args_list\n )\n\n def test_create_network_with_router(self):\n self._nc.create_router.return_value = {\"router\": {\"id\": \"foo_router\"}}\n 
self._nc.create_network.return_value = {\n \"network\": {\"id\": \"foo_id\",\n \"name\": self.owner.generate_random_name.return_value,\n \"status\": \"foo_status\"}}\n net = self.wrapper.create_network(\"foo_tenant\", add_router=True)\n self.assertEqual({\"id\": \"foo_id\",\n \"name\": self.owner.generate_random_name.return_value,\n \"status\": \"foo_status\",\n \"external\": False,\n \"tenant_id\": \"foo_tenant\",\n \"router_id\": \"foo_router\",\n \"subnets\": []}, net)\n self._nc.create_router.assert_called_once_with({\n \"router\": {\n \"name\": self.owner.generate_random_name(),\n \"tenant_id\": \"foo_tenant\"\n }\n })\n\n def test_create_network_with_router_and_subnets(self):\n subnets_num = 4\n self.wrapper._generate_cidr = mock.Mock(return_value=\"foo_cidr\")\n self._nc.create_router.return_value = {\"router\": {\"id\": \"foo_router\"}}\n self._nc.create_subnet.return_value = {\"subnet\": {\"id\": \"foo_subnet\"}}\n self._nc.create_network.return_value = {\n \"network\": {\"id\": \"foo_id\",\n \"name\": self.owner.generate_random_name.return_value,\n \"status\": \"foo_status\"}}\n net = self.wrapper.create_network(\n \"foo_tenant\", add_router=True, subnets_num=subnets_num,\n dns_nameservers=[\"foo_nameservers\"])\n self.assertEqual({\"id\": \"foo_id\",\n \"name\": self.owner.generate_random_name.return_value,\n \"status\": \"foo_status\",\n \"external\": False,\n \"tenant_id\": \"foo_tenant\",\n \"router_id\": \"foo_router\",\n \"subnets\": [\"foo_subnet\"] * subnets_num}, net)\n self._nc.create_router.assert_called_once_with(\n {\"router\": {\"name\": self.owner.generate_random_name.return_value,\n \"tenant_id\": \"foo_tenant\"}})\n self.assertEqual(\n [\n mock.call(\n {\"subnet\": {\n \"name\": self.owner.generate_random_name.return_value,\n \"network_id\": \"foo_id\",\n \"tenant_id\": \"foo_tenant\",\n \"ip_version\": self.wrapper.SUBNET_IP_VERSION,\n \"dns_nameservers\": [\"foo_nameservers\"],\n \"cidr\": mock.ANY\n }}\n )\n ] * subnets_num,\n 
self._nc.create_subnet.call_args_list,\n )\n self.assertEqual(self._nc.add_interface_router.call_args_list,\n [mock.call(\"foo_router\", {\"subnet_id\": \"foo_subnet\"})\n for i in range(subnets_num)])\n\n def test_delete_v1_pool(self):\n pool = {\"pool\": {\"id\": \"pool-id\"}}\n self.wrapper.delete_v1_pool(pool[\"pool\"][\"id\"])\n self.wrapper.client.delete_pool.assert_called_once_with(\"pool-id\")\n\n def test_delete_network(self):\n self._nc.list_ports.return_value = {\"ports\": []}\n self._nc.list_subnets.return_value = {\"subnets\": []}\n self._nc.delete_network.return_value = \"foo_deleted\"\n self.wrapper.delete_network(\n {\"id\": \"foo_id\", \"router_id\": None, \"subnets\": [], \"name\": \"x\",\n \"status\": \"y\", \"external\": False})\n self.assertFalse(self._nc.remove_gateway_router.called)\n self.assertFalse(self._nc.remove_interface_router.called)\n self.assertFalse(self._nc.client.delete_router.called)\n self.assertFalse(self._nc.client.delete_subnet.called)\n self._nc.delete_network.assert_called_once_with(\"foo_id\")\n\n def test_delete_network_with_router_and_ports_and_subnets(self):\n\n subnets = [\"foo_subnet\", \"bar_subnet\"]\n ports = [{\"id\": \"foo_port\", \"device_owner\": \"network:router_interface\",\n \"device_id\": \"rounttter\"},\n {\"id\": \"bar_port\", \"device_owner\": \"network:dhcp\"}]\n self._nc.list_ports.return_value = ({\"ports\": ports})\n self._nc.list_subnets.return_value = (\n {\"subnets\": [{\"id\": id_} for id_ in subnets]})\n\n self.wrapper.delete_network(\n {\"id\": \"foo_id\", \"router_id\": \"foo_router\", \"subnets\": subnets,\n \"lb_pools\": [], \"name\": \"foo\", \"status\": \"x\", \"external\": False})\n\n self.assertEqual(self._nc.remove_gateway_router.mock_calls,\n [mock.call(\"foo_router\")])\n self._nc.delete_port.assert_called_once_with(ports[1][\"id\"])\n self._nc.remove_interface_router.assert_called_once_with(\n ports[0][\"device_id\"], {\"port_id\": ports[0][\"id\"]})\n self.assertEqual(\n 
[mock.call(subnet_id) for subnet_id in subnets],\n self._nc.delete_subnet.call_args_list\n )\n self._nc.delete_network.assert_called_once_with(\"foo_id\")\n\n @ddt.data({\"exception_type\": neutron_exceptions.NotFound,\n \"should_raise\": False},\n {\"exception_type\": neutron_exceptions.BadRequest,\n \"should_raise\": False},\n {\"exception_type\": KeyError,\n \"should_raise\": True})\n @ddt.unpack\n def test_delete_network_with_router_throw_exception(\n self, exception_type, should_raise):\n # Ensure cleanup context still move forward even\n # remove_interface_router throw NotFound/BadRequest exception\n\n self._nc.remove_interface_router.side_effect = exception_type\n subnets = [\"foo_subnet\", \"bar_subnet\"]\n ports = [{\"id\": \"foo_port\", \"device_owner\": \"network:router_interface\",\n \"device_id\": \"rounttter\"},\n {\"id\": \"bar_port\", \"device_owner\": \"network:dhcp\"}]\n self._nc.list_ports.return_value = {\"ports\": ports}\n self._nc.list_subnets.return_value = {\"subnets\": [\n {\"id\": id_} for id_ in subnets]}\n\n if should_raise:\n self.assertRaises(\n exception_type, self.wrapper.delete_network,\n {\"id\": \"foo_id\", \"name\": \"foo\", \"router_id\": \"foo_router\",\n \"subnets\": subnets, \"lb_pools\": [], \"status\": \"xxx\",\n \"external\": False})\n self.assertFalse(self._nc.delete_subnet.called)\n self.assertFalse(self._nc.delete_network.called)\n else:\n self.wrapper.delete_network(\n {\"id\": \"foo_id\", \"name\": \"foo\", \"status\": \"xxx\",\n \"router_id\": \"foo_router\", \"subnets\": subnets,\n \"lb_pools\": [], \"external\": False})\n\n self._nc.delete_port.assert_called_once_with(ports[1][\"id\"])\n self._nc.remove_interface_router.assert_called_once_with(\n ports[0][\"device_id\"], {\"port_id\": ports[0][\"id\"]})\n self.assertEqual(\n [mock.call(subnet_id) for subnet_id in subnets],\n self._nc.delete_subnet.call_args_list\n )\n self._nc.delete_network.assert_called_once_with(\"foo_id\")\n\n 
self._nc.remove_gateway_router.assert_called_once_with(\n \"foo_router\")\n\n def test_list_networks(self):\n self._nc.list_networks.return_value = {\"networks\": \"foo_nets\"}\n self.assertEqual(\"foo_nets\", self.wrapper.list_networks())\n self._nc.list_networks.assert_called_once_with()\n\n def test_create_floating_ip(self):\n self._nc.create_port.return_value = {\"port\": {\"id\": \"port_id\"}}\n self._nc.create_floatingip.return_value = {\n \"floatingip\": {\"id\": \"fip_id\", \"floating_ip_address\": \"fip_ip\"}}\n\n self.assertRaises(ValueError, self.wrapper.create_floating_ip)\n\n self._nc.list_networks.return_value = {\"networks\": []}\n self.assertRaises(network.NetworkWrapperException,\n self.wrapper.create_floating_ip,\n tenant_id=\"foo_tenant\")\n\n self._nc.list_networks.return_value = {\"networks\": [{\"id\": \"ext_id\"}]}\n fip = self.wrapper.create_floating_ip(\n tenant_id=\"foo_tenant\", port_id=\"port_id\")\n self.assertEqual({\"id\": \"fip_id\", \"ip\": \"fip_ip\"}, fip)\n\n self._nc.list_networks.return_value = {\"networks\": [\n {\"id\": \"ext_net_id\", \"name\": \"ext_net\", \"router:external\": True}]}\n self.wrapper.create_floating_ip(\n tenant_id=\"foo_tenant\", ext_network=\"ext_net\", port_id=\"port_id\")\n\n self.assertRaises(\n network.NetworkWrapperException,\n self.wrapper.create_floating_ip, tenant_id=\"foo_tenant\",\n ext_network=\"ext_net_2\")\n\n def test_delete_floating_ip(self):\n self.wrapper.delete_floating_ip(\"fip_id\")\n self.wrapper.delete_floating_ip(\"fip_id\", ignored_kwarg=\"bar\")\n self.assertEqual([mock.call(\"fip_id\")] * 2,\n self._nc.delete_floatingip.call_args_list)\n\n def test_create_router(self):\n self._nc.create_router.return_value = {\"router\": \"foo_router\"}\n self._nc.list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"ext-gw-mode\"}]}\n self._nc.list_networks.return_value = {\"networks\": [{\"id\": \"ext_id\"}]}\n\n router = self.wrapper.create_router()\n 
self._nc.create_router.assert_called_once_with(\n {\"router\": {\"name\": self.owner.generate_random_name.return_value}})\n self.assertEqual(\"foo_router\", router)\n\n self.wrapper.create_router(external=True, flavor_id=\"bar\")\n self._nc.create_router.assert_called_with(\n {\"router\": {\"name\": self.owner.generate_random_name.return_value,\n \"external_gateway_info\": {\n \"network_id\": \"ext_id\",\n \"enable_snat\": True},\n \"flavor_id\": \"bar\"}})\n\n def test_create_router_without_ext_gw_mode_extension(self):\n self._nc.create_router.return_value = {\"router\": \"foo_router\"}\n self._nc.list_extensions.return_value = {\"extensions\": []}\n self._nc.list_networks.return_value = {\"networks\": [{\"id\": \"ext_id\"}]}\n\n router = self.wrapper.create_router()\n self._nc.create_router.assert_called_once_with(\n {\"router\": {\"name\": self.owner.generate_random_name.return_value}})\n self.assertEqual(router, \"foo_router\")\n\n self.wrapper.create_router(external=True, flavor_id=\"bar\")\n self._nc.create_router.assert_called_with(\n {\"router\": {\"name\": self.owner.generate_random_name.return_value,\n \"external_gateway_info\": {\"network_id\": \"ext_id\"},\n \"flavor_id\": \"bar\"}})\n\n def test_create_port(self):\n self._nc.create_port.return_value = {\"port\": \"foo_port\"}\n\n port = self.wrapper.create_port(\"foo_net\")\n self._nc.create_port.assert_called_once_with(\n {\"port\": {\"network_id\": \"foo_net\",\n \"name\": self.owner.generate_random_name.return_value}})\n self.assertEqual(\"foo_port\", port)\n\n port = self.wrapper.create_port(\"foo_net\", foo=\"bar\")\n self.wrapper.client.create_port.assert_called_with(\n {\"port\": {\"network_id\": \"foo_net\",\n \"name\": self.owner.generate_random_name.return_value,\n \"foo\": \"bar\"}})\n\n def test_supports_extension(self):\n self._nc.list_extensions.return_value = (\n {\"extensions\": [{\"alias\": \"extension\"}]})\n self.assertTrue(self.wrapper.supports_extension(\"extension\")[0])\n\n 
self.wrapper.neutron._cached_supported_extensions = None\n self._nc.list_extensions.return_value = (\n {\"extensions\": [{\"alias\": \"extension\"}]})\n self.assertFalse(self.wrapper.supports_extension(\"dummy-group\")[0])\n\n self.wrapper.neutron._cached_supported_extensions = None\n self._nc.list_extensions.return_value = {\"extensions\": []}\n self.assertFalse(self.wrapper.supports_extension(\"extension\")[0])\n\n\nclass FunctionsTestCase(test.TestCase):\n\n def test_wrap(self):\n mock_clients = mock.Mock()\n config = {\"fakearg\": \"fake\"}\n owner = Owner()\n\n mock_clients.services.return_value = {\"foo\": consts.Service.NEUTRON}\n wrapper = network.wrap(mock_clients, owner, config)\n self.assertIsInstance(wrapper, network.NeutronWrapper)\n self.assertEqual(wrapper.owner, owner)\n self.assertEqual(wrapper.config, config)\n" }, { "alpha_fraction": 0.6490002274513245, "alphanum_fraction": 0.6521320343017578, "avg_line_length": 40.92929458618164, "blob_id": "a60ce1be2e51d3570b95de3e21bc7c633fb2e25c", "content_id": "ce8ae7ff7b80f61e3c76c5bd2fce901ba86b5066", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4151, "license_type": "permissive", "max_line_length": 79, "num_lines": 99, "path": "/rally_openstack/task/scenarios/watcher/basic.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.watcher import utils\n\n\n\"\"\"Scenarios for Watcher servers.\"\"\"\n\n\[email protected](strategy={\"type\": \"watcher_strategy\"},\n goal={\"type\": \"watcher_goal\"})\[email protected](\"required_services\", services=[consts.Service.WATCHER])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"watcher\"]},\n name=\"Watcher.create_audit_template_and_delete\",\n platform=\"openstack\")\nclass CreateAuditTemplateAndDelete(utils.WatcherScenario):\n\n @logging.log_deprecated_args(\"Extra field has been removed \"\n \"since it isn't used.\", \"0.8.0\", [\"extra\"],\n once=True)\n def run(self, goal, strategy):\n \"\"\"Create audit template and delete it.\n\n :param goal: The goal audit template is based on\n :param strategy: The strategy used to provide resource optimization\n algorithm\n \"\"\"\n\n audit_template = self._create_audit_template(goal, strategy)\n self._delete_audit_template(audit_template.uuid)\n\n\[email protected](\"required_services\", services=[consts.Service.WATCHER])\[email protected](name=\"Watcher.list_audit_templates\", platform=\"openstack\")\nclass ListAuditTemplates(utils.WatcherScenario):\n\n def run(self, name=None, goal=None, strategy=None,\n limit=None, sort_key=None, sort_dir=None,\n detail=False):\n \"\"\"List existing audit templates.\n\n Audit templates are being created by Audit Template Context.\n\n :param name: Name of the audit template\n :param goal: Name of the goal\n :param strategy: Name of the strategy\n :param limit: The maximum number of results to return per\n request, if:\n\n 1) limit > 0, the maximum number of 
audit templates to return.\n 2) limit == 0, return the entire list of audit_templates.\n 3) limit param is NOT specified (None), the number of items\n returned respect the maximum imposed by the Watcher API\n (see Watcher's api.max_limit option).\n :param sort_key: Optional, field used for sorting.\n :param sort_dir: Optional, direction of sorting, either 'asc' (the\n default) or 'desc'.\n :param detail: Optional, boolean whether to return detailed information\n about audit_templates.\n \"\"\"\n\n self._list_audit_templates(name=name, goal=goal, strategy=strategy,\n limit=limit, sort_key=sort_key,\n sort_dir=sort_dir, detail=detail)\n\n\[email protected](\"required_services\", services=[consts.Service.WATCHER])\[email protected](\"required_contexts\", contexts=\"audit_templates\")\[email protected](context={\"admin_cleanup@openstack\": [\"watcher\"]},\n name=\"Watcher.create_audit_and_delete\",\n platform=\"openstack\")\nclass CreateAuditAndDelete(utils.WatcherScenario):\n\n def run(self):\n \"\"\"Create and delete audit.\n\n Create Audit, wait until whether Audit is in SUCCEEDED state or in\n FAILED and delete audit.\n \"\"\"\n\n audit_template_uuid = self.context[\"audit_templates\"][0]\n audit = self._create_audit(audit_template_uuid)\n self._delete_audit(audit)\n" }, { "alpha_fraction": 0.5396940112113953, "alphanum_fraction": 0.5445806384086609, "avg_line_length": 40.0625, "blob_id": "51f4e933a7269c3fd2de8a62d4292c6921e3b763", "content_id": "26a7ea2a6cba4db46ec36b7588f1cefc45c53400", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12483, "license_type": "permissive", "max_line_length": 78, "num_lines": 304, "path": "/tests/unit/task/scenarios/neutron/test_loadbalancer_v1.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.scenarios.neutron import loadbalancer_v1\nfrom tests.unit import test\n\n\[email protected]\nclass NeutronLoadbalancerv1TestCase(test.TestCase):\n\n def _get_context(self):\n context = test.get_test_context()\n context.update({\n \"user\": {\n \"id\": \"fake_user\",\n \"tenant_id\": \"fake_tenant\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake_tenant\",\n \"networks\": [{\"id\": \"fake_net\",\n \"subnets\": [\"fake_subnet\"]}]}})\n return context\n\n @ddt.data(\n {},\n {\"pool_create_args\": None},\n {\"pool_create_args\": {}},\n {\"pool_create_args\": {\"name\": \"given-name\"}},\n )\n @ddt.unpack\n def test_create_and_list_pools(self, pool_create_args=None):\n scenario = loadbalancer_v1.CreateAndListPools(self._get_context())\n pool_data = pool_create_args or {}\n networks = self._get_context()[\"tenant\"][\"networks\"]\n scenario._create_v1_pools = mock.Mock()\n scenario._list_v1_pools = mock.Mock()\n scenario.run(pool_create_args=pool_create_args)\n scenario._create_v1_pools.assert_called_once_with(networks,\n **pool_data)\n scenario._list_v1_pools.assert_called_once_with()\n\n @ddt.data(\n {},\n {\"pool_create_args\": None},\n {\"pool_create_args\": {}},\n {\"pool_create_args\": {\"name\": \"given-name\"}},\n )\n @ddt.unpack\n def test_create_and_delete_pools(self, pool_create_args=None):\n scenario = loadbalancer_v1.CreateAndDeletePools(self._get_context())\n pools = [{\n \"pool\": {\n \"id\": \"pool-id\"\n }\n }]\n pool_data = pool_create_args or {}\n 
networks = self._get_context()[\"tenant\"][\"networks\"]\n scenario._create_v1_pools = mock.Mock(return_value=pools)\n scenario._delete_v1_pool = mock.Mock()\n scenario.run(pool_create_args=pool_create_args)\n self.assertEqual([mock.call(networks, **pool_data)],\n scenario._create_v1_pools.mock_calls)\n for _ in pools:\n self.assertEqual(1, scenario._delete_v1_pool.call_count)\n\n @ddt.data(\n {},\n {\"pool_create_args\": None},\n {\"pool_create_args\": {}},\n {\"pool_create_args\": {\"name\": \"given-name\"}},\n {\"pool_update_args\": None},\n {\"pool_update_args\": {}},\n {\"pool_update_args\": {\"name\": \"updated-name\"}},\n {\"pool_create_args\": None, \"pool_update_args\": None},\n {\"pool_create_args\": {\"name\": \"given-name\"},\n \"pool_update_args\": {\"name\": \"updated-name\"}},\n {\"pool_create_args\": None,\n \"pool_update_args\": {\"name\": \"updated-name\"}},\n {\"pool_create_args\": None, \"pool_update_args\": {}},\n {\"pool_create_args\": {}, \"pool_update_args\": None},\n )\n @ddt.unpack\n def test_create_and_update_pools(self, pool_create_args=None,\n pool_update_args=None):\n scenario = loadbalancer_v1.CreateAndUpdatePools(self._get_context())\n pools = [{\n \"pool\": {\n \"id\": \"pool-id\"\n }\n }]\n updated_pool = {\n \"pool\": {\n \"id\": \"pool-id\",\n \"name\": \"updated-pool\",\n \"admin_state_up\": True\n }\n }\n pool_data = pool_create_args or {}\n pool_update_args = pool_update_args or {}\n pool_update_args.update({\"name\": \"_updated\", \"admin_state_up\": True})\n scenario._create_v1_pools = mock.Mock(return_value=pools)\n scenario._update_v1_pool = mock.Mock(return_value=updated_pool)\n networks = self._get_context()[\"tenant\"][\"networks\"]\n scenario.run(pool_create_args=pool_data,\n pool_update_args=pool_update_args)\n self.assertEqual([mock.call(networks, **pool_data)],\n scenario._create_v1_pools.mock_calls)\n for pool in pools:\n scenario._update_v1_pool.assert_called_once_with(\n pool, **pool_update_args)\n\n @ddt.data(\n 
{},\n {\"vip_create_args\": None},\n {\"vip_create_args\": {}},\n {\"vip_create_args\": {\"name\": \"given-vip-name\"}},\n {\"pool_create_args\": None},\n {\"pool_create_args\": {}},\n {\"pool_create_args\": {\"name\": \"given-pool-name\"}},\n )\n @ddt.unpack\n def test_create_and_list_vips(self, pool_create_args=None,\n vip_create_args=None):\n scenario = loadbalancer_v1.CreateAndListVips(self._get_context())\n pools = [{\n \"pool\": {\n \"id\": \"pool-id\"\n }\n }]\n vip_data = vip_create_args or {}\n pool_data = pool_create_args or {}\n networks = self._get_context()[\"tenant\"][\"networks\"]\n scenario._create_v1_pools = mock.Mock(return_value=pools)\n scenario._create_v1_vip = mock.Mock()\n scenario._list_v1_vips = mock.Mock()\n scenario.run(pool_create_args=pool_create_args,\n vip_create_args=vip_create_args)\n scenario._create_v1_pools.assert_called_once_with(networks,\n **pool_data)\n scenario._create_v1_vip.assert_has_calls(\n [mock.call(pool, **vip_data) for pool in pools])\n scenario._list_v1_vips.assert_called_once_with()\n\n @ddt.data(\n {},\n {\"vip_create_args\": None},\n {\"vip_create_args\": {}},\n {\"vip_create_args\": {\"name\": \"given-name\"}},\n {\"pool_create_args\": None},\n {\"pool_create_args\": {}},\n {\"pool_create_args\": {\"name\": \"given-pool-name\"}},\n )\n @ddt.unpack\n def test_create_and_delete_vips(self, pool_create_args=None,\n vip_create_args=None):\n scenario = loadbalancer_v1.CreateAndDeleteVips(self._get_context())\n pools = [{\n \"pool\": {\n \"id\": \"pool-id\"\n }\n }]\n vip = {\n \"vip\": {\n \"id\": \"vip-id\"\n }\n }\n vip_data = vip_create_args or {}\n pool_data = pool_create_args or {}\n networks = self._get_context()[\"tenant\"][\"networks\"]\n scenario._create_v1_pools = mock.Mock(return_value=pools)\n scenario._create_v1_vip = mock.Mock(return_value=vip)\n scenario._delete_v1_vip = mock.Mock()\n scenario.run(pool_create_args=pool_create_args,\n vip_create_args=vip_create_args)\n 
scenario._create_v1_pools.assert_called_once_with(networks,\n **pool_data)\n scenario._create_v1_vip.assert_has_calls(\n [mock.call(pool, **vip_data) for pool in pools])\n scenario._delete_v1_vip.assert_has_calls([mock.call(vip[\"vip\"])])\n\n @ddt.data(\n {},\n {\"vip_create_args\": None},\n {\"vip_create_args\": {}},\n {\"vip_create_args\": {\"name\": \"given-vip-name\"}},\n {\"pool_create_args\": None},\n {\"pool_create_args\": {}},\n {\"pool_create_args\": {\"name\": \"given-pool-name\"}},\n )\n @ddt.unpack\n def test_create_and_update_vips(self, pool_create_args=None,\n vip_create_args=None,\n vip_update_args=None):\n scenario = loadbalancer_v1.CreateAndUpdateVips(self._get_context())\n pools = [{\n \"pool\": {\n \"id\": \"pool-id\",\n }\n }]\n expected_vip = {\n \"vip\": {\n \"id\": \"vip-id\",\n \"name\": \"vip-name\"\n }\n }\n updated_vip = {\n \"vip\": {\n \"id\": \"vip-id\",\n \"name\": \"updated-vip-name\"\n }\n }\n vips = [expected_vip]\n vip_data = vip_create_args or {}\n vip_update_data = vip_update_args or {}\n pool_data = pool_create_args or {}\n networks = self._get_context()[\"tenant\"][\"networks\"]\n scenario._create_v1_pools = mock.Mock(return_value=pools)\n scenario._create_v1_vip = mock.Mock(return_value=expected_vip)\n scenario._update_v1_vip = mock.Mock(return_value=updated_vip)\n scenario.run(pool_create_args=pool_create_args,\n vip_create_args=vip_create_args,\n vip_update_args=vip_update_args)\n scenario._create_v1_pools.assert_called_once_with(networks,\n **pool_data)\n scenario._create_v1_vip.assert_has_calls(\n [mock.call(pool, **vip_data) for pool in pools])\n scenario._update_v1_vip.assert_has_calls(\n [mock.call(vip, **vip_update_data) for vip in vips])\n\n @ddt.data(\n {},\n {\"healthmonitor_create_args\": None},\n {\"healthmonitor_create_args\": {}},\n {\"healthmonitor_create_args\": {\"name\": \"given-name\"}},\n )\n @ddt.unpack\n def test_create_and_list_healthmonitors(self,\n healthmonitor_create_args=None):\n scenario = 
loadbalancer_v1.CreateAndListHealthmonitors(\n self._get_context())\n hm_data = healthmonitor_create_args or {}\n scenario._create_v1_healthmonitor = mock.Mock()\n scenario._list_v1_healthmonitors = mock.Mock()\n scenario.run(healthmonitor_create_args=healthmonitor_create_args)\n scenario._create_v1_healthmonitor.assert_called_once_with(**hm_data)\n scenario._list_v1_healthmonitors.assert_called_once_with()\n\n @ddt.data(\n {},\n {\"healthmonitor_create_args\": None},\n {\"healthmonitor_create_args\": {}},\n {\"healthmonitor_create_args\": {\"name\": \"given-name\"}},\n )\n @ddt.unpack\n def test_create_and_delete_healthmonitors(self,\n healthmonitor_create_args=None):\n scenario = loadbalancer_v1.CreateAndDeleteHealthmonitors(\n self._get_context())\n hm = {\"health_monitor\": {\"id\": \"hm-id\"}}\n hm_data = healthmonitor_create_args or {}\n scenario._create_v1_healthmonitor = mock.Mock(return_value=hm)\n scenario._delete_v1_healthmonitor = mock.Mock()\n scenario.run(healthmonitor_create_args=healthmonitor_create_args)\n scenario._create_v1_healthmonitor.assert_called_once_with(**hm_data)\n scenario._delete_v1_healthmonitor.assert_called_once_with(\n scenario._create_v1_healthmonitor.return_value[\"health_monitor\"])\n\n @ddt.data(\n {},\n {\"healthmonitor_create_args\": None},\n {\"healthmonitor_create_args\": {}},\n {\"healthmonitor_create_args\": {\"name\": \"given-name\"}},\n )\n @ddt.unpack\n def test_create_and_update_healthmonitors(self,\n healthmonitor_create_args=None,\n healthmonitor_update_args=None):\n scenario = loadbalancer_v1.CreateAndUpdateHealthmonitors(\n self._get_context())\n mock_random = loadbalancer_v1.random = mock.Mock()\n hm = {\"healthmonitor\": {\"id\": \"hm-id\"}}\n hm_data = healthmonitor_create_args or {}\n hm_update_data = healthmonitor_update_args or {\n \"max_retries\": mock_random.choice.return_value}\n scenario._create_v1_healthmonitor = mock.Mock(return_value=hm)\n scenario._update_v1_healthmonitor = mock.Mock()\n 
scenario.run(healthmonitor_create_args=healthmonitor_create_args,\n healthmonitor_update_args=healthmonitor_update_args)\n scenario._create_v1_healthmonitor.assert_called_once_with(**hm_data)\n scenario._update_v1_healthmonitor.assert_called_once_with(\n scenario._create_v1_healthmonitor.return_value, **hm_update_data)\n" }, { "alpha_fraction": 0.5301432609558105, "alphanum_fraction": 0.5444671511650085, "avg_line_length": 41.35333251953125, "blob_id": "428749830282f645c428092615fc431b1199d94e", "content_id": "8edce046ce86edaeb9153e09cb0d33122b81289c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6353, "license_type": "permissive", "max_line_length": 79, "num_lines": 150, "path": "/tests/unit/task/contexts/keystone/test_roles.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\nfrom rally_openstack.task.contexts.keystone import roles\nfrom tests.unit import fakes\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.keystone.roles\"\n\n\nclass RoleGeneratorTestCase(test.TestCase):\n\n def create_default_roles_and_patch_add_remove_functions(self, fc):\n fc.keystone().roles.add_user_role = mock.MagicMock()\n fc.keystone().roles.remove_user_role = mock.MagicMock()\n fc.keystone().roles.create(\"r1\", \"test_role1\")\n fc.keystone().roles.create(\"r2\", \"test_role2\")\n self.assertEqual(2, len(fc.keystone().roles.list()))\n\n @property\n def context(self):\n return {\n \"config\": {\n \"roles\": [\n \"test_role1\",\n \"test_role2\"\n ]\n },\n \"admin\": {\"credential\": mock.MagicMock()},\n \"task\": mock.MagicMock()\n }\n\n @mock.patch(\"%s.osclients\" % CTX)\n def test_add_role(self, mock_osclients):\n fc = fakes.FakeClients()\n mock_osclients.Clients.return_value = fc\n self.create_default_roles_and_patch_add_remove_functions(fc)\n\n ctx = roles.RoleGenerator(self.context)\n ctx.context[\"users\"] = [{\"id\": \"u1\", \"tenant_id\": \"t1\"},\n {\"id\": \"u2\", \"tenant_id\": \"t2\"}]\n ctx.credential = mock.MagicMock()\n ctx.setup()\n\n expected = {\"r1\": \"test_role1\", \"r2\": \"test_role2\"}\n self.assertEqual(expected, ctx.context[\"roles\"])\n\n @mock.patch(\"%s.osclients\" % CTX)\n def test_add_role_which_does_not_exist(self, mock_osclients):\n fc = fakes.FakeClients()\n mock_osclients.Clients.return_value = fc\n self.create_default_roles_and_patch_add_remove_functions(fc)\n\n ctx = roles.RoleGenerator(self.context)\n ctx.context[\"users\"] = [{\"id\": \"u1\", \"tenant_id\": \"t1\"},\n {\"id\": \"u2\", \"tenant_id\": \"t2\"}]\n ctx.config = [\"unknown_role\"]\n ctx.credential = mock.MagicMock()\n ex = self.assertRaises(exceptions.NotFoundException,\n 
ctx._get_role_object, \"unknown_role\")\n\n expected = (\"The resource can not be found: There is no role \"\n \"with name `unknown_role`\")\n self.assertEqual(expected, str(ex))\n\n @mock.patch(\"%s.osclients\" % CTX)\n def test_remove_role(self, mock_osclients):\n fc = fakes.FakeClients()\n mock_osclients.Clients.return_value = fc\n self.create_default_roles_and_patch_add_remove_functions(fc)\n\n ctx = roles.RoleGenerator(self.context)\n ctx.context[\"roles\"] = {\"r1\": \"test_role1\",\n \"r2\": \"test_role2\"}\n ctx.context[\"users\"] = [{\"id\": \"u1\", \"tenant_id\": \"t1\",\n \"assigned_roles\": [\"r1\", \"r2\"]},\n {\"id\": \"u2\", \"tenant_id\": \"t2\",\n \"assigned_roles\": [\"r1\", \"r2\"]}]\n ctx.credential = mock.MagicMock()\n ctx.cleanup()\n calls = [\n mock.call(user=\"u1\", role=\"r1\", tenant=\"t1\"),\n mock.call(user=\"u2\", role=\"r1\", tenant=\"t2\"),\n mock.call(user=\"u1\", role=\"r2\", tenant=\"t1\"),\n mock.call(user=\"u2\", role=\"r2\", tenant=\"t2\")\n ]\n\n fc.keystone().roles.remove_user_role.assert_has_calls(calls,\n any_order=True)\n\n @mock.patch(\"%s.osclients\" % CTX)\n def test_setup_and_cleanup(self, mock_osclients):\n fc = fakes.FakeClients()\n mock_osclients.Clients.return_value = fc\n self.create_default_roles_and_patch_add_remove_functions(fc)\n\n def _get_user_role_ids_side_effect(user_id, project_id):\n return [\"r1\", \"r2\"] if user_id == \"u3\" else []\n\n with roles.RoleGenerator(self.context) as ctx:\n ctx.context[\"users\"] = [{\"id\": \"u1\", \"tenant_id\": \"t1\"},\n {\"id\": \"u2\", \"tenant_id\": \"t2\"},\n {\"id\": \"u3\", \"tenant_id\": \"t3\"}]\n\n ctx._get_user_role_ids = mock.MagicMock()\n ctx._get_user_role_ids.side_effect = _get_user_role_ids_side_effect\n ctx.setup()\n ctx.credential = mock.MagicMock()\n calls = [\n mock.call(user=\"u1\", role=\"r1\", tenant=\"t1\"),\n mock.call(user=\"u2\", role=\"r1\", tenant=\"t2\"),\n mock.call(user=\"u1\", role=\"r2\", tenant=\"t1\"),\n mock.call(user=\"u2\", 
role=\"r2\", tenant=\"t2\"),\n ]\n fc.keystone().roles.add_user_role.assert_has_calls(calls,\n any_order=True)\n self.assertEqual(\n 4, fc.keystone().roles.add_user_role.call_count)\n self.assertEqual(\n 0, fc.keystone().roles.remove_user_role.call_count)\n self.assertEqual(2, len(ctx.context[\"roles\"]))\n self.assertEqual(2, len(fc.keystone().roles.list()))\n\n # Cleanup (called by context manager)\n self.assertEqual(2, len(fc.keystone().roles.list()))\n self.assertEqual(4, fc.keystone().roles.add_user_role.call_count)\n self.assertEqual(4, fc.keystone().roles.remove_user_role.call_count)\n calls = [\n mock.call(user=\"u1\", role=\"r1\", tenant=\"t1\"),\n mock.call(user=\"u2\", role=\"r1\", tenant=\"t2\"),\n mock.call(user=\"u1\", role=\"r2\", tenant=\"t1\"),\n mock.call(user=\"u2\", role=\"r2\", tenant=\"t2\")\n ]\n fc.keystone().roles.remove_user_role.assert_has_calls(calls,\n any_order=True)\n" }, { "alpha_fraction": 0.5848671793937683, "alphanum_fraction": 0.5898428559303284, "avg_line_length": 44.38200378417969, "blob_id": "b4604b81f98d2de8478f21830c1469df3ce6ca52", "content_id": "f98b2ee784cffc272002873826a1fcbb61900277", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53460, "license_type": "permissive", "max_line_length": 78, "num_lines": 1178, "path": "/tests/unit/task/scenarios/nova/test_servers.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally import exceptions as rally_exceptions\nfrom rally_openstack.task.scenarios.nova import servers\nfrom tests.unit import fakes\nfrom tests.unit import test\n\n\nNOVA_SERVERS_MODULE = \"rally_openstack.task.scenarios.nova.servers\"\nNOVA_SERVERS = NOVA_SERVERS_MODULE + \".NovaServers\"\n\n\[email protected]\nclass NovaServersTestCase(test.ScenarioTestCase):\n\n @ddt.data((\"rescue_unrescue\", [\"_rescue_server\", \"_unrescue_server\"], 1),\n (\"stop_start\", [\"_stop_server\", \"_start_server\"], 2),\n (\"pause_unpause\", [\"_pause_server\", \"_unpause_server\"], 3),\n (\"suspend_resume\", [\"_suspend_server\", \"_resume_server\"], 4),\n (\"lock_unlock\", [\"_lock_server\", \"_unlock_server\"], 5),\n (\"shelve_unshelve\", [\"_shelve_server\", \"_unshelve_server\"], 6))\n @ddt.unpack\n def test_action_pair(self, action_pair, methods, nof_calls):\n actions = [{action_pair: nof_calls}]\n fake_server = mock.MagicMock()\n scenario = servers.BootAndBounceServer(self.context)\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._delete_server = mock.MagicMock()\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n for method in methods:\n setattr(scenario, method, mock.MagicMock())\n\n scenario.run(\"img\", 1, actions=actions)\n\n scenario._boot_server.assert_called_once_with(\"img\", 1)\n server_calls = []\n for i in range(nof_calls):\n server_calls.append(mock.call(fake_server))\n for method in methods:\n mocked_method = getattr(scenario, method)\n self.assertEqual(nof_calls, mocked_method.call_count,\n \"%s not called %d times\" % (method, nof_calls))\n mocked_method.assert_has_calls(server_calls)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n def test_multiple_bounce_actions(self):\n actions = [{\"hard_reboot\": 5}, {\"stop_start\": 
8},\n {\"rescue_unrescue\": 3}, {\"pause_unpause\": 2},\n {\"suspend_resume\": 4}, {\"lock_unlock\": 6},\n {\"shelve_unshelve\": 7}]\n fake_server = mock.MagicMock()\n scenario = servers.BootAndBounceServer(self.context)\n\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._delete_server = mock.MagicMock()\n scenario._reboot_server = mock.MagicMock()\n scenario._stop_and_start_server = mock.MagicMock()\n scenario._rescue_and_unrescue_server = mock.MagicMock()\n scenario._pause_and_unpause_server = mock.MagicMock()\n scenario._suspend_and_resume_server = mock.MagicMock()\n scenario._lock_and_unlock_server = mock.MagicMock()\n scenario._shelve_and_unshelve_server = mock.MagicMock()\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n\n scenario.run(\"img\", 1, actions=actions)\n scenario._boot_server.assert_called_once_with(\"img\", 1)\n server_calls = []\n for i in range(5):\n server_calls.append(mock.call(fake_server))\n self.assertEqual(5, scenario._reboot_server.call_count,\n \"Reboot not called 5 times\")\n scenario._reboot_server.assert_has_calls(server_calls)\n server_calls = []\n for i in range(8):\n server_calls.append(mock.call(fake_server))\n self.assertEqual(8, scenario._stop_and_start_server.call_count,\n \"Stop/Start not called 8 times\")\n scenario._stop_and_start_server.assert_has_calls(server_calls)\n server_calls = []\n for i in range(3):\n server_calls.append(mock.call(fake_server))\n self.assertEqual(3, scenario._rescue_and_unrescue_server.call_count,\n \"Rescue/Unrescue not called 3 times\")\n scenario._rescue_and_unrescue_server.assert_has_calls(server_calls)\n server_calls = []\n for i in range(2):\n server_calls.append(mock.call(fake_server))\n self.assertEqual(2, scenario._pause_and_unpause_server.call_count,\n \"Pause/Unpause not called 2 times\")\n scenario._pause_and_unpause_server.assert_has_calls(server_calls)\n server_calls = []\n for i in range(4):\n 
server_calls.append(mock.call(fake_server))\n self.assertEqual(4, scenario._suspend_and_resume_server.call_count,\n \"Suspend/Resume not called 4 times\")\n scenario._suspend_and_resume_server.assert_has_calls(server_calls)\n server_calls = []\n for i in range(6):\n server_calls.append(mock.call(fake_server))\n self.assertEqual(6, scenario._lock_and_unlock_server.call_count,\n \"Lock/Unlock not called 6 times\")\n scenario._lock_and_unlock_server.assert_has_calls(server_calls)\n server_calls = []\n for i in range(7):\n server_calls.append(mock.call(fake_server))\n self.assertEqual(7, scenario._shelve_and_unshelve_server.call_count,\n \"Shelve/Unshelve not called 7 times\")\n scenario._shelve_and_unshelve_server.assert_has_calls(server_calls)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n def test_boot_lock_unlock_and_delete(self):\n server = fakes.FakeServer()\n image = fakes.FakeImage()\n flavor = fakes.FakeFlavor()\n\n scenario = servers.BootLockUnlockAndDelete(self.context)\n scenario._boot_server = mock.Mock(return_value=server)\n scenario._lock_server = mock.Mock(side_effect=lambda s: s.lock())\n scenario._unlock_server = mock.Mock(side_effect=lambda s: s.unlock())\n scenario._delete_server = mock.Mock(\n side_effect=lambda s, **kwargs:\n self.assertFalse(getattr(s, \"OS-EXT-STS:locked\", False)))\n\n scenario.run(image, flavor, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(image, flavor,\n fakearg=\"fakearg\")\n scenario._lock_server.assert_called_once_with(server)\n scenario._unlock_server.assert_called_once_with(server)\n scenario._delete_server.assert_called_once_with(server, force=False)\n\n @ddt.data(\"hard_reboot\", \"soft_reboot\", \"stop_start\",\n \"rescue_unrescue\", \"pause_unpause\", \"suspend_resume\",\n \"lock_unlock\", \"shelve_unshelve\")\n def test_validate_actions(self, action):\n scenario = servers.BootAndBounceServer(self.context)\n\n 
self.assertRaises(rally_exceptions.InvalidConfigException,\n scenario.run,\n 1, 1, actions=[{action: \"no\"}])\n self.assertRaises(rally_exceptions.InvalidConfigException,\n scenario.run,\n 1, 1, actions=[{action: -1}])\n self.assertRaises(rally_exceptions.InvalidConfigException,\n scenario.run,\n 1, 1, actions=[{action: 0}])\n\n def test_validate_actions_additional(self):\n scenario = servers.BootAndBounceServer(self.context)\n\n self.assertRaises(rally_exceptions.InvalidConfigException,\n scenario.run,\n 1, 1, actions=[{\"not_existing_action\": \"no\"}])\n # NOTE: next should fail because actions parameter is a just a\n # dictionary, not an array of dictionaries\n self.assertRaises(rally_exceptions.InvalidConfigException,\n scenario.run,\n 1, 1, actions={\"hard_reboot\": 1})\n\n def _verify_reboot(self, soft=True):\n actions = [{\"soft_reboot\" if soft else \"hard_reboot\": 5}]\n fake_server = mock.MagicMock()\n scenario = servers.BootAndBounceServer(self.context)\n\n scenario._reboot_server = mock.MagicMock()\n scenario._soft_reboot_server = mock.MagicMock()\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._delete_server = mock.MagicMock()\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n\n scenario.run(\"img\", 1, actions=actions)\n\n scenario._boot_server.assert_called_once_with(\"img\", 1)\n server_calls = []\n for i in range(5):\n server_calls.append(mock.call(fake_server))\n if soft:\n self.assertEqual(5, scenario._soft_reboot_server.call_count,\n \"Reboot not called 5 times\")\n scenario._soft_reboot_server.assert_has_calls(server_calls)\n else:\n self.assertEqual(5, scenario._reboot_server.call_count,\n \"Reboot not called 5 times\")\n scenario._reboot_server.assert_has_calls(server_calls)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n def test_boot_soft_reboot(self):\n self._verify_reboot(soft=True)\n\n def test_boot_hard_reboot(self):\n 
self._verify_reboot(soft=False)\n\n def test_boot_and_delete_server(self):\n fake_server = object()\n\n scenario = servers.BootAndDeleteServer(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._delete_server = mock.MagicMock()\n scenario.sleep_between = mock.MagicMock()\n\n scenario.run(\"img\", 0, 10, 20, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(\"img\", 0,\n fakearg=\"fakearg\")\n scenario.sleep_between.assert_called_once_with(10, 20)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n def test_boot_and_delete_multiple_servers(self):\n scenario = servers.BootAndDeleteMultipleServers(self.context)\n scenario._boot_servers = mock.Mock()\n scenario._delete_servers = mock.Mock()\n scenario.sleep_between = mock.Mock()\n\n scenario.run(\"img\", \"flavor\", count=15, min_sleep=10,\n max_sleep=20, fakearg=\"fakearg\")\n\n scenario._boot_servers.assert_called_once_with(\"img\", \"flavor\", 1,\n instances_amount=15,\n fakearg=\"fakearg\")\n scenario.sleep_between.assert_called_once_with(10, 20)\n scenario._delete_servers.assert_called_once_with(\n scenario._boot_servers.return_value, force=False)\n\n def test_boot_and_list_server(self):\n scenario = servers.BootAndListServer(self.context)\n# scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n\n img_name = \"img\"\n flavor_uuid = 0\n details = True\n fake_server_name = mock.MagicMock()\n scenario._boot_server = mock.MagicMock()\n scenario._list_servers = mock.MagicMock()\n scenario._list_servers.return_value = [mock.MagicMock(),\n fake_server_name,\n mock.MagicMock()]\n\n # Positive case\n scenario._boot_server.return_value = fake_server_name\n scenario.run(img_name, flavor_uuid, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(img_name, flavor_uuid,\n fakearg=\"fakearg\")\n 
scenario._list_servers.assert_called_once_with(details)\n\n # Negative case1: server isn't created\n scenario._boot_server.return_value = None\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n img_name, flavor_uuid, fakearg=\"fakearg\")\n scenario._boot_server.assert_called_with(img_name, flavor_uuid,\n fakearg=\"fakearg\")\n\n # Negative case2: server not in the list of available servers\n scenario._boot_server.return_value = mock.MagicMock()\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n img_name, flavor_uuid, fakearg=\"fakearg\")\n scenario._boot_server.assert_called_with(img_name, flavor_uuid,\n fakearg=\"fakearg\")\n scenario._list_servers.assert_called_with(details)\n\n def test_suspend_and_resume_server(self):\n fake_server = object()\n\n scenario = servers.SuspendAndResumeServer(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._suspend_server = mock.MagicMock()\n scenario._resume_server = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n\n scenario.run(\"img\", 0, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(\"img\", 0,\n fakearg=\"fakearg\")\n\n scenario._suspend_server.assert_called_once_with(fake_server)\n scenario._resume_server.assert_called_once_with(fake_server)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n def test_pause_and_unpause_server(self):\n fake_server = object()\n\n scenario = servers.PauseAndUnpauseServer(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._pause_server = mock.MagicMock()\n scenario._unpause_server = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n\n scenario.run(\"img\", 0, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(\"img\", 
0,\n fakearg=\"fakearg\")\n\n scenario._pause_server.assert_called_once_with(fake_server)\n scenario._unpause_server.assert_called_once_with(fake_server)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n def test_shelve_and_unshelve_server(self):\n fake_server = mock.MagicMock()\n scenario = servers.ShelveAndUnshelveServer(self.context)\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._shelve_server = mock.MagicMock()\n scenario._unshelve_server = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n\n scenario.run(\"img\", 0, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(\"img\", 0,\n fakearg=\"fakearg\")\n\n scenario._shelve_server.assert_called_once_with(fake_server)\n scenario._unshelve_server.assert_called_once_with(fake_server)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n def test_list_servers(self):\n scenario = servers.ListServers(self.context)\n scenario._list_servers = mock.MagicMock()\n scenario.run(True)\n scenario._list_servers.assert_called_once_with(True)\n\n @mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def test_boot_server_from_volume(self, mock_block_storage):\n fake_server = object()\n scenario = servers.BootServerFromVolume(\n self.context, clients=mock.Mock())\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n\n fake_volume = fakes.FakeVolumeManager().create()\n fake_volume.id = \"volume_id\"\n cinder = mock_block_storage.return_value\n cinder.create_volume.return_value = fake_volume\n\n scenario.run(\"img\", 0, 5, volume_type=None,\n auto_assign_nic=False, fakearg=\"f\")\n\n cinder.create_volume.assert_called_once_with(5, imageRef=\"img\",\n volume_type=None)\n scenario._boot_server.assert_called_once_with(\n None, 0, auto_assign_nic=False,\n block_device_mapping={\"vda\": \"volume_id:::0\"},\n fakearg=\"f\")\n\n 
@mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def test_boot_server_from_volume_and_delete(self, mock_block_storage):\n fake_server = object()\n scenario = servers.BootServerFromVolumeAndDelete(\n self.context, clients=mock.Mock())\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario.sleep_between = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n\n fake_volume = fakes.FakeVolumeManager().create()\n fake_volume.id = \"volume_id\"\n cinder = mock_block_storage.return_value\n cinder.create_volume.return_value = fake_volume\n\n scenario.run(\"img\", 0, 5, None, 10, 20, fakearg=\"f\")\n\n cinder.create_volume.assert_called_once_with(5, imageRef=\"img\",\n volume_type=None)\n scenario._boot_server.assert_called_once_with(\n None, 0,\n block_device_mapping={\"vda\": \"volume_id:::0\"},\n fakearg=\"f\")\n scenario.sleep_between.assert_called_once_with(10, 20)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n def _prepare_boot(self, nic=None, assert_nic=False):\n fake_server = mock.MagicMock()\n\n scenario = servers.BootServer(self.context)\n\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n\n kwargs = {\"fakearg\": \"f\"}\n expected_kwargs = {\"fakearg\": \"f\"}\n\n assert_nic = nic or assert_nic\n if nic:\n kwargs[\"nics\"] = nic\n if assert_nic:\n self.clients(\"nova\").networks.create(\"net-1\")\n expected_kwargs[\"nics\"] = nic or [{\"net-id\": \"net-2\"}]\n\n return scenario, kwargs, expected_kwargs\n\n def _verify_boot_server(self, nic=None, assert_nic=False):\n scenario, kwargs, expected_kwargs = self._prepare_boot(\n nic=nic, assert_nic=assert_nic)\n\n scenario.run(\"img\", 0, **kwargs)\n scenario._boot_server.assert_called_once_with(\n \"img\", 0, auto_assign_nic=False, **expected_kwargs)\n\n def test_boot_server_no_nics(self):\n self._verify_boot_server(nic=None, 
assert_nic=False)\n\n def test_boot_server_with_nic(self):\n self._verify_boot_server(nic=[{\"net-id\": \"net-1\"}], assert_nic=True)\n\n def test_snapshot_server(self):\n fake_server = object()\n fake_image = fakes.FakeImageManager()._create()\n fake_image.id = \"image_id\"\n\n scenario = servers.SnapshotServer(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._create_image = mock.MagicMock(return_value=fake_image)\n scenario._delete_server = mock.MagicMock()\n scenario._delete_image = mock.MagicMock()\n\n scenario.run(\"i\", 0, fakearg=2)\n\n scenario._boot_server.assert_has_calls([\n mock.call(\"i\", 0, fakearg=2),\n mock.call(\"image_id\", 0, fakearg=2)])\n scenario._create_image.assert_called_once_with(fake_server)\n scenario._delete_server.assert_has_calls([\n mock.call(fake_server, force=False),\n mock.call(fake_server, force=False)])\n scenario._delete_image.assert_called_once_with(fake_image)\n\n def _test_resize(self, confirm=False):\n fake_server = object()\n fake_image = fakes.FakeImageManager()._create()\n fake_image.id = \"image_id\"\n flavor = mock.MagicMock()\n to_flavor = mock.MagicMock()\n\n scenario = servers.ResizeServer(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._resize_confirm = mock.MagicMock()\n scenario._resize_revert = mock.MagicMock()\n scenario._resize = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n\n kwargs = {\"confirm\": confirm}\n scenario.run(fake_image, flavor, to_flavor, **kwargs)\n\n scenario._resize.assert_called_once_with(fake_server, to_flavor)\n\n if confirm:\n scenario._resize_confirm.assert_called_once_with(fake_server)\n else:\n scenario._resize_revert.assert_called_once_with(fake_server)\n\n def test_resize_with_confirm(self):\n self._test_resize(confirm=True)\n\n def 
test_resize_with_revert(self):\n self._test_resize(confirm=False)\n\n @ddt.data({\"confirm\": True},\n {\"confirm\": False})\n @ddt.unpack\n def test_resize_shoutoff_server(self, confirm=False):\n fake_server = object()\n flavor = mock.MagicMock()\n to_flavor = mock.MagicMock()\n\n scenario = servers.ResizeShutoffServer(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._stop_server = mock.MagicMock()\n scenario._resize_confirm = mock.MagicMock()\n scenario._resize_revert = mock.MagicMock()\n scenario._resize = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n\n scenario.run(\"img\", flavor, to_flavor, confirm=confirm)\n\n scenario._boot_server.assert_called_once_with(\"img\", flavor)\n scenario._stop_server.assert_called_once_with(fake_server)\n scenario._resize.assert_called_once_with(fake_server, to_flavor)\n\n if confirm:\n scenario._resize_confirm.assert_called_once_with(fake_server,\n \"SHUTOFF\")\n else:\n scenario._resize_revert.assert_called_once_with(fake_server,\n \"SHUTOFF\")\n\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n @ddt.data({\"confirm\": True, \"do_delete\": True},\n {\"confirm\": False, \"do_delete\": True})\n @ddt.unpack\n @mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def test_boot_server_attach_created_volume_and_resize(\n self, mock_block_storage, confirm=False, do_delete=False):\n fake_volume = mock.MagicMock()\n fake_server = mock.MagicMock()\n flavor = mock.MagicMock()\n to_flavor = mock.MagicMock()\n fake_attachment = mock.MagicMock()\n\n cinder = mock_block_storage.return_value\n cinder.create_volume.return_value = fake_volume\n\n scenario = servers.BootServerAttachCreatedVolumeAndResize(\n self.context, clients=mock.Mock())\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = 
mock.MagicMock(return_value=fake_server)\n scenario._attach_volume = mock.MagicMock(return_value=fake_attachment)\n scenario._resize_confirm = mock.MagicMock()\n scenario._resize_revert = mock.MagicMock()\n scenario._resize = mock.MagicMock()\n scenario._detach_volume = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n scenario.sleep_between = mock.MagicMock()\n\n volume_size = 10\n scenario.run(\"img\", flavor, to_flavor, volume_size, min_sleep=10,\n max_sleep=20, confirm=confirm, do_delete=do_delete)\n\n scenario._boot_server.assert_called_once_with(\"img\", flavor)\n cinder.create_volume.assert_called_once_with(volume_size)\n scenario._attach_volume.assert_called_once_with(fake_server,\n fake_volume)\n scenario._detach_volume.assert_called_once_with(fake_server,\n fake_volume)\n scenario.sleep_between.assert_called_once_with(10, 20)\n scenario._resize.assert_called_once_with(fake_server, to_flavor)\n\n if confirm:\n scenario._resize_confirm.assert_called_once_with(fake_server)\n else:\n scenario._resize_revert.assert_called_once_with(fake_server)\n\n if do_delete:\n scenario._detach_volume.assert_called_once_with(fake_server,\n fake_volume)\n cinder.delete_volume.assert_called_once_with(fake_volume)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n @mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def test_boot_server_attach_created_volume_and_extend(\n self, mock_block_storage, do_delete=False):\n fake_volume = mock.MagicMock()\n fake_server = mock.MagicMock()\n flavor = mock.MagicMock()\n fake_attachment = mock.MagicMock()\n\n cinder = mock_block_storage.return_value\n cinder.create_volume.return_value = fake_volume\n\n scenario = servers.BootServerAttachCreatedVolumeAndExtend(\n self.context, clients=mock.Mock())\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._attach_volume = 
mock.MagicMock(return_value=fake_attachment)\n scenario._detach_volume = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n scenario.sleep_between = mock.MagicMock()\n\n volume_size = 10\n new_volume_size = 20\n scenario.run(\"img\", flavor, volume_size, new_volume_size,\n min_sleep=10, max_sleep=20, do_delete=do_delete)\n\n scenario._boot_server.assert_called_once_with(\"img\", flavor)\n cinder.create_volume.assert_called_once_with(volume_size)\n scenario._attach_volume.assert_called_once_with(fake_server,\n fake_volume)\n scenario.sleep_between.assert_called_once_with(10, 20)\n cinder.extend_volume.assert_called_once_with(\n fake_volume, new_size=new_volume_size)\n\n if do_delete:\n scenario._detach_volume.assert_called_once_with(fake_server,\n fake_volume)\n cinder.delete_volume.assert_called_once_with(fake_volume)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n @mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def test_list_attachments(self, mock_block_storage):\n mock_volume_service = mock_block_storage.return_value\n fake_volume = mock.MagicMock()\n fake_server = mock.MagicMock()\n flavor = mock.MagicMock()\n fake_attachment = mock.MagicMock()\n list_attachments = [mock.MagicMock(),\n fake_attachment,\n mock.MagicMock()]\n context = self.context\n context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()},\n \"tenant\": {\"id\": \"fake\", \"name\": \"fake\",\n \"volumes\": [{\"id\": \"uuid\", \"size\": 1}],\n \"servers\": [1]}})\n scenario = servers.BootServerAttachVolumeAndListAttachments(\n context)\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._attach_volume = mock.MagicMock()\n scenario._list_attachments = mock.MagicMock()\n mock_volume_service.create_volume.return_value = fake_volume\n scenario._list_attachments.return_value = 
list_attachments\n\n img_name = \"img\"\n volume_size = 10\n volume_num = 1\n\n scenario._attach_volume.return_value = fake_attachment\n scenario.run(img_name, flavor, volume_size, volume_num)\n\n scenario._boot_server.assert_called_once_with(img_name, flavor)\n mock_volume_service.create_volume.assert_called_once_with(volume_size)\n scenario._attach_volume.assert_called_once_with(fake_server,\n fake_volume)\n scenario._list_attachments.assert_called_once_with(fake_server.id)\n\n @mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def test_list_attachments_fails(self, mock_block_storage):\n mock_volume_service = mock_block_storage.return_value\n fake_volume = mock.MagicMock()\n fake_server = mock.MagicMock()\n flavor = mock.MagicMock()\n fake_attachment = mock.MagicMock()\n list_attachments = [mock.MagicMock(),\n mock.MagicMock(),\n mock.MagicMock()]\n\n context = self.context\n context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()},\n \"tenant\": {\"id\": \"fake\", \"name\": \"fake\",\n \"volumes\": [{\"id\": \"uuid\", \"size\": 1}],\n \"servers\": [1]}})\n scenario = servers.BootServerAttachVolumeAndListAttachments(\n context)\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n mock_volume_service.create_volume.return_value = fake_volume\n scenario._attach_volume = mock.MagicMock()\n scenario._list_attachments = mock.MagicMock()\n scenario._attach_volume.return_value = fake_attachment\n scenario._list_attachments.return_value = list_attachments\n\n img_name = \"img\"\n volume_size = 10\n\n # Negative case: attachment not included into list of\n # available attachments\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n img_name, flavor, volume_size)\n\n scenario._boot_server.assert_called_with(img_name, flavor)\n mock_volume_service.create_volume.assert_called_with(volume_size)\n 
scenario._attach_volume.assert_called_with(fake_server,\n fake_volume)\n scenario._list_attachments.assert_called_with(fake_server.id)\n\n @ddt.data({\"confirm\": True, \"do_delete\": True},\n {\"confirm\": False, \"do_delete\": True})\n @ddt.unpack\n @mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def test_boot_server_from_volume_and_resize(\n self, mock_block_storage, confirm=False, do_delete=False):\n fake_server = object()\n flavor = mock.MagicMock()\n to_flavor = mock.MagicMock()\n scenario = servers.BootServerFromVolumeAndResize(self.context,\n clients=mock.Mock())\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._resize_confirm = mock.MagicMock()\n scenario._resize_revert = mock.MagicMock()\n scenario._resize = mock.MagicMock()\n scenario.sleep_between = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n\n fake_volume = fakes.FakeVolumeManager().create()\n fake_volume.id = \"volume_id\"\n cinder = mock_block_storage.return_value\n cinder.create_volume.return_value = fake_volume\n\n volume_size = 10\n scenario.run(\"img\", flavor, to_flavor, volume_size, min_sleep=10,\n max_sleep=20, confirm=confirm, do_delete=do_delete)\n\n cinder.create_volume.assert_called_once_with(10, imageRef=\"img\")\n scenario._boot_server.assert_called_once_with(\n None, flavor,\n block_device_mapping={\"vda\": \"volume_id:::0\"})\n scenario.sleep_between.assert_called_once_with(10, 20)\n scenario._resize.assert_called_once_with(fake_server, to_flavor)\n\n if confirm:\n scenario._resize_confirm.assert_called_once_with(fake_server)\n else:\n scenario._resize_revert.assert_called_once_with(fake_server)\n\n if do_delete:\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n def test_boot_and_live_migrate_server(self):\n fake_server = mock.MagicMock()\n\n scenario = servers.BootAndLiveMigrateServer(self.context)\n 
scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario.sleep_between = mock.MagicMock()\n scenario._live_migrate = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n\n scenario.run(\"img\", 0, min_sleep=10, max_sleep=20, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(\"img\", 0,\n fakearg=\"fakearg\")\n\n scenario.sleep_between.assert_called_once_with(10, 20)\n\n scenario._live_migrate.assert_called_once_with(fake_server,\n False, False)\n scenario._delete_server.assert_called_once_with(fake_server)\n\n @mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def test_boot_server_from_volume_and_live_migrate(self,\n mock_block_storage):\n fake_server = mock.MagicMock()\n\n scenario = servers.BootServerFromVolumeAndLiveMigrate(\n self.context, clients=mock.Mock())\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario.sleep_between = mock.MagicMock()\n scenario._live_migrate = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n\n fake_volume = fakes.FakeVolumeManager().create()\n fake_volume.id = \"volume_id\"\n cinder = mock_block_storage.return_value\n cinder.create_volume.return_value = fake_volume\n\n scenario.run(\"img\", 0, 5, volume_type=None,\n min_sleep=10, max_sleep=20, fakearg=\"f\")\n\n cinder.create_volume.assert_called_once_with(5, imageRef=\"img\",\n volume_type=None)\n\n scenario._boot_server.assert_called_once_with(\n None, 0,\n block_device_mapping={\"vda\": \"volume_id:::0\"},\n fakearg=\"f\")\n\n scenario.sleep_between.assert_called_once_with(10, 20)\n\n scenario._live_migrate.assert_called_once_with(fake_server,\n False, False)\n scenario._delete_server.assert_called_once_with(fake_server,\n force=False)\n\n @mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def 
test_boot_server_attach_created_volume_and_live_migrate(\n self, mock_block_storage):\n fake_volume = mock.MagicMock()\n fake_server = mock.MagicMock()\n fake_attachment = mock.MagicMock()\n\n clients = mock.Mock()\n cinder = mock_block_storage.return_value\n cinder.create_volume.return_value = fake_volume\n\n scenario = servers.BootServerAttachCreatedVolumeAndLiveMigrate(\n self.context, clients=clients)\n\n scenario._attach_volume = mock.MagicMock(return_value=fake_attachment)\n scenario._detach_volume = mock.MagicMock()\n\n scenario.sleep_between = mock.MagicMock()\n\n scenario._live_migrate = mock.MagicMock()\n\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._delete_server = mock.MagicMock()\n\n image = \"img\"\n flavor = \"flavor\"\n size = 5\n boot_kwargs = {\"some_var\": \"asd\"}\n scenario.run(image, flavor, size, min_sleep=10, max_sleep=20,\n boot_server_kwargs=boot_kwargs)\n scenario._boot_server.assert_called_once_with(image, flavor,\n **boot_kwargs)\n cinder.create_volume.assert_called_once_with(size)\n scenario._attach_volume.assert_called_once_with(fake_server,\n fake_volume)\n scenario._detach_volume.assert_called_once_with(fake_server,\n fake_volume)\n scenario.sleep_between.assert_called_once_with(10, 20)\n scenario._live_migrate.assert_called_once_with(fake_server,\n False, False)\n\n cinder.delete_volume.assert_called_once_with(fake_volume)\n scenario._delete_server.assert_called_once_with(fake_server)\n\n def _test_boot_and_migrate_server(self, confirm=False):\n fake_server = mock.MagicMock()\n\n scenario = servers.BootAndMigrateServer(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._migrate = mock.MagicMock()\n scenario._resize_confirm = mock.MagicMock()\n scenario._resize_revert = mock.MagicMock()\n scenario._delete_server = mock.MagicMock()\n\n kwargs = {\"confirm\": confirm}\n scenario.run(\"img\", 
0, fakearg=\"fakearg\", **kwargs)\n\n scenario._boot_server.assert_called_once_with(\"img\", 0,\n fakearg=\"fakearg\",\n confirm=confirm)\n\n scenario._migrate.assert_called_once_with(fake_server)\n\n if confirm:\n scenario._resize_confirm.assert_called_once_with(fake_server,\n status=\"ACTIVE\")\n else:\n scenario._resize_revert.assert_called_once_with(fake_server,\n status=\"ACTIVE\")\n\n scenario._delete_server.assert_called_once_with(fake_server)\n\n def test_boot_and_migrate_server_with_confirm(self):\n self._test_boot_and_migrate_server(confirm=True)\n\n def test_boot_and_migrate_server_with_revert(self):\n self._test_boot_and_migrate_server(confirm=False)\n\n def test_boot_and_rebuild_server(self):\n scenario = servers.BootAndRebuildServer(self.context)\n scenario._boot_server = mock.Mock()\n scenario._rebuild_server = mock.Mock()\n scenario._delete_server = mock.Mock()\n\n from_image = \"img1\"\n to_image = \"img2\"\n flavor = \"flavor\"\n scenario.run(from_image, to_image, flavor, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(from_image, flavor,\n fakearg=\"fakearg\")\n server = scenario._boot_server.return_value\n scenario._rebuild_server.assert_called_once_with(server, to_image)\n scenario._delete_server.assert_called_once_with(server)\n\n def test_boot_and_show_server(self):\n server = fakes.FakeServer()\n image = fakes.FakeImage()\n flavor = fakes.FakeFlavor()\n\n scenario = servers.BootAndShowServer(self.context)\n scenario._boot_server = mock.MagicMock(return_value=server)\n scenario._show_server = mock.MagicMock()\n\n scenario.run(image, flavor, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(image, flavor,\n fakearg=\"fakearg\")\n scenario._show_server.assert_called_once_with(server)\n\n def test_boot_server_and_list_interfaces(self):\n server = fakes.FakeServer()\n image = fakes.FakeImage()\n flavor = fakes.FakeFlavor()\n\n scenario = servers.BootServerAndListInterfaces(self.context)\n 
scenario._boot_server = mock.MagicMock(return_value=server)\n scenario._list_interfaces = mock.MagicMock()\n\n scenario.run(image, flavor, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(image, flavor,\n fakearg=\"fakearg\")\n scenario._list_interfaces.assert_called_once_with(server)\n\n @ddt.data({\"length\": None},\n {\"length\": 10})\n @ddt.unpack\n def test_boot_and_get_console_server(self, length):\n server = fakes.FakeServer()\n image = fakes.FakeImage()\n flavor = fakes.FakeFlavor()\n kwargs = {\"fakearg\": \"fakearg\"}\n\n scenario = servers.BootAndGetConsoleOutput(self.context)\n scenario._boot_server = mock.MagicMock(return_value=server)\n scenario._get_server_console_output = mock.MagicMock()\n\n scenario.run(image, flavor, length, **kwargs)\n\n scenario._boot_server.assert_called_once_with(image, flavor,\n **kwargs)\n scenario._get_server_console_output.assert_called_once_with(server,\n length)\n\n def test_boot_and_get_console_url(self):\n server = fakes.FakeServer()\n image = fakes.FakeImage()\n flavor = fakes.FakeFlavor()\n kwargs = {\"fakearg\": \"fakearg\"}\n\n scenario = servers.BootAndGetConsoleUrl(self.context)\n scenario._boot_server = mock.MagicMock(return_value=server)\n scenario._get_console_url_server = mock.MagicMock()\n\n scenario.run(image, flavor, console_type=\"novnc\", **kwargs)\n\n scenario._boot_server.assert_called_once_with(image, flavor,\n **kwargs)\n scenario._get_console_url_server.assert_called_once_with(\n server, \"novnc\")\n\n def test_boot_and_associate_floating_ip(self):\n clients = mock.MagicMock(credential=mock.MagicMock(api_info={}))\n neutronclient = clients.neutron.return_value\n floatingip = \"floatingip\"\n neutronclient.create_floatingip.return_value = {\n \"floatingip\": floatingip}\n\n scenario = servers.BootAndAssociateFloatingIp(self.context,\n clients=clients)\n server = mock.Mock()\n scenario._boot_server = mock.Mock(return_value=server)\n scenario._associate_floating_ip = 
mock.Mock()\n\n image = \"img\"\n flavor = \"flavor\"\n scenario.run(image, flavor, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(image, flavor,\n fakearg=\"fakearg\")\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": mock.ANY}\n )\n scenario._associate_floating_ip.assert_called_once_with(\n server, floatingip)\n\n # check ext_network\n neutronclient.list_networks.return_value = {\n \"networks\": [\n {\"id\": \"id1\", \"name\": \"net1\", \"router:external\": True},\n {\"id\": \"id2\", \"name\": \"net2\", \"router:external\": True},\n {\"id\": \"id3\", \"name\": \"net3\", \"router:external\": True},\n ]\n }\n neutronclient.create_floatingip.reset_mock()\n\n # case 1: new argument is used\n scenario.run(image, flavor, floating_network=\"net3\")\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": \"id3\"}}\n )\n # case 2: new argument is transmitted with an old one\n neutronclient.create_floatingip.reset_mock()\n scenario.run(image, flavor, floating_network=\"net3\",\n create_floating_ip_args={\"ext_network\": \"net2\"})\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": \"id3\"}}\n )\n # case 3: new argument is transmitted with an semi-old one\n neutronclient.create_floatingip.reset_mock()\n scenario.run(image, flavor, floating_network=\"net3\",\n create_floating_ip_args={\"floating_network\": \"net1\"})\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": \"id3\"}}\n )\n # case 4: only old argument is transmitted\n neutronclient.create_floatingip.reset_mock()\n scenario.run(image, flavor,\n create_floating_ip_args={\"ext_network\": \"net2\"})\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": 
\"id2\"}}\n )\n # case 5: only semi-old argument is transmitted\n neutronclient.create_floatingip.reset_mock()\n scenario.run(image, flavor,\n create_floating_ip_args={\"floating_network\": \"net1\"})\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": \"id1\"}}\n )\n\n def test_boot_server_associate_and_dissociate_floating_ip(self):\n clients = mock.MagicMock(credential=mock.MagicMock(api_info={}))\n neutronclient = clients.neutron.return_value\n floatingip = \"floatingip\"\n neutronclient.create_floatingip.return_value = {\n \"floatingip\": floatingip}\n\n scenario = servers.BootServerAssociateAndDissociateFloatingIP(\n self.context, clients=clients)\n server = mock.Mock()\n scenario._boot_server = mock.Mock(return_value=server)\n scenario._associate_floating_ip = mock.Mock()\n scenario._dissociate_floating_ip = mock.Mock()\n\n image = \"img\"\n flavor = \"flavor\"\n scenario.run(image, flavor, fakearg=\"fakearg\")\n\n scenario._boot_server.assert_called_once_with(image, flavor,\n fakearg=\"fakearg\")\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": mock.ANY}\n )\n scenario._associate_floating_ip.assert_called_once_with(\n server, floatingip)\n scenario._dissociate_floating_ip.assert_called_once_with(\n server, floatingip)\n\n # check ext_network\n neutronclient.list_networks.return_value = {\n \"networks\": [\n {\"id\": \"id1\", \"name\": \"net1\", \"router:external\": True},\n {\"id\": \"id2\", \"name\": \"net2\", \"router:external\": True},\n {\"id\": \"id3\", \"name\": \"net3\", \"router:external\": True},\n ]\n }\n neutronclient.create_floatingip.reset_mock()\n\n # case 1: new argument is used\n scenario.run(image, flavor, floating_network=\"net3\")\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": \"id3\"}}\n )\n # case 2: new argument is transmitted with an old one\n 
neutronclient.create_floatingip.reset_mock()\n scenario.run(image, flavor, floating_network=\"net3\",\n create_floating_ip_args={\"ext_network\": \"net2\"})\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": \"id3\"}}\n )\n # case 3: new argument is transmitted with an semi-old one\n neutronclient.create_floatingip.reset_mock()\n scenario.run(image, flavor, floating_network=\"net3\",\n create_floating_ip_args={\"floating_network\": \"net1\"})\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": \"id3\"}}\n )\n # case 4: only old argument is transmitted\n neutronclient.create_floatingip.reset_mock()\n scenario.run(image, flavor,\n create_floating_ip_args={\"ext_network\": \"net2\"})\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": \"id2\"}}\n )\n # case 5: only semi-old argument is transmitted\n neutronclient.create_floatingip.reset_mock()\n scenario.run(image, flavor,\n create_floating_ip_args={\"floating_network\": \"net1\"})\n neutronclient.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": \"id1\"}}\n )\n\n def test_boot_and_update_server(self):\n scenario = servers.BootAndUpdateServer(self.context)\n scenario._boot_server = mock.Mock()\n scenario._update_server = mock.Mock()\n\n scenario.run(\"img\", \"flavor\", \"desp\", fakearg=\"fakearg\")\n scenario._boot_server.assert_called_once_with(\"img\", \"flavor\",\n fakearg=\"fakearg\")\n scenario._update_server.assert_called_once_with(\n scenario._boot_server.return_value, \"desp\")\n\n def test_boot_server_and_attach_interface(self):\n network_create_args = {\"router:external\": True}\n subnet_create_args = {\"allocation_pools\": []}\n subnet_cidr_start = \"10.1.0.0/16\"\n boot_server_args = {}\n net = 
mock.MagicMock()\n subnet = mock.MagicMock()\n server = mock.MagicMock()\n\n scenario = servers.BootServerAndAttachInterface(self.context)\n scenario._get_or_create_network = mock.Mock(return_value=net)\n scenario._create_subnet = mock.Mock(return_value=subnet)\n scenario._boot_server = mock.Mock(return_value=server)\n scenario._attach_interface = mock.Mock()\n\n scenario.run(\"image\", \"flavor\",\n network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnet_cidr_start=subnet_cidr_start,\n boot_server_args=boot_server_args)\n\n scenario._get_or_create_network.assert_called_once_with(\n network_create_args)\n scenario._create_subnet.assert_called_once_with(net,\n subnet_create_args,\n subnet_cidr_start)\n scenario._boot_server.assert_called_once_with(\"image\", \"flavor\",\n **boot_server_args)\n scenario._attach_interface.assert_called_once_with(\n server, net_id=net[\"network\"][\"id\"])\n\n @mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def test_boot_server_from_volume_snapshot(self, mock_block_storage):\n fake_volume = mock.MagicMock(id=\"volume_id\")\n fake_snapshot = mock.MagicMock(id=\"snapshot_id\")\n\n cinder = mock_block_storage.return_value\n cinder.create_volume.return_value = fake_volume\n cinder.create_snapshot.return_value = fake_snapshot\n\n scenario = servers.BootServerFromVolumeSnapshot(self.context,\n clients=mock.Mock())\n scenario._boot_server = mock.MagicMock()\n\n scenario.run(\"img\", \"flavor\", 1, volume_type=None,\n auto_assign_nic=False, fakearg=\"f\")\n\n cinder.create_volume.assert_called_once_with(1, imageRef=\"img\",\n volume_type=None)\n cinder.create_snapshot.assert_called_once_with(\"volume_id\",\n force=False)\n scenario._boot_server.assert_called_once_with(\n None, \"flavor\", auto_assign_nic=False,\n block_device_mapping={\"vda\": \"snapshot_id:snap::1\"},\n fakearg=\"f\")\n" }, { "alpha_fraction": 0.6357440948486328, "alphanum_fraction": 0.639499306678772, 
"avg_line_length": 42.57575607299805, "blob_id": "ea428eaaa65a1e00f0b96c254f1bf6211b354728", "content_id": "8fe6e982d45edb5bb3247cd907422c02abe3b782", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7190, "license_type": "permissive", "max_line_length": 78, "num_lines": 165, "path": "/tests/unit/task/scenarios/nova/test_server_groups.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017: Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally import exceptions as rally_exceptions\nfrom rally_openstack.task.scenarios.nova import server_groups\nfrom tests.unit import test\n\nSERVER_GROUPS_MODULE = \"rally_openstack.task.scenarios.nova.server_groups\"\nNOVA_SERVER_GROUPS = SERVER_GROUPS_MODULE + \".NovaServerGroups\"\n\n\[email protected]\nclass NovaServerGroupsTestCase(test.ScenarioTestCase):\n\n def test_create_and_list_server_groups(self):\n scenario = server_groups.CreateAndListServerGroups(self.context)\n fake_server_group = mock.MagicMock()\n all_projects = False\n scenario._create_server_group = mock.MagicMock()\n scenario._list_server_groups = mock.MagicMock()\n scenario._list_server_groups.return_value = [mock.MagicMock(),\n fake_server_group,\n mock.MagicMock()]\n # Positive case and kwargs is None\n scenario._create_server_group.return_value = fake_server_group\n 
scenario.run(policies=\"fake_policy\", all_projects=False, kwargs=None)\n kwargs = {\n \"policies\": \"fake_policy\"\n }\n scenario._create_server_group.assert_called_once_with(**kwargs)\n scenario._list_server_groups.assert_called_once_with(all_projects)\n\n # Positive case and kwargs is not None\n foo_kwargs = {\n \"policies\": \"fake_policy\"\n }\n scenario._create_server_group.return_value = fake_server_group\n scenario.run(policies=None, all_projects=False,\n kwargs=foo_kwargs)\n scenario._create_server_group.assert_called_with(**foo_kwargs)\n scenario._list_server_groups.assert_called_with(all_projects)\n\n # Negative case1: server group isn't created\n scenario._create_server_group.return_value = None\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n **kwargs)\n scenario._create_server_group.assert_called_with(**kwargs)\n\n # Negative case2: server group not in the list of available server\n # groups\n scenario._create_server_group.return_value = mock.MagicMock()\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n **kwargs)\n scenario._create_server_group.assert_called_with(**kwargs)\n scenario._list_server_groups.assert_called_with(all_projects)\n\n def test_create_and_get_server_group_positive(self):\n scenario = server_groups.CreateAndGetServerGroup(self.context)\n fake_server_group = mock.MagicMock()\n fake_server_group_info = mock.MagicMock()\n fake_server_group.id = 123\n fake_server_group_info.id = 123\n scenario._create_server_group = mock.MagicMock()\n scenario._get_server_group = mock.MagicMock()\n # Positive case and kwargs is None\n kwargs = {\n \"policies\": \"fake_policy\"\n }\n scenario._create_server_group.return_value = fake_server_group\n scenario._get_server_group.return_value = fake_server_group_info\n scenario.run(policies=\"fake_policy\", kwargs=None)\n scenario._create_server_group.assert_called_once_with(**kwargs)\n scenario._get_server_group.assert_called_once_with(\n 
fake_server_group.id)\n\n # Positive case and kwargs is not None\n scenario._create_server_group.return_value = fake_server_group\n scenario._get_server_group.return_value = fake_server_group_info\n foo_kwargs = {\n \"policies\": \"fake_policy\"\n }\n scenario.run(policies=None, kwargs=foo_kwargs)\n scenario._create_server_group.assert_called_with(**foo_kwargs)\n scenario._get_server_group.assert_called_with(\n fake_server_group.id)\n\n def test_create_and_get_server_group_negative(self):\n scenario = server_groups.CreateAndGetServerGroup(self.context)\n fake_server_group = mock.MagicMock()\n fake_server_group_info = mock.MagicMock()\n fake_server_group.id = 123\n fake_server_group_info.id = 123\n kwargs = {\n \"policies\": \"fake_policy\"\n }\n scenario._create_server_group = mock.MagicMock()\n scenario._get_server_group = mock.MagicMock()\n\n # Negative case1: server group isn't created\n scenario._create_server_group.return_value = None\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n **kwargs)\n scenario._create_server_group.assert_called_with(**kwargs)\n\n # Negative case2: server group to get information not the created one\n fake_server_group_info.id = 456\n scenario._create_server_group.return_value = fake_server_group\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n **kwargs)\n scenario._create_server_group.assert_called_with(**kwargs)\n scenario._get_server_group.assert_called_with(\n fake_server_group.id)\n\n def test_create_and_delete_server_group(self):\n scenario = server_groups.CreateAndDeleteServerGroup(self.context)\n fake_server_group = mock.MagicMock()\n scenario._create_server_group = mock.MagicMock()\n scenario._delete_server_group = mock.MagicMock()\n\n # Positive case and kwargs is None\n kwargs = {\n \"policies\": \"fake_policy\"\n }\n scenario._create_server_group.return_value = fake_server_group\n scenario.run(policies=\"fake_policy\", kwargs=None)\n 
scenario._create_server_group.assert_called_once_with(**kwargs)\n scenario._delete_server_group.assert_called_once_with(\n fake_server_group.id)\n\n # Positive case and kwargs is not None\n scenario._create_server_group.return_value = fake_server_group\n foo_kwargs = {\n \"policies\": \"fake_policy\"\n }\n scenario.run(policies=None, kwargs=foo_kwargs)\n scenario._create_server_group.assert_called_with(**foo_kwargs)\n scenario._delete_server_group.assert_called_with(\n fake_server_group.id)\n\n # Negative case: server group isn't created\n scenario._create_server_group.return_value = None\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n **kwargs)\n scenario._create_server_group.assert_called_with(**kwargs)\n" }, { "alpha_fraction": 0.6315302848815918, "alphanum_fraction": 0.642780065536499, "avg_line_length": 43.44520568847656, "blob_id": "6e50d083a407722ba38523e96d40baf844355972", "content_id": "8cefc1c122e1ac014c72ed963a9c31e6d93e8a8b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6489, "license_type": "permissive", "max_line_length": 79, "num_lines": 146, "path": "/tests/unit/task/scenarios/swift/test_objects.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Cisco Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.scenarios.swift import objects\nfrom tests.unit import test\n\n\[email protected]\nclass SwiftObjectsTestCase(test.ScenarioTestCase):\n\n def test_create_container_and_object_then_list_objects(self):\n scenario = objects.CreateContainerAndObjectThenListObjects(\n self.context)\n scenario._create_container = mock.MagicMock(return_value=\"AA\")\n scenario._upload_object = mock.MagicMock()\n scenario._list_objects = mock.MagicMock()\n\n scenario.run(objects_per_container=5, object_size=100)\n\n self.assertEqual(1, scenario._create_container.call_count)\n self.assertEqual(5, scenario._upload_object.call_count)\n scenario._list_objects.assert_called_once_with(\"AA\")\n\n def test_create_container_and_object_then_delete_all(self):\n scenario = objects.CreateContainerAndObjectThenDeleteAll(self.context)\n scenario._create_container = mock.MagicMock(return_value=\"BB\")\n scenario._upload_object = mock.MagicMock(\n side_effect=[(\"etaaag\", \"ooobj_%i\" % i) for i in range(3)])\n scenario._delete_object = mock.MagicMock()\n scenario._delete_container = mock.MagicMock()\n\n scenario.run(objects_per_container=3, object_size=10)\n\n self.assertEqual(1, scenario._create_container.call_count)\n self.assertEqual(3, scenario._upload_object.call_count)\n scenario._delete_object.assert_has_calls(\n [mock.call(\"BB\", \"ooobj_%i\" % i) for i in range(3)])\n scenario._delete_container.assert_called_once_with(\"BB\")\n\n def test_create_container_and_object_then_download_object(self):\n scenario = objects.CreateContainerAndObjectThenDownloadObject(\n self.context\n )\n scenario._create_container = mock.MagicMock(return_value=\"CC\")\n scenario._upload_object = mock.MagicMock(\n side_effect=[(\"etaaaag\", \"obbbj_%i\" % i) for i in range(2)])\n scenario._download_object = mock.MagicMock()\n\n 
scenario.run(objects_per_container=2, object_size=50)\n\n self.assertEqual(1, scenario._create_container.call_count)\n self.assertEqual(2, scenario._upload_object.call_count)\n scenario._download_object.assert_has_calls(\n [mock.call(\"CC\", \"obbbj_%i\" % i) for i in range(2)])\n\n @ddt.data(1, 5)\n def test_list_objects_in_containers(self, num_cons):\n con_list = [{\"name\": \"cooon_%s\" % i} for i in range(num_cons)]\n scenario = objects.ListObjectsInContainers(self.context)\n scenario._list_containers = mock.MagicMock(return_value=(\"header\",\n con_list))\n scenario._list_objects = mock.MagicMock()\n\n scenario.run()\n scenario._list_containers.assert_called_once_with()\n con_calls = [mock.call(container[\"name\"])\n for container in con_list]\n scenario._list_objects.assert_has_calls(con_calls)\n\n @ddt.data([1, 1], [1, 2], [2, 1], [3, 5])\n @ddt.unpack\n def test_list_and_download_objects_in_containers(self, num_cons, num_objs):\n con_list = [{\"name\": \"connn_%s\" % i} for i in range(num_cons)]\n obj_list = [{\"name\": \"ooobj_%s\" % i} for i in range(num_objs)]\n scenario = objects.ListAndDownloadObjectsInContainers(self.context)\n scenario._list_containers = mock.MagicMock(return_value=(\"header\",\n con_list))\n scenario._list_objects = mock.MagicMock(return_value=(\"header\",\n obj_list))\n scenario._download_object = mock.MagicMock()\n\n scenario.run()\n scenario._list_containers.assert_called_once_with()\n con_calls = [mock.call(container[\"name\"])\n for container in con_list]\n scenario._list_objects.assert_has_calls(con_calls)\n obj_calls = []\n for container in con_list:\n for obj in obj_list:\n obj_calls.append(mock.call(container[\"name\"], obj[\"name\"]))\n scenario._download_object.assert_has_calls(obj_calls, any_order=True)\n\n def test_functional_create_container_and_object_then_list_objects(self):\n names_list = [\"AA\", \"BB\", \"CC\", \"DD\"]\n\n scenario = objects.CreateContainerAndObjectThenListObjects(\n self.context)\n 
scenario.generate_random_name = mock.MagicMock(side_effect=names_list)\n scenario._list_objects = mock.MagicMock()\n\n scenario.run(objects_per_container=3, object_size=100)\n\n scenario._list_objects.assert_called_once_with(\"AA\")\n\n def test_functional_create_container_and_object_then_delete_all(self):\n names_list = [\"111\", \"222\", \"333\", \"444\", \"555\"]\n\n scenario = objects.CreateContainerAndObjectThenDeleteAll(self.context)\n scenario.generate_random_name = mock.MagicMock(side_effect=names_list)\n scenario._delete_object = mock.MagicMock()\n scenario._delete_container = mock.MagicMock()\n\n scenario.run(objects_per_container=4, object_size=240)\n\n scenario._delete_object.assert_has_calls(\n [mock.call(\"111\", name) for name in names_list[1:]])\n scenario._delete_container.assert_called_once_with(\"111\")\n\n def test_functional_create_container_and_object_then_download_object(self):\n names_list = [\"aaa\", \"bbb\", \"ccc\", \"ddd\", \"eee\", \"fff\"]\n\n scenario = objects.CreateContainerAndObjectThenDownloadObject(\n self.context)\n scenario.generate_random_name = mock.MagicMock(side_effect=names_list)\n scenario._download_object = mock.MagicMock()\n\n scenario.run(objects_per_container=5, object_size=750)\n\n scenario._download_object.assert_has_calls(\n [mock.call(\"aaa\", name) for name in names_list[1:]])\n" }, { "alpha_fraction": 0.6127588152885437, "alphanum_fraction": 0.6172356009483337, "avg_line_length": 36.22916793823242, "blob_id": "d292b7ed02cb10e20169bac189e5ee8bc2dea769", "content_id": "44bdad052240e7a902c619f354123c3f973e1bd5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1787, "license_type": "permissive", "max_line_length": 78, "num_lines": 48, "path": "/tests/unit/task/scenarios/gnocchi/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. 
<http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.gnocchi import utils\nfrom tests.unit import test\n\n\nclass GnocchiBaseTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(GnocchiBaseTestCase, self).setUp()\n self.context = super(GnocchiBaseTestCase, self).get_test_context()\n self.context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake_tenant_id\",\n \"name\": \"fake_tenant_name\"}\n })\n patch = mock.patch(\n \"rally_openstack.common.services.gnocchi.metric.GnocchiService\")\n self.addCleanup(patch.stop)\n self.mock_service = patch.start()\n\n def test__gnocchi_base(self):\n base = utils.GnocchiBase(self.context)\n self.assertEqual(base.admin_gnocchi,\n self.mock_service.return_value)\n self.assertEqual(base.gnocchi,\n self.mock_service.return_value)\n" }, { "alpha_fraction": 0.5959749221801758, "alphanum_fraction": 0.5993521213531494, "avg_line_length": 41.54838562011719, "blob_id": "ecf6656de38202416a214b7d19b1780c77b42919", "content_id": "095a7494b97ab7946f8ec8bd3468bca47936ee81", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14509, "license_type": "permissive", "max_line_length": 79, "num_lines": 341, "path": 
"/rally_openstack/common/services/identity/keystone_v3.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally import exceptions\nfrom rally.task import atomic\n\nfrom rally_openstack.common import service\nfrom rally_openstack.common.services.identity import identity\nfrom rally_openstack.common.services.identity import keystone_common\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"keystone\", service_type=\"identity\", version=\"3\")\nclass KeystoneV3Service(service.Service, keystone_common.KeystoneMixin):\n\n def _get_domain_id(self, domain_name_or_id):\n from keystoneclient import exceptions as kc_exceptions\n\n try:\n # First try to find domain by ID\n return self._clients.keystone(\"3\").domains.get(\n domain_name_or_id).id\n except kc_exceptions.NotFound:\n # Domain not found by ID, try to find it by name\n domains = self._clients.keystone(\"3\").domains.list(\n name=domain_name_or_id)\n if domains:\n return domains[0].id\n # Domain not found by name\n raise exceptions.GetResourceNotFound(\n resource=\"KeystoneDomain(%s)\" % domain_name_or_id)\n\n @atomic.action_timer(\"keystone_v3.create_project\")\n def create_project(self, project_name=None, domain_name=\"Default\"):\n project_name = project_name or self.generate_random_name()\n domain_id = self._get_domain_id(domain_name)\n return 
self._clients.keystone(\"3\").projects.create(name=project_name,\n domain=domain_id)\n\n @atomic.action_timer(\"keystone_v3.update_project\")\n def update_project(self, project_id, name=None, enabled=None,\n description=None):\n \"\"\"Update tenant name and description.\n\n :param project_id: Id of project to update\n :param name: project name to be set (if boolean True, random name will\n be set)\n :param enabled: enabled status of project\n :param description: project description to be set (if boolean True,\n random description will be set)\n \"\"\"\n if name is True:\n name = self.generate_random_name()\n if description is True:\n description = self.generate_random_name()\n self._clients.keystone(\"3\").projects.update(\n project_id, name=name, description=description, enabled=enabled)\n\n @atomic.action_timer(\"keystone_v3.delete_project\")\n def delete_project(self, project_id):\n self._clients.keystone(\"3\").projects.delete(project_id)\n\n @atomic.action_timer(\"keystone_v3.list_projects\")\n def list_projects(self):\n return self._clients.keystone(\"3\").projects.list()\n\n @atomic.action_timer(\"keystone_v3.get_project\")\n def get_project(self, project_id):\n \"\"\"Get project.\"\"\"\n return self._clients.keystone(\"3\").projects.get(project_id)\n\n @atomic.action_timer(\"keystone_v3.create_user\")\n def create_user(self, username=None, password=None, project_id=None,\n domain_name=\"Default\", enabled=True,\n default_role=\"member\"):\n \"\"\"Create user.\n\n\n :param username: name of user\n :param password: user password\n :param project_id: user's default project\n :param domain_name: Name or id of domain where to create project.\n :param enabled: whether the user is enabled.\n :param default_role: user's default role\n \"\"\"\n domain_id = self._get_domain_id(domain_name)\n username = username or self.generate_random_name()\n user = self._clients.keystone(\"3\").users.create(\n name=username, password=password, default_project=project_id,\n 
domain=domain_id, enabled=enabled)\n\n if project_id:\n # we can't setup role without project_id\n roles = self.list_roles()\n for role in roles:\n if default_role == role.name.lower():\n self.add_role(role_id=role.id,\n user_id=user.id,\n project_id=project_id)\n return user\n for role in roles:\n if default_role == role.name.lower().strip(\"_\"):\n self.add_role(role_id=role.id,\n user_id=user.id,\n project_id=project_id)\n return user\n\n LOG.warning(\"Unable to set %s role to created user.\" %\n default_role)\n return user\n\n @atomic.action_timer(\"keystone_v3.create_users\")\n def create_users(self, project_id, number_of_users, user_create_args=None):\n \"\"\"Create specified amount of users.\n\n :param project_id: Id of project\n :param number_of_users: number of users to create\n :param user_create_args: additional user creation arguments\n \"\"\"\n users = []\n for _i in range(number_of_users):\n users.append(self.create_user(project_id=project_id,\n **(user_create_args or {})))\n return users\n\n @atomic.action_timer(\"keystone_v3.update_user\")\n def update_user(self, user_id, name=None, domain_name=None,\n project_id=None, password=None, email=None,\n description=None, enabled=None, default_project=None):\n domain = None\n if domain_name:\n domain = self._get_domain_id(domain_name)\n\n self._clients.keystone(\"3\").users.update(\n user_id, name=name, domain=domain, project=project_id,\n password=password, email=email, description=description,\n enabled=enabled, default_project=default_project)\n\n @atomic.action_timer(\"keystone_v3.create_service\")\n def create_service(self, name=None, service_type=None, description=None,\n enabled=True):\n \"\"\"Creates keystone service.\n\n :param name: name of service to create\n :param service_type: type of the service\n :param description: description of the service\n :param enabled: whether the service appears in the catalog\n :returns: keystone service instance\n \"\"\"\n name = name or 
self.generate_random_name()\n service_type = service_type or \"rally_test_type\"\n description = description or self.generate_random_name()\n return self._clients.keystone(\"3\").services.create(\n name, type=service_type, description=description, enabled=enabled)\n\n @atomic.action_timer(\"keystone_v3.create_role\")\n def create_role(self, name=None, domain_name=None):\n domain_id = None\n if domain_name:\n domain_id = self._get_domain_id(domain_name)\n name = name or self.generate_random_name()\n return self._clients.keystone(\"3\").roles.create(name, domain=domain_id)\n\n @atomic.action_timer(\"keystone_v3.add_role\")\n def add_role(self, role_id, user_id, project_id):\n self._clients.keystone(\"3\").roles.grant(role=role_id,\n user=user_id,\n project=project_id)\n\n @atomic.action_timer(\"keystone_v3.list_roles\")\n def list_roles(self, user_id=None, project_id=None, domain_name=None):\n \"\"\"List all roles.\"\"\"\n domain_id = None\n if domain_name:\n domain_id = self._get_domain_id(domain_name)\n return self._clients.keystone(\"3\").roles.list(user=user_id,\n project=project_id,\n domain=domain_id)\n\n @atomic.action_timer(\"keystone_v3.revoke_role\")\n def revoke_role(self, role_id, user_id, project_id):\n self._clients.keystone(\"3\").roles.revoke(role=role_id,\n user=user_id,\n project=project_id)\n\n @atomic.action_timer(\"keystone_v3.create_domain\")\n def create_domain(self, name, description=None, enabled=True):\n return self._clients.keystone(\"3\").domains.create(\n name, description=description, enabled=enabled)\n\n @atomic.action_timer(\"keystone_v3.create_ec2creds\")\n def create_ec2credentials(self, user_id, project_id):\n \"\"\"Create ec2credentials.\n\n :param user_id: User ID for which to create credentials\n :param project_id: Tenant ID for which to create credentials\n\n :returns: Created ec2-credentials object\n \"\"\"\n return self._clients.keystone(\"3\").ec2.create(user_id,\n project_id=project_id)\n\n\[email 
protected]_layer(KeystoneV3Service)\nclass UnifiedKeystoneV3Service(keystone_common.UnifiedKeystoneMixin,\n identity.Identity):\n\n @staticmethod\n def _unify_project(project):\n return identity.Project(id=project.id, name=project.name,\n domain_id=project.domain_id)\n\n @staticmethod\n def _unify_user(user):\n # When user has default_project_id that is None user.default_project_id\n # will raise AttributeError\n project_id = getattr(user, \"project_id\",\n getattr(user, \"default_project_id\", None))\n return identity.User(id=user.id, name=user.name, project_id=project_id,\n domain_id=user.domain_id)\n\n def create_project(self, project_name=None, domain_name=\"Default\"):\n \"\"\"Creates new project/tenant and return project object.\n\n :param project_name: Name of project to be created.\n :param domain_name: Name or id of domain where to create project,\n \"\"\"\n project = self._impl.create_project(project_name,\n domain_name=domain_name)\n return self._unify_project(project)\n\n def update_project(self, project_id, name=None, enabled=None,\n description=None):\n \"\"\"Update project name, enabled and description\n\n :param project_id: Id of project to update\n :param name: project name to be set\n :param enabled: enabled status of project\n :param description: project description to be set\n \"\"\"\n self._impl.update_project(project_id=project_id, name=name,\n enabled=enabled, description=description)\n\n def delete_project(self, project_id):\n \"\"\"Deletes project.\"\"\"\n return self._impl.delete_project(project_id)\n\n def list_projects(self):\n \"\"\"List all projects.\"\"\"\n return [self._unify_project(p) for p in self._impl.list_projects()]\n\n def get_project(self, project_id):\n \"\"\"Get project.\"\"\"\n return self._unify_project(self._impl.get_project(project_id))\n\n def create_user(self, username=None, password=None, project_id=None,\n domain_name=\"Default\", enabled=True,\n default_role=\"member\"):\n \"\"\"Create user.\n\n :param username: 
name of user\n :param password: user password\n :param project_id: user's default project\n :param domain_name: Name or id of domain where to create project,\n :param enabled: whether the user is enabled.\n :param default_role: Name of default user's role\n \"\"\"\n return self._unify_user(self._impl.create_user(\n username=username, password=password, project_id=project_id,\n domain_name=domain_name, default_role=default_role,\n enabled=enabled))\n\n def create_users(self, project_id, number_of_users, user_create_args=None):\n \"\"\"Create specified amount of users.\n\n :param project_id: Id of project\n :param number_of_users: number of users to create\n :param user_create_args: additional user creation arguments\n \"\"\"\n return [self._unify_user(u)\n for u in self._impl.create_users(\n project_id=project_id, number_of_users=number_of_users,\n user_create_args=user_create_args)]\n\n def list_users(self):\n \"\"\"List all users.\"\"\"\n return [self._unify_user(u) for u in self._impl.list_users()]\n\n def update_user(self, user_id, enabled=None, name=None, email=None,\n password=None):\n return self._impl.update_user(user_id, enabled=enabled, name=name,\n email=email, password=password)\n\n def list_services(self):\n \"\"\"List all services.\"\"\"\n return [self._unify_service(s) for s in self._impl.list_services()]\n\n def create_role(self, name=None, domain_name=None):\n \"\"\"Add role to user.\"\"\"\n return self._unify_role(self._impl.create_role(\n name, domain_name=domain_name))\n\n def add_role(self, role_id, user_id, project_id):\n \"\"\"Add role to user.\"\"\"\n self._impl.add_role(role_id=role_id, user_id=user_id,\n project_id=project_id)\n\n def revoke_role(self, role_id, user_id, project_id):\n \"\"\"Revokes a role from a user.\"\"\"\n return self._impl.revoke_role(role_id=role_id, user_id=user_id,\n project_id=project_id)\n\n def list_roles(self, user_id=None, project_id=None, domain_name=None):\n \"\"\"List all roles.\"\"\"\n return 
[self._unify_role(role) for role in self._impl.list_roles(\n user_id=user_id, project_id=project_id, domain_name=domain_name)]\n\n def create_ec2credentials(self, user_id, project_id):\n \"\"\"Create ec2credentials.\n\n :param user_id: User ID for which to create credentials\n :param project_id: Project ID for which to create credentials\n\n :returns: Created ec2-credentials object\n \"\"\"\n return self._impl.create_ec2credentials(user_id=user_id,\n project_id=project_id)\n" }, { "alpha_fraction": 0.6471949219703674, "alphanum_fraction": 0.6524002552032471, "avg_line_length": 39.20930099487305, "blob_id": "206a6c677f3e71fc0085edebedf11df223130965", "content_id": "896ec83307243575472463583a080f86bd150e21", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1729, "license_type": "permissive", "max_line_length": 78, "num_lines": 43, "path": "/tests/unit/test__compat.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport warnings\n\nfrom tests.unit import test\n\n\nclass CompatibilityTestCase(test.TestCase):\n def test_old_imports_work(self):\n\n with warnings.catch_warnings(record=True) as ctx:\n warnings.simplefilter(\"always\")\n\n from rally_openstack import osclients\n\n if not ctx:\n self.fail(\"`rally_openstack._compat` should raise a warning.\")\n self.assertEqual(1, len(ctx))\n catched_warning = ctx[0]\n self.assertEqual(\n \"Module rally_openstack.osclients is deprecated since \"\n \"rally-openstack 2.0.0. Use rally_openstack.common.osclients \"\n \"instead.\",\n # catched_warning.message is an instance of an exception\n str(catched_warning.message))\n\n from rally_openstack.common import osclients as right_osclients\n\n expected = set(o for o in dir(right_osclients)\n if not o.startswith(\"_\"))\n actual = set(o for o in dir(osclients) if not o.startswith(\"_\"))\n self.assertEqual(expected, actual)\n self.assertEqual(right_osclients.Clients, osclients.Clients)\n" }, { "alpha_fraction": 0.6451451182365417, "alphanum_fraction": 0.6486486196517944, "avg_line_length": 36, "blob_id": "a21da6ec70bb5a6c21cbd20143e07b59b28b32bb", "content_id": "c0094de8270de7187926703c4601f751774a9965", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1998, "license_type": "permissive", "max_line_length": 79, "num_lines": 54, "path": "/tests/unit/test_workarounds.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nrally-openstack package should not be aligned to one constant version of Rally\nframework. It means that some workarounds for compatibility stuff are\nprovided.\nThis module should contain historical notes and checks to do not forget remove\nthese workaround.\n\"\"\"\n\nimport pkg_resources\n\nfrom tests.unit import test\n\n\nclass WorkaroundTestCase(test.TestCase):\n\n WORKAROUNDS = []\n\n def get_min_required_version(self):\n package = pkg_resources.get_distribution(\"rally-openstack\")\n requirement = [p for p in package.requires() if p.name == \"rally\"][0]\n\n for statement, version in requirement.specs:\n version = [int(i) for i in version.split(\".\")]\n if statement == \">=\":\n return version\n elif statement == \">\":\n version[-1] += 1\n return version\n self.skipTest(\"Failed to get a minimum required version of Rally \"\n \"framework.\")\n\n def test_rally_version(self):\n rally_version = self.get_min_required_version()\n\n for version, workarounds in self.WORKAROUNDS:\n if rally_version >= version:\n self.fail(\n \"After bumping minimum required version of Rally, some \"\n \"workarounds become redundant. 
See the following list and \"\n \"update the code: \\n\\t%s\" % \"\\n\\t\".join(workarounds))\n" }, { "alpha_fraction": 0.6146202683448792, "alphanum_fraction": 0.6168913841247559, "avg_line_length": 33.19902801513672, "blob_id": "efaa32c8ac715da0b6e58ddae7656c63c9694903", "content_id": "b575a604455ddb5e27572775d35253800c266949", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7045, "license_type": "permissive", "max_line_length": 78, "num_lines": 206, "path": "/tests/unit/task/scenarios/mistral/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom rally_openstack.task.scenarios.mistral import utils\nfrom tests.unit import fakes\nfrom tests.unit import test\n\nMISTRAL_UTILS = \"rally_openstack.task.scenarios.mistral.utils\"\nPARAMS_EXAMPLE = {\"env\": {\"env_param\": \"param_value\"}}\nINPUT_EXAMPLE = \"\"\"{\"input1\": \"value1\", \"some_json_input\": {\"a\": \"b\"}}\"\"\"\n\n\nclass MistralScenarioTestCase(test.ScenarioTestCase):\n\n def test_list_workbooks(self):\n scenario = utils.MistralScenario(context=self.context)\n return_wbs_list = scenario._list_workbooks()\n self.assertEqual(\n self.clients(\"mistral\").workbooks.list.return_value,\n return_wbs_list)\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"mistral.list_workbooks\"\n )\n\n def test_create_workbook(self):\n definition = \"version: \\\"2.0\\\"\\nname: wb\"\n scenario = utils.MistralScenario(context=self.context)\n self.assertEqual(\n self.clients(\"mistral\").workbooks.create.return_value,\n scenario._create_workbook(definition)\n )\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"mistral.create_workbook\"\n )\n\n def test_delete_workbook(self):\n scenario = utils.MistralScenario(context=self.context)\n\n scenario._delete_workbook(\"wb_name\")\n self.clients(\"mistral\").workbooks.delete.assert_called_once_with(\n \"wb_name\",\n namespace=\"\"\n )\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"mistral.delete_workbook\"\n )\n\n def test_list_executions(self):\n scenario = utils.MistralScenario(context=self.context)\n return_executions_list = scenario._list_executions()\n self.assertEqual(\n return_executions_list,\n self.clients(\"mistral\").executions.list.return_value\n )\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"mistral.list_executions\"\n )\n\n def test_create_execution(self):\n scenario = utils.MistralScenario(context=self.context)\n\n namespace 
= \"namespace\"\n wf_name = \"fake_wf_name\"\n\n mock_wait_for_status = self.mock_wait_for_status.mock\n mock_create_exec = self.clients(\"mistral\").executions.create\n\n self.assertEqual(\n mock_wait_for_status.return_value,\n scenario._create_execution(\"%s\" % wf_name, namespace=namespace)\n )\n\n mock_create_exec.assert_called_once_with(\n wf_name,\n workflow_input=None,\n namespace=namespace\n )\n\n args, kwargs = mock_wait_for_status.call_args\n self.assertEqual(mock_create_exec.return_value, args[0])\n self.assertEqual([\"ERROR\"], kwargs[\"failure_statuses\"])\n self.assertEqual([\"SUCCESS\"], kwargs[\"ready_statuses\"])\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"mistral.create_execution\"\n )\n\n def test_create_execution_with_input(self):\n scenario = utils.MistralScenario(context=self.context)\n\n mock_wait_for_status = self.mock_wait_for_status.mock\n wf_name = \"fake_wf_name\"\n mock_create_exec = self.clients(\"mistral\").executions.create\n\n self.assertEqual(\n mock_wait_for_status.return_value,\n scenario._create_execution(\n wf_name, wf_input=str(INPUT_EXAMPLE))\n )\n\n mock_create_exec.assert_called_once_with(\n wf_name,\n workflow_input=INPUT_EXAMPLE,\n namespace=\"\"\n )\n\n def test_create_execution_with_params(self):\n scenario = utils.MistralScenario(context=self.context)\n\n mock_wait_for_status = self.mock_wait_for_status.mock\n wf_name = \"fake_wf_name\"\n mock_create_exec = self.clients(\"mistral\").executions.create\n\n self.assertEqual(\n mock_wait_for_status.return_value,\n scenario._create_execution(\n wf_name, **PARAMS_EXAMPLE)\n )\n mock_create_exec.assert_called_once_with(\n wf_name,\n workflow_input=None,\n namespace=\"\",\n **PARAMS_EXAMPLE\n )\n\n args, kwargs = mock_wait_for_status.call_args\n self.assertEqual(mock_create_exec.return_value, args[0])\n self.assertEqual([\"ERROR\"], kwargs[\"failure_statuses\"])\n self.assertEqual([\"SUCCESS\"], kwargs[\"ready_statuses\"])\n 
self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"mistral.create_execution\"\n )\n\n args, kwargs = mock_wait_for_status.call_args\n self.assertEqual(mock_create_exec.return_value, args[0])\n self.assertEqual([\"ERROR\"], kwargs[\"failure_statuses\"])\n self.assertEqual([\"SUCCESS\"], kwargs[\"ready_statuses\"])\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"mistral.create_execution\"\n )\n\n def test_delete_execution(self):\n scenario = utils.MistralScenario(context=self.context)\n execution = fakes.FakeMistralClient().execution.create()\n scenario._delete_execution(execution)\n self.clients(\"mistral\").executions.delete.assert_called_once_with(\n execution.id\n )\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"mistral.delete_execution\"\n )\n\n def test_create_workflow(self):\n scenario = utils.MistralScenario(context=self.context)\n definition = \"\"\"\nwf:\n type: direct\n tasks:\n task1:\n action: std.noop\n\"\"\"\n self.assertEqual(\n self.clients(\"mistral\").workflows.create.return_value,\n scenario._create_workflow(definition)\n )\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"mistral.create_workflow\"\n )\n\n def test_delete_workflow(self):\n wf_identifier = \"wf_identifier\"\n namespace = \"delete_wf_test\"\n\n scenario = utils.MistralScenario(context=self.context)\n scenario._delete_workflow(wf_identifier, namespace=namespace)\n\n self.clients(\"mistral\").workflows.delete.assert_called_once_with(\n wf_identifier,\n namespace=namespace\n )\n\n self._test_atomic_action_timer(\n scenario.atomic_actions(),\n \"mistral.delete_workflow\"\n )\n" }, { "alpha_fraction": 0.6091623306274414, "alphanum_fraction": 0.6120128035545349, "avg_line_length": 37.672664642333984, "blob_id": "13eb260d598f5744114d0823ba3fda249fe80dc9", "content_id": "7d69dde1aff3a361825f6bcaecfa3ac73f5505b6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 34380, "license_type": "permissive", "max_line_length": 79, "num_lines": 889, "path": "/rally_openstack/task/scenarios/neutron/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Intel Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import utils\n\nfrom rally_openstack.common.services.network import neutron\nfrom rally_openstack.task import scenario\n\n\nCONF = cfg.CONF\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass NeutronBaseScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Neutron scenarios with basic atomic actions.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(NeutronBaseScenario, self).__init__(*args, **kwargs)\n if hasattr(self, \"_clients\"):\n self.neutron = neutron.NeutronService(\n clients=self._clients,\n name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions()\n )\n if hasattr(self, \"_admin_clients\"):\n self.admin_neutron = neutron.NeutronService(\n clients=self._admin_clients,\n name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions()\n )\n\n def _get_or_create_network(self, **network_create_args):\n \"\"\"Get a network from context, or create a new one.\n\n This lets users either create networks with the 'network'\n context, 
provide existing networks with the 'existing_network'\n context, or let the scenario create a default network for\n them.\n \"\"\"\n\n if \"networks\" in self.context[\"tenant\"]:\n networks = self.context[\"tenant\"][\"networks\"]\n net_idx = self.context[\"iteration\"] % len(networks)\n return networks[net_idx]\n else:\n LOG.warning(\"Running this scenario without either the \"\n \"'network@openstack' or 'existing_network@openstack' \"\n \"context is deprecated since Rally-OpenStack 2.0.0.\")\n return self.neutron.create_network(**network_create_args)\n\n\nclass NeutronScenario(NeutronBaseScenario):\n \"\"\"Base class for Neutron scenarios with basic atomic actions.\"\"\"\n\n # TODO(rkiran): modify in case LBaaS-v2 requires\n LB_METHOD = \"ROUND_ROBIN\"\n LB_PROTOCOL = \"HTTP\"\n LB_PROTOCOL_PORT = 80\n HM_TYPE = \"PING\"\n HM_MAX_RETRIES = 3\n HM_DELAY = 20\n HM_TIMEOUT = 10\n\n def _get_network_id(self, network, **kwargs):\n \"\"\"Get Neutron network ID for the network name.\n\n :param network: str, network name/id\n :param kwargs: dict, network options\n :returns: str, Neutron network-id\n \"\"\"\n try:\n return self.neutron.find_network(network)[\"id\"]\n except exceptions.GetResourceFailure:\n raise exceptions.NotFoundException(\n message=\"Network %s not found.\" % network)\n\n @property\n def _ext_gw_mode_enabled(self):\n \"\"\"Determine if the ext-gw-mode extension is enabled.\n\n Without this extension, we can't pass the enable_snat parameter.\n \"\"\"\n return self.neutron.supports_extension(\"ext-gw-mode\", silent=True)\n\n def _create_network(self, network_create_args):\n \"\"\"Create neutron network.\n\n :param network_create_args: dict, POST /v2.0/networks request options\n :returns: neutron network dict\n \"\"\"\n network_create_args.pop(\"name\", None)\n return {\"network\": self.neutron.create_network(**network_create_args)}\n\n def _list_networks(self, **kwargs):\n \"\"\"Return user networks list.\n\n :param kwargs: network list options\n 
\"\"\"\n return self.neutron.list_networks(**kwargs)\n\n def _list_agents(self, **kwargs):\n \"\"\"Fetches agents.\n\n :param kwargs: neutron agent list options\n :returns: user agents list\n \"\"\"\n return self.neutron.list_agents(**kwargs)\n\n def _update_network(self, network, network_update_args):\n \"\"\"Update the network.\n\n This atomic function updates the network with network_update_args.\n\n :param network: Network object\n :param network_update_args: dict, POST /v2.0/networks update options\n :returns: updated neutron network dict\n \"\"\"\n network_update_args[\"name\"] = self.generate_random_name()\n return {\"network\": self.neutron.update_network(\n network[\"network\"][\"id\"], **network_update_args)}\n\n def _show_network(self, network, **kwargs):\n \"\"\"show network details.\n\n :param network: Network object\n :param kwargs: dict, POST /v2.0/networks show options\n :returns: details of the network\n \"\"\"\n network = self.neutron.get_network(network[\"network\"][\"id\"], **kwargs)\n return {\"network\": network}\n\n def _delete_network(self, network):\n \"\"\"Delete neutron network.\n\n :param network: Network object\n \"\"\"\n self.neutron.delete_network(network[\"id\"])\n\n def _create_subnet(self, network, subnet_create_args, start_cidr=None):\n \"\"\"Create neutron subnet.\n\n :param network: neutron network dict\n :param subnet_create_args: POST /v2.0/subnets request options\n :returns: neutron subnet dict\n \"\"\"\n\n subnet_create_args.pop(\"name\", None)\n subnet_create_args[\"network_id\"] = network[\"network\"][\"id\"]\n subnet_create_args[\"start_cidr\"] = start_cidr\n\n return {\"subnet\": self.neutron.create_subnet(**subnet_create_args)}\n\n def _list_subnets(self):\n \"\"\"Returns user subnetworks list.\"\"\"\n return self.neutron.list_subnets()\n\n def _show_subnet(self, subnet, **kwargs):\n \"\"\"show subnet details.\n\n :param subnet: Subnet object\n :param kwargs: Optional additional arguments for subnet show\n :returns: 
details of the subnet\n \"\"\"\n return {\"subnet\": self.neutron.get_subnet(subnet[\"subnet\"][\"id\"])}\n\n def _update_subnet(self, subnet, subnet_update_args):\n \"\"\"Update the neutron subnet.\n\n This atomic function updates the subnet with subnet_update_args.\n\n :param subnet: Subnet object\n :param subnet_update_args: dict, PUT /v2.0/subnets update options\n :returns: updated neutron subnet dict\n \"\"\"\n subnet_update_args[\"name\"] = self.generate_random_name()\n return {\"subnet\": self.neutron.update_subnet(\n subnet[\"subnet\"][\"id\"], **subnet_update_args)}\n\n def _delete_subnet(self, subnet):\n \"\"\"Delete neutron subnet\n\n :param subnet: Subnet object\n \"\"\"\n self.neutron.delete_subnet(subnet[\"subnet\"][\"id\"])\n\n def _create_router(self, router_create_args, external_gw=False):\n \"\"\"Create neutron router.\n\n :param router_create_args: POST /v2.0/routers request options\n :returns: neutron router dict\n \"\"\"\n router_create_args.pop(\"name\", None)\n if (\"tenant_id\" in router_create_args\n and \"project_id\" not in router_create_args):\n router_create_args[\"project_id\"] = router_create_args.pop(\n \"tenant_id\")\n\n return {\"router\": self.neutron.create_router(\n discover_external_gw=external_gw, **router_create_args)}\n\n def _list_routers(self):\n \"\"\"Returns user routers list.\"\"\"\n return self.neutron.list_routers()\n\n def _show_router(self, router, **kwargs):\n \"\"\"Show information of a given router.\n\n :param router: ID or name of router to look up\n :kwargs: dict, POST /v2.0/routers show options\n :return: details of the router\n \"\"\"\n return {\"router\": self.neutron.get_router(\n router[\"router\"][\"id\"], **kwargs)}\n\n def _delete_router(self, router):\n \"\"\"Delete neutron router\n\n :param router: Router object\n \"\"\"\n self.neutron.delete_router(router[\"router\"][\"id\"])\n\n def _update_router(self, router, router_update_args):\n \"\"\"Update the neutron router.\n\n This atomic function updates 
the router with router_update_args.\n\n :param router: dict, neutron router\n :param router_update_args: dict, PUT /v2.0/routers update options\n :returns: updated neutron router dict\n \"\"\"\n router_update_args[\"name\"] = self.generate_random_name()\n return {\"router\": self.neutron.update_router(\n router[\"router\"][\"id\"], **router_update_args)}\n\n def _create_port(self, network, port_create_args):\n \"\"\"Create neutron port.\n\n :param network: neutron network dict\n :param port_create_args: POST /v2.0/ports request options\n :returns: neutron port dict\n \"\"\"\n return {\"port\": self.neutron.create_port(\n network_id=network[\"network\"][\"id\"], **port_create_args)}\n\n def _list_ports(self):\n \"\"\"Return user ports list.\"\"\"\n return self.neutron.list_ports()\n\n def _show_port(self, port, **params):\n \"\"\"Return user port details.\n\n :param port: dict, neutron port\n :param params: neutron port show options\n :returns: neutron port dict\n \"\"\"\n return {\"port\": self.neutron.get_port(port[\"port\"][\"id\"], **params)}\n\n def _update_port(self, port, port_update_args):\n \"\"\"Update the neutron port.\n\n This atomic function updates port with port_update_args.\n\n :param port: dict, neutron port\n :param port_update_args: dict, PUT /v2.0/ports update options\n :returns: updated neutron port dict\n \"\"\"\n port_update_args[\"name\"] = self.generate_random_name()\n return {\"port\": self.neutron.update_port(port[\"port\"][\"id\"],\n **port_update_args)}\n\n def _delete_port(self, port):\n \"\"\"Delete neutron port.\n\n :param port: Port object\n \"\"\"\n self.neutron.delete_port(port[\"port\"][\"id\"])\n\n @logging.log_deprecated_args(\n \"network_create_args is deprecated; use the network context instead\",\n \"0.1.0\", \"network_create_args\")\n def _get_or_create_network(self, network_create_args=None):\n \"\"\"Get a network from context, or create a new one.\n\n This lets users either create networks with the 'network'\n context, 
provide existing networks with the 'existing_network'\n context, or let the scenario create a default network for\n them. Running this without one of the network contexts is\n deprecated.\n\n :param network_create_args: Deprecated way to provide network\n creation args; use the network\n context instead.\n :returns: Network dict\n \"\"\"\n if \"networks\" in self.context[\"tenant\"]:\n return {\"network\":\n random.choice(self.context[\"tenant\"][\"networks\"])}\n else:\n LOG.warning(\"Running this scenario without either the 'network' \"\n \"or 'existing_network' context is deprecated\")\n return self._create_network(network_create_args or {})\n\n def _create_subnets(self, network,\n subnet_create_args=None,\n subnet_cidr_start=None,\n subnets_per_network=1):\n \"\"\"Create <count> new subnets in the given network.\n\n :param network: network to create subnets in\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param subnets_per_network: int, number of subnets for one network\n :returns: List of subnet dicts\n \"\"\"\n return [self._create_subnet(network, subnet_create_args or {},\n subnet_cidr_start)\n for i in range(subnets_per_network)]\n\n def _create_network_and_subnets(self,\n network_create_args=None,\n subnet_create_args=None,\n subnets_per_network=1,\n subnet_cidr_start=\"1.0.0.0/24\"):\n \"\"\"Create network and subnets.\n\n :parm network_create_args: dict, POST /v2.0/networks request options\n :parm subnet_create_args: dict, POST /v2.0/subnets request options\n :parm subnets_per_network: int, number of subnets for one network\n :parm subnet_cidr_start: str, start value for subnets CIDR\n :returns: tuple of result network and subnets list\n \"\"\"\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n 
subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n subnets = [{\"subnet\": s} for s in net_topo[\"subnets\"]]\n return {\"network\": net_topo[\"network\"]}, subnets\n\n def _create_network_structure(self, network_create_args=None,\n subnet_create_args=None,\n subnet_cidr_start=None,\n subnets_per_network=None,\n router_create_args=None):\n \"\"\"Create a network and a given number of subnets and routers.\n\n :param network_create_args: dict, POST /v2.0/networks request options\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param subnets_per_network: int, number of subnets for one network\n :param router_create_args: dict, POST /v2.0/routers request options\n :returns: tuple of (network, subnets, routers)\n \"\"\"\n\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n return ({\"network\": net_topo[\"network\"]},\n [{\"subnet\": s} for s in net_topo[\"subnets\"]],\n [{\"router\": r} for r in net_topo[\"routers\"]])\n\n def _add_interface_router(self, subnet, router):\n \"\"\"Connect subnet to router.\n\n :param subnet: dict, neutron subnet\n :param router: dict, neutron router\n \"\"\"\n self.neutron.add_interface_to_router(router_id=router[\"id\"],\n subnet_id=subnet[\"id\"])\n\n def _remove_interface_router(self, subnet, router):\n \"\"\"Remove subnet from router\n\n :param subnet: dict, neutron subnet\n :param router: dict, neutron router\n \"\"\"\n self.neutron.remove_interface_from_router(\n router_id=router[\"id\"], subnet_id=subnet[\"id\"])\n\n def _add_gateway_router(self, router, ext_net, enable_snat=None):\n \"\"\"Set the 
external network gateway for a router.\n\n :param router: dict, neutron router\n :param ext_net: external network for the gateway\n :param enable_snat: True if enable snat, None to avoid update\n \"\"\"\n self.neutron.add_gateway_to_router(\n router_id=router[\"router\"][\"id\"],\n network_id=ext_net[\"network\"][\"id\"],\n enable_snat=enable_snat\n )\n\n def _remove_gateway_router(self, router):\n \"\"\"Removes an external network gateway from the specified router.\n\n :param router: dict, neutron router\n \"\"\"\n self.neutron.remove_gateway_from_router(router[\"router\"][\"id\"])\n\n @atomic.action_timer(\"neutron.create_pool\")\n def _create_lb_pool(self, subnet_id, **pool_create_args):\n \"\"\"Create LB pool(v1)\n\n :param subnet_id: str, neutron subnet-id\n :param pool_create_args: dict, POST /lb/pools request options\n :returns: dict, neutron lb pool\n \"\"\"\n args = {\"lb_method\": self.LB_METHOD,\n \"protocol\": self.LB_PROTOCOL,\n \"name\": self.generate_random_name(),\n \"subnet_id\": subnet_id}\n args.update(pool_create_args)\n return self.clients(\"neutron\").create_pool({\"pool\": args})\n\n def _create_v1_pools(self, networks, **pool_create_args):\n \"\"\"Create LB pools(v1)\n\n :param networks: list, neutron networks\n :param pool_create_args: dict, POST /lb/pools request options\n :returns: list, neutron lb pools\n \"\"\"\n subnets = []\n pools = []\n for net in networks:\n subnets.extend(net.get(\"subnets\", []))\n for subnet_id in subnets:\n pools.append(self._create_lb_pool(\n subnet_id, **pool_create_args))\n return pools\n\n @atomic.action_timer(\"neutron.list_pools\")\n def _list_v1_pools(self, **kwargs):\n \"\"\"Return user lb pool list(v1).\"\"\"\n return self.clients(\"neutron\").list_pools(**kwargs)\n\n @atomic.action_timer(\"neutron.delete_pool\")\n def _delete_v1_pool(self, pool):\n \"\"\"Delete neutron pool.\n\n :param pool: Pool object\n \"\"\"\n self.clients(\"neutron\").delete_pool(pool[\"id\"])\n\n 
@atomic.action_timer(\"neutron.update_pool\")\n def _update_v1_pool(self, pool, **pool_update_args):\n \"\"\"Update pool.\n\n This atomic function updates the pool with pool_update_args.\n\n :param pool: Pool object\n :param pool_update_args: dict, POST /lb/pools update options\n :returns: updated neutron pool dict\n \"\"\"\n pool_update_args[\"name\"] = self.generate_random_name()\n body = {\"pool\": pool_update_args}\n return self.clients(\"neutron\").update_pool(pool[\"pool\"][\"id\"], body)\n\n def _create_v1_vip(self, pool, **vip_create_args):\n \"\"\"Create VIP(v1)\n\n :parm pool: dict, neutron lb-pool\n :parm vip_create_args: dict, POST /lb/vips request options\n :returns: dict, neutron lb vip\n \"\"\"\n args = {\"protocol\": self.LB_PROTOCOL,\n \"protocol_port\": self.LB_PROTOCOL_PORT,\n \"name\": self.generate_random_name(),\n \"pool_id\": pool[\"pool\"][\"id\"],\n \"subnet_id\": pool[\"pool\"][\"subnet_id\"]}\n args.update(vip_create_args)\n return self.clients(\"neutron\").create_vip({\"vip\": args})\n\n @atomic.action_timer(\"neutron.list_vips\")\n def _list_v1_vips(self, **kwargs):\n \"\"\"Return user lb vip list(v1).\"\"\"\n return self.clients(\"neutron\").list_vips(**kwargs)\n\n @atomic.action_timer(\"neutron.delete_vip\")\n def _delete_v1_vip(self, vip):\n \"\"\"Delete neutron vip.\n\n :param vip: neutron Virtual IP object\n \"\"\"\n self.clients(\"neutron\").delete_vip(vip[\"id\"])\n\n @atomic.action_timer(\"neutron.update_vip\")\n def _update_v1_vip(self, vip, **vip_update_args):\n \"\"\"Updates vip.\n\n This atomic function updates vip name and admin state\n\n :param vip: Vip object\n :param vip_update_args: dict, POST /lb/vips update options\n :returns: updated neutron vip dict\n \"\"\"\n vip_update_args[\"name\"] = self.generate_random_name()\n body = {\"vip\": vip_update_args}\n return self.clients(\"neutron\").update_vip(vip[\"vip\"][\"id\"], body)\n\n def _create_floatingip(self, floating_network, **floating_ip_args):\n \"\"\"Create 
floating IP with floating_network.\n\n :param floating_network: str, external network to create floating IP\n :param floating_ip_args: dict, POST /floatingips create options\n :returns: dict, neutron floating IP\n \"\"\"\n\n return {\"floatingip\": self.neutron.create_floatingip(\n floating_network=floating_network, **floating_ip_args)}\n\n def _list_floating_ips(self, **kwargs):\n \"\"\"Return floating IPs list.\"\"\"\n return {\"floatingips\": self.neutron.list_floatingips(**kwargs)}\n\n def _delete_floating_ip(self, floating_ip):\n \"\"\"Delete floating IP.\n\n :param dict, floating IP object\n \"\"\"\n return self.neutron.delete_floatingip(floating_ip[\"id\"])\n\n def _associate_floating_ip(self, floatingip, port):\n \"\"\"Associate floating IP with port.\n\n :param floatingip: floating IP dict\n :param port: port dict\n :returns: updated floating IP dict\n \"\"\"\n return self.neutron.associate_floatingip(\n port_id=port[\"id\"],\n floatingip_id=floatingip[\"id\"])\n\n def _dissociate_floating_ip(self, floatingip):\n \"\"\"Dissociate floating IP from ports.\n\n :param floatingip: floating IP dict\n :returns: updated floating IP dict\n \"\"\"\n return self.neutron.dissociate_floatingip(\n floatingip_id=floatingip[\"id\"])\n\n @atomic.action_timer(\"neutron.create_healthmonitor\")\n def _create_v1_healthmonitor(self, **healthmonitor_create_args):\n \"\"\"Create LB healthmonitor.\n\n This atomic function creates healthmonitor with the provided\n healthmonitor_create_args.\n\n :param healthmonitor_create_args: dict, POST /lb/healthmonitors\n :returns: neutron healthmonitor dict\n \"\"\"\n args = {\"type\": self.HM_TYPE,\n \"delay\": self.HM_DELAY,\n \"max_retries\": self.HM_MAX_RETRIES,\n \"timeout\": self.HM_TIMEOUT}\n args.update(healthmonitor_create_args)\n return self.clients(\"neutron\").create_health_monitor(\n {\"health_monitor\": args})\n\n @atomic.action_timer(\"neutron.list_healthmonitors\")\n def _list_v1_healthmonitors(self, **kwargs):\n \"\"\"List LB 
healthmonitors.\n\n This atomic function lists all helthmonitors.\n\n :param kwargs: optional parameters\n :returns: neutron lb healthmonitor list\n \"\"\"\n return self.clients(\"neutron\").list_health_monitors(**kwargs)\n\n @atomic.action_timer(\"neutron.delete_healthmonitor\")\n def _delete_v1_healthmonitor(self, healthmonitor):\n \"\"\"Delete neutron healthmonitor.\n\n :param healthmonitor: neutron healthmonitor dict\n \"\"\"\n self.clients(\"neutron\").delete_health_monitor(healthmonitor[\"id\"])\n\n @atomic.action_timer(\"neutron.update_healthmonitor\")\n def _update_v1_healthmonitor(self, healthmonitor,\n **healthmonitor_update_args):\n \"\"\"Update neutron healthmonitor.\n\n :param healthmonitor: neutron lb healthmonitor dict\n :param healthmonitor_update_args: POST /lb/healthmonitors\n update options\n :returns: updated neutron lb healthmonitor dict\n \"\"\"\n body = {\"health_monitor\": healthmonitor_update_args}\n return self.clients(\"neutron\").update_health_monitor(\n healthmonitor[\"health_monitor\"][\"id\"], body)\n\n def _create_security_group(self, **security_group_create_args):\n \"\"\"Create Neutron security-group.\n\n :param security_group_create_args: dict, POST /v2.0/security-groups\n request options\n :returns: dict, neutron security-group\n \"\"\"\n security_group_create_args[\"name\"] = self.generate_random_name()\n return {\"security_group\": self.neutron.create_security_group(\n **security_group_create_args)}\n\n def _delete_security_group(self, security_group):\n \"\"\"Delete Neutron security group.\n\n :param security_group: dict, neutron security_group\n \"\"\"\n return self.neutron.delete_security_group(\n security_group[\"security_group\"][\"id\"])\n\n def _list_security_groups(self, **kwargs):\n \"\"\"Return list of Neutron security groups.\"\"\"\n return {\"security_groups\": self.neutron.list_security_groups(**kwargs)}\n\n def _show_security_group(self, security_group, **kwargs):\n \"\"\"Show security group details.\n\n :param 
security_group: dict, neutron security_group\n :param kwargs: Optional additional arguments for security_group show\n :returns: security_group details\n \"\"\"\n return {\"security_group\": self.neutron.get_security_group(\n security_group[\"security_group\"][\"id\"], **kwargs)}\n\n def _update_security_group(self, security_group,\n **security_group_update_args):\n \"\"\"Update Neutron security-group.\n\n :param security_group: dict, neutron security_group\n :param security_group_update_args: dict, POST /v2.0/security-groups\n update options\n :returns: dict, updated neutron security-group\n \"\"\"\n security_group_update_args[\"name\"] = self.generate_random_name()\n return {\"security_group\": self.neutron.update_security_group(\n security_group[\"security_group\"][\"id\"],\n **security_group_update_args)}\n\n def update_loadbalancer_resource(self, lb):\n try:\n new_lb = self.clients(\"neutron\").show_loadbalancer(lb[\"id\"])\n except Exception as e:\n if getattr(e, \"status_code\", 400) == 404:\n raise exceptions.GetResourceNotFound(resource=lb)\n raise exceptions.GetResourceFailure(resource=lb, err=e)\n return new_lb[\"loadbalancer\"]\n\n @atomic.action_timer(\"neutron.create_lbaasv2_loadbalancer\")\n def _create_lbaasv2_loadbalancer(self, subnet_id, **lb_create_args):\n \"\"\"Create LB loadbalancer(v2)\n\n :param subnet_id: str, neutron subnet-id\n :param lb_create_args: dict, POST /lbaas/loadbalancers request options\n :returns: dict, neutron lb\n \"\"\"\n args = {\"name\": self.generate_random_name(),\n \"vip_subnet_id\": subnet_id}\n args.update(lb_create_args)\n neutronclient = self.clients(\"neutron\")\n lb = neutronclient.create_loadbalancer({\"loadbalancer\": args})\n lb = lb[\"loadbalancer\"]\n lb = utils.wait_for_status(\n lb,\n ready_statuses=[\"ACTIVE\"],\n status_attr=\"provisioning_status\",\n update_resource=self.update_loadbalancer_resource,\n timeout=CONF.openstack.neutron_create_loadbalancer_timeout,\n check_interval=(\n 
CONF.openstack.neutron_create_loadbalancer_poll_interval)\n )\n return lb\n\n @atomic.action_timer(\"neutron.list_lbaasv2_loadbalancers\")\n def _list_lbaasv2_loadbalancers(self, retrieve_all=True, **lb_list_args):\n \"\"\"List LB loadbalancers(v2)\n\n :param lb_list_args: dict, POST /lbaas/loadbalancers request options\n :returns: dict, neutron lb loadbalancers(v2)\n \"\"\"\n return self.clients(\"neutron\").list_loadbalancers(retrieve_all,\n **lb_list_args)\n\n @atomic.action_timer(\"neutron.create_bgpvpn\")\n def _create_bgpvpn(self, **kwargs):\n \"\"\"Create Bgpvpn resource (POST /bgpvpn/bgpvpn)\n\n :param kwargs: optional parameters to create BGP VPN\n :returns dict, bgpvpn resource details\n \"\"\"\n kwargs[\"name\"] = self.generate_random_name()\n return self.admin_clients(\"neutron\").create_bgpvpn({\"bgpvpn\": kwargs})\n\n @atomic.action_timer(\"neutron.delete_bgpvpn\")\n def _delete_bgpvpn(self, bgpvpn):\n \"\"\"Delete Bgpvpn resource.(DELETE /bgpvpn/bgpvpns/{id})\n\n :param bgpvpn: dict, bgpvpn\n :return dict, bgpvpn\n \"\"\"\n return self.admin_clients(\"neutron\").delete_bgpvpn(\n bgpvpn[\"bgpvpn\"][\"id\"])\n\n @atomic.action_timer(\"neutron.list_bgpvpns\")\n def _list_bgpvpns(self, **kwargs):\n \"\"\"Return bgpvpns list.\n\n :param kwargs: dict, GET /bgpvpn/bgpvpns request options\n :returns: bgpvpns list\n \"\"\"\n return self.admin_clients(\"neutron\").list_bgpvpns(\n True, **kwargs)[\"bgpvpns\"]\n\n @atomic.action_timer(\"neutron.update_bgpvpn\")\n def _update_bgpvpn(self, bgpvpn, update_name=False, **kwargs):\n \"\"\"Update a bgpvpn.\n\n :param bgpvpn: dict, bgpvpn\n :param update_name: update_name: bool, whether or not to modify\n BGP VPN name\n :param **kwargs: dict, PUT /bgpvpn/bgpvpns update options\n :return dict, updated bgpvpn\n \"\"\"\n if update_name or \"name\" in kwargs:\n kwargs[\"name\"] = self.generate_random_name()\n return self.admin_clients(\"neutron\").update_bgpvpn(\n bgpvpn[\"bgpvpn\"][\"id\"], {\"bgpvpn\": kwargs})\n\n 
@atomic.action_timer(\"neutron.create_bgpvpn_network_assoc\")\n def _create_bgpvpn_network_assoc(self, bgpvpn, network):\n \"\"\"Creates a new BGP VPN network association.\n\n :param bgpvpn: dict, bgpvpn\n :param network: dict, network\n :return dict: network_association\n \"\"\"\n netassoc = {\"network_id\": network[\"id\"]}\n return self.clients(\"neutron\").create_bgpvpn_network_assoc(\n bgpvpn[\"bgpvpn\"][\"id\"], {\"network_association\": netassoc})\n\n @atomic.action_timer(\"neutron.delete_bgpvpn_network_assoc\")\n def _delete_bgpvpn_network_assoc(self, bgpvpn, net_assoc):\n \"\"\"Delete the specified BGP VPN network association\n\n :param bgpvpn: dict, bgpvpn\n :param net_assoc: dict, network\n :return dict: network_association\n \"\"\"\n return self.clients(\"neutron\").delete_bgpvpn_network_assoc(\n bgpvpn[\"bgpvpn\"][\"id\"], net_assoc[\"network_association\"][\"id\"])\n\n @atomic.action_timer(\"neutron.create_bgpvpn_router_assoc\")\n def _create_bgpvpn_router_assoc(self, bgpvpn, router):\n \"\"\"Creates a new BGP VPN router association.\n\n :param bgpvpn: dict, bgpvpn\n :param router: dict, router\n :return dict: network_association\n \"\"\"\n router_assoc = {\"router_id\": router[\"id\"]}\n return self.clients(\"neutron\").create_bgpvpn_router_assoc(\n bgpvpn[\"bgpvpn\"][\"id\"], {\"router_association\": router_assoc})\n\n @atomic.action_timer(\"neutron.delete_bgpvpn_router_assoc\")\n def _delete_bgpvpn_router_assoc(self, bgpvpn, router_assoc):\n \"\"\"Delete the specified BGP VPN router association\n\n :param bgpvpn: dict, bgpvpn\n :param router_assoc: dict, router\n :return dict: router_association\n \"\"\"\n return self.clients(\"neutron\").delete_bgpvpn_router_assoc(\n bgpvpn[\"bgpvpn\"][\"id\"], router_assoc[\"router_association\"][\"id\"])\n\n @atomic.action_timer(\"neutron.list_bgpvpn_network_assocs\")\n def _list_bgpvpn_network_assocs(self, bgpvpn, **kwargs):\n \"\"\"List network association of bgpvpn\n\n :param bgpvpn: dict, bgpvpn\n :param 
**kwargs: dict, optional parameters\n :return dict: network_association\n \"\"\"\n return self.clients(\"neutron\").list_bgpvpn_network_assocs(\n bgpvpn[\"bgpvpn\"][\"id\"], **kwargs)\n\n @atomic.action_timer(\"neutron.list_bgpvpn_router_assocs\")\n def _list_bgpvpn_router_assocs(self, bgpvpn, **kwargs):\n \"\"\"List router association of bgpvpn\n\n :param bgpvpn: dict, bgpvpn\n :param **kwargs: dict, optional parameters\n :return dict: router_association\n \"\"\"\n return self.clients(\"neutron\").list_bgpvpn_router_assocs(\n bgpvpn[\"bgpvpn\"][\"id\"], **kwargs)\n\n def _create_security_group_rule(self, security_group_id,\n **security_group_rule_args):\n \"\"\"Create Neutron security-group-rule.\n\n :param security_group_id: id of neutron security_group\n :param security_group_rule_args: dict, POST\n /v2.0/security-group-rules request options\n :returns: dict, neutron security-group-rule\n \"\"\"\n return {\"security_group_rule\": self.neutron.create_security_group_rule(\n security_group_id, **security_group_rule_args\n )}\n\n def _list_security_group_rules(self, **kwargs):\n \"\"\"List all security group rules.\n\n :param kwargs: Optional additional arguments for roles list\n :return: list of security group rules\n \"\"\"\n result = self.neutron.list_security_group_rules(**kwargs)\n return {\"security_group_rules\": result}\n\n def _show_security_group_rule(self, security_group_rule, **kwargs):\n \"\"\"Show information of a given security group rule.\n\n :param security_group_rule: id of security group rule\n :param kwargs: Optional additional arguments for roles list\n :return: details of security group rule\n \"\"\"\n return {\"security_group_rule\": self.neutron.get_security_group_rule(\n security_group_rule, **kwargs)}\n\n def _delete_security_group_rule(self, security_group_rule):\n \"\"\"Delete a given security group rule.\n\n :param security_group_rule: id of security group rule\n \"\"\"\n self.neutron.delete_security_group_rule(security_group_rule)\n\n 
@atomic.action_timer(\"neutron.delete_trunk\")\n def _delete_trunk(self, trunk_port):\n self.clients(\"neutron\").delete_trunk(trunk_port[\"port_id\"])\n\n @atomic.action_timer(\"neutron.create_trunk\")\n def _create_trunk(self, trunk_payload):\n trunk_payload[\"name\"] = self.generate_random_name()\n return self.clients(\"neutron\").create_trunk({\"trunk\": trunk_payload})\n\n @atomic.action_timer(\"neutron.list_trunks\")\n def _list_trunks(self, **kwargs):\n return self.clients(\"neutron\").list_trunks(**kwargs)[\"trunks\"]\n\n @atomic.action_timer(\"neutron.list_subports_by_trunk\")\n def _list_subports_by_trunk(self, trunk_id):\n return self.clients(\"neutron\").trunk_get_subports(trunk_id)\n\n @atomic.action_timer(\"neutron._add_subports_to_trunk\")\n def _add_subports_to_trunk(self, trunk_id, subports):\n return self.clients(\"neutron\").trunk_add_subports(\n trunk_id, {\"sub_ports\": subports})\n\n def _list_ports_by_device_id(self, device_id):\n return self.neutron.list_ports(device_id=device_id)\n" }, { "alpha_fraction": 0.5679584741592407, "alphanum_fraction": 0.5731071829795837, "avg_line_length": 36.09033203125, "blob_id": "c535146bc8d4fe1634d9058062172364986b05cd", "content_id": "fef92f463dffbfcf701ea1ab52fa544e67d429de", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46808, "license_type": "permissive", "max_line_length": 79, "num_lines": 1262, "path": "/tests/unit/common/services/network/test_neutron.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\nfrom rally_openstack.common import credential\nfrom rally_openstack.common.services.network import neutron\nfrom tests.unit import test\n\n\nPATH = \"rally_openstack.common.services.network.neutron\"\n\n\nclass NeutronServiceTestCase(test.TestCase):\n def setUp(self):\n super(NeutronServiceTestCase, self).setUp()\n self.clients = mock.MagicMock(\n credential=credential.OpenStackCredential(\n auth_url=\"example.com\",\n username=\"root\",\n password=\"changeme\"\n )\n )\n self.nc = self.clients.neutron.return_value\n self.atomic_inst = []\n\n self.name_generator_count = 0\n\n def name_generator():\n self.name_generator_count += 1\n return f\"s-{self.name_generator_count}\"\n\n self.neutron = neutron.NeutronService(\n clients=self.clients,\n name_generator=name_generator,\n atomic_inst=self.atomic_inst\n )\n\n def test_create_network_topology_without_a_router(self):\n network = {\"id\": \"net-id\", \"name\": \"s-1\"}\n subnets = [\n {\"id\": \"subnet1-id\", \"name\": \"subnet1-name\"},\n {\"id\": \"subnet2-id\", \"name\": \"subnet2-name\"}\n ]\n self.nc.create_network.return_value = {\"network\": network.copy()}\n self.nc.create_subnet.side_effect = [{\"subnet\": s} for s in subnets]\n\n network_create_args = {}\n subnet_create_args = {}\n\n topo = self.neutron.create_network_topology(\n network_create_args=network_create_args,\n subnet_create_args=subnet_create_args\n )\n\n self.assertEqual(\n {\n \"network\": dict(subnets=[subnets[0][\"id\"]], **network),\n \"subnets\": [subnets[0]],\n 
\"routers\": []\n },\n topo\n )\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": \"s-1\"}})\n self.nc.create_subnet.assert_called_once_with(\n {\"subnet\": {\"name\": \"s-2\", \"network_id\": \"net-id\",\n \"dns_nameservers\": mock.ANY, \"ip_version\": 4,\n \"cidr\": mock.ANY}}\n )\n\n self.assertFalse(self.nc.create_router.called)\n self.assertFalse(self.nc.add_interface_router.called)\n\n def test_create_network_topology(self):\n network = {\"id\": \"net-id\", \"name\": \"s-1\"}\n subnets = [\n {\"id\": \"subnet1-id\", \"name\": \"subnet1-name\"},\n {\"id\": \"subnet2-id\", \"name\": \"subnet2-name\"}\n ]\n router = {\"id\": \"router\"}\n self.nc.create_network.return_value = {\"network\": network.copy()}\n self.nc.create_router.return_value = {\"router\": router.copy()}\n self.nc.create_subnet.side_effect = [{\"subnet\": s} for s in subnets]\n\n network_create_args = {}\n subnet_create_args = {}\n\n topo = self.neutron.create_network_topology(\n network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n router_create_args={},\n subnets_count=2,\n subnets_dualstack=True\n )\n\n self.assertEqual(\n {\n \"network\": dict(subnets=[subnets[0][\"id\"], subnets[1][\"id\"]],\n **network),\n \"subnets\": [subnets[0], subnets[1]],\n \"routers\": [router]\n },\n topo\n )\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": \"s-1\"}})\n self.nc.create_router.assert_called_once_with(\n {\"router\": {\"name\": \"s-2\"}})\n self.assertEqual(\n [\n mock.call({\"subnet\": {\n \"name\": f\"s-{i}\", \"network_id\": \"net-id\",\n \"dns_nameservers\": mock.ANY,\n \"ip_version\": 4 if i % 3 == 0 else 6,\n \"cidr\": mock.ANY}})\n for i in range(3, 5)],\n self.nc.create_subnet.call_args_list\n )\n self.assertEqual(\n [\n mock.call(router[\"id\"], {\"subnet_id\": subnets[0][\"id\"]}),\n mock.call(router[\"id\"], {\"subnet_id\": subnets[1][\"id\"]})\n ],\n self.nc.add_interface_router.call_args_list\n )\n\n 
def test_delete_network_topology(self):\n topo = {\n \"network\": {\"id\": \"net-id\"},\n \"routers\": [{\"id\": \"r1\"}, {\"id\": \"r2\"}, {\"id\": \"r3\"}],\n \"subnets\": [{\"id\": \"s-1\"}, {\"id\": \"s-2\"}, {\"id\": \"s-3\"}]\n }\n self.nc.list_ports.return_value = {\n \"ports\": [\n {\"id\": \"p1\", \"device_owner\": \"1\"},\n {\"id\": \"p2\", \"device_owner\": \"2\"}\n ]\n }\n self.nc.list_subnets.return_value = {\n \"subnets\": [{\"id\": \"snet-1\"}, {\"id\": \"snet-2\"}]\n }\n\n self.neutron.delete_network_topology(topo)\n\n self.assertEqual(\n [mock.call(r[\"id\"]) for r in topo[\"routers\"]],\n self.nc.remove_gateway_router.call_args_list\n )\n self.nc.list_ports.assert_called_once_with(\n network_id=topo[\"network\"][\"id\"]\n )\n self.assertEqual(\n # subnets from topo object should be ignored and all subnets should\n # be listed\n [mock.call(s[\"id\"])\n for s in self.nc.list_subnets.return_value[\"subnets\"]],\n self.nc.delete_subnet.call_args_list\n )\n self.nc.delete_network.assert_called_once_with(topo[\"network\"][\"id\"])\n self.assertEqual(\n [mock.call(r[\"id\"]) for r in topo[\"routers\"]],\n self.nc.delete_router.call_args_list\n )\n\n def test_create_network(self):\n net = \"foo\"\n self.nc.create_network.return_value = {\"network\": net}\n\n self.assertEqual(\n net,\n self.neutron.create_network(\n provider_physical_network=\"ppn\",\n **{\"router:external\": True}\n )\n )\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": \"s-1\", \"provider:physical_network\": \"ppn\",\n \"router:external\": True}}\n )\n\n def test_get_network(self):\n network = \"foo\"\n self.nc.show_network.return_value = {\"network\": network}\n net_id = \"net-id\"\n\n self.assertEqual(network, self.neutron.get_network(net_id))\n self.nc.show_network.assert_called_once_with(net_id)\n\n self.nc.show_network.reset_mock()\n\n fields = [\"a\", \"b\"]\n self.assertEqual(network,\n self.neutron.get_network(net_id, fields=fields))\n 
self.nc.show_network.assert_called_once_with(net_id, fields=fields)\n\n def test_find_network(self):\n net1 = {\"id\": \"net-1\", \"name\": \"foo\"}\n net2 = {\"id\": \"net-2\", \"name\": \"bar\"}\n self.nc.list_networks.return_value = {\"networks\": [net1, net2]}\n\n self.assertEqual(net2, self.neutron.find_network(\"bar\"))\n self.assertEqual(net1, self.neutron.find_network(\"net-1\"))\n self.assertRaises(exceptions.GetResourceFailure,\n self.neutron.find_network, \"net-3\")\n\n def test_update_network(self):\n network = \"foo\"\n self.nc.update_network.return_value = {\"network\": network}\n net_id = \"net-id\"\n\n self.assertEqual(network, self.neutron.update_network(\n net_id, admin_state_up=False))\n self.nc.update_network.assert_called_once_with(\n net_id, {\"network\": {\"admin_state_up\": False}})\n\n self.nc.update_network.reset_mock()\n\n self.assertRaises(TypeError,\n self.neutron.update_network, net_id)\n self.assertFalse(self.nc.update_network.called)\n\n def test_delete_network(self):\n net_id = \"net-id\"\n self.neutron.delete_network(net_id)\n self.nc.delete_network.assert_called_once_with(net_id)\n\n def test_list_networks(self):\n net1 = {\"id\": \"net-1\", \"name\": \"foo\"}\n net2 = {\"id\": \"net-2\", \"name\": \"bar\"}\n self.nc.list_networks.return_value = {\"networks\": [net1, net2]}\n\n self.assertEqual([net1, net2], self.neutron.list_networks())\n self.nc.list_networks.assert_called_once_with()\n\n @mock.patch(\"%s.net_utils.generate_cidr\" % PATH)\n def test_create_subnet(self, mock_generate_cidr):\n net_id = \"net-id\"\n router_id = \"router-id\"\n mock_generate_cidr.return_value = (6, \"generated_cidr\")\n subnet = {\"id\": \"subnet-id\"}\n self.nc.create_subnet.return_value = {\"subnet\": subnet}\n\n # case 1:\n # - cidr is not specified, so it should be generated\n # - ip_version equals to 6, so proper dns nameserbers should be used\n # - router_id is specified, so add_interface_router method should be\n # called\n 
self.assertEqual(\n subnet,\n self.neutron.create_subnet(network_id=net_id,\n router_id=router_id,\n ip_version=6)\n )\n self.nc.create_subnet.assert_called_once_with({\"subnet\": {\n \"name\": \"s-1\",\n \"network_id\": net_id,\n \"ip_version\": 6,\n \"cidr\": \"generated_cidr\",\n \"dns_nameservers\": self.neutron.IPv6_DEFAULT_DNS_NAMESERVERS\n }})\n mock_generate_cidr.assert_called_once_with(\n ip_version=6,\n start_cidr=None\n )\n self.nc.add_interface_router.assert_called_once_with(\n router_id, {\"subnet_id\": subnet[\"id\"]}\n )\n\n mock_generate_cidr.reset_mock()\n self.nc.create_subnet.reset_mock()\n self.nc.add_interface_router.reset_mock()\n\n # case 2:\n # - cidr is specified, so it should not be generated\n # - ip_version equals to 4, so proper dns nameserbers should be used\n # - router_id is not specified, so add_interface_router method should\n # not be called\n self.assertEqual(\n subnet,\n self.neutron.create_subnet(network_id=net_id,\n cidr=\"some-cidr\",\n ip_version=4)\n )\n self.nc.create_subnet.assert_called_once_with({\"subnet\": {\n \"name\": \"s-2\",\n \"network_id\": net_id,\n \"ip_version\": 4,\n \"cidr\": \"some-cidr\",\n \"dns_nameservers\": self.neutron.IPv4_DEFAULT_DNS_NAMESERVERS\n }})\n self.assertFalse(mock_generate_cidr.called)\n self.assertFalse(self.nc.add_interface_router.called)\n\n mock_generate_cidr.reset_mock()\n self.nc.create_subnet.reset_mock()\n self.nc.add_interface_router.reset_mock()\n\n # case 3:\n # - cidr is specified, so it should not be generated\n # - dns_nameservers equals to None, so default values should not be\n # applied\n # - router_id is specified, so add_interface_router method should\n # be called\n self.assertEqual(\n subnet,\n self.neutron.create_subnet(network_id=net_id,\n router_id=router_id,\n cidr=\"some-cidr\",\n dns_nameservers=None,\n ip_version=4)\n )\n self.nc.create_subnet.assert_called_once_with({\"subnet\": {\n \"name\": \"s-3\",\n \"network_id\": net_id,\n \"ip_version\": 4,\n \"cidr\": 
\"some-cidr\",\n \"dns_nameservers\": None\n }})\n self.assertFalse(mock_generate_cidr.called)\n self.nc.add_interface_router.assert_called_once_with(\n router_id, {\"subnet_id\": subnet[\"id\"]}\n )\n\n def test_get_subnet(self):\n subnet = \"foo\"\n self.nc.show_subnet.return_value = {\"subnet\": subnet}\n subnet_id = \"subnet-id\"\n\n self.assertEqual(subnet, self.neutron.get_subnet(subnet_id))\n self.nc.show_subnet.assert_called_once_with(subnet_id)\n\n def test_update_subnet(self):\n subnet = \"foo\"\n self.nc.update_subnet.return_value = {\"subnet\": subnet}\n subnet_id = \"subnet-id\"\n\n self.assertEqual(subnet, self.neutron.update_subnet(\n subnet_id, enable_dhcp=False))\n self.nc.update_subnet.assert_called_once_with(\n subnet_id, {\"subnet\": {\"enable_dhcp\": False}})\n\n self.nc.update_subnet.reset_mock()\n\n self.assertRaises(TypeError,\n self.neutron.update_subnet, subnet_id)\n self.assertFalse(self.nc.update_subnet.called)\n\n def test_delete_subnet(self):\n subnet_id = \"subnet-id\"\n self.neutron.delete_subnet(subnet_id)\n self.nc.delete_subnet.assert_called_once_with(subnet_id)\n\n def test_list_subnets(self):\n subnet1 = {\"id\": \"subnet-1\", \"name\": \"foo\"}\n subnet2 = {\"id\": \"subnet-2\", \"name\": \"bar\"}\n self.nc.list_subnets.return_value = {\"subnets\": [subnet1, subnet2]}\n\n self.assertEqual([subnet1, subnet2], self.neutron.list_subnets())\n self.nc.list_subnets.assert_called_once_with()\n\n def test_create_router(self):\n net1 = {\"id\": \"net-1\", \"name\": \"foo\"}\n net2 = {\"id\": \"net-2\", \"name\": \"bar\"}\n self.nc.list_networks.return_value = {\"networks\": [net1, net2]}\n\n router = {\"id\": \"router-id\"}\n self.nc.create_router.return_value = {\"router\": router}\n\n # case 1: external_gateway_info is specified, list_networks should\n # not be called\n self.assertEqual(\n router,\n self.neutron.create_router(\n external_gateway_info={\"network_id\": \"net-id\"},\n ha=True\n )\n )\n 
self.nc.create_router.assert_called_once_with({\"router\": {\n \"name\": \"s-1\",\n \"external_gateway_info\": {\"network_id\": \"net-id\"},\n \"ha\": True\n }})\n\n self.assertFalse(self.nc.list_networks.called)\n\n self.nc.create_router.reset_mock()\n\n # case 2: external_gateway_info is not specified, but\n # discover_external_gw is False, so list_networks should not be\n # called as well\n self.assertEqual(\n router,\n self.neutron.create_router(\n discover_external_gw=False,\n ha=True\n )\n )\n self.nc.create_router.assert_called_once_with({\"router\": {\n \"name\": \"s-2\",\n \"ha\": True\n }})\n\n self.assertFalse(self.nc.list_networks.called)\n\n self.nc.create_router.reset_mock()\n\n # case 3: external_gateway_info is not specified, so list_networks\n # should be called to discover external network\n self.assertEqual(\n router,\n self.neutron.create_router(ha=True, discover_external_gw=True)\n )\n self.nc.create_router.assert_called_once_with({\"router\": {\n \"name\": \"s-3\",\n \"external_gateway_info\": {\"network_id\": net1[\"id\"]},\n \"ha\": True\n }})\n\n self.nc.list_networks.assert_called_once_with(\n **{\"router:external\": True}\n )\n\n def test_get_router(self):\n router = \"foo\"\n self.nc.show_router.return_value = {\"router\": router}\n router_id = \"router-id\"\n\n self.assertEqual(router, self.neutron.get_router(router_id))\n self.nc.show_router.assert_called_once_with(router_id)\n\n self.nc.show_router.reset_mock()\n\n fields = [\"a\", \"b\"]\n self.assertEqual(router,\n self.neutron.get_router(router_id, fields=fields))\n self.nc.show_router.assert_called_once_with(router_id, fields=fields)\n\n def test_add_interface_to_router(self):\n router_id = \"router-id\"\n subnet_id = \"subnet-id\"\n port_id = \"port-id\"\n\n self.neutron.add_interface_to_router(router_id, subnet_id=subnet_id)\n self.nc.add_interface_router.assert_called_once_with(\n router_id, {\"subnet_id\": subnet_id})\n self.nc.add_interface_router.reset_mock()\n\n 
self.neutron.add_interface_to_router(router_id, port_id=port_id)\n self.nc.add_interface_router.assert_called_once_with(\n router_id, {\"port_id\": port_id})\n self.nc.add_interface_router.reset_mock()\n\n self.assertRaises(TypeError,\n self.neutron.add_interface_to_router, router_id)\n self.assertFalse(self.nc.add_interface_router.called)\n\n self.assertRaises(TypeError,\n self.neutron.add_interface_to_router, router_id,\n port_id=port_id, subnet_id=subnet_id)\n self.assertFalse(self.nc.add_interface_router.called)\n\n def test_remove_interface_from_router(self):\n router_id = \"router-id\"\n subnet_id = \"subnet-id\"\n port_id = \"port-id\"\n\n # case 1: use subnet-id\n self.neutron.remove_interface_from_router(\n router_id, subnet_id=subnet_id)\n self.nc.remove_interface_router.assert_called_once_with(\n router_id, {\"subnet_id\": subnet_id})\n self.nc.remove_interface_router.reset_mock()\n\n # case 2: use port-id\n self.neutron.remove_interface_from_router(router_id, port_id=port_id)\n self.nc.remove_interface_router.assert_called_once_with(\n router_id, {\"port_id\": port_id})\n self.nc.remove_interface_router.reset_mock()\n\n # case 3: no port and subnet are specified\n self.assertRaises(TypeError,\n self.neutron.remove_interface_from_router, router_id)\n self.assertFalse(self.nc.remove_interface_router.called)\n\n # case 4: both port and subnet are specified\n self.assertRaises(TypeError,\n self.neutron.remove_interface_from_router, router_id,\n port_id=port_id, subnet_id=subnet_id)\n self.assertFalse(self.nc.remove_interface_router.called)\n\n def test_test_remove_interface_from_router_silent_error(self):\n from neutronclient.common import exceptions as neutron_exceptions\n\n router_id = \"router-id\"\n subnet_id = \"subnet-id\"\n\n for exc in (neutron_exceptions.BadRequest,\n neutron_exceptions.NotFound):\n self.nc.remove_interface_router.side_effect = exc\n\n self.neutron.remove_interface_from_router(\n router_id, subnet_id=subnet_id)\n 
self.nc.remove_interface_router.assert_called_once_with(\n router_id, {\"subnet_id\": subnet_id})\n\n self.nc.remove_interface_router.reset_mock()\n\n def test_add_gateway_to_router(self):\n router_id = \"r-id\"\n net_id = \"net-id\"\n external_fixed_ips = \"ex-net-obj\"\n self.nc.list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"ext-gw-mode\"}]\n }\n\n # case 1\n self.neutron.add_gateway_to_router(\n router_id,\n network_id=net_id,\n external_fixed_ips=external_fixed_ips,\n enable_snat=True\n )\n self.nc.add_gateway_router.assert_called_once_with(\n router_id, {\"network_id\": net_id,\n \"enable_snat\": True,\n \"external_fixed_ips\": external_fixed_ips})\n self.nc.add_gateway_router.reset_mock()\n\n # case 2\n self.neutron.add_gateway_to_router(router_id, network_id=net_id)\n self.nc.add_gateway_router.assert_called_once_with(\n router_id, {\"network_id\": net_id})\n\n def test_remove_gateway_from_router(self):\n router_id = \"r-id\"\n self.neutron.remove_gateway_from_router(router_id)\n self.nc.remove_gateway_router.assert_called_once_with(router_id)\n\n def test_update_router(self):\n router = \"foo\"\n self.nc.update_router.return_value = {\"router\": router}\n router_id = \"subnet-id\"\n\n self.assertEqual(router, self.neutron.update_router(\n router_id, admin_state_up=False))\n self.nc.update_router.assert_called_once_with(\n router_id, {\"router\": {\"admin_state_up\": False}})\n\n self.nc.update_router.reset_mock()\n\n self.assertRaises(TypeError,\n self.neutron.update_router, router_id)\n self.assertFalse(self.nc.update_router.called)\n\n def test_delete_router(self):\n router_id = \"r-id\"\n self.neutron.delete_router(router_id)\n self.nc.delete_router.assert_called_once_with(router_id)\n\n def test_list_routers(self):\n router1 = {\n \"id\": \"router-1\",\n \"name\": \"r1\",\n \"external_gateway_info\": None\n }\n router2 = {\n \"id\": \"router-2\",\n \"name\": \"r2\",\n \"external_gateway_info\": {\"external_fixed_ips\": []}\n }\n 
router3 = {\n \"id\": \"router-3\",\n \"name\": \"r3\",\n \"external_gateway_info\": {\n \"external_fixed_ips\": [{\"subnet_id\": \"s1\"}]\n }\n }\n router4 = {\n \"id\": \"router-4\",\n \"name\": \"r4\",\n \"external_gateway_info\": {\n \"external_fixed_ips\": [{\"subnet_id\": \"s1\"},\n {\"subnet_id\": \"s2\"}]\n }\n }\n router5 = {\n \"id\": \"router-5\",\n \"name\": \"r5\",\n \"external_gateway_info\": {\n \"external_fixed_ips\": [{\"subnet_id\": \"s2\"}]\n }\n }\n self.nc.list_routers.return_value = {\"routers\": [\n router1, router2, router3, router4, router5]}\n\n # case 1: use native neutron api filters\n self.assertEqual(\n [router1, router2, router3, router4, router5],\n self.neutron.list_routers(admin_state_up=True)\n )\n self.nc.list_routers.assert_called_once_with(admin_state_up=True)\n\n self.nc.list_routers.reset_mock()\n\n # case 2: use additional post api filtering by subnet\n self.assertEqual(\n [router4, router5],\n self.neutron.list_routers(subnet_ids=[\"s2\"])\n )\n self.nc.list_routers.assert_called_once_with()\n\n def test_create_port(self):\n net_id = \"net-id\"\n port = \"foo\"\n self.nc.create_port.return_value = {\"port\": port}\n\n self.assertEqual(port, self.neutron.create_port(network_id=net_id))\n self.nc.create_port.assert_called_once_with(\n {\"port\": {\"name\": \"s-1\", \"network_id\": net_id}}\n )\n\n def test_get_port(self):\n port = \"foo\"\n self.nc.show_port.return_value = {\"port\": port}\n port_id = \"net-id\"\n\n self.assertEqual(port, self.neutron.get_port(port_id))\n self.nc.show_port.assert_called_once_with(port_id)\n\n self.nc.show_port.reset_mock()\n\n fields = [\"a\", \"b\"]\n self.assertEqual(port,\n self.neutron.get_port(port_id, fields=fields))\n self.nc.show_port.assert_called_once_with(port_id, fields=fields)\n\n def test_update_port(self):\n port = \"foo\"\n self.nc.update_port.return_value = {\"port\": port}\n port_id = \"net-id\"\n\n self.assertEqual(port, self.neutron.update_port(\n port_id, 
admin_state_up=False))\n self.nc.update_port.assert_called_once_with(\n port_id, {\"port\": {\"admin_state_up\": False}})\n\n self.nc.update_port.reset_mock()\n\n self.assertRaises(TypeError, self.neutron.update_port, port_id)\n self.assertFalse(self.nc.update_port.called)\n\n def test_delete_port(self):\n # case 1: port argument is a string with port ID\n port = \"port-id\"\n\n self.neutron.delete_port(port)\n\n self.nc.delete_port.assert_called_once_with(port)\n self.assertFalse(self.nc.remove_gateway_router.called)\n self.assertFalse(self.nc.remove_interface_router.called)\n self.nc.delete_port.reset_mock()\n\n # case 2: port argument is a dict with an id and not-special\n # device_owner\n port = {\"id\": \"port-id\", \"device_owner\": \"someone\",\n \"device_id\": \"device-id\"}\n\n self.neutron.delete_port(port)\n\n self.nc.delete_port.assert_called_once_with(port[\"id\"])\n self.assertFalse(self.nc.remove_interface_router.called)\n self.nc.delete_port.reset_mock()\n\n # case 3: port argument is a dict with an id and owner is a router\n # interface\n port = {\"id\": \"port-id\",\n \"device_id\": \"device-id\",\n \"device_owner\": \"network:router_interface_distributed\"}\n\n self.neutron.delete_port(port)\n\n self.assertFalse(self.nc.delete_port.called)\n self.assertFalse(self.nc.remove_gateway_router.called)\n self.nc.remove_interface_router.assert_called_once_with(\n port[\"device_id\"], {\"port_id\": port[\"id\"]}\n )\n\n self.nc.delete_port.reset_mock()\n self.nc.remove_interface_router.reset_mock()\n\n # case 4: port argument is a dict with an id and owner is a router\n # gateway\n port = {\"id\": \"port-id\",\n \"device_id\": \"device-id\",\n \"device_owner\": \"network:router_gateway\"}\n\n self.neutron.delete_port(port)\n\n self.assertFalse(self.nc.delete_port.called)\n self.nc.remove_gateway_router.assert_called_once_with(\n port[\"device_id\"]\n )\n self.nc.remove_interface_router.assert_called_once_with(\n port[\"device_id\"], {\"port_id\": 
port[\"id\"]}\n )\n\n def test_delete_port_silently(self):\n from neutronclient.common import exceptions as neutron_exceptions\n\n self.nc.delete_port.side_effect = neutron_exceptions.PortNotFoundClient\n\n port = \"port-id\"\n\n self.neutron.delete_port(port)\n\n self.nc.delete_port.assert_called_once_with(port)\n self.assertFalse(self.nc.remove_gateway_router.called)\n self.assertFalse(self.nc.remove_interface_router.called)\n\n def test_list_ports(self):\n port1 = {\"id\": \"port-1\", \"name\": \"foo\"}\n port2 = {\"id\": \"port-2\", \"name\": \"bar\"}\n self.nc.list_ports.return_value = {\"ports\": [port1, port2]}\n\n self.assertEqual([port1, port2], self.neutron.list_ports())\n self.nc.list_ports.assert_called_once_with()\n\n def test_create_floatingip(self):\n floatingip = \"foo\"\n self.nc.create_floatingip.return_value = {\"floatingip\": floatingip}\n networks = [\n {\"id\": \"net1-id\", \"name\": \"net1\"},\n {\"id\": \"net2-id\", \"name\": \"net2\", \"router:external\": True},\n {\"id\": \"net3-id\", \"name\": \"net3\", \"router:external\": False}\n ]\n self.nc.list_networks.return_value = {\"networks\": networks}\n\n # case 1: floating_network is a dict with network id\n floating_network = {\"id\": \"net-id\"}\n\n self.assertEqual(\n floatingip,\n self.neutron.create_floatingip(floating_network=floating_network)\n )\n self.nc.create_floatingip.assert_called_once_with(\n {\n \"floatingip\": {\"description\": \"s-1\",\n \"floating_network_id\": floating_network[\"id\"]}\n }\n )\n self.assertFalse(self.nc.list_networks.called)\n self.nc.create_floatingip.reset_mock()\n\n # case 2: floating_network is an ID\n floating_network = \"net2-id\"\n\n self.assertEqual(\n floatingip,\n self.neutron.create_floatingip(floating_network=floating_network)\n )\n self.nc.create_floatingip.assert_called_once_with(\n {\n \"floatingip\": {\"description\": \"s-2\",\n \"floating_network_id\": floating_network}\n }\n )\n self.nc.list_networks.assert_called_once_with()\n 
self.nc.create_floatingip.reset_mock()\n self.nc.list_networks.reset_mock()\n\n # case 3: floating_network is an ID\n floating_network = \"net2-id\"\n\n self.assertEqual(\n floatingip,\n self.neutron.create_floatingip(floating_network=floating_network)\n )\n self.nc.create_floatingip.assert_called_once_with(\n {\n \"floatingip\": {\"description\": \"s-3\",\n \"floating_network_id\": floating_network}\n }\n )\n self.nc.list_networks.assert_called_once_with()\n self.nc.create_floatingip.reset_mock()\n self.nc.list_networks.reset_mock()\n\n # case 4: floating_network is a name of not external network\n floating_network = \"net3\"\n\n self.assertRaises(\n exceptions.NotFoundException,\n self.neutron.create_floatingip, floating_network=floating_network\n )\n self.assertFalse(self.nc.create_floatingip.called)\n\n self.nc.list_networks.assert_called_once_with()\n self.nc.create_floatingip.reset_mock()\n self.nc.list_networks.reset_mock()\n\n # case 4: floating_network is not specified\n self.assertEqual(\n floatingip,\n self.neutron.create_floatingip()\n )\n self.nc.create_floatingip.assert_called_once_with(\n {\n \"floatingip\": {\"description\": \"s-4\",\n \"floating_network_id\": networks[0][\"id\"]}\n }\n )\n self.nc.list_networks.assert_called_once_with(\n **{\"router:external\": True})\n self.nc.create_floatingip.reset_mock()\n self.nc.list_networks.reset_mock()\n\n def test_create_floatingip_pre_newton(self):\n self.clients.credential.api_info[\"neutron\"] = {\"pre_newton\": True}\n floatingip = \"foo\"\n self.nc.create_floatingip.return_value = {\"floatingip\": floatingip}\n floating_network = {\"id\": \"net-id\"}\n\n self.assertEqual(\n floatingip,\n self.neutron.create_floatingip(floating_network=floating_network)\n )\n self.nc.create_floatingip.assert_called_once_with(\n {\n \"floatingip\": {\"floating_network_id\": floating_network[\"id\"]}\n }\n )\n # generate random name should not be called\n self.assertEqual(0, self.name_generator_count)\n\n 
@mock.patch(\"%s.LOG.info\" % PATH)\n def test_create_floatingip_failure(self, mock_log_info):\n from neutronclient.common import exceptions as neutron_exceptions\n\n # case 1: an error which we should not handle\n self.nc.create_floatingip.side_effect = neutron_exceptions.BadRequest(\n \"oops\"\n )\n self.assertRaises(\n neutron_exceptions.BadRequest,\n self.neutron.create_floatingip, floating_network={\"id\": \"net-id\"}\n )\n self.assertFalse(mock_log_info.called)\n\n # case 2: exception that we should handle\n self.nc.create_floatingip.side_effect = neutron_exceptions.BadRequest(\n \"Unrecognized attribute: 'description'\"\n )\n self.assertRaises(\n neutron_exceptions.BadRequest,\n self.neutron.create_floatingip, floating_network={\"id\": \"net-id\"}\n )\n self.assertTrue(mock_log_info.called)\n\n def test_get_floatingip(self):\n floatingip = \"foo\"\n self.nc.show_floatingip.return_value = {\"floatingip\": floatingip}\n floatingip_id = \"fip-id\"\n\n self.assertEqual(floatingip,\n self.neutron.get_floatingip(floatingip_id))\n self.nc.show_floatingip.assert_called_once_with(floatingip_id)\n\n self.nc.show_floatingip.reset_mock()\n\n fields = [\"a\", \"b\"]\n self.assertEqual(\n floatingip,\n self.neutron.get_floatingip(floatingip_id, fields=fields)\n )\n self.nc.show_floatingip.assert_called_once_with(floatingip_id,\n fields=fields)\n\n def test_update_floatingip(self):\n floatingip = \"foo\"\n self.nc.update_floatingip.return_value = {\"floatingip\": floatingip}\n floatingip_id = \"fip-id\"\n\n self.assertEqual(floatingip, self.neutron.update_floatingip(\n floatingip_id, port_id=\"port-id\"))\n self.nc.update_floatingip.assert_called_once_with(\n floatingip_id, {\"floatingip\": {\"port_id\": \"port-id\"}})\n\n self.nc.update_floatingip.reset_mock()\n\n self.assertRaises(TypeError,\n self.neutron.update_floatingip, floatingip_id)\n self.assertFalse(self.nc.update_floatingip.called)\n\n def test_associate_floatingip(self):\n floatingip_id = \"fip-id\"\n 
device_id = \"device-id\"\n floating_ip_address = \"floating_ip_address\"\n\n floatingip = \"foo\"\n self.nc.update_floatingip.return_value = {\"floatingip\": floatingip}\n\n port_id = \"port-id\"\n self.nc.list_ports.return_value = {\n \"ports\": [{\"id\": port_id, \"device_id\": device_id}]\n }\n\n self.nc.list_floatingips.return_value = {\n \"floatingips\": [{\"id\": floatingip_id}]\n }\n\n # case 1:\n # - port_id is None, so it should be discovered using device_id\n # - floatingip_id is not None, so nothing should be specified here\n\n self.assertEqual(\n floatingip,\n self.neutron.associate_floatingip(\n device_id=device_id, floatingip_id=floatingip_id))\n self.nc.update_floatingip.assert_called_once_with(\n floatingip_id, {\"floatingip\": {\"port_id\": port_id}})\n self.nc.list_ports.assert_called_once_with(device_id=device_id)\n self.assertFalse(self.nc.list_floatingips.called)\n\n self.nc.update_floatingip.reset_mock()\n self.nc.list_ports.reset_mock()\n\n # case 2:\n # - port_id is not None, so not discovery should be performed\n # - floatingip_id is None, so it should be discovered\n\n self.assertEqual(\n floatingip,\n self.neutron.associate_floatingip(\n port_id=port_id, floating_ip_address=floating_ip_address,\n fixed_ip_address=\"fixed_ip_addr\"\n ))\n self.nc.update_floatingip.assert_called_once_with(\n floatingip_id,\n {\"floatingip\": {\"port_id\": port_id,\n \"fixed_ip_address\": \"fixed_ip_addr\"}})\n self.assertFalse(self.nc.list_ports.called)\n self.nc.list_floatingips.assert_called_once_with(\n floating_ip_address=floating_ip_address\n )\n\n self.nc.update_floatingip.reset_mock()\n self.nc.list_ports.reset_mock()\n self.nc.list_floatingips.reset_mock()\n\n # case 3:\n # - port_id is not None, so not discovery should be performed\n # - floatingip_id is None, so it should be discovered, but error\n # happens\n\n self.nc.list_floatingips.return_value = {\"floatingips\": []}\n\n self.assertRaises(\n exceptions.GetResourceFailure,\n 
self.neutron.associate_floatingip,\n port_id=port_id, floating_ip_address=floating_ip_address\n )\n self.assertFalse(self.nc.update_floatingip.called)\n self.assertFalse(self.nc.list_ports.called)\n self.nc.list_floatingips.assert_called_once_with(\n floating_ip_address=floating_ip_address\n )\n\n self.nc.update_floatingip.reset_mock()\n self.nc.list_floatingips.reset_mock()\n\n # case 4:\n # - port_id is None, so discovery should be performed, but error\n # happens\n # - floatingip_id is None, so discovery should not be performed\n # since port discovery fails first\n\n self.nc.list_floatingips.return_value = {\"floatingips\": []}\n self.nc.list_ports.return_value = {\"ports\": []}\n\n self.assertRaises(\n exceptions.GetResourceFailure,\n self.neutron.associate_floatingip,\n device_id=device_id, floating_ip_address=floating_ip_address\n )\n self.nc.list_ports.assert_called_once_with(device_id=device_id)\n self.assertFalse(self.nc.update_floatingip.called)\n self.assertFalse(self.nc.list_floatingips.called)\n\n def test_associate_floatingip_typeerror(self):\n # no device_id and port_id\n self.assertRaises(TypeError, self.neutron.associate_floatingip)\n # both args are specified\n self.assertRaises(TypeError, self.neutron.associate_floatingip,\n device_id=\"d-id\", port_id=\"p-id\")\n\n # no floating_ip_address and floating_ip_id\n self.assertRaises(TypeError, self.neutron.associate_floatingip,\n port_id=\"p-id\")\n # both args are specified\n self.assertRaises(TypeError, self.neutron.associate_floatingip,\n port_id=\"p-id\",\n floating_ip_address=\"fip\", floating_ip_id=\"fip_id\")\n\n def test_disassociate_floatingip(self):\n floatingip_id = \"fip-id\"\n floating_ip_address = \"floating_ip_address\"\n\n floatingip = \"foo\"\n self.nc.update_floatingip.return_value = {\"floatingip\": floatingip}\n\n self.nc.list_floatingips.return_value = {\n \"floatingips\": [{\"id\": floatingip_id}]\n }\n\n # case 1: floatingip_id is specified\n\n self.assertEqual(\n 
floatingip,\n self.neutron.dissociate_floatingip(floatingip_id=floatingip_id))\n self.nc.update_floatingip.assert_called_once_with(\n floatingip_id, {\"floatingip\": {\"port_id\": None}})\n self.assertFalse(self.nc.list_floatingips.called)\n\n self.nc.update_floatingip.reset_mock()\n\n # case 2: floating_ip_address is specified\n\n self.assertEqual(\n floatingip,\n self.neutron.dissociate_floatingip(\n floating_ip_address=floating_ip_address\n ))\n self.nc.update_floatingip.assert_called_once_with(\n floatingip_id, {\"floatingip\": {\"port_id\": None}})\n self.nc.list_floatingips.assert_called_once_with(\n floating_ip_address=floating_ip_address\n )\n\n self.nc.update_floatingip.reset_mock()\n self.nc.list_floatingips.reset_mock()\n\n # case 3: floating_ip_address is specified but failing to\n # find floatingip by it\n\n self.nc.list_floatingips.return_value = {\"floatingips\": []}\n\n self.assertRaises(\n exceptions.GetResourceFailure,\n self.neutron.dissociate_floatingip,\n floating_ip_address=floating_ip_address\n )\n self.assertFalse(self.nc.update_floatingip.called)\n self.nc.list_floatingips.assert_called_once_with(\n floating_ip_address=floating_ip_address\n )\n\n def test_disassociate_floatingip_typeerror(self):\n # no floating_ip_address and floating_ip_id\n self.assertRaises(TypeError, self.neutron.dissociate_floatingip)\n # both args are specified\n self.assertRaises(TypeError, self.neutron.dissociate_floatingip,\n floating_ip_address=\"fip\", floating_ip_id=\"fip_id\")\n\n def delete_floatingip(self):\n floatingip_id = \"fip-id\"\n self.neutron.delete_floatingip(floatingip_id)\n self.nc.delete_floatingip.assert_called_once_with(floatingip_id)\n\n def test_list_floatingips(self):\n floatingip_1 = {\"id\": \"fip-1\", \"name\": \"foo\"}\n floatingip_2 = {\"id\": \"fip-2\", \"name\": \"bar\"}\n self.nc.list_floatingips.return_value = {\n \"floatingips\": [floatingip_1, floatingip_2]\n }\n\n self.assertEqual(\n [floatingip_1, floatingip_2],\n 
self.neutron.list_floatingips(port_id=\"port-id\")\n )\n self.nc.list_floatingips.assert_called_once_with(port_id=\"port-id\")\n\n def test_create_security_group(self):\n security_group = \"foo\"\n self.nc.create_security_group.return_value = {\n \"security_group\": security_group}\n\n self.assertEqual(\n security_group, self.neutron.create_security_group(stateful=True)\n )\n self.nc.create_security_group.assert_called_once_with(\n {\"security_group\": {\"name\": \"s-1\", \"stateful\": True}}\n )\n\n def test_get_security_group(self):\n security_group = \"foo\"\n self.nc.show_security_group.return_value = {\n \"security_group\": security_group}\n security_group_id = \"security-group-id\"\n\n self.assertEqual(security_group,\n self.neutron.get_security_group(security_group_id))\n self.nc.show_security_group.assert_called_once_with(security_group_id)\n\n self.nc.show_security_group.reset_mock()\n\n fields = [\"a\", \"b\"]\n self.assertEqual(\n security_group,\n self.neutron.get_security_group(security_group_id, fields=fields))\n self.nc.show_security_group.assert_called_once_with(\n security_group_id, fields=fields)\n\n def test_update_update_security_group(self):\n security_group = \"foo\"\n self.nc.update_security_group.return_value = {\n \"security_group\": security_group}\n security_group_id = \"security-group-id\"\n\n self.assertEqual(\n security_group,\n self.neutron.update_security_group(\n security_group_id, stateful=False))\n self.nc.update_security_group.assert_called_once_with(\n security_group_id, {\"security_group\": {\"stateful\": False}})\n\n self.nc.update_security_group.reset_mock()\n\n self.assertRaises(\n TypeError,\n self.neutron.update_security_group, security_group_id)\n self.assertFalse(self.nc.update_security_group.called)\n\n def test_delete_security_group(self):\n security_group_id = \"security-group-id\"\n self.neutron.delete_security_group(security_group_id)\n self.nc.delete_security_group.assert_called_once_with(\n security_group_id)\n\n 
def test_list_security_groups(self):\n sg1 = {\"id\": \"sg-1\", \"name\": \"foo\"}\n sg2 = {\"id\": \"sg-2\", \"name\": \"bar\"}\n self.nc.list_security_groups.return_value = {\n \"security_groups\": [sg1, sg2]\n }\n\n self.assertEqual([sg1, sg2], self.neutron.list_security_groups())\n self.nc.list_security_groups.assert_called_once_with()\n\n def test_create_security_group_rule(self):\n security_group_rule = \"foo\"\n self.nc.create_security_group_rule.return_value = {\n \"security_group_rule\": security_group_rule}\n\n self.assertEqual(\n security_group_rule,\n self.neutron.create_security_group_rule(\n security_group_id=\"sg1\", )\n )\n self.nc.create_security_group_rule.assert_called_once_with(\n {\"security_group_rule\": {\n \"security_group_id\": \"sg1\", \"direction\": \"ingress\",\n \"protocol\": \"tcp\"\n }}\n )\n\n def test_get_security_group_rule(self):\n security_group_rule = \"foo\"\n self.nc.show_security_group_rule.return_value = {\n \"security_group_rule\": security_group_rule}\n security_group_rule_id = \"security-group-id\"\n\n self.assertEqual(\n security_group_rule,\n self.neutron.get_security_group_rule(security_group_rule_id))\n self.nc.show_security_group_rule.assert_called_once_with(\n security_group_rule_id)\n\n self.nc.show_security_group_rule.reset_mock()\n\n fields = [\"a\", \"b\"]\n self.assertEqual(\n security_group_rule,\n self.neutron.get_security_group_rule(\n security_group_rule_id, fields=fields))\n self.nc.show_security_group_rule.assert_called_once_with(\n security_group_rule_id, fields=fields)\n\n def test_delete_security_group_rule(self):\n security_group_rule_id = \"security-group-rule-id\"\n self.neutron.delete_security_group_rule(security_group_rule_id)\n self.nc.delete_security_group_rule.assert_called_once_with(\n security_group_rule_id)\n\n def test_list_security_groups_rule(self):\n sgr1 = {\"id\": \"sg-1\", \"name\": \"foo\"}\n sgr2 = {\"id\": \"sg-2\", \"name\": \"bar\"}\n 
self.nc.list_security_group_rules.return_value = {\n \"security_group_rules\": [sgr1, sgr2]\n }\n\n self.assertEqual([sgr1, sgr2],\n self.neutron.list_security_group_rules())\n self.nc.list_security_group_rules.assert_called_once_with()\n\n def test_list_agents(self):\n agent1 = {\"id\": \"agent-1\", \"name\": \"foo\"}\n agent2 = {\"id\": \"agent-2\", \"name\": \"bar\"}\n self.nc.list_agents.return_value = {\"agents\": [agent1, agent2]}\n\n self.assertEqual([agent1, agent2], self.neutron.list_agents())\n self.nc.list_agents.assert_called_once_with()\n\n def test_list_extensions(self):\n ext1 = {\"alias\": \"foo\"}\n ext2 = {\"alias\": \"bar\"}\n self.nc.list_extensions.return_value = {\"extensions\": [ext1, ext2]}\n\n self.assertEqual([ext1, ext2], self.neutron.list_extensions())\n self.nc.list_extensions.assert_called_once_with()\n\n def test_cached_supported_extensions(self):\n ext1 = {\"alias\": \"foo\"}\n ext2 = {\"alias\": \"bar\"}\n self.nc.list_extensions.return_value = {\"extensions\": [ext1, ext2]}\n\n self.assertEqual([ext1, ext2],\n self.neutron.cached_supported_extensions)\n self.nc.list_extensions.assert_called_once_with()\n\n self.nc.list_extensions.reset_mock()\n # another try\n self.assertEqual([ext1, ext2],\n self.neutron.cached_supported_extensions)\n self.assertFalse(self.nc.list_extensions.called)\n\n def test_supports_extension(self):\n ext1 = {\"alias\": \"foo\"}\n ext2 = {\"alias\": \"bar\"}\n self.nc.list_extensions.return_value = {\"extensions\": [ext1, ext2]}\n\n self.assertTrue(self.neutron.supports_extension(\"foo\"))\n self.assertTrue(self.neutron.supports_extension(\"bar\"))\n self.assertFalse(self.neutron.supports_extension(\"xxx\", silent=True))\n self.assertRaises(exceptions.NotFoundException,\n self.neutron.supports_extension, \"xxx\")\n\n # this should be called once\n self.nc.list_extensions.assert_called_once_with()\n" }, { "alpha_fraction": 0.5888619422912598, "alphanum_fraction": 0.5949969291687012, "avg_line_length": 
44.51298522949219, "blob_id": "4a2423c52b59e9bba23dd9bedb723b2333b16c1c", "content_id": "9e503b9e77c7902daa47a399375dc6c28d92e214", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21027, "license_type": "permissive", "max_line_length": 79, "num_lines": 462, "path": "/tests/unit/verification/tempest/test_context.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nfrom unittest import mock\n\nimport ddt\nimport requests\n\nfrom rally.common import cfg\nfrom rally import exceptions\n\nfrom rally_openstack.verification.tempest import config\nfrom rally_openstack.verification.tempest import context\nfrom tests.unit import fakes\nfrom tests.unit import test\n\n\nCONF = cfg.CONF\n\n\nCRED = {\n \"username\": \"admin\",\n \"tenant_name\": \"admin\",\n \"password\": \"admin-12345\",\n \"auth_url\": \"http://test:5000/v2.0/\",\n \"permission\": \"admin\",\n \"region_name\": \"test\",\n \"https_insecure\": False,\n \"https_cacert\": \"/path/to/cacert/file\",\n \"user_domain_name\": \"admin\",\n \"project_domain_name\": \"admin\"\n}\n\nNET_PATH = \"rally_openstack.common.services.network\"\nPATH = \"rally_openstack.verification.tempest.context\"\n\n\[email protected]\nclass TempestContextTestCase(test.TestCase):\n\n def setUp(self):\n 
super(TempestContextTestCase, self).setUp()\n\n self.mock_isfile = mock.patch(\"os.path.isfile\",\n return_value=True).start()\n\n self.cred = fakes.FakeCredential(**CRED)\n p_cred = mock.patch(PATH + \".credential.OpenStackCredential\",\n return_value=self.cred)\n p_cred.start()\n self.addCleanup(p_cred.stop)\n self.env = mock.Mock(data={\"platforms\": {\"openstack\": {\n \"platform_data\": {\"admin\": {}}}}}\n )\n cfg = {\"verifier\": mock.Mock(env=self.env),\n \"verification\": {\"uuid\": \"uuid\"}}\n cfg[\"verifier\"].manager.home_dir = \"/p/a/t/h\"\n cfg[\"verifier\"].manager.configfile = \"/fake/path/to/config\"\n self.context = context.TempestContext(cfg)\n self.context.conf.add_section(\"compute\")\n self.context.conf.add_section(\"orchestration\")\n self.context.conf.add_section(\"scenario\")\n\n @mock.patch(\"%s.open\" % PATH, side_effect=mock.mock_open(), create=True)\n def test__download_image_from_glance(self, mock_open):\n self.mock_isfile.return_value = False\n img_path = os.path.join(self.context.data_dir, \"foo\")\n img = mock.MagicMock()\n glanceclient = self.context.clients.glance()\n glanceclient.images.data.return_value = \"data\"\n\n self.context._download_image_from_source(img_path, img)\n mock_open.assert_called_once_with(img_path, \"wb\")\n glanceclient.images.data.assert_called_once_with(img.id)\n mock_open().write.assert_has_calls([mock.call(\"d\"),\n mock.call(\"a\"),\n mock.call(\"t\"),\n mock.call(\"a\")])\n\n @mock.patch(\"%s.open\" % PATH, side_effect=mock.mock_open())\n @mock.patch(\"requests.get\", return_value=mock.MagicMock(status_code=200))\n def test__download_image_from_url_success(self, mock_get, mock_open):\n self.mock_isfile.return_value = False\n img_path = os.path.join(self.context.data_dir, \"foo\")\n mock_get.return_value.iter_content.return_value = \"data\"\n\n self.context._download_image_from_source(img_path)\n mock_get.assert_called_once_with(CONF.openstack.img_url, stream=True)\n 
mock_open.assert_called_once_with(img_path, \"wb\")\n mock_open().write.assert_has_calls([mock.call(\"d\"),\n mock.call(\"a\"),\n mock.call(\"t\"),\n mock.call(\"a\")])\n\n @mock.patch(\"requests.get\")\n @ddt.data(404, 500)\n def test__download_image_from_url_failure(self, status_code, mock_get):\n self.mock_isfile.return_value = False\n mock_get.return_value = mock.MagicMock(status_code=status_code)\n self.assertRaises(exceptions.RallyException,\n self.context._download_image_from_source,\n os.path.join(self.context.data_dir, \"foo\"))\n\n @mock.patch(\"requests.get\", side_effect=requests.ConnectionError())\n def test__download_image_from_url_connection_error(\n self, mock_requests_get):\n self.mock_isfile.return_value = False\n self.assertRaises(exceptions.RallyException,\n self.context._download_image_from_source,\n os.path.join(self.context.data_dir, \"foo\"))\n\n @mock.patch(\"rally_openstack.common.wrappers.\"\n \"network.NeutronWrapper.create_network\")\n @mock.patch(\"%s.open\" % PATH, side_effect=mock.mock_open())\n def test_options_configured_manually(\n self, mock_open, mock_neutron_wrapper_create_network):\n self.context.available_services = [\"glance\", \"heat\", \"nova\", \"neutron\"]\n\n self.context.conf.set(\"compute\", \"image_ref\", \"id1\")\n self.context.conf.set(\"compute\", \"image_ref_alt\", \"id2\")\n self.context.conf.set(\"compute\", \"flavor_ref\", \"id3\")\n self.context.conf.set(\"compute\", \"flavor_ref_alt\", \"id4\")\n self.context.conf.set(\"compute\", \"fixed_network_name\", \"name1\")\n self.context.conf.set(\"orchestration\", \"instance_type\", \"id5\")\n self.context.conf.set(\"scenario\", \"img_file\", \"id6\")\n\n self.context.__enter__()\n\n glanceclient = self.context.clients.glance()\n novaclient = self.context.clients.nova()\n\n self.assertEqual(0, glanceclient.images.create.call_count)\n self.assertEqual(0, novaclient.flavors.create.call_count)\n self.assertEqual(0, mock_neutron_wrapper_create_network.call_count)\n\n 
def test__create_tempest_roles(self):\n role1 = CONF.openstack.swift_operator_role\n role2 = CONF.openstack.swift_reseller_admin_role\n role3 = CONF.openstack.heat_stack_owner_role\n role4 = CONF.openstack.heat_stack_user_role\n\n client = self.context.clients.verified_keystone()\n client.roles.list.return_value = [fakes.FakeRole(name=role1),\n fakes.FakeRole(name=role2)]\n client.roles.create.side_effect = [fakes.FakeFlavor(name=role3),\n fakes.FakeFlavor(name=role4)]\n\n self.context._create_tempest_roles()\n self.assertEqual(2, client.roles.create.call_count)\n\n created_roles = [role.name for role in self.context._created_roles]\n self.assertIn(role3, created_roles)\n self.assertIn(role4, created_roles)\n\n @mock.patch(\"rally_openstack.common.services.image.image.Image\")\n def test__discover_image(self, mock_image):\n client = mock_image.return_value\n client.list_images.return_value = [fakes.FakeImage(name=\"Foo\"),\n fakes.FakeImage(name=\"CirrOS\")]\n\n image = self.context._discover_image()\n self.assertEqual(\"CirrOS\", image.name)\n\n @mock.patch(\"%s.open\" % PATH, side_effect=mock.mock_open(), create=True)\n @mock.patch(\"rally_openstack.common.services.image.image.Image\")\n @mock.patch(\"os.path.isfile\", return_value=False)\n def test__download_image(self, mock_isfile, mock_image, mock_open):\n img_1 = mock.MagicMock()\n img_1.name = \"Foo\"\n img_2 = mock.MagicMock()\n img_2.name = \"CirrOS\"\n glanceclient = self.context.clients.glance()\n glanceclient.images.data.return_value = \"data\"\n mock_image.return_value.list_images.return_value = [img_1, img_2]\n\n self.context._download_image()\n img_path = os.path.join(self.context.data_dir, self.context.image_name)\n mock_image.return_value.list_images.assert_called_once_with(\n status=\"active\", visibility=\"public\")\n glanceclient.images.data.assert_called_once_with(img_2.id)\n mock_open.assert_called_once_with(img_path, \"wb\")\n mock_open().write.assert_has_calls([mock.call(\"d\"),\n 
mock.call(\"a\"),\n mock.call(\"t\"),\n mock.call(\"a\")])\n\n # We can choose any option to test the '_configure_option' method. So let's\n # configure the 'flavor_ref' option.\n def test__configure_option(self):\n helper_method = mock.MagicMock()\n helper_method.side_effect = [fakes.FakeFlavor(id=\"id1\")]\n\n self.context.conf.set(\"compute\", \"flavor_ref\", \"\")\n self.context._configure_option(\"compute\", \"flavor_ref\",\n helper_method=helper_method, flv_ram=64,\n flv_disk=5)\n self.assertEqual(1, helper_method.call_count)\n\n result = self.context.conf.get(\"compute\", \"flavor_ref\")\n self.assertEqual(\"id1\", result)\n\n @mock.patch(\"rally_openstack.common.services.image.image.Image\")\n def test__discover_or_create_image_when_image_exists(self, mock_image):\n client = mock_image.return_value\n client.list_images.return_value = [fakes.FakeImage(name=\"CirrOS\")]\n\n image = self.context._discover_or_create_image()\n self.assertEqual(\"CirrOS\", image.name)\n self.assertEqual(0, client.create_image.call_count)\n self.assertEqual(0, len(self.context._created_images))\n\n @mock.patch(\"rally_openstack.common.services.image.image.Image\")\n def test__discover_or_create_image(self, mock_image):\n client = mock_image.return_value\n\n image = self.context._discover_or_create_image()\n self.assertEqual(image, mock_image().create_image.return_value)\n self.assertEqual(self.context._created_images[0],\n client.create_image.return_value)\n params = {\"container_format\": CONF.openstack.img_container_format,\n \"image_location\": mock.ANY,\n \"disk_format\": CONF.openstack.img_disk_format,\n \"image_name\": mock.ANY,\n \"visibility\": \"public\"}\n client.create_image.assert_called_once_with(**params)\n\n def test__discover_or_create_flavor_when_flavor_exists(self):\n client = self.context.clients.nova()\n client.flavors.list.return_value = [fakes.FakeFlavor(id=\"id1\", ram=64,\n vcpus=1, disk=5)]\n\n flavor = self.context._discover_or_create_flavor(64, 5)\n 
self.assertEqual(\"id1\", flavor.id)\n self.assertEqual(0, len(self.context._created_flavors))\n\n def test__discover_or_create_flavor(self):\n client = self.context.clients.nova()\n client.flavors.list.return_value = []\n client.flavors.create.side_effect = [fakes.FakeFlavor(id=\"id1\")]\n\n flavor = self.context._discover_or_create_flavor(64, 5)\n self.assertEqual(\"id1\", flavor.id)\n self.assertEqual(\"id1\", self.context._created_flavors[0].id)\n\n def test__create_network_resources(self):\n client = self.context.clients.neutron()\n fake_network = {\n \"id\": \"nid1\",\n \"name\": \"network\",\n \"status\": \"status\"}\n\n client.create_network.side_effect = [{\"network\": fake_network}]\n client.create_router.side_effect = [{\"router\": {\"id\": \"rid1\"}}]\n client.create_subnet.side_effect = [{\"subnet\": {\"id\": \"subid1\"}}]\n client.list_networks.return_value = {\"networks\": []}\n\n net_topo = self.context._create_network_resources()\n\n self.assertEqual(\"nid1\", net_topo[\"network\"][\"id\"])\n self.assertEqual(\"rid1\", net_topo[\"routers\"][0][\"id\"])\n self.assertEqual(\"subid1\", net_topo[\"subnets\"][0][\"id\"])\n\n @mock.patch(\"%s.neutron.NeutronService.supports_extension\" % PATH)\n def test__create_network_resources_public_network_override(\n self, mock_supports_extension):\n mock_supports_extension.return_value = True\n\n client = self.context.clients.neutron()\n conf = self.context.conf\n\n conf.add_section(\"network\")\n conf.set(\"network\", \"public_network_id\", \"my-uuid\")\n\n fake_network = {\n \"id\": \"nid1\",\n \"name\": \"network\",\n \"status\": \"status\"}\n\n client.create_network.side_effect = [{\"network\": fake_network}]\n client.create_router.side_effect = [{\"router\": {\"id\": \"rid1\"}}]\n client.create_subnet.side_effect = [{\"subnet\": {\"id\": \"subid1\"}}]\n client.list_networks.return_value = {\"networks\": []}\n\n self.context._create_network_resources()\n _name, pos, _kwargs = 
client.create_router.mock_calls[0]\n router = pos[0][\"router\"]\n external_gateway_info = router[\"external_gateway_info\"]\n self.assertEqual(\"my-uuid\", external_gateway_info[\"network_id\"])\n self.assertTrue(external_gateway_info[\"enable_snat\"])\n\n def test__cleanup_tempest_roles(self):\n self.context._created_roles = [fakes.FakeRole(), fakes.FakeRole()]\n\n self.context._cleanup_tempest_roles()\n client = self.context.clients.keystone()\n self.assertEqual(2, client.roles.delete.call_count)\n\n @mock.patch(\"rally_openstack.common.services.image.image.Image\")\n def test__cleanup_images(self, mock_image):\n self.context._created_images = [fakes.FakeImage(id=\"id1\"),\n fakes.FakeImage(id=\"id2\")]\n\n self.context.conf.set(\"compute\", \"image_ref\", \"id1\")\n self.context.conf.set(\"compute\", \"image_ref_alt\", \"id2\")\n\n image_service = mock_image.return_value\n image_service.get_image.side_effect = [\n fakes.FakeImage(id=\"id1\", status=\"DELETED\"),\n fakes.FakeImage(id=\"id2\"),\n fakes.FakeImage(id=\"id2\", status=\"DELETED\")]\n\n self.context._cleanup_images()\n client = self.context.clients.glance()\n client.images.delete.assert_has_calls([mock.call(\"id1\"),\n mock.call(\"id2\")])\n\n self.assertEqual(\"\", self.context.conf.get(\"compute\", \"image_ref\"))\n self.assertEqual(\"\", self.context.conf.get(\"compute\", \"image_ref_alt\"))\n\n def test__cleanup_flavors(self):\n self.context._created_flavors = [fakes.FakeFlavor(id=\"id1\"),\n fakes.FakeFlavor(id=\"id2\"),\n fakes.FakeFlavor(id=\"id3\")]\n\n self.context.conf.set(\"compute\", \"flavor_ref\", \"id1\")\n self.context.conf.set(\"compute\", \"flavor_ref_alt\", \"id2\")\n self.context.conf.set(\"orchestration\", \"instance_type\", \"id3\")\n\n self.context._cleanup_flavors()\n client = self.context.clients.nova()\n self.assertEqual(3, client.flavors.delete.call_count)\n\n self.assertEqual(\"\", self.context.conf.get(\"compute\", \"flavor_ref\"))\n self.assertEqual(\"\", 
self.context.conf.get(\"compute\",\n \"flavor_ref_alt\"))\n self.assertEqual(\"\", self.context.conf.get(\"orchestration\",\n \"instance_type\"))\n\n @mock.patch(\"%s.neutron.NeutronService.delete_network_topology\" % PATH)\n def test__cleanup_network_resources(self, mock_delete_network_topology):\n self.context._created_networks = [{\"network\": {\"name\": \"net-12345\"}}]\n self.context.conf.set(\"compute\", \"fixed_network_name\", \"net-12345\")\n\n self.context._cleanup_network_resources()\n\n mock_delete_network_topology.assert_called_once_with(\n self.context._created_networks[0]\n )\n self.assertEqual(\"\", self.context.conf.get(\"compute\",\n \"fixed_network_name\"))\n\n @mock.patch(\"%s.open\" % PATH, side_effect=mock.mock_open())\n @mock.patch(\"%s.TempestContext._configure_option\" % PATH)\n @mock.patch(\"%s.TempestContext._create_tempest_roles\" % PATH)\n @mock.patch(\"rally.verification.utils.create_dir\")\n def test_setup(self, mock_create_dir,\n mock__create_tempest_roles, mock__configure_option,\n mock_open):\n verifier = mock.Mock(env=self.env, version=\"27.1.0\")\n verifier.manager.home_dir = \"/p/a/t/h\"\n\n # case #1: no neutron and heat\n self.cred.clients.return_value.services.return_value = {}\n\n ctx = context.TempestContext({\"verifier\": verifier})\n ctx.conf = mock.Mock()\n ctx.setup()\n\n ctx.conf.read.assert_called_once_with(verifier.manager.configfile)\n mock_create_dir.assert_called_once_with(ctx.data_dir)\n mock__create_tempest_roles.assert_called_once_with()\n mock_open.assert_called_once_with(verifier.manager.configfile, \"w\")\n ctx.conf.write(mock_open.side_effect())\n self.assertEqual(\n [mock.call(\"DEFAULT\", \"log_file\", \"/p/a/t/h/tempest.log\"),\n mock.call(\"oslo_concurrency\", \"lock_path\", \"/p/a/t/h/lock_files\"),\n mock.call(\"scenario\", \"img_file\", \"/p/a/t/h/\" + ctx.image_name,\n helper_method=ctx._download_image),\n mock.call(\"compute\", \"image_ref\",\n helper_method=ctx._discover_or_create_image),\n 
mock.call(\"compute\", \"image_ref_alt\",\n helper_method=ctx._discover_or_create_image),\n mock.call(\"compute\", \"flavor_ref\",\n helper_method=ctx._discover_or_create_flavor,\n flv_ram=config.CONF.openstack.flavor_ref_ram,\n flv_disk=config.CONF.openstack.flavor_ref_disk),\n mock.call(\"compute\", \"flavor_ref_alt\",\n helper_method=ctx._discover_or_create_flavor,\n flv_ram=config.CONF.openstack.flavor_ref_alt_ram,\n flv_disk=config.CONF.openstack.flavor_ref_alt_disk)],\n mock__configure_option.call_args_list)\n\n mock_create_dir.reset_mock()\n mock__create_tempest_roles.reset_mock()\n mock_open.reset_mock()\n mock__configure_option.reset_mock()\n\n # case #2: neutron and heat are presented\n self.cred.clients.return_value.services.return_value = {\n \"network\": \"neutron\", \"orchestration\": \"heat\"}\n\n ctx = context.TempestContext({\"verifier\": verifier})\n neutron = ctx.clients.neutron()\n neutron.list_networks.return_value = {\"networks\": [\"fake_net\"]}\n ctx.conf = mock.Mock()\n ctx.setup()\n\n ctx.conf.read.assert_called_once_with(verifier.manager.configfile)\n mock_create_dir.assert_called_once_with(ctx.data_dir)\n mock__create_tempest_roles.assert_called_once_with()\n mock_open.assert_called_once_with(verifier.manager.configfile, \"w\")\n ctx.conf.write(mock_open.side_effect())\n self.assertEqual([\n mock.call(\"DEFAULT\", \"log_file\", \"/p/a/t/h/tempest.log\"),\n mock.call(\"oslo_concurrency\", \"lock_path\", \"/p/a/t/h/lock_files\"),\n mock.call(\"scenario\", \"img_file\", \"/p/a/t/h/\" + ctx.image_name,\n helper_method=ctx._download_image),\n mock.call(\"compute\", \"image_ref\",\n helper_method=ctx._discover_or_create_image),\n mock.call(\"compute\", \"image_ref_alt\",\n helper_method=ctx._discover_or_create_image),\n mock.call(\"compute\", \"flavor_ref\",\n helper_method=ctx._discover_or_create_flavor,\n flv_ram=config.CONF.openstack.flavor_ref_ram,\n flv_disk=config.CONF.openstack.flavor_ref_disk),\n mock.call(\"compute\", 
\"flavor_ref_alt\",\n helper_method=ctx._discover_or_create_flavor,\n flv_ram=config.CONF.openstack.flavor_ref_alt_ram,\n flv_disk=config.CONF.openstack.flavor_ref_alt_disk),\n mock.call(\"compute\", \"fixed_network_name\",\n helper_method=ctx._create_network_resources),\n mock.call(\"orchestration\", \"instance_type\",\n helper_method=ctx._discover_or_create_flavor,\n flv_ram=config.CONF.openstack.heat_instance_type_ram,\n flv_disk=config.CONF.openstack.heat_instance_type_disk)\n ], mock__configure_option.call_args_list)\n\n # case 3: tempest is old.\n verifier.version = \"17.0.0\"\n ctx = context.TempestContext({\"verifier\": verifier})\n ctx.conf = mock.Mock()\n ctx.setup()\n mock__configure_option.assert_has_calls(\n [\n mock.call(\"scenario\", \"img_dir\", \"/p/a/t/h\"),\n mock.call(\"scenario\", \"img_file\", ctx.image_name,\n helper_method=ctx._download_image)\n ]\n )\n" }, { "alpha_fraction": 0.6283121705055237, "alphanum_fraction": 0.634482741355896, "avg_line_length": 38.92753601074219, "blob_id": "1c3131a91127b16634e126777bda657d0fc86afd", "content_id": "43c54ceac841d014d58a37509271bd6985bef4c4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2755, "license_type": "permissive", "max_line_length": 78, "num_lines": 69, "path": "/tests/unit/task/scenarios/zaqar/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright (c) 2014 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.zaqar import utils\nfrom tests.unit import fakes\nfrom tests.unit import test\n\nUTILS = \"rally_openstack.task.scenarios.zaqar.utils.\"\n\n\nclass ZaqarScenarioTestCase(test.ScenarioTestCase):\n\n @mock.patch(UTILS + \"ZaqarScenario.generate_random_name\",\n return_value=\"kitkat\")\n def test_queue_create(self, mock_generate_random_name):\n scenario = utils.ZaqarScenario(self.context)\n result = scenario._queue_create(fakearg=\"fakearg\")\n\n self.assertEqual(self.clients(\"zaqar\").queue.return_value, result)\n self.clients(\"zaqar\").queue.assert_called_once_with(\"kitkat\",\n fakearg=\"fakearg\")\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"zaqar.create_queue\")\n\n def test_queue_delete(self):\n queue = fakes.FakeQueue()\n queue.delete = mock.MagicMock()\n\n scenario = utils.ZaqarScenario(context=self.context)\n scenario._queue_delete(queue)\n queue.delete.assert_called_once_with()\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"zaqar.delete_queue\")\n\n def test_messages_post(self):\n queue = fakes.FakeQueue()\n queue.post = mock.MagicMock()\n\n messages = [{\"body\": {\"id\": \"one\"}, \"ttl\": 100},\n {\"body\": {\"id\": \"two\"}, \"ttl\": 120},\n {\"body\": {\"id\": \"three\"}, \"ttl\": 140}]\n min_msg_count = max_msg_count = len(messages)\n\n scenario = utils.ZaqarScenario(context=self.context)\n scenario._messages_post(queue, messages, min_msg_count, max_msg_count)\n queue.post.assert_called_once_with(messages)\n\n def test_messages_list(self):\n queue = fakes.FakeQueue()\n queue.messages = mock.MagicMock()\n\n scenario = utils.ZaqarScenario(context=self.context)\n scenario._messages_list(queue)\n queue.messages.assert_called_once_with()\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"zaqar.list_messages\")\n" }, { "alpha_fraction": 
0.627173900604248, "alphanum_fraction": 0.655434787273407, "avg_line_length": 35.79999923706055, "blob_id": "dffb67ff4ff77710b81b79002090bdc8fce68778", "content_id": "330180b362f427c97b898f26ffb47f142fc36a3c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 920, "license_type": "permissive", "max_line_length": 80, "num_lines": 25, "path": "/samples/tasks/support/instance_linpack.sh", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#!/bin/sh\n# Location of Linpack binary\nLINPACK='/opt/linpack/xlinpack_xeon64'\ntype -P $LINPACK &>/dev/null && continue || { echo \"Linpack Not Found\"; exit 1 }\n\n# Location to create linpack dat file\nLINPACK_DAT='~/linpack.dat'\n\nNUM_CPU=`cat /proc/cpuinfo | grep processor | wc -l`\nexport OMP_NUM_THREADS=$NUM_CPU\necho \"Sample Intel(R) LINPACK data file (from lininput_xeon64)\" > ${LINPACK_DAT}\necho \"Intel(R) LINPACK data\" >> ${LINPACK_DAT}\necho \"1 # number of tests\" >> ${LINPACK_DAT}\necho \"10514 # problem sizes\" >> ${LINPACK_DAT}\necho \"20016 # leading dimensions\" >> ${LINPACK_DAT}\necho \"2 # times to run a test \" >> ${LINPACK_DAT}\necho \"4 # alignment values (in KBytes)\" >> ${LINPACK_DAT}\nOUTPUT=$(${LINPACK} < ${LINPACK_DAT} | grep -A 1 Average | grep 20016)\nAVERAGE=$(echo $OUTPUT | awk '{print $4}')\nMAX=$(echo $OUTPUT | awk '{print $5}')\n\necho \"{\n \\\"average_gflops\\\": $AVERAGE,\n \\\"max_gflops\\\": $MAX\n }\"\n" }, { "alpha_fraction": 0.631116509437561, "alphanum_fraction": 0.6361497640609741, "avg_line_length": 28.81971549987793, "blob_id": "b86ac3708805568dc97f2656973e468542b62f9a", "content_id": "9723a2854ce7a33c87e4eda81ca30be9730624b6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33577, "license_type": "permissive", "max_line_length": 79, "num_lines": 1126, "path": "/rally_openstack/task/cleanup/resources.py", "repo_name": 
"openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.task import utils as task_utils\n\nfrom rally_openstack.common.services.identity import identity\nfrom rally_openstack.common.services.image import glance_v2\nfrom rally_openstack.common.services.image import image\nfrom rally_openstack.common.services.network import neutron\nfrom rally_openstack.task.cleanup import base\n\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\ndef get_order(start):\n return iter(range(start, start + 99))\n\n\nclass SynchronizedDeletion(object):\n\n def is_deleted(self):\n return True\n\n\nclass QuotaMixin(SynchronizedDeletion, base.ResourceManager):\n # NOTE(andreykurilin): Quotas resources are quite complex in terms of\n # cleanup. First of all, they do not have name, id fields at all. There\n # is only one identifier - reference to Keystone Project/Tenant. Also,\n # we should remove them in case of existing users case... 
To cover both\n # cases we should use project name as name field (it will allow to pass\n # existing users case) and project id as id of resource\n\n def list(self):\n if not self.tenant_uuid:\n return []\n client = self._admin_required and self.admin or self.user\n project = identity.Identity(client).get_project(self.tenant_uuid)\n return [project]\n\n\n# MAGNUM\n\n_magnum_order = get_order(80)\n\n\[email protected](service=None, resource=None)\nclass MagnumMixin(base.ResourceManager):\n\n def id(self):\n \"\"\"Returns id of resource.\"\"\"\n return self.raw_resource.uuid\n\n def list(self):\n result = []\n marker = None\n while True:\n resources = self._manager().list(marker=marker)\n if not resources:\n break\n result.extend(resources)\n marker = resources[-1].uuid\n return result\n\n\[email protected](\"magnum\", \"clusters\", order=next(_magnum_order),\n tenant_resource=True)\nclass MagnumCluster(MagnumMixin):\n \"\"\"Resource class for Magnum cluster.\"\"\"\n\n\[email protected](\"magnum\", \"cluster_templates\", order=next(_magnum_order),\n tenant_resource=True)\nclass MagnumClusterTemplate(MagnumMixin):\n \"\"\"Resource class for Magnum cluster_template.\"\"\"\n\n\n# HEAT\n\[email protected](\"heat\", \"stacks\", order=100, tenant_resource=True)\nclass HeatStack(base.ResourceManager):\n def name(self):\n return self.raw_resource.stack_name\n\n\n# SENLIN\n\n_senlin_order = get_order(150)\n\n\[email protected](service=None, resource=None, admin_required=True)\nclass SenlinMixin(base.ResourceManager):\n\n def id(self):\n return self.raw_resource[\"id\"]\n\n def _manager(self):\n client = self._admin_required and self.admin or self.user\n return getattr(client, self._service)()\n\n def list(self):\n return getattr(self._manager(), self._resource)()\n\n def delete(self):\n # make singular form of resource name from plural form\n res_name = self._resource[:-1]\n return getattr(self._manager(), \"delete_%s\" % res_name)(self.id())\n\n\[email protected](\"senlin\", 
\"clusters\",\n admin_required=True, order=next(_senlin_order))\nclass SenlinCluster(SenlinMixin):\n \"\"\"Resource class for Senlin Cluster.\"\"\"\n\n\[email protected](\"senlin\", \"profiles\", order=next(_senlin_order),\n admin_required=False, tenant_resource=True)\nclass SenlinProfile(SenlinMixin):\n \"\"\"Resource class for Senlin Profile.\"\"\"\n\n\n# NOVA\n\n_nova_order = get_order(200)\n\n\[email protected](\"nova\", \"servers\", order=next(_nova_order),\n tenant_resource=True)\nclass NovaServer(base.ResourceManager):\n def list(self):\n \"\"\"List all servers.\"\"\"\n return self._manager().list(limit=-1)\n\n def delete(self):\n if getattr(self.raw_resource, \"OS-EXT-STS:locked\", False):\n self.raw_resource.unlock()\n super(NovaServer, self).delete()\n\n\[email protected](\"nova\", \"server_groups\", order=next(_nova_order),\n tenant_resource=True)\nclass NovaServerGroups(base.ResourceManager):\n pass\n\n\[email protected](\"nova\", \"keypairs\", order=next(_nova_order))\nclass NovaKeypair(SynchronizedDeletion, base.ResourceManager):\n pass\n\n\[email protected](\"nova\", \"quotas\", order=next(_nova_order),\n admin_required=True, tenant_resource=True)\nclass NovaQuotas(QuotaMixin):\n pass\n\n\[email protected](\"nova\", \"flavors\", order=next(_nova_order),\n admin_required=True, perform_for_admin_only=True)\nclass NovaFlavors(base.ResourceManager):\n pass\n\n def is_deleted(self):\n from novaclient import exceptions as nova_exc\n\n try:\n self._manager().get(self.name())\n except nova_exc.NotFound:\n return True\n\n return False\n\n\[email protected](\"nova\", \"aggregates\", order=next(_nova_order),\n admin_required=True, perform_for_admin_only=True)\nclass NovaAggregate(SynchronizedDeletion, base.ResourceManager):\n\n def delete(self):\n for host in self.raw_resource.hosts:\n self.raw_resource.remove_host(host)\n super(NovaAggregate, self).delete()\n\n\n# NEUTRON\n\n_neutron_order = get_order(300)\n\n\[email protected](service=None, resource=None, 
admin_required=True)\nclass NeutronMixin(SynchronizedDeletion, base.ResourceManager):\n\n @property\n def _neutron(self):\n return neutron.NeutronService(\n self._admin_required and self.admin or self.user)\n\n def _manager(self):\n client = self._admin_required and self.admin or self.user\n return getattr(client, self._service)()\n\n def id(self):\n return self.raw_resource[\"id\"]\n\n def name(self):\n return self.raw_resource[\"name\"]\n\n def delete(self):\n key = \"delete_%s\" % self._resource\n delete_method = getattr(\n self._neutron, key, getattr(self._manager(), key)\n )\n delete_method(self.id())\n\n @property\n def _plural_key(self):\n if self._resource.endswith(\"y\"):\n return self._resource[:-1] + \"ies\"\n else:\n return self._resource + \"s\"\n\n def list(self):\n list_method = getattr(self._manager(), \"list_%s\" % self._plural_key)\n result = list_method(tenant_id=self.tenant_uuid)[self._plural_key]\n if self.tenant_uuid:\n result = [r for r in result if r[\"tenant_id\"] == self.tenant_uuid]\n\n return result\n\n\nclass NeutronLbaasV1Mixin(NeutronMixin):\n\n def list(self):\n if self._neutron.supports_extension(\"lbaas\", silent=True):\n return super(NeutronLbaasV1Mixin, self).list()\n return []\n\n\[email protected](\"neutron\", \"vip\", order=next(_neutron_order),\n tenant_resource=True)\nclass NeutronV1Vip(NeutronLbaasV1Mixin):\n pass\n\n\[email protected](\"neutron\", \"health_monitor\", order=next(_neutron_order),\n tenant_resource=True)\nclass NeutronV1Healthmonitor(NeutronLbaasV1Mixin):\n pass\n\n\[email protected](\"neutron\", \"pool\", order=next(_neutron_order),\n tenant_resource=True)\nclass NeutronV1Pool(NeutronLbaasV1Mixin):\n pass\n\n\nclass NeutronLbaasV2Mixin(NeutronMixin):\n\n def list(self):\n if self._neutron.supports_extension(\"lbaasv2\", silent=True):\n return super(NeutronLbaasV2Mixin, self).list()\n return []\n\n\[email protected](\"neutron\", \"loadbalancer\", order=next(_neutron_order),\n tenant_resource=True)\nclass 
NeutronV2Loadbalancer(NeutronLbaasV2Mixin):\n\n def is_deleted(self):\n try:\n self._manager().show_loadbalancer(self.id())\n except Exception as e:\n return getattr(e, \"status_code\", 400) == 404\n\n return False\n\n# OCTAVIA\n\n\nclass OctaviaMixIn(NeutronMixin):\n\n @property\n def _client(self):\n # TODO(andreykurilin): use proper helper class from\n # rally_openstack.common.services as soon as it will have unified\n # style of arguments across all methods\n client = self.admin or self.user\n return getattr(client, self._service)()\n\n def delete(self):\n from octaviaclient.api.v2 import octavia as octavia_exc\n\n delete_method = getattr(self._client, \"%s_delete\" % self._resource)\n try:\n return delete_method(self.id())\n except octavia_exc.OctaviaClientException as e:\n if e.code == 409 and \"Invalid state PENDING_DELETE\" in e.message:\n # NOTE(andreykurilin): it is not ok. Probably this resource\n # is not properly cleanup-ed (without wait-for loop)\n # during the workload. No need to fail, continue silently.\n return\n raise\n\n def is_deleted(self):\n from osc_lib import exceptions as osc_exc\n\n show_method = getattr(self._client, \"%s_show\" % self._resource)\n try:\n show_method(self.id())\n except osc_exc.NotFound:\n return True\n return False\n\n def list(self):\n list_method = getattr(self._client, \"%s_list\" % self._resource)\n return list_method()[self._plural_key.replace(\"_\", \"\")]\n\n\[email protected](\"octavia\", \"load_balancer\", order=next(_neutron_order),\n tenant_resource=True)\nclass OctaviaLoadBalancers(OctaviaMixIn):\n def delete(self):\n from octaviaclient.api.v2 import octavia as octavia_exc\n\n delete_method = getattr(self._client, \"load_balancer_delete\")\n try:\n return delete_method(self.id(), cascade=True)\n except octavia_exc.OctaviaClientException as e:\n if e.code == 409 and \"Invalid state PENDING_DELETE\" in e.message:\n # NOTE(andreykurilin): it is not ok. 
Probably this resource\n # is not properly cleanup-ed (without wait-for loop)\n # during the workload. No need to fail, continue silently.\n return\n raise\n\n\[email protected](\"octavia\", \"pool\", order=next(_neutron_order),\n tenant_resource=True)\nclass OctaviaPools(OctaviaMixIn):\n pass\n\n\[email protected](\"octavia\", \"listener\", order=next(_neutron_order),\n tenant_resource=True)\nclass OctaviaListeners(OctaviaMixIn):\n pass\n\n\[email protected](\"octavia\", \"l7policy\", order=next(_neutron_order),\n tenant_resource=True)\nclass OctaviaL7Policies(OctaviaMixIn):\n pass\n\n\[email protected](\"octavia\", \"health_monitor\", order=next(_neutron_order),\n tenant_resource=True)\nclass OctaviaHealthMonitors(OctaviaMixIn):\n pass\n\n\[email protected](\"neutron\", \"bgpvpn\", order=next(_neutron_order),\n admin_required=True, perform_for_admin_only=True)\nclass NeutronBgpvpn(NeutronMixin):\n def list(self):\n if self._neutron.supports_extension(\"bgpvpn\", silent=True):\n return self._manager().list_bgpvpns()[\"bgpvpns\"]\n return []\n\n\[email protected](\"neutron\", \"floatingip\", order=next(_neutron_order),\n tenant_resource=True)\nclass NeutronFloatingIP(NeutronMixin):\n def name(self):\n return self.raw_resource.get(\"description\", \"\")\n\n def list(self):\n if CONF.openstack.pre_newton_neutron:\n # NOTE(andreykurilin): Neutron API of pre-newton openstack\n # releases does not support description field in Floating IPs.\n # We do not want to remove not-rally resources, so let's just do\n # nothing here and move pre-newton logic into separate plugins\n return []\n return super(NeutronFloatingIP, self).list()\n\n\[email protected](\"neutron\", \"trunk\", order=next(_neutron_order),\n tenant_resource=True)\nclass NeutronTrunk(NeutronMixin):\n # Trunks must be deleted before the parent/subports are deleted\n\n def list(self):\n try:\n return super(NeutronTrunk, self).list()\n except Exception as e:\n if getattr(e, \"status_code\", 400) == 404:\n return 
[]\n raise\n\n\[email protected](\"neutron\", \"port\", order=next(_neutron_order),\n tenant_resource=True)\nclass NeutronPort(NeutronMixin):\n # NOTE(andreykurilin): port is the kind of resource that can be created\n # automatically. In this case it doesn't have name field which matches\n # our resource name templates.\n\n def __init__(self, *args, **kwargs):\n super(NeutronPort, self).__init__(*args, **kwargs)\n self._cache = {}\n\n @property\n def ROUTER_INTERFACE_OWNERS(self):\n return self._neutron.ROUTER_INTERFACE_OWNERS\n\n @property\n def ROUTER_GATEWAY_OWNER(self):\n return self._neutron.ROUTER_GATEWAY_OWNER\n\n def _get_resources(self, resource):\n if resource not in self._cache:\n resources = getattr(self._neutron, \"list_%s\" % resource)()\n self._cache[resource] = [r for r in resources\n if r[\"tenant_id\"] == self.tenant_uuid]\n return self._cache[resource]\n\n def list(self):\n ports = self._get_resources(\"ports\")\n for port in ports:\n if not port.get(\"name\"):\n parent_name = None\n if (port[\"device_owner\"] in self.ROUTER_INTERFACE_OWNERS\n or port[\"device_owner\"] == self.ROUTER_GATEWAY_OWNER):\n # first case is a port created while adding an interface to\n # the subnet\n # second case is a port created while adding gateway for\n # the network\n port_router = [r for r in self._get_resources(\"routers\")\n if r[\"id\"] == port[\"device_id\"]]\n if port_router:\n parent_name = port_router[0][\"name\"]\n if parent_name:\n port[\"parent_name\"] = parent_name\n return ports\n\n def name(self):\n return self.raw_resource.get(\"parent_name\",\n self.raw_resource.get(\"name\", \"\"))\n\n def delete(self):\n found = self._neutron.delete_port(self.raw_resource)\n if not found:\n # Port can be already auto-deleted, skip silently\n LOG.debug(f\"Port {self.id()} was not deleted. 
Skip silently \"\n f\"because port can be already auto-deleted.\")\n\n\[email protected](\"neutron\", \"subnet\", order=next(_neutron_order),\n tenant_resource=True)\nclass NeutronSubnet(NeutronMixin):\n pass\n\n\[email protected](\"neutron\", \"network\", order=next(_neutron_order),\n tenant_resource=True)\nclass NeutronNetwork(NeutronMixin):\n pass\n\n\[email protected](\"neutron\", \"router\", order=next(_neutron_order),\n tenant_resource=True)\nclass NeutronRouter(NeutronMixin):\n pass\n\n\[email protected](\"neutron\", \"security_group\", order=next(_neutron_order),\n tenant_resource=True)\nclass NeutronSecurityGroup(NeutronMixin):\n def list(self):\n try:\n tenant_sgs = super(NeutronSecurityGroup, self).list()\n # NOTE(pirsriva): Filter out \"default\" security group deletion\n # by non-admin role user\n return filter(lambda r: r[\"name\"] != \"default\",\n tenant_sgs)\n except Exception as e:\n if getattr(e, \"status_code\", 400) == 404:\n return []\n raise\n\n\[email protected](\"neutron\", \"quota\", order=next(_neutron_order),\n admin_required=True, tenant_resource=True)\nclass NeutronQuota(QuotaMixin):\n\n def delete(self):\n self.admin.neutron().delete_quota(self.tenant_uuid)\n\n\n# CINDER\n\n_cinder_order = get_order(400)\n\n\[email protected](\"cinder\", \"backups\", order=next(_cinder_order),\n tenant_resource=True)\nclass CinderVolumeBackup(base.ResourceManager):\n pass\n\n\[email protected](\"cinder\", \"volume_types\", order=next(_cinder_order),\n admin_required=True, perform_for_admin_only=True)\nclass CinderVolumeType(base.ResourceManager):\n pass\n\n\[email protected](\"cinder\", \"volume_snapshots\", order=next(_cinder_order),\n tenant_resource=True)\nclass CinderVolumeSnapshot(base.ResourceManager):\n pass\n\n\[email protected](\"cinder\", \"transfers\", order=next(_cinder_order),\n tenant_resource=True)\nclass CinderVolumeTransfer(base.ResourceManager):\n pass\n\n\[email protected](\"cinder\", \"volumes\", order=next(_cinder_order),\n 
tenant_resource=True)\nclass CinderVolume(base.ResourceManager):\n pass\n\n\[email protected](\"cinder\", \"image_volumes_cache\", order=next(_cinder_order),\n admin_required=True, perform_for_admin_only=True)\nclass CinderImageVolumeCache(base.ResourceManager):\n\n def _glance(self):\n return image.Image(self.admin)\n\n def _manager(self):\n return self.admin.cinder().volumes\n\n def list(self):\n images = dict((\"image-%s\" % i.id, i)\n for i in self._glance().list_images())\n return [{\"volume\": v, \"image\": images[v.name]}\n for v in self._manager().list(search_opts={\"all_tenants\": 1})\n if v.name in images]\n\n def name(self):\n return self.raw_resource[\"image\"].name\n\n def id(self):\n return self.raw_resource[\"volume\"].id\n\n\[email protected](\"cinder\", \"quotas\", order=next(_cinder_order),\n admin_required=True, tenant_resource=True)\nclass CinderQuotas(QuotaMixin, base.ResourceManager):\n pass\n\n\[email protected](\"cinder\", \"qos_specs\", order=next(_cinder_order),\n admin_required=True, perform_for_admin_only=True)\nclass CinderQos(base.ResourceManager):\n pass\n\n# MANILA\n\n\n_manila_order = get_order(450)\n\n\[email protected](\"manila\", \"shares\", order=next(_manila_order),\n tenant_resource=True)\nclass ManilaShare(base.ResourceManager):\n pass\n\n\[email protected](\"manila\", \"share_networks\", order=next(_manila_order),\n tenant_resource=True)\nclass ManilaShareNetwork(base.ResourceManager):\n pass\n\n\[email protected](\"manila\", \"security_services\", order=next(_manila_order),\n tenant_resource=True)\nclass ManilaSecurityService(base.ResourceManager):\n pass\n\n\n# GLANCE\n\[email protected](\"glance\", \"images\", order=500, tenant_resource=True)\nclass GlanceImage(base.ResourceManager):\n\n def _client(self):\n return image.Image(self.admin or self.user)\n\n def list(self):\n images = (self._client().list_images(owner=self.tenant_uuid)\n + self._client().list_images(status=\"deactivated\",\n owner=self.tenant_uuid))\n return 
images\n\n def delete(self):\n client = self._client()\n if self.raw_resource.status == \"deactivated\":\n glancev2 = glance_v2.GlanceV2Service(self.admin or self.user)\n glancev2.reactivate_image(self.raw_resource.id)\n client.delete_image(self.raw_resource.id)\n task_utils.wait_for_status(\n self.raw_resource, [\"deleted\"],\n check_deletion=True,\n update_resource=self._client().get_image,\n timeout=CONF.openstack.glance_image_delete_timeout,\n check_interval=CONF.openstack.glance_image_delete_poll_interval)\n\n\n# SAHARA\n\n_sahara_order = get_order(600)\n\n\[email protected](\"sahara\", \"job_executions\", order=next(_sahara_order),\n tenant_resource=True)\nclass SaharaJobExecution(SynchronizedDeletion, base.ResourceManager):\n pass\n\n\[email protected](\"sahara\", \"jobs\", order=next(_sahara_order),\n tenant_resource=True)\nclass SaharaJob(SynchronizedDeletion, base.ResourceManager):\n pass\n\n\[email protected](\"sahara\", \"job_binary_internals\", order=next(_sahara_order),\n tenant_resource=True)\nclass SaharaJobBinaryInternals(SynchronizedDeletion, base.ResourceManager):\n pass\n\n\[email protected](\"sahara\", \"job_binaries\", order=next(_sahara_order),\n tenant_resource=True)\nclass SaharaJobBinary(SynchronizedDeletion, base.ResourceManager):\n pass\n\n\[email protected](\"sahara\", \"data_sources\", order=next(_sahara_order),\n tenant_resource=True)\nclass SaharaDataSource(SynchronizedDeletion, base.ResourceManager):\n pass\n\n\[email protected](\"sahara\", \"clusters\", order=next(_sahara_order),\n tenant_resource=True)\nclass SaharaCluster(base.ResourceManager):\n\n # Need special treatment for Sahara Cluster because of the way the\n # exceptions are described in:\n # https://github.com/openstack/python-saharaclient/blob/master/\n # saharaclient/api/base.py#L145\n\n def is_deleted(self):\n from saharaclient.api import base as saharaclient_base\n\n try:\n self._manager().get(self.id())\n return False\n except saharaclient_base.APIException as e:\n 
return e.error_code == 404\n\n\[email protected](\"sahara\", \"cluster_templates\", order=next(_sahara_order),\n tenant_resource=True)\nclass SaharaClusterTemplate(SynchronizedDeletion, base.ResourceManager):\n pass\n\n\[email protected](\"sahara\", \"node_group_templates\", order=next(_sahara_order),\n tenant_resource=True)\nclass SaharaNodeGroup(SynchronizedDeletion, base.ResourceManager):\n pass\n\n\n# CEILOMETER\n\[email protected](\"ceilometer\", \"alarms\", order=700, tenant_resource=True)\nclass CeilometerAlarms(SynchronizedDeletion, base.ResourceManager):\n\n def id(self):\n return self.raw_resource.alarm_id\n\n def list(self):\n query = [{\n \"field\": \"project_id\",\n \"op\": \"eq\",\n \"value\": self.tenant_uuid\n }]\n return self._manager().list(q=query)\n\n\n# ZAQAR\n\[email protected](\"zaqar\", \"queues\", order=800)\nclass ZaqarQueues(SynchronizedDeletion, base.ResourceManager):\n\n def list(self):\n return self.user.zaqar().queues()\n\n\n# DESIGNATE\n_designate_order = get_order(900)\n\n\nclass DesignateResource(SynchronizedDeletion, base.ResourceManager):\n\n # TODO(boris-42): This should be handled somewhere else.\n NAME_PREFIX = \"s_rally_\"\n\n def _manager(self, resource=None):\n # Map resource names to api / client version\n resource = resource or self._resource\n version = {\n \"domains\": \"1\",\n \"servers\": \"1\",\n \"records\": \"1\",\n \"recordsets\": \"2\",\n \"zones\": \"2\"\n }[resource]\n\n client = self._admin_required and self.admin or self.user\n return getattr(getattr(client, self._service)(version), resource)\n\n def id(self):\n \"\"\"Returns id of resource.\"\"\"\n return self.raw_resource[\"id\"]\n\n def name(self):\n \"\"\"Returns name of resource.\"\"\"\n return self.raw_resource[\"name\"]\n\n def list(self):\n return [item for item in self._manager().list()\n if item[\"name\"].startswith(self.NAME_PREFIX)]\n\n\[email protected](\"designate\", \"servers\", order=next(_designate_order),\n admin_required=True, 
perform_for_admin_only=True, threads=1)\nclass DesignateServer(DesignateResource):\n pass\n\n\[email protected](\"designate\", \"zones\", order=next(_designate_order),\n tenant_resource=True, threads=1)\nclass DesignateZones(DesignateResource):\n\n def list(self):\n marker = None\n criterion = {\"name\": \"%s*\" % self.NAME_PREFIX}\n\n while True:\n items = self._manager().list(marker=marker, limit=100,\n criterion=criterion)\n if not items:\n break\n for item in items:\n yield item\n marker = items[-1][\"id\"]\n\n\n# SWIFT\n\n_swift_order = get_order(1000)\n\n\nclass SwiftMixin(SynchronizedDeletion, base.ResourceManager):\n\n def _manager(self):\n client = self._admin_required and self.admin or self.user\n return getattr(client, self._service)()\n\n def id(self):\n return self.raw_resource\n\n def name(self):\n # NOTE(stpierre): raw_resource is a list of either [container\n # name, object name] (as in SwiftObject) or just [container\n # name] (as in SwiftContainer).\n return self.raw_resource[-1]\n\n def delete(self):\n delete_method = getattr(self._manager(), \"delete_%s\" % self._resource)\n # NOTE(weiwu): *self.raw_resource is required because for deleting\n # container we are passing only container name, to delete object we\n # should pass as first argument container and second is object name.\n delete_method(*self.raw_resource)\n\n\[email protected](\"swift\", \"object\", order=next(_swift_order),\n tenant_resource=True)\nclass SwiftObject(SwiftMixin):\n\n def list(self):\n object_list = []\n containers = self._manager().get_account(full_listing=True)[1]\n for con in containers:\n objects = self._manager().get_container(con[\"name\"],\n full_listing=True)[1]\n for obj in objects:\n raw_resource = [con[\"name\"], obj[\"name\"]]\n object_list.append(raw_resource)\n return object_list\n\n\[email protected](\"swift\", \"container\", order=next(_swift_order),\n tenant_resource=True)\nclass SwiftContainer(SwiftMixin):\n\n def list(self):\n containers = 
self._manager().get_account(full_listing=True)[1]\n return [[con[\"name\"]] for con in containers]\n\n\n# MISTRAL\n\n_mistral_order = get_order(1100)\n\n\[email protected](\"mistral\", \"workbooks\", order=next(_mistral_order),\n tenant_resource=True)\nclass MistralWorkbooks(SynchronizedDeletion, base.ResourceManager):\n def delete(self):\n self._manager().delete(self.raw_resource.name)\n\n\[email protected](\"mistral\", \"workflows\", order=next(_mistral_order),\n tenant_resource=True)\nclass MistralWorkflows(SynchronizedDeletion, base.ResourceManager):\n pass\n\n\[email protected](\"mistral\", \"executions\", order=next(_mistral_order),\n tenant_resource=True)\nclass MistralExecutions(SynchronizedDeletion, base.ResourceManager):\n\n def name(self):\n # NOTE(andreykurilin): Mistral Execution doesn't have own name which\n # we can use for filtering, but it stores workflow id and name, even\n # after workflow deletion.\n return self.raw_resource.workflow_name\n\n# MURANO\n\n\n_murano_order = get_order(1200)\n\n\[email protected](\"murano\", \"environments\", tenant_resource=True,\n order=next(_murano_order))\nclass MuranoEnvironments(SynchronizedDeletion, base.ResourceManager):\n pass\n\n\[email protected](\"murano\", \"packages\", tenant_resource=True,\n order=next(_murano_order))\nclass MuranoPackages(base.ResourceManager):\n def list(self):\n return filter(lambda x: x.name != \"Core library\",\n super(MuranoPackages, self).list())\n\n\n# IRONIC\n\n_ironic_order = get_order(1300)\n\n\[email protected](\"ironic\", \"node\", admin_required=True,\n order=next(_ironic_order), perform_for_admin_only=True)\nclass IronicNodes(base.ResourceManager):\n\n def id(self):\n return self.raw_resource.uuid\n\n\n# GNOCCHI\n\n_gnocchi_order = get_order(1400)\n\n\nclass GnocchiMixin(base.ResourceManager):\n\n def name(self):\n return self.raw_resource[\"name\"]\n\n def id(self):\n return self.raw_resource[\"name\"]\n\n\[email protected](\"gnocchi\", \"archive_policy_rule\", 
order=next(_gnocchi_order),\n admin_required=True, perform_for_admin_only=True)\nclass GnocchiArchivePolicyRule(GnocchiMixin):\n pass\n\n\[email protected](\"gnocchi\", \"archive_policy\", order=next(_gnocchi_order),\n admin_required=True, perform_for_admin_only=True)\nclass GnocchiArchivePolicy(GnocchiMixin):\n pass\n\n\[email protected](\"gnocchi\", \"resource_type\", order=next(_gnocchi_order),\n admin_required=True, perform_for_admin_only=True)\nclass GnocchiResourceType(GnocchiMixin):\n pass\n\n\[email protected](\"gnocchi\", \"metric\", order=next(_gnocchi_order),\n tenant_resource=True)\nclass GnocchiMetric(GnocchiMixin):\n\n def id(self):\n return self.raw_resource[\"id\"]\n\n def list(self):\n result = []\n marker = None\n while True:\n metrics = self._manager().list(marker=marker)\n if not metrics:\n break\n result.extend(metrics)\n marker = metrics[-1][\"id\"]\n if self.tenant_uuid:\n result = [r for r in result\n if r[\"creator\"].partition(\":\")[2] == self.tenant_uuid]\n\n return result\n\n\[email protected](\"gnocchi\", \"resource\", order=next(_gnocchi_order),\n tenant_resource=True)\nclass GnocchiResource(GnocchiMixin):\n def id(self):\n return self.raw_resource[\"id\"]\n\n def name(self):\n return self.raw_resource[\"original_resource_id\"]\n\n def is_deleted(self):\n from gnocchiclient import exceptions as gnocchi_exc\n try:\n self._manager().get(self.raw_resource[\"type\"], self.id())\n except gnocchi_exc.NotFound:\n return True\n return False\n\n def list(self):\n result = []\n marker = None\n while True:\n resources = self._manager().list(marker=marker)\n if not resources:\n break\n result.extend(resources)\n marker = resources[-1][\"id\"]\n\n return result\n\n\n# WATCHER\n\n_watcher_order = get_order(1500)\n\n\nclass WatcherMixin(SynchronizedDeletion, base.ResourceManager):\n\n def id(self):\n return self.raw_resource.uuid\n\n def list(self):\n return self._manager().list(limit=0)\n\n def is_deleted(self):\n from 
watcherclient.common.apiclient import exceptions\n try:\n self._manager().get(self.id())\n return False\n except exceptions.NotFound:\n return True\n\n\[email protected](\"watcher\", \"audit_template\", order=next(_watcher_order),\n admin_required=True, perform_for_admin_only=True)\nclass WatcherTemplate(WatcherMixin):\n pass\n\n\[email protected](\"watcher\", \"action_plan\", order=next(_watcher_order),\n admin_required=True, perform_for_admin_only=True)\nclass WatcherActionPlan(WatcherMixin):\n\n def name(self):\n return base.NoName(self._resource)\n\n\[email protected](\"watcher\", \"audit\", order=next(_watcher_order),\n admin_required=True, perform_for_admin_only=True)\nclass WatcherAudit(WatcherMixin):\n\n def name(self):\n return self.raw_resource.uuid\n\n\n# KEYSTONE\n\n_keystone_order = get_order(9000)\n\n\nclass KeystoneMixin(SynchronizedDeletion):\n\n def _manager(self):\n return identity.Identity(self.admin)\n\n def delete(self):\n delete_method = getattr(self._manager(), \"delete_%s\" % self._resource)\n delete_method(self.id())\n\n def list(self):\n resources = self._resource + \"s\"\n return getattr(self._manager(), \"list_%s\" % resources)()\n\n\[email protected](\"keystone\", \"user\", order=next(_keystone_order),\n admin_required=True, perform_for_admin_only=True)\nclass KeystoneUser(KeystoneMixin, base.ResourceManager):\n pass\n\n\[email protected](\"keystone\", \"project\", order=next(_keystone_order),\n admin_required=True, perform_for_admin_only=True)\nclass KeystoneProject(KeystoneMixin, base.ResourceManager):\n pass\n\n\[email protected](\"keystone\", \"service\", order=next(_keystone_order),\n admin_required=True, perform_for_admin_only=True)\nclass KeystoneService(KeystoneMixin, base.ResourceManager):\n pass\n\n\[email protected](\"keystone\", \"role\", order=next(_keystone_order),\n admin_required=True, perform_for_admin_only=True)\nclass KeystoneRole(KeystoneMixin, base.ResourceManager):\n pass\n\n\n# NOTE(andreykurilin): unfortunately, 
ec2 credentials doesn't have name\n# and id fields. It makes impossible to identify resources belonging to\n# particular task.\[email protected](\"keystone\", \"ec2\", tenant_resource=True,\n order=next(_keystone_order))\nclass KeystoneEc2(SynchronizedDeletion, base.ResourceManager):\n def _manager(self):\n return identity.Identity(self.user)\n\n def id(self):\n return \"n/a\"\n\n def name(self):\n return base.NoName(self._resource)\n\n @property\n def user_id(self):\n return self.user.keystone.auth_ref.user_id\n\n def list(self):\n return self._manager().list_ec2credentials(self.user_id)\n\n def delete(self):\n self._manager().delete_ec2credential(\n self.user_id, access=self.raw_resource.access)\n\n# BARBICAN\n\n\[email protected](\"barbican\", \"secrets\", order=1500, admin_required=True,\n perform_for_admin_only=True)\nclass BarbicanSecrets(base.ResourceManager):\n\n def id(self):\n return self.raw_resource.secret_ref\n\n def is_deleted(self):\n try:\n self._manager().get(self.id()).status\n except Exception:\n return True\n\n return False\n\n\[email protected](\"barbican\", \"containers\", order=1500, admin_required=True,\n perform_for_admin_only=True)\nclass BarbicanContainers(base.ResourceManager):\n pass\n\n\[email protected](\"barbican\", \"orders\", order=1500, admin_required=True,\n perform_for_admin_only=True)\nclass BarbicanOrders(base.ResourceManager):\n pass\n" }, { "alpha_fraction": 0.570859432220459, "alphanum_fraction": 0.5754126310348511, "avg_line_length": 34.49494934082031, "blob_id": "1a613de13162601d2b43348c0b119180e7f9310c", "content_id": "e270f6835536492a90a8fcd92a1d9705fb4ab012", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3514, "license_type": "permissive", "max_line_length": 78, "num_lines": 99, "path": "/rally_openstack/task/contexts/manila/manila_shares.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016 Mirantis 
Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts as rally_consts\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.contexts.manila import consts\nfrom rally_openstack.task.scenarios.manila import utils as manila_utils\n\nCONF = cfg.CONF\nCONTEXT_NAME = consts.SHARES_CONTEXT_NAME\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=CONTEXT_NAME, platform=\"openstack\", order=455)\nclass Shares(context.OpenStackContext):\n \"\"\"This context creates shares for Manila project.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": rally_consts.JSON_SCHEMA,\n \"properties\": {\n \"shares_per_tenant\": {\n \"type\": \"integer\",\n \"minimum\": 1,\n },\n \"size\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"share_proto\": {\n \"type\": \"string\",\n },\n \"share_type\": {\n \"type\": \"string\",\n },\n },\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\n \"shares_per_tenant\": 1,\n \"size\": 1,\n \"share_proto\": \"NFS\",\n \"share_type\": None,\n }\n\n def _create_shares(self, manila_scenario, tenant_id, share_proto, size=1,\n share_type=None):\n tenant_ctxt = self.context[\"tenants\"][tenant_id]\n tenant_ctxt.setdefault(\"shares\", [])\n for i in 
range(self.config[\"shares_per_tenant\"]):\n kwargs = {\"share_proto\": share_proto, \"size\": size}\n if share_type:\n kwargs[\"share_type\"] = share_type\n share_networks = tenant_ctxt.get(\"manila_share_networks\", {}).get(\n \"share_networks\", [])\n if share_networks:\n kwargs[\"share_network\"] = share_networks[\n i % len(share_networks)][\"id\"]\n share = manila_scenario._create_share(**kwargs)\n tenant_ctxt[\"shares\"].append(share.to_dict())\n\n def setup(self):\n for user, tenant_id in self._iterate_per_tenants():\n manila_scenario = manila_utils.ManilaScenario({\n \"task\": self.task,\n \"owner_id\": self.context[\"owner_id\"],\n \"user\": user\n })\n self._create_shares(\n manila_scenario,\n tenant_id,\n self.config[\"share_proto\"],\n self.config[\"size\"],\n self.config[\"share_type\"],\n )\n\n def cleanup(self):\n resource_manager.cleanup(\n names=[\"manila.shares\"],\n users=self.context.get(\"users\", []),\n superclass=manila_utils.ManilaScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.6847222447395325, "alphanum_fraction": 0.6902777552604675, "avg_line_length": 45.45161437988281, "blob_id": "11e85bb9095968020f869c06194de602e816b8dc", "content_id": "bdb3cdb6b0a69a09d5fd64e9113ed10f7cb3e1c7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1440, "license_type": "permissive", "max_line_length": 78, "num_lines": 31, "path": "/rally_openstack/task/scenarios/gnocchi/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally_openstack.common.services.gnocchi import metric\nfrom rally_openstack.task import scenario\n\n\nclass GnocchiBase(scenario.OpenStackScenario):\n \"\"\"Base class for Gnocchi scenarios with basic atomic actions.\"\"\"\n\n def __init__(self, context=None, admin_clients=None, clients=None):\n super(GnocchiBase, self).__init__(context, admin_clients, clients)\n if hasattr(self, \"_admin_clients\"):\n self.admin_gnocchi = metric.GnocchiService(\n self._admin_clients, name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n if hasattr(self, \"_clients\"):\n self.gnocchi = metric.GnocchiService(\n self._clients, name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n" }, { "alpha_fraction": 0.5754081606864929, "alphanum_fraction": 0.5776107907295227, "avg_line_length": 46.06097412109375, "blob_id": "6df5871254694e9d51a2370ad292f8cd87141124", "content_id": "13a12a36ff6820b221b773d244ebe0f6e2e94f2e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7718, "license_type": "permissive", "max_line_length": 78, "num_lines": 164, "path": "/tests/unit/task/contexts/neutron/test_lbaas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.neutron import lbaas as lbaas_context\nfrom tests.unit import test\n\nNET = \"rally_openstack.common.wrappers.network.\"\n\n\nclass LbaasTestCase(test.TestCase):\n def get_context(self, **kwargs):\n foo_tenant = {\"networks\": [{\"id\": \"foo_net\",\n \"tenant_id\": \"foo_tenant\",\n \"subnets\": [\"foo_subnet\"]}]}\n bar_tenant = {\"networks\": [{\"id\": \"bar_net\",\n \"tenant_id\": \"bar_tenant\",\n \"subnets\": [\"bar_subnet\"]}]}\n return {\"task\": {\"uuid\": \"foo_task\"},\n \"admin\": {\"credential\": \"foo_admin\"},\n \"users\": [{\"id\": \"foo_user\", \"tenant_id\": \"foo_tenant\"},\n {\"id\": \"bar_user\", \"tenant_id\": \"bar_tenant\"}],\n \"config\": {\"lbaas\": kwargs},\n \"tenants\": {\"foo_tenant\": foo_tenant,\n \"bar_tenant\": bar_tenant}}\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n @mock.patch(NET + \"wrap\", return_value=\"foo_service\")\n def test__init__default(self, mock_wrap, mock_clients):\n context = lbaas_context.Lbaas(self.get_context())\n self.assertEqual(\n context.config[\"pool\"][\"lb_method\"],\n lbaas_context.Lbaas.DEFAULT_CONFIG[\"pool\"][\"lb_method\"])\n self.assertEqual(\n context.config[\"pool\"][\"protocol\"],\n lbaas_context.Lbaas.DEFAULT_CONFIG[\"pool\"][\"protocol\"])\n self.assertEqual(\n context.config[\"lbaas_version\"],\n lbaas_context.Lbaas.DEFAULT_CONFIG[\"lbaas_version\"])\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n @mock.patch(NET + \"wrap\", return_value=\"foo_service\")\n def 
test__init__explicit(self, mock_wrap, mock_clients):\n context = lbaas_context.Lbaas(\n self.get_context(pool={\"lb_method\": \"LEAST_CONNECTIONS\"}))\n self.assertEqual(context.config[\"pool\"][\"lb_method\"],\n \"LEAST_CONNECTIONS\")\n\n @mock.patch(NET + \"wrap\")\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_setup_with_lbaas(self, mock_clients, mock_wrap):\n foo_net = {\"id\": \"foo_net\",\n \"tenant_id\": \"foo_tenant\",\n \"subnets\": [\"foo_subnet\"],\n \"lb_pools\": [{\"pool\": {\"id\": \"foo_pool\",\n \"tenant_id\": \"foo_tenant\"}}]}\n bar_net = {\"id\": \"bar_net\",\n \"tenant_id\": \"bar_tenant\",\n \"subnets\": [\"bar_subnet\"],\n \"lb_pools\": [{\"pool\": {\"id\": \"bar_pool\",\n \"tenant_id\": \"bar_tenant\"}}]}\n expected_net = [bar_net, foo_net]\n mock_create = mock.Mock(\n side_effect=lambda t, s,\n **kw: {\"pool\": {\"id\": str(t.split(\"_\")[0]) + \"_pool\",\n \"tenant_id\": t}})\n actual_net = []\n mock_wrap.return_value = mock.Mock(create_v1_pool=mock_create)\n net_wrapper = mock_wrap(mock_clients.return_value)\n net_wrapper.supports_extension.return_value = (True, None)\n fake_args = {\"lbaas_version\": 1}\n\n lb_context = lbaas_context.Lbaas(self.get_context(**fake_args))\n lb_context._iterate_per_tenants = mock.MagicMock(\n return_value=[\n (\"foo_user\", \"foo_tenant\"),\n (\"bar_user\", \"bar_tenant\")]\n )\n\n lb_context.setup()\n\n lb_context._iterate_per_tenants.assert_called_once_with()\n net_wrapper.supports_extension.assert_called_once_with(\"lbaas\")\n for tenant_id, tenant_ctx in (\n sorted(lb_context.context[\"tenants\"].items())):\n for network in tenant_ctx[\"networks\"]:\n actual_net.append(network)\n self.assertEqual(expected_net, actual_net)\n\n @mock.patch(NET + \"wrap\")\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_setup_with_no_lbaas(self, mock_clients, mock_wrap):\n mock_create = mock.Mock(side_effect=lambda t, **kw: t + \"-net\")\n mock_wrap.return_value = 
mock.Mock(create_v1_pool=mock_create)\n fake_args = {\"lbaas_version\": 1}\n lb_context = lbaas_context.Lbaas(self.get_context(**fake_args))\n net_wrapper = mock_wrap(mock_clients.return_value)\n net_wrapper.supports_extension.return_value = (False, None)\n\n lb_context._iterate_per_tenants = mock.MagicMock(\n return_value=[(\"bar_user\", \"bar_tenant\")]\n )\n lb_context.setup()\n\n lb_context._iterate_per_tenants.assert_not_called()\n net_wrapper.supports_extension.assert_called_once_with(\"lbaas\")\n assert not net_wrapper.create_v1_pool.called\n\n @mock.patch(NET + \"wrap\")\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_setup_with_lbaas_version_not_one(self, mock_clients, mock_wrap):\n mock_create = mock.Mock(side_effect=lambda t, **kw: t + \"-net\")\n mock_wrap.return_value = mock.Mock(create_v1_pool=mock_create)\n fake_args = {\"lbaas_version\": 2}\n\n lb_context = lbaas_context.Lbaas(self.get_context(**fake_args))\n lb_context._iterate_per_tenants = mock.MagicMock(\n return_value=[(\"bar_user\", \"bar_tenant\")]\n )\n net_wrapper = mock_wrap(mock_clients.return_value)\n net_wrapper.supports_extension.return_value = (True, None)\n self.assertRaises(NotImplementedError, lb_context.setup)\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n @mock.patch(NET + \"wrap\")\n def test_cleanup(self, mock_wrap, mock_clients):\n net_wrapper = mock_wrap(mock_clients.return_value)\n lb_context = lbaas_context.Lbaas(self.get_context())\n expected_pools = []\n for tenant_id, tenant_ctx in lb_context.context[\"tenants\"].items():\n resultant_pool = {\"pool\": {\n \"id\": str(tenant_id.split(\"_\")[0]) + \"_pool\"}}\n expected_pools.append(resultant_pool)\n for network in (\n lb_context.context[\"tenants\"][tenant_id][\"networks\"]):\n network.setdefault(\"lb_pools\", []).append(resultant_pool)\n lb_context.cleanup()\n net_wrapper.delete_v1_pool.assert_has_calls(\n [mock.call(pool[\"pool\"][\"id\"]) for pool in expected_pools])\n\n 
@mock.patch(\"rally_openstack.common.osclients.Clients\")\n @mock.patch(NET + \"wrap\")\n def test_cleanup_lbaas_version_not_one(self, mock_wrap, mock_clients):\n fakeargs = {\"lbaas_version\": 2}\n net_wrapper = mock_wrap(mock_clients.return_value)\n lb_context = lbaas_context.Lbaas(self.get_context(**fakeargs))\n for tenant_id, tenant_ctx in lb_context.context[\"tenants\"].items():\n resultant_pool = {\"pool\": {\n \"id\": str(tenant_id.split(\"_\")[0]) + \"_pool\"}}\n for network in (\n lb_context.context[\"tenants\"][tenant_id][\"networks\"]):\n network.setdefault(\"lb_pools\", []).append(resultant_pool)\n lb_context.cleanup()\n assert not net_wrapper.delete_v1_pool.called\n" }, { "alpha_fraction": 0.7019801735877991, "alphanum_fraction": 0.7112210988998413, "avg_line_length": 38.350650787353516, "blob_id": "5ba62548750451977d47ba5dc6dd02b601272f15", "content_id": "b9b155883c8929babb0ab004d9014bf42b922540", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3030, "license_type": "permissive", "max_line_length": 78, "num_lines": 77, "path": "/rally_openstack/task/scenarios/gnocchi/archive_policy.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils\n\n\"\"\"Scenarios for Gnocchi archive policy.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"GnocchiArchivePolicy.list_archive_policy\")\nclass ListArchivePolicy(gnocchiutils.GnocchiBase):\n\n def run(self):\n \"\"\"List archive policies.\"\"\"\n self.gnocchi.list_archive_policy()\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\n context={\"admin_cleanup@openstack\": [\"gnocchi.archive_policy\"]},\n name=\"GnocchiArchivePolicy.create_archive_policy\")\nclass CreateArchivePolicy(gnocchiutils.GnocchiBase):\n\n def run(self, definition=None, aggregation_methods=None):\n \"\"\"Create archive policy.\n\n :param definition: List of definitions\n :param aggregation_methods: List of aggregation methods\n \"\"\"\n if definition is None:\n definition = [{\"granularity\": \"0:00:01\", \"timespan\": \"1:00:00\"}]\n\n name = self.generate_random_name()\n self.admin_gnocchi.create_archive_policy(\n name, definition=definition,\n aggregation_methods=aggregation_methods)\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\n context={\"admin_cleanup@openstack\": [\"gnocchi.archive_policy\"]},\n name=\"GnocchiArchivePolicy.create_delete_archive_policy\")\nclass CreateDeleteArchivePolicy(gnocchiutils.GnocchiBase):\n\n def run(self, definition=None, aggregation_methods=None):\n \"\"\"Create archive 
policy and then delete it.\n\n :param definition: List of definitions\n :param aggregation_methods: List of aggregation methods\n \"\"\"\n if definition is None:\n definition = [{\"granularity\": \"0:00:01\", \"timespan\": \"1:00:00\"}]\n\n name = self.generate_random_name()\n self.admin_gnocchi.create_archive_policy(\n name, definition=definition,\n aggregation_methods=aggregation_methods)\n self.admin_gnocchi.delete_archive_policy(name)\n" }, { "alpha_fraction": 0.6624277234077454, "alphanum_fraction": 0.6670520305633545, "avg_line_length": 31.037036895751953, "blob_id": "7b358d72f30153a41497d05e75c453d5858ac958", "content_id": "723a0f4c6ae84bbf7b3d616f975525851c78f8cb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1730, "license_type": "permissive", "max_line_length": 78, "num_lines": 54, "path": "/rally_openstack/task/contexts/cleanup/base.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.cleanup import manager\n\n\[email protected](\"check_cleanup_resources\")\nclass CheckCleanupResourcesValidator(validation.Validator):\n\n def __init__(self, admin_required):\n \"\"\"Validates that openstack resource managers exist\n\n :param admin_required: describes access level to resource\n \"\"\"\n super(CheckCleanupResourcesValidator, self).__init__()\n self.admin_required = admin_required\n\n def validate(self, context, config, plugin_cls, plugin_cfg):\n missing = set(plugin_cfg)\n missing -= manager.list_resource_names(\n admin_required=self.admin_required)\n missing = \", \".join(missing)\n if missing:\n return self.fail(\n \"Couldn't find cleanup resource managers: %s\" % missing)\n\n\nclass CleanupMixin(object):\n\n CONFIG_SCHEMA = {\n \"type\": \"array\",\n \"$schema\": consts.JSON_SCHEMA,\n \"items\": {\n \"type\": \"string\",\n }\n }\n\n def setup(self):\n pass\n" }, { "alpha_fraction": 0.620506763458252, "alphanum_fraction": 0.6252209544181824, "avg_line_length": 35.10638427734375, "blob_id": "1b82a1454cca52e8a3aa1b1a204ee16791e8bf3e", "content_id": "4e78ddf99046484b49d64ff45d51af463edeee17", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1697, "license_type": "permissive", "max_line_length": 79, "num_lines": 47, "path": "/tests/unit/task/scenarios/loadbalancer/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018: Red Hat Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.octavia import utils\nfrom tests.unit import test\n\n\nclass LoadBalancerBaseTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(LoadBalancerBaseTestCase, self).setUp()\n self.context = super(LoadBalancerBaseTestCase, self).get_test_context()\n self.context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake_tenant_id\",\n \"name\": \"fake_tenant_name\"}\n })\n patch = mock.patch(\n \"rally_openstack.common.services.loadbalancer.octavia.Octavia\")\n self.addCleanup(patch.stop)\n self.mock_service = patch.start()\n\n def test_octavia_base(self):\n base = utils.OctaviaBase(self.context)\n self.assertEqual(base.octavia,\n self.mock_service.return_value)\n" }, { "alpha_fraction": 0.5072318911552429, "alphanum_fraction": 0.5117207169532776, "avg_line_length": 32.2320442199707, "blob_id": "3a26ed12a2717d99fac4551e3a9b52929e092bbd", "content_id": "ef012d2b4dc56710322f8a12b6dbd695732a3c13", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6015, "license_type": "permissive", "max_line_length": 76, "num_lines": 181, "path": "/tests/unit/task/contexts/designate/test_zones.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this 
file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport copy\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.designate import zones\nfrom rally_openstack.task.scenarios.designate import utils\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts\"\nSCN = \"rally_openstack.task.scenarios\"\n\n\nclass ZoneGeneratorTestCase(test.ScenarioTestCase):\n\n def _gen_tenants(self, count):\n tenants = {}\n for id_ in range(count):\n tenants[str(id_)] = {\"name\": str(id_)}\n return tenants\n\n def test_init(self):\n self.context.update({\n \"config\": {\n \"zones\": {\n \"zones_per_tenant\": 5,\n }\n }\n })\n\n inst = zones.ZoneGenerator(self.context)\n self.assertEqual(inst.config, self.context[\"config\"][\"zones\"])\n\n @mock.patch(\"%s.designate.utils.DesignateScenario._create_zone\" % SCN,\n return_value={\"id\": \"uuid\"})\n def test_setup(self, mock_designate_scenario__create_zone):\n tenants_count = 2\n users_per_tenant = 5\n zones_per_tenant = 5\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for id_ in tenants.keys():\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": id_,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": 2,\n \"users_per_tenant\": 5,\n \"concurrent\": 10,\n },\n \"zones\": {\n \"zones_per_tenant\": zones_per_tenant,\n \"set_zone_in_network\": False\n }\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n new_context = copy.deepcopy(self.context)\n 
for id_ in tenants.keys():\n new_context[\"tenants\"][id_].setdefault(\"zones\", [])\n for i in range(zones_per_tenant):\n new_context[\"tenants\"][id_][\"zones\"].append({\"id\": \"uuid\"})\n\n zones_ctx = zones.ZoneGenerator(self.context)\n zones_ctx.setup()\n self.assertEqual(new_context, self.context)\n\n @mock.patch(\"%s.neutron.utils.NeutronScenario\" % SCN)\n @mock.patch(\"%s.designate.utils.DesignateScenario._create_zone\" % SCN,\n return_value={\"id\": \"uuid\", \"name\": \"fake_name\"})\n def test_setup_for_existinge(self, mock_designate_scenario__create_zone,\n mock_neutron_scenario):\n tenants_count = 1\n users_per_tenant = 1\n\n networks = []\n tenants = self._gen_tenants(tenants_count)\n users = []\n for id_ in tenants.keys():\n networks.append(\n {\"id\": f\"foo_net_{id_}\",\n \"tenant_id\": id_, \"subnets\": [\"foo_subnet\"]})\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": id_,\n \"credential\": mock.MagicMock()})\n tenants[\"0\"][\"networks\"] = networks\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": 1,\n \"users_per_tenant\": 1,\n \"concurrent\": 1,\n },\n \"zones\": {\n \"set_zone_in_network\": True\n },\n \"network\": {}\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n zones_ctx = zones.ZoneGenerator(self.context)\n zones_ctx.setup()\n\n mock_neutron_scenario.assert_called_once()\n scenario = mock_neutron_scenario.return_value\n scenario.clients.assert_called_with(\"neutron\")\n neutron = scenario.clients.return_value\n neutron.update_network.assert_called_with(\n \"foo_net_0\", {\"network\": {\"dns_domain\": \"fake_name\"}})\n\n @mock.patch(\"%s.designate.zones.resource_manager.cleanup\" % CTX)\n def test_cleanup(self, mock_cleanup):\n\n tenants_count = 2\n users_per_tenant = 5\n zones_per_tenant = 5\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for id_ in tenants.keys():\n for i in range(users_per_tenant):\n 
users.append({\"id\": i, \"tenant_id\": id_,\n \"endpoint\": \"endpoint\"})\n tenants[id_].setdefault(\"zones\", [])\n for j in range(zones_per_tenant):\n tenants[id_][\"zones\"].append({\"id\": \"uuid\"})\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": 2,\n \"users_per_tenant\": 5,\n \"concurrent\": 10,\n },\n \"zones\": {\n \"zones_per_tenant\": 5,\n }\n },\n \"admin\": {\n \"endpoint\": mock.MagicMock()\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n zones_ctx = zones.ZoneGenerator(self.context)\n zones_ctx.cleanup()\n\n mock_cleanup.assert_called_once_with(\n names=[\"designate.zones\"],\n users=self.context[\"users\"],\n superclass=utils.DesignateScenario,\n task_id=self.context[\"owner_id\"])\n" }, { "alpha_fraction": 0.6009138822555542, "alphanum_fraction": 0.6023619771003723, "avg_line_length": 39.675392150878906, "blob_id": "23845679580cc5b549641218162cdb8093a0f35d", "content_id": "1c1dcf831ee18a517b9931d0c86f1bc7c16c1b90", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31076, "license_type": "permissive", "max_line_length": 79, "num_lines": 764, "path": "/rally_openstack/common/services/storage/cinder_common.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import utils as bench_utils\n\nfrom rally_openstack.common.services.image import image\nfrom rally_openstack.common.services.storage import block\n\n\nCONF = block.CONF\n\n\nclass CinderMixin(object):\n\n def _get_client(self):\n return self._clients.cinder(self.version)\n\n def _update_resource(self, resource):\n try:\n manager = getattr(resource, \"manager\", None)\n if manager:\n res = manager.get(resource.id)\n else:\n if isinstance(resource, block.Volume):\n attr = \"volumes\"\n elif isinstance(resource, block.VolumeSnapshot):\n attr = \"volume_snapshots\"\n elif isinstance(resource, block.VolumeBackup):\n attr = \"backups\"\n res = getattr(self._get_client(), attr).get(resource.id)\n except Exception as e:\n if getattr(e, \"code\", getattr(e, \"http_status\", 400)) == 404:\n raise exceptions.GetResourceNotFound(resource=resource)\n raise exceptions.GetResourceFailure(resource=resource, err=e)\n return res\n\n def _wait_available_volume(self, volume):\n return bench_utils.wait_for_status(\n volume,\n ready_statuses=[\"available\"],\n update_resource=self._update_resource,\n timeout=CONF.openstack.cinder_volume_create_timeout,\n check_interval=CONF.openstack.cinder_volume_create_poll_interval\n )\n\n def get_volume(self, volume_id):\n \"\"\"Get target volume information.\"\"\"\n aname = \"cinder_v%s.get_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volumes.get(volume_id)\n\n def delete_volume(self, volume):\n \"\"\"Delete target volume.\"\"\"\n aname = \"cinder_v%s.delete_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volumes.delete(volume)\n bench_utils.wait_for_status(\n volume,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self._update_resource,\n 
timeout=CONF.openstack.cinder_volume_delete_timeout,\n check_interval=(CONF.openstack\n .cinder_volume_delete_poll_interval)\n )\n\n def extend_volume(self, volume, new_size):\n \"\"\"Extend the size of the specified volume.\"\"\"\n if isinstance(new_size, dict):\n new_size = random.randint(new_size[\"min\"], new_size[\"max\"])\n\n aname = \"cinder_v%s.extend_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volumes.extend(volume, new_size)\n return self._wait_available_volume(volume)\n\n def list_snapshots(self, detailed=True):\n \"\"\"Get a list of all snapshots.\"\"\"\n aname = \"cinder_v%s.list_snapshots\" % self.version\n with atomic.ActionTimer(self, aname):\n return (self._get_client()\n .volume_snapshots.list(detailed))\n\n def set_metadata(self, volume, sets=10, set_size=3):\n \"\"\"Set volume metadata.\n\n :param volume: The volume to set metadata on\n :param sets: how many operations to perform\n :param set_size: number of metadata keys to set in each operation\n :returns: A list of keys that were set\n \"\"\"\n key = \"cinder_v%s.set_%s_metadatas_%s_times\" % (self.version,\n set_size,\n sets)\n with atomic.ActionTimer(self, key):\n keys = []\n for i in range(sets):\n metadata = {}\n for j in range(set_size):\n key = self.generate_random_name()\n keys.append(key)\n metadata[key] = self.generate_random_name()\n\n self._get_client().volumes.set_metadata(volume, metadata)\n return keys\n\n def delete_metadata(self, volume, keys, deletes=10, delete_size=3):\n \"\"\"Delete volume metadata keys.\n\n Note that ``len(keys)`` must be greater than or equal to\n ``deletes * delete_size``.\n\n :param volume: The volume to delete metadata from\n :param deletes: how many operations to perform\n :param delete_size: number of metadata keys to delete in each operation\n :param keys: a list of keys to choose deletion candidates from\n \"\"\"\n if len(keys) < deletes * delete_size:\n raise exceptions.InvalidArgumentsException(\n \"Not 
enough metadata keys to delete: \"\n \"%(num_keys)s keys, but asked to delete %(num_deletes)s\" %\n {\"num_keys\": len(keys),\n \"num_deletes\": deletes * delete_size})\n # make a shallow copy of the list of keys so that, when we pop\n # from it later, we don't modify the original list.\n keys = list(keys)\n random.shuffle(keys)\n action_name = (\"cinder_v%s.delete_%s_metadatas_%s_times\"\n % (self.version, delete_size, deletes))\n with atomic.ActionTimer(self, action_name):\n for i in range(deletes):\n to_del = keys[i * delete_size:(i + 1) * delete_size]\n self._get_client().volumes.delete_metadata(volume, to_del)\n\n def update_readonly_flag(self, volume, read_only):\n \"\"\"Update the read-only access mode flag of the specified volume.\n\n :param volume: The UUID of the volume to update.\n :param read_only: The value to indicate whether to update volume to\n read-only access mode.\n :returns: A tuple of http Response and body\n \"\"\"\n aname = \"cinder_v%s.update_readonly_flag\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volumes.update_readonly_flag(\n volume, read_only)\n\n def upload_volume_to_image(self, volume, force=False,\n container_format=\"bare\", disk_format=\"raw\"):\n \"\"\"Upload the given volume to image.\n\n Returns created image.\n\n :param volume: volume object\n :param force: flag to indicate whether to snapshot a volume even if\n it's attached to an instance\n :param container_format: container format of image. Acceptable\n formats: ami, ari, aki, bare, and ovf\n :param disk_format: disk format of image. 
Acceptable formats:\n ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso\n :returns: Returns created image object\n \"\"\"\n aname = \"cinder_v%s.upload_volume_to_image\" % self.version\n with atomic.ActionTimer(self, aname):\n resp, img = self._get_client().volumes.upload_to_image(\n volume, force, self.generate_random_name(), container_format,\n disk_format)\n # NOTE (e0ne): upload_to_image changes volume status to uploading\n # so we need to wait until it will be available.\n volume = self._wait_available_volume(volume)\n\n image_id = img[\"os-volume_upload_image\"][\"image_id\"]\n glance = image.Image(self._clients)\n\n image_inst = glance.get_image(image_id)\n image_inst = bench_utils.wait_for_status(\n image_inst,\n ready_statuses=[\"active\"],\n update_resource=glance.get_image,\n timeout=CONF.openstack.glance_image_create_timeout,\n check_interval=(CONF.openstack\n .glance_image_create_poll_interval)\n )\n\n return image_inst\n\n def create_qos(self, specs):\n \"\"\"Create a qos specs.\n\n :param specs: A dict of key/value pairs to be set\n :rtype: :class:'QoSSpecs'\n \"\"\"\n aname = \"cinder_v%s.create_qos\" % self.version\n name = self.generate_random_name()\n\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.create(name, specs)\n\n def list_qos(self, search_opts=None):\n \"\"\"Get a list of all qos specs.\n\n :param search_opts: search options\n :rtype: list of :class: 'QoSpecs'\n \"\"\"\n aname = \"cinder_v%s.list_qos\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.list(search_opts)\n\n def get_qos(self, qos_id):\n \"\"\"Get a specific qos specs.\n\n :param qos_id: The ID of the :class: 'QoSSpecs' to get\n :rtype: :class: 'QoSSpecs'\n \"\"\"\n aname = \"cinder_v%s.get_qos\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.get(qos_id)\n\n def set_qos(self, qos_id, set_specs_args):\n \"\"\"Add/Update keys in qos specs.\n\n :param qos_id: 
The ID of the :class:`QoSSpecs` to get\n :param set_specs_args: A dict of key/value pairs to be set\n :rtype: class 'cinderclient.apiclient.base.DictWithMeta'\n {\"qos_specs\": set_specs_args}\n \"\"\"\n aname = \"cinder_v%s.set_qos\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.set_keys(qos_id,\n set_specs_args)\n\n def qos_associate_type(self, qos_specs, vol_type_id):\n \"\"\"Associate qos specs from volume type.\n\n :param qos_specs: The qos specs to be associated with\n :param vol_type_id: The volume type id to be associated with\n :returns: base on client response return True if the request\n has been accepted or not\n \"\"\"\n aname = \"cinder_v%s.qos_associate_type\" % self.version\n with atomic.ActionTimer(self, aname):\n tuple_res = self._get_client().qos_specs.associate(qos_specs,\n vol_type_id)\n return (tuple_res[0].status_code == 202)\n\n def qos_disassociate_type(self, qos_specs, vol_type_id):\n \"\"\"Disassociate qos specs from volume type.\n\n :param qos_specs: The qos specs to be disassociated with\n :param vol_type_id: The volume type id to be disassociated with\n :returns: base on client response return True if the request\n has been accepted or not\n \"\"\"\n aname = \"cinder_v%s.qos_disassociate_type\" % self.version\n with atomic.ActionTimer(self, aname):\n tuple_res = self._get_client().qos_specs.disassociate(qos_specs,\n vol_type_id)\n return (tuple_res[0].status_code == 202)\n\n def delete_snapshot(self, snapshot):\n \"\"\"Delete the given snapshot.\n\n Returns when the snapshot is actually deleted.\n\n :param snapshot: snapshot object\n \"\"\"\n aname = \"cinder_v%s.delete_snapshot\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volume_snapshots.delete(snapshot)\n bench_utils.wait_for_status(\n snapshot,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self._update_resource,\n timeout=CONF.openstack.cinder_volume_delete_timeout,\n 
check_interval=(CONF.openstack\n .cinder_volume_delete_poll_interval)\n )\n\n def delete_backup(self, backup):\n \"\"\"Delete the given backup.\n\n Returns when the backup is actually deleted.\n\n :param backup: backup instance\n \"\"\"\n aname = \"cinder_v%s.delete_backup\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().backups.delete(backup)\n bench_utils.wait_for_status(\n backup,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self._update_resource,\n timeout=CONF.openstack.cinder_volume_delete_timeout,\n check_interval=(CONF.openstack\n .cinder_volume_delete_poll_interval)\n )\n\n def restore_backup(self, backup_id, volume_id=None):\n \"\"\"Restore the given backup.\n\n :param backup_id: The ID of the backup to restore.\n :param volume_id: The ID of the volume to restore the backup to.\n \"\"\"\n aname = \"cinder_v%s.restore_backup\" % self.version\n with atomic.ActionTimer(self, aname):\n restore = self._get_client().restores.restore(backup_id, volume_id)\n restored_volume = self._get_client().volumes.get(restore.volume_id)\n return self._wait_available_volume(restored_volume)\n\n def list_backups(self, detailed=True):\n \"\"\"Return user volume backups list.\n\n :param detailed: True if detailed information about backup\n should be listed\n \"\"\"\n aname = \"cinder_v%s.list_backups\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().backups.list(detailed)\n\n def list_transfers(self, detailed=True, search_opts=None):\n \"\"\"Get a list of all volume transfers.\n\n :param detailed: If True, detailed information about transfer\n should be listed\n :param search_opts: Search options to filter out volume transfers\n :returns: list of :class:`VolumeTransfer`\n \"\"\"\n aname = \"cinder_v%s.list_transfers\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().transfers.list(detailed, search_opts)\n\n def get_volume_type(self, volume_type):\n 
\"\"\"get details of volume_type.\n\n :param volume_type: The ID of the :class:`VolumeType` to get\n :returns: :class:`VolumeType`\n \"\"\"\n aname = \"cinder_v%s.get_volume_type\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volume_types.get(volume_type)\n\n def delete_volume_type(self, volume_type):\n \"\"\"delete a volume type.\n\n :param volume_type: Name or Id of the volume type\n :returns: base on client response return True if the request\n has been accepted or not\n \"\"\"\n aname = \"cinder_v%s.delete_volume_type\" % self.version\n with atomic.ActionTimer(self, aname):\n tuple_res = self._get_client().volume_types.delete(\n volume_type)\n return tuple_res[0].status_code == 202\n\n def set_volume_type_keys(self, volume_type, metadata):\n \"\"\"Set extra specs on a volume type.\n\n :param volume_type: The :class:`VolumeType` to set extra spec on\n :param metadata: A dict of key/value pairs to be set\n :returns: extra_specs if the request has been accepted\n \"\"\"\n aname = \"cinder_v%s.set_volume_type_keys\" % self.version\n with atomic.ActionTimer(self, aname):\n return volume_type.set_keys(metadata)\n\n def transfer_create(self, volume_id, name=None):\n \"\"\"Create a volume transfer.\n\n :param name: The name of created transfer\n :param volume_id: The ID of the volume to transfer\n :rtype: VolumeTransfer\n \"\"\"\n name = name or self.generate_random_name()\n aname = \"cinder_v%s.transfer_create\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().transfers.create(volume_id, name=name)\n\n def transfer_accept(self, transfer_id, auth_key):\n \"\"\"Accept a volume transfer.\n\n :param transfer_id: The ID of the transfer to accept.\n :param auth_key: The auth_key of the transfer.\n :rtype: VolumeTransfer\n \"\"\"\n aname = \"cinder_v%s.transfer_accept\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().transfers.accept(transfer_id, auth_key)\n\n def 
create_encryption_type(self, volume_type, specs):\n \"\"\"Create encryption type for a volume type. Default: admin only.\n\n :param volume_type: the volume type on which to add an encryption type\n :param specs: the encryption type specifications to add\n :return: an instance of :class: VolumeEncryptionType\n \"\"\"\n aname = \"cinder_v%s.create_encryption_type\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volume_encryption_types.create(\n volume_type, specs)\n\n def get_encryption_type(self, volume_type):\n \"\"\"Get the volume encryption type for the specified volume type.\n\n :param volume_type: the volume type to query\n :return: an instance of :class: VolumeEncryptionType\n \"\"\"\n aname = \"cinder_v%s.get_encryption_type\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volume_encryption_types.get(\n volume_type)\n\n def list_encryption_type(self, search_opts=None):\n \"\"\"List all volume encryption types.\n\n :param search_opts: Options used when search for encryption types\n :return: a list of :class: VolumeEncryptionType instances\n \"\"\"\n aname = \"cinder_v%s.list_encryption_type\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volume_encryption_types.list(\n search_opts)\n\n def delete_encryption_type(self, volume_type):\n \"\"\"Delete the encryption type information for the specified volume type\n\n :param volume_type: the volume type whose encryption type information\n must be deleted\n \"\"\"\n aname = \"cinder_v%s.delete_encryption_type\" % self.version\n with atomic.ActionTimer(self, aname):\n resp = self._get_client().volume_encryption_types.delete(\n volume_type)\n if (resp[0].status_code != 202):\n raise exceptions.RallyException(\n \"EncryptionType Deletion Failed\")\n\n def update_encryption_type(self, volume_type, specs):\n \"\"\"Update the encryption type information for the specified volume type\n\n :param volume_type: the volume 
type whose encryption type information\n must be updated\n :param specs: the encryption type specifications to update\n :return: an instance of :class: VolumeEncryptionType\n \"\"\"\n aname = \"cinder_v%s.update_encryption_type\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volume_encryption_types.update(\n volume_type, specs)\n\n\nclass UnifiedCinderMixin(object):\n\n @staticmethod\n def _unify_backup(backup):\n return block.VolumeBackup(id=backup.id, name=backup.name,\n volume_id=backup.volume_id,\n status=backup.status)\n\n @staticmethod\n def _unify_transfer(transfer):\n return block.VolumeTransfer(\n id=transfer.id,\n name=transfer.name,\n volume_id=transfer.volume_id,\n # NOTE(andreykurilin): we need to access private field to avoid\n # calling extra GET request when the object is not fully\n # loaded.\n auth_key=transfer._info.get(\"auth_key\"))\n\n @staticmethod\n def _unify_qos(qos):\n return block.QoSSpecs(id=qos.id, name=qos.name, specs=qos.specs)\n\n @staticmethod\n def _unify_encryption_type(encryption_type):\n return block.VolumeEncryptionType(\n id=encryption_type.encryption_id,\n volume_type_id=encryption_type.volume_type_id)\n\n def delete_volume(self, volume):\n \"\"\"Delete a volume.\"\"\"\n self._impl.delete_volume(volume)\n\n def set_metadata(self, volume, sets=10, set_size=3):\n \"\"\"Update/Set a volume metadata.\n\n :param volume: The updated/setted volume.\n :param sets: how many operations to perform\n :param set_size: number of metadata keys to set in each operation\n :returns: A list of keys that were set\n \"\"\"\n return self._impl.set_metadata(volume, sets=sets, set_size=set_size)\n\n def delete_metadata(self, volume, keys, deletes=10, delete_size=3):\n \"\"\"Delete volume metadata keys.\n\n Note that ``len(keys)`` must be greater than or equal to\n ``deletes * delete_size``.\n\n :param volume: The volume to delete metadata from\n :param deletes: how many operations to perform\n :param 
delete_size: number of metadata keys to delete in each operation\n :param keys: a list of keys to choose deletion candidates from\n \"\"\"\n self._impl.delete_metadata(volume, keys=keys, deletes=10,\n delete_size=3)\n\n def update_readonly_flag(self, volume, read_only):\n \"\"\"Update the read-only access mode flag of the specified volume.\n\n :param volume: The UUID of the volume to update.\n :param read_only: The value to indicate whether to update volume to\n read-only access mode.\n :returns: A tuple of http Response and body\n \"\"\"\n return self._impl.update_readonly_flag(volume, read_only=read_only)\n\n def upload_volume_to_image(self, volume, force=False,\n container_format=\"bare\", disk_format=\"raw\"):\n \"\"\"Upload the given volume to image.\n\n Returns created image.\n\n :param volume: volume object\n :param force: flag to indicate whether to snapshot a volume even if\n it's attached to an instance\n :param container_format: container format of image. Acceptable\n formats: ami, ari, aki, bare, and ovf\n :param disk_format: disk format of image. 
Acceptable formats:\n ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso\n :returns: Returns created image object\n \"\"\"\n return self._impl.upload_volume_to_image(\n volume, force=force, container_format=container_format,\n disk_format=disk_format)\n\n def create_qos(self, specs):\n \"\"\"Create a qos specs.\n\n :param specs: A dict of key/value pairs to be set\n :rtype: :class:'QoSSpecs'\n \"\"\"\n return self._unify_qos(self._impl.create_qos(specs))\n\n def list_qos(self, search_opts=None):\n \"\"\"Get a list of all qos specs.\n\n :param search_opts: search options\n :rtype: list of :class: 'QoSpecs'\n \"\"\"\n return [self._unify_qos(qos)\n for qos in self._impl.list_qos(search_opts)]\n\n def get_qos(self, qos_id):\n \"\"\"Get a specific qos specs.\n\n :param qos_id: The ID of the :class: 'QoSSpecs' to get\n :rtype: :class: 'QoSSpecs'\n \"\"\"\n return self._unify_qos(self._impl.get_qos(qos_id))\n\n def set_qos(self, qos, set_specs_args):\n \"\"\"Add/Update keys in qos specs.\n\n :param qos: The instance of the :class:`QoSSpecs` to set\n :param set_specs_args: A dict of key/value pairs to be set\n :rtype: :class: 'QoSSpecs'\n \"\"\"\n self._impl.set_qos(qos.id, set_specs_args)\n return self._unify_qos(qos)\n\n def qos_associate_type(self, qos_specs, vol_type_id):\n \"\"\"Associate qos specs from volume type.\n\n :param qos_specs: The qos specs to be associated with\n :param vol_type_id: The volume type id to be associated with\n \"\"\"\n self._impl.qos_associate_type(qos_specs, vol_type_id)\n return self._unify_qos(qos_specs)\n\n def qos_disassociate_type(self, qos_specs, vol_type_id):\n \"\"\"Disassociate qos specs from volume type.\n\n :param qos_specs: The qos specs to be disassociated with\n :param vol_type_id: The volume type id to be disassociated with\n \"\"\"\n self._impl.qos_disassociate_type(qos_specs, vol_type_id)\n return self._unify_qos(qos_specs)\n\n def delete_snapshot(self, snapshot):\n \"\"\"Delete the given backup.\n\n Returns when the backup 
is actually deleted.\n\n :param backup: backup instance\n \"\"\"\n self._impl.delete_snapshot(snapshot)\n\n def delete_backup(self, backup):\n \"\"\"Delete a volume backup.\"\"\"\n self._impl.delete_backup(backup)\n\n def list_backups(self, detailed=True):\n \"\"\"Return user volume backups list.\"\"\"\n return [self._unify_backup(backup)\n for backup in self._impl.list_backups(detailed=detailed)]\n\n def list_transfers(self, detailed=True, search_opts=None):\n \"\"\"Get a list of all volume transfers.\n\n :param detailed: If True, detailed information about transfer\n should be listed\n :param search_opts: Search options to filter out volume transfers\n :returns: list of :class:`VolumeTransfer`\n \"\"\"\n return [self._unify_transfer(transfer)\n for transfer in self._impl.list_transfers(\n detailed=detailed, search_opts=search_opts)]\n\n def get_volume_type(self, volume_type):\n \"\"\"get details of volume_type.\n\n :param volume_type: The ID of the :class:`VolumeType` to get\n :returns: :class:`VolumeType`\n \"\"\"\n return self._impl.get_volume_type(volume_type)\n\n def delete_volume_type(self, volume_type):\n \"\"\"delete a volume type.\n\n :param volume_type: Name or Id of the volume type\n :returns: base on client response return True if the request\n has been accepted or not\n \"\"\"\n return self._impl.delete_volume_type(volume_type)\n\n def update_volume_type(self, volume_type, name=None,\n description=None, is_public=None):\n \"\"\"Update the name and/or description for a volume type.\n\n :param volume_type: The ID or an instance of the :class:`VolumeType`\n to update.\n :param name: if None, updates name by generating random name.\n else updates name with provided name\n :param description: Description of the volume type.\n :rtype: :class:`VolumeType`\n \"\"\"\n return self._impl.update_volume_type(\n volume_type=volume_type, name=name, description=description,\n is_public=is_public\n )\n\n def add_type_access(self, volume_type, project):\n \"\"\"Add a 
project to the given volume type access list.\n\n :param volume_type: Volume type name or ID to add access for the given\n project\n :project: Project ID to add volume type access for\n :return: An instance of cinderclient.apiclient.base.TupleWithMeta\n \"\"\"\n return self._impl.add_type_access(\n volume_type=volume_type, project=project\n )\n\n def list_type_access(self, volume_type):\n \"\"\"Print access information about the given volume type\n\n :param volume_type: Filter results by volume type name or ID\n :return: VolumeTypeAccess of specific project\n \"\"\"\n return self._impl.list_type_access(volume_type)\n\n def set_volume_type_keys(self, volume_type, metadata):\n \"\"\"Set extra specs on a volume type.\n\n :param volume_type: The :class:`VolumeType` to set extra spec on\n :param metadata: A dict of key/value pairs to be set\n :returns: extra_specs if the request has been accepted\n \"\"\"\n return self._impl.set_volume_type_keys(volume_type, metadata)\n\n def transfer_create(self, volume_id, name=None):\n \"\"\"Creates a volume transfer.\n\n :param name: The name of created transfer\n :param volume_id: The ID of the volume to transfer.\n :returns: Return the created transfer.\n \"\"\"\n return self._unify_transfer(\n self._impl.transfer_create(volume_id, name=name))\n\n def transfer_accept(self, transfer_id, auth_key):\n \"\"\"Accept a volume transfer.\n\n :param transfer_id: The ID of the transfer to accept.\n :param auth_key: The auth_key of the transfer.\n :returns: VolumeTransfer\n \"\"\"\n return self._unify_transfer(\n self._impl.transfer_accept(transfer_id, auth_key=auth_key))\n\n def create_encryption_type(self, volume_type, specs):\n \"\"\"Create encryption type for a volume type. 
Default: admin only.\n\n :param volume_type: the volume type on which to add an encryption type\n :param specs: the encryption type specifications to add\n :return: an instance of :class: VolumeEncryptionType\n \"\"\"\n return self._unify_encryption_type(\n self._impl.create_encryption_type(volume_type, specs=specs))\n\n def get_encryption_type(self, volume_type):\n \"\"\"Get the volume encryption type for the specified volume type.\n\n :param volume_type: the volume type to query\n :return: an instance of :class: VolumeEncryptionType\n \"\"\"\n return self._unify_encryption_type(\n self._impl.get_encryption_type(volume_type))\n\n def list_encryption_type(self, search_opts=None):\n \"\"\"List all volume encryption types.\n\n :param search_opts: Options used when search for encryption types\n :return: a list of :class: VolumeEncryptionType instances\n \"\"\"\n return [self._unify_encryption_type(encryption_type)\n for encryption_type in self._impl.list_encryption_type(\n search_opts=search_opts)]\n\n def delete_encryption_type(self, volume_type):\n \"\"\"Delete the encryption type information for the specified volume type\n\n :param volume_type: the volume type whose encryption type information\n must be deleted\n \"\"\"\n return self._impl.delete_encryption_type(volume_type)\n\n def update_encryption_type(self, volume_type, specs):\n \"\"\"Update the encryption type information for the specified volume type\n\n :param volume_type: the volume type whose encryption type information\n must be updated\n :param specs: the encryption type specifications to update\n :return: an instance of :class: VolumeEncryptionType\n \"\"\"\n return self._impl.update_encryption_type(volume_type, specs=specs)\n" }, { "alpha_fraction": 0.6027048230171204, "alphanum_fraction": 0.606428861618042, "avg_line_length": 38.55038833618164, "blob_id": "f54ad475587b91e906ffbcda2e01b96c324af3a1", "content_id": "b384d552fcbc4b2bb2c21b08b178b2e34ef6d967", "detected_licenses": [ "Apache-2.0" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5102, "license_type": "permissive", "max_line_length": 78, "num_lines": 129, "path": "/tests/unit/task/hooks/test_fault_injection.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\nimport os_faults\nfrom os_faults.api import error\n\nfrom rally import consts\nfrom rally.task import hook\n\nfrom rally_openstack.task.hooks import fault_injection\nfrom tests.unit import fakes\nfrom tests.unit import test\n\n\[email protected]\nclass FaultInjectionHookTestCase(test.TestCase):\n\n def setUp(self):\n super(FaultInjectionHookTestCase, self).setUp()\n self.task = {\"deployment_uuid\": \"foo_uuid\"}\n\n @ddt.data((dict(action=\"foo\"), True),\n (dict(action=\"foo\", verify=True), True),\n (dict(action=10), False),\n (dict(action=\"foo\", verify=10), False),\n (dict(), False))\n @ddt.unpack\n def test_config_schema(self, config, valid):\n results = hook.HookAction.validate(\"fault_injection\", None, None,\n config)\n if valid:\n self.assertEqual([], results)\n else:\n self.assertEqual(1, len(results))\n\n @mock.patch(\"rally.common.objects.Deployment.get\")\n @mock.patch(\"rally.common.utils.Timer\", side_effect=fakes.FakeTimer)\n def test_run(self, mock_timer, mock_deployment_get):\n mock_deployment_get.return_value = 
{\"config\": {}}\n hook = fault_injection.FaultInjectionHook(\n self.task, {\"action\": \"foo\", \"verify\": True},\n {\"iteration\": 1})\n\n with mock.patch.object(os_faults, \"human_api\") as mock_human_api:\n with mock.patch.object(os_faults, \"connect\") as mock_connect:\n hook.run_sync()\n\n injector_inst = mock_connect.return_value\n\n mock_connect.assert_called_once_with(None)\n mock_human_api.assert_called_once_with(injector_inst, \"foo\")\n\n self.assertEqual(\n {\"finished_at\": fakes.FakeTimer().finish_timestamp(),\n \"started_at\": fakes.FakeTimer().timestamp(),\n \"status\": consts.HookStatus.SUCCESS,\n \"triggered_by\": {\"iteration\": 1}},\n hook.result())\n injector_inst.verify.assert_called_once_with()\n\n @mock.patch(\"rally.common.objects.Deployment.get\")\n @mock.patch(\"rally.common.utils.Timer\", side_effect=fakes.FakeTimer)\n def test_run_extra_config(self, mock_timer, mock_deployment_get):\n mock_deployment_get.return_value = {\n \"config\": {\"type\": \"ExistingCloud\",\n \"extra\": {\"cloud_config\": {\"conf\": \"foo_config\"}}}}\n hook = fault_injection.FaultInjectionHook(\n self.task, {\"action\": \"foo\"}, {\"iteration\": 1})\n\n with mock.patch.object(os_faults, \"human_api\") as mock_human_api:\n with mock.patch.object(os_faults, \"connect\") as mock_connect:\n hook.run_sync()\n\n injector_inst = mock_connect.return_value\n\n mock_connect.assert_called_once_with({\"conf\": \"foo_config\"})\n mock_human_api.assert_called_once_with(injector_inst, \"foo\")\n\n self.assertEqual(\n {\"finished_at\": fakes.FakeTimer().finish_timestamp(),\n \"started_at\": fakes.FakeTimer().timestamp(),\n \"status\": consts.HookStatus.SUCCESS,\n \"triggered_by\": {\"iteration\": 1}},\n hook.result())\n\n @mock.patch(\"rally.common.objects.Deployment.get\")\n @mock.patch(\"os_faults.human_api\")\n @mock.patch(\"os_faults.connect\")\n @mock.patch(\"rally.common.utils.Timer\", side_effect=fakes.FakeTimer)\n def test_run_error(self, mock_timer, mock_connect, 
mock_human_api,\n mock_deployment_get):\n mock_deployment_get.return_value = {\"config\": {}}\n injector_inst = mock_connect.return_value\n mock_human_api.side_effect = error.OSFException(\"foo error\")\n hook = fault_injection.FaultInjectionHook(\n self.task, {\"action\": \"foo\", \"verify\": True},\n {\"iteration\": 1})\n\n hook.run_sync()\n\n self.assertEqual(\n {\"finished_at\": fakes.FakeTimer().finish_timestamp(),\n \"started_at\": fakes.FakeTimer().timestamp(),\n \"status\": consts.HookStatus.FAILED,\n \"error\": {\n \"details\": mock.ANY,\n \"etype\": \"OSFException\",\n \"msg\": \"foo error\"},\n \"triggered_by\": {\"iteration\": 1}},\n hook.result())\n\n mock_connect.assert_called_once_with(None)\n injector_inst.verify.assert_called_once_with()\n mock_human_api.assert_called_once_with(injector_inst, \"foo\")\n" }, { "alpha_fraction": 0.7023529410362244, "alphanum_fraction": 0.7047058939933777, "avg_line_length": 13.655172348022461, "blob_id": "85f9fbbd820d523d79dff2b39a79a827bd8bd12b", "content_id": "26458d2baf41417ef60c3a39cc9ff1ee19ee4254", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 850, "license_type": "permissive", "max_line_length": 117, "num_lines": 58, "path": "/rally_openstack/verification/tempest/config.ini", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "[DEFAULT]\ndebug = True\nuse_stderr = False\nlog_file =\n\n[auth]\nuse_dynamic_credentials = True\n\n[compute]\nimage_ref =\nimage_ref_alt =\nflavor_ref =\nflavor_ref_alt =\nfixed_network_name =\n\n[compute-feature-enabled]\nlive_migration = False\nresize = True\nvnc_console = True\nattach_encrypted_volume = False\n\n[data-processing]\n\n[identity]\n\n[identity-feature-enabled]\n\n[image-feature-enabled]\ndeactivate_image = True\n\n[input-scenario]\nssh_user_regex = [[\"^.*[Cc]irros.*$\", \"cirros\"], [\"^.*[Tt]est[VvMm].*$\", \"cirros\"], [\"^.*rally_verify.*$\", 
\"cirros\"]]\n\n[network]\n\n[network-feature-enabled]\nipv6_subnet_attributes = True\nipv6 = True\n\n[object-storage]\n\n[oslo_concurrency]\nlock_path =\n\n[orchestration]\ninstance_type =\n\n[scenario]\nimg_file =\n\n[service_available]\n\n[validation]\nrun_validation = True\nimage_ssh_user = cirros\n\n[volume-feature-enabled]\nbootable = True\n" }, { "alpha_fraction": 0.7062795162200928, "alphanum_fraction": 0.7116813063621521, "avg_line_length": 36.025001525878906, "blob_id": "c15c3d584f3ddd5defeb56e6ed87f0bde3075c00", "content_id": "37fa9aab0c89fb8b8e54848f81be8d13f3625977", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1481, "license_type": "permissive", "max_line_length": 78, "num_lines": 40, "path": "/rally_openstack/task/contexts/cleanup/user.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport sys\n\nfrom rally.common import validation\n\nfrom rally_openstack.task.cleanup import manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.contexts.cleanup import base\nfrom rally_openstack.task import scenario\n\n\[email protected](name=\"check_cleanup_resources\", admin_required=False)\n# NOTE(amaretskiy): Set maximum order to run this last\[email protected](name=\"cleanup\", platform=\"openstack\", order=sys.maxsize,\n hidden=True)\nclass UserCleanup(base.CleanupMixin, context.OpenStackContext):\n \"\"\"Context class for user resources cleanup.\"\"\"\n\n def cleanup(self):\n manager.cleanup(\n names=self.config,\n admin_required=False,\n users=self.context.get(\"users\", []),\n superclass=scenario.OpenStackScenario,\n task_id=self.get_owner_id()\n )\n" }, { "alpha_fraction": 0.6651009917259216, "alphanum_fraction": 0.6749647855758667, "avg_line_length": 39.94230651855469, "blob_id": "1849772d549a52c5a9c80490865b2f7395b147bf", "content_id": "9c7ad0bc499e90033d051b590221737eae889c08", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2129, "license_type": "permissive", "max_line_length": 75, "num_lines": 52, "path": "/tests/unit/task/scenarios/zaqar/test_basic.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright (c) 2014 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.zaqar import basic\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.scenarios.zaqar.basic\"\n\n\nclass ZaqarBasicTestCase(test.ScenarioTestCase):\n\n @mock.patch(\"%s.CreateQueue.generate_random_name\" % BASE,\n return_value=\"fizbit\")\n def test_create_queue(self, mock_random_name):\n scenario = basic.CreateQueue(self.context)\n scenario._queue_create = mock.MagicMock()\n scenario.run(fakearg=\"fake\")\n scenario._queue_create.assert_called_once_with(fakearg=\"fake\")\n\n @mock.patch(\"%s.CreateQueue.generate_random_name\" % BASE,\n return_value=\"kitkat\")\n def test_producer_consumer(self, mock_random_name):\n scenario = basic.ProducerConsumer(self.context)\n messages = [{\"body\": {\"id\": idx}, \"ttl\": 360} for idx\n in range(20)]\n queue = mock.MagicMock()\n\n scenario._queue_create = mock.MagicMock(return_value=queue)\n scenario._messages_post = mock.MagicMock()\n scenario._messages_list = mock.MagicMock()\n scenario._queue_delete = mock.MagicMock()\n\n scenario.run(min_msg_count=20, max_msg_count=20, fakearg=\"fake\")\n\n scenario._queue_create.assert_called_once_with(fakearg=\"fake\")\n scenario._messages_post.assert_called_once_with(queue, messages,\n 20, 20)\n scenario._messages_list.assert_called_once_with(queue)\n scenario._queue_delete.assert_called_once_with(queue)\n" }, { "alpha_fraction": 0.5604113340377808, "alphanum_fraction": 0.5718365907669067, "avg_line_length": 34.3636360168457, "blob_id": "b72b8b91ed1be4dcb0665f004f53220770991b01", "content_id": "704852536738cb8a1a0997e365b8ad86501bcd44", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3501, "license_type": "permissive", "max_line_length": 78, "num_lines": 99, "path": "/tests/unit/task/contexts/network/test_existing_network.py", 
"repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.network import existing_network\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.network\"\n\n\nclass ExistingNetworkTestCase(test.TestCase):\n\n def setUp(self):\n super(ExistingNetworkTestCase, self).setUp()\n\n self.config = {\"foo\": \"bar\"}\n self.context = test.get_test_context()\n self.context.update({\n \"users\": [\n {\"id\": 1,\n \"tenant_id\": \"tenant1\",\n \"credential\": mock.Mock(tenant_name=\"tenant_1\")},\n {\"id\": 2,\n \"tenant_id\": \"tenant2\",\n \"credential\": mock.Mock(tenant_name=\"tenant_2\")},\n ],\n \"tenants\": {\n \"tenant1\": {},\n \"tenant2\": {},\n },\n \"config\": {\n \"existing_network\": self.config\n },\n })\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_setup(self, mock_clients):\n clients = {\n # key is tenant_name\n \"tenant_1\": mock.MagicMock(),\n \"tenant_2\": mock.MagicMock()\n }\n mock_clients.side_effect = lambda cred: clients[cred.tenant_name]\n\n networks = {\n # key is tenant_id\n \"tenant_1\": [mock.Mock(), mock.Mock()],\n \"tenant_2\": [mock.Mock()]\n }\n subnets = {\n # key is tenant_id\n \"tenant_1\": [mock.Mock()],\n \"tenant_2\": [mock.Mock()]\n }\n neutron1 = clients[\"tenant_1\"].neutron.return_value\n neutron2 = 
clients[\"tenant_2\"].neutron.return_value\n neutron1.list_networks.return_value = {\n \"networks\": networks[\"tenant_1\"]}\n neutron2.list_networks.return_value = {\n \"networks\": networks[\"tenant_2\"]}\n neutron1.list_subnets.return_value = {\"subnets\": subnets[\"tenant_1\"]}\n neutron2.list_subnets.return_value = {\"subnets\": subnets[\"tenant_2\"]}\n\n context = existing_network.ExistingNetwork(self.context)\n context.setup()\n\n mock_clients.assert_has_calls([\n mock.call(u[\"credential\"]) for u in self.context[\"users\"]])\n\n neutron1.list_networks.assert_called_once_with()\n neutron1.list_subnets.assert_called_once_with()\n neutron2.list_networks.assert_called_once_with()\n neutron2.list_subnets.assert_called_once_with()\n\n self.assertEqual(\n self.context[\"tenants\"],\n {\n \"tenant1\": {\"networks\": networks[\"tenant_1\"],\n \"subnets\": subnets[\"tenant_1\"]},\n \"tenant2\": {\"networks\": networks[\"tenant_2\"],\n \"subnets\": subnets[\"tenant_2\"]},\n }\n )\n\n def test_cleanup(self):\n # NOTE(stpierre): Test that cleanup is not abstract\n existing_network.ExistingNetwork({\"task\": mock.MagicMock()}).cleanup()\n" }, { "alpha_fraction": 0.6845778226852417, "alphanum_fraction": 0.6901605129241943, "avg_line_length": 43.78125, "blob_id": "8b714fd513d15700a5c05d5ca8014dd2ef096778", "content_id": "7e6e6e16fbcf43ff53ca615d02174e63cf49ddc6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1433, "license_type": "permissive", "max_line_length": 78, "num_lines": 32, "path": "/rally_openstack/task/scenarios/octavia/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018: Red Hat Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally_openstack.common.services.loadbalancer import octavia\nfrom rally_openstack.task import scenario\n\n\nclass OctaviaBase(scenario.OpenStackScenario):\n \"\"\"Base class for Octavia scenarios with basic atomic actions.\"\"\"\n\n def __init__(self, context=None, admin_clients=None, clients=None):\n super(OctaviaBase, self).__init__(context, admin_clients, clients)\n if hasattr(self, \"_admin_clients\"):\n self.admin_octavia = octavia.Octavia(\n self._admin_clients, name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n if hasattr(self, \"_clients\"):\n self.octavia = octavia.Octavia(\n self._clients, name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n" }, { "alpha_fraction": 0.6978305578231812, "alphanum_fraction": 0.7001549601554871, "avg_line_length": 39.757896423339844, "blob_id": "79bf59f0dc2e936751b811ed37400fd97c3c38e7", "content_id": "2c22836138f0264b59cdb86e5e46aed77e520907", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7744, "license_type": "permissive", "max_line_length": 79, "num_lines": 190, "path": "/rally_openstack/task/scenarios/authenticate/authenticate.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally_openstack.common import consts\n\nfrom rally.task import atomic\nfrom rally.task import validation\n\nfrom rally_openstack.task import scenario\n\n\n\"\"\"Scenarios for Authentication mechanism.\"\"\"\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"Authenticate.keystone\", platform=\"openstack\")\nclass Keystone(scenario.OpenStackScenario):\n\n @atomic.action_timer(\"authenticate.keystone\")\n def run(self):\n \"\"\"Check Keystone Client.\"\"\"\n self.clients(\"keystone\")\n\n\[email protected](\"number\", param_name=\"repetitions\", minval=1)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"Authenticate.validate_glance\", platform=\"openstack\")\nclass ValidateGlance(scenario.OpenStackScenario):\n\n def run(self, repetitions):\n \"\"\"Check Glance Client to ensure validation of token.\n\n Creation of the client does not ensure validation of the token.\n We have to do some minimal operation to make sure token gets validated.\n In following we are checking for non-existent image.\n\n :param repetitions: number of times to validate\n \"\"\"\n glance_client = self.clients(\"glance\")\n image_name = \"__intentionally_non_existent_image___\"\n with atomic.ActionTimer(self, \"authenticate.validate_glance\"):\n for i in range(repetitions):\n list(glance_client.images.list(name=image_name))\n\n\[email protected](\"number\", param_name=\"repetitions\", minval=1)\[email protected](\"required_platform\", platform=\"openstack\", 
users=True)\[email protected](name=\"Authenticate.validate_nova\", platform=\"openstack\")\nclass ValidateNova(scenario.OpenStackScenario):\n\n def run(self, repetitions):\n \"\"\"Check Nova Client to ensure validation of token.\n\n Creation of the client does not ensure validation of the token.\n We have to do some minimal operation to make sure token gets validated.\n\n :param repetitions: number of times to validate\n \"\"\"\n nova_client = self.clients(\"nova\")\n with atomic.ActionTimer(self, \"authenticate.validate_nova\"):\n for i in range(repetitions):\n nova_client.flavors.list()\n\n\[email protected](\"number\", param_name=\"repetitions\", minval=1)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"Authenticate.validate_ceilometer\",\n platform=\"openstack\")\nclass ValidateCeilometer(scenario.OpenStackScenario):\n\n def run(self, repetitions):\n \"\"\"Check Ceilometer Client to ensure validation of token.\n\n Creation of the client does not ensure validation of the token.\n We have to do some minimal operation to make sure token gets validated.\n\n :param repetitions: number of times to validate\n \"\"\"\n ceilometer_client = self.clients(\"ceilometer\")\n with atomic.ActionTimer(self, \"authenticate.validate_ceilometer\"):\n for i in range(repetitions):\n ceilometer_client.meters.list()\n\n\[email protected](\"number\", param_name=\"repetitions\", minval=1)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"Authenticate.validate_cinder\", platform=\"openstack\")\nclass ValidateCinder(scenario.OpenStackScenario):\n\n def run(self, repetitions):\n \"\"\"Check Cinder Client to ensure validation of token.\n\n Creation of the client does not ensure validation of the token.\n We have to do some minimal operation to make sure token gets validated.\n\n :param repetitions: number of times to validate\n \"\"\"\n cinder_client = self.clients(\"cinder\")\n with 
atomic.ActionTimer(self, \"authenticate.validate_cinder\"):\n for i in range(repetitions):\n cinder_client.volume_types.list()\n\n\[email protected](\"number\", param_name=\"repetitions\", minval=1)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"Authenticate.validate_neutron\", platform=\"openstack\")\nclass ValidateNeutron(scenario.OpenStackScenario):\n\n def run(self, repetitions):\n \"\"\"Check Neutron Client to ensure validation of token.\n\n Creation of the client does not ensure validation of the token.\n We have to do some minimal operation to make sure token gets validated.\n\n :param repetitions: number of times to validate\n \"\"\"\n neutron_client = self.clients(\"neutron\")\n with atomic.ActionTimer(self, \"authenticate.validate_neutron\"):\n for i in range(repetitions):\n neutron_client.list_networks()\n\n\[email protected](\"number\", param_name=\"repetitions\", minval=1)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"Authenticate.validate_octavia\", platform=\"openstack\")\nclass ValidateOctavia(scenario.OpenStackScenario):\n\n def run(self, repetitions):\n \"\"\"Check Octavia Client to ensure validation of token.\n\n Creation of the client does not ensure validation of the token.\n We have to do some minimal operation to make sure token gets validated.\n\n :param repetitions: number of times to validate\n \"\"\"\n octavia_client = self.clients(\"octavia\")\n with atomic.ActionTimer(self, \"authenticate.validate_octavia\"):\n for i in range(repetitions):\n octavia_client.load_balancer_list()\n\n\[email protected](\"number\", param_name=\"repetitions\", minval=1)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"Authenticate.validate_heat\", platform=\"openstack\")\nclass ValidateHeat(scenario.OpenStackScenario):\n\n def run(self, repetitions):\n \"\"\"Check Heat Client to ensure validation 
of token.\n\n Creation of the client does not ensure validation of the token.\n We have to do some minimal operation to make sure token gets validated.\n\n :param repetitions: number of times to validate\n \"\"\"\n heat_client = self.clients(\"heat\")\n with atomic.ActionTimer(self, \"authenticate.validate_heat\"):\n for i in range(repetitions):\n list(heat_client.stacks.list(limit=0))\n\n\[email protected](\"number\", param_name=\"repetitions\", minval=1)\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_services\",\n services=[consts.Service.MONASCA])\[email protected](name=\"Authenticate.validate_monasca\", platform=\"openstack\")\nclass ValidateMonasca(scenario.OpenStackScenario):\n\n def run(self, repetitions):\n \"\"\"Check Monasca Client to ensure validation of token.\n\n Creation of the client does not ensure validation of the token.\n We have to do some minimal operation to make sure token gets validated.\n\n :param repetitions: number of times to validate\n \"\"\"\n monasca_client = self.clients(\"monasca\")\n with atomic.ActionTimer(self, \"authenticate.validate_monasca\"):\n for i in range(repetitions):\n list(monasca_client.metrics.list(limit=0))\n" }, { "alpha_fraction": 0.5560019016265869, "alphanum_fraction": 0.5607277750968933, "avg_line_length": 39.69230651855469, "blob_id": "6a4554a62fde444b1686205c735a25efe445c317", "content_id": "b32cee6d24d78550f5640c36d01998b234122058", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8464, "license_type": "permissive", "max_line_length": 79, "num_lines": 208, "path": "/tests/functional/test_cli_env.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport unittest\n\nfrom tests.functional import utils\n\n\nTEST_ENV = {\n \"OS_USERNAME\": \"admin\",\n \"OS_PASSWORD\": \"admin\",\n \"OS_TENANT_NAME\": \"admin\",\n \"OS_AUTH_URL\": \"http://fake/\",\n}\n\nRALLY_OPTS = {\n # speed up failures\n \"DEFAULT\": {\"openstack_client_http_timeout\": 5}\n}\n\n\nclass EnvTestCase(unittest.TestCase):\n def test_check_success(self):\n rally = utils.Rally()\n rally(\"env check\")\n\n def test_check_wrong_url(self):\n rally = utils.Rally(config_opts=RALLY_OPTS)\n fake_spec = copy.deepcopy(rally.env_spec)\n fake_spec[\"existing@openstack\"][\"auth_url\"] = \"http://example.com:5000\"\n spec = utils.JsonTempFile(fake_spec)\n rally(\"env create --name t_create_env --spec %s\" % spec.filename)\n\n try:\n rally(\"env check\")\n except utils.RallyCliError as e:\n output = e.output.split(\"\\n\")\n line_template = \"| :-( | openstack | %s |\"\n err1 = \"Unable to establish connection to http://example.com:5000\"\n err2 = \"Request to http://example.com:5000 timed out\"\n if (line_template % err1 not in output\n and line_template % err2 not in output):\n self.fail(\"The output of `env check` doesn't contain expected\"\n \" error. 
Output:\\n\" % e.output)\n else:\n self.fail(\"Check env command should fail!\")\n\n def test_check_wrong_username(self):\n rally = utils.Rally(config_opts=RALLY_OPTS)\n fake_spec = copy.deepcopy(rally.env_spec)\n fake_spec[\"existing@openstack\"][\"admin\"][\"username\"] = \"MASTER777\"\n spec = utils.JsonTempFile(fake_spec)\n rally(\"env create --name t_create_env --spec %s\" % spec.filename)\n\n try:\n rally(\"env check\")\n except utils.RallyCliError as e:\n line = (\"| :-( | openstack | Failed to authenticate to \"\n \"%s for user '%s' in project '%s': The request you have \"\n \"made requires authentication. |\" %\n (fake_spec[\"existing@openstack\"][\"auth_url\"],\n fake_spec[\"existing@openstack\"][\"admin\"][\"username\"],\n fake_spec[\"existing@openstack\"][\"admin\"][\"project_name\"]))\n self.assertIn(line, e.output.split(\"\\n\"))\n else:\n self.fail(\"Check env command should fail!\")\n\n def test_check_wrong_password(self):\n rally = utils.Rally(config_opts=RALLY_OPTS)\n fake_spec = copy.deepcopy(rally.env_spec)\n fake_spec[\"existing@openstack\"][\"admin\"][\"password\"] = \"MASTER777\"\n spec = utils.JsonTempFile(fake_spec)\n rally(\"env create --name t_create_env --spec %s\" % spec.filename)\n\n try:\n rally(\"env check\")\n except utils.RallyCliError as e:\n line = (\"| :-( | openstack | Failed to authenticate to \"\n \"%s for user '%s' in project '%s': The request you have \"\n \"made requires authentication. 
|\" %\n (fake_spec[\"existing@openstack\"][\"auth_url\"],\n fake_spec[\"existing@openstack\"][\"admin\"][\"username\"],\n fake_spec[\"existing@openstack\"][\"admin\"][\"project_name\"]))\n self.assertIn(line, e.output.split(\"\\n\"))\n else:\n self.fail(\"Check env command should fail!\")\n\n def test_create_from_sysenv(self):\n rally = utils.Rally()\n rally.env.update(TEST_ENV)\n rally(\"env create --name t_create_env --from-sysenv\")\n config = rally(\"env show --only-spec\", getjson=True)\n self.assertIn(\"existing@openstack\", config)\n self.assertEqual(TEST_ENV[\"OS_USERNAME\"],\n config[\"existing@openstack\"][\"admin\"][\"username\"])\n self.assertEqual(TEST_ENV[\"OS_PASSWORD\"],\n config[\"existing@openstack\"][\"admin\"][\"password\"])\n if \"project_name\" in config[\"existing@openstack\"][\"admin\"]:\n # keystone v3\n self.assertEqual(\n TEST_ENV[\"OS_TENANT_NAME\"],\n config[\"existing@openstack\"][\"admin\"][\"project_name\"])\n else:\n # keystone v2\n self.assertEqual(\n TEST_ENV[\"OS_TENANT_NAME\"],\n config[\"existing@openstack\"][\"admin\"][\"tenant_name\"])\n self.assertEqual(\n TEST_ENV[\"OS_AUTH_URL\"],\n config[\"existing@openstack\"][\"auth_url\"])\n\n def test_check_api_info_success(self):\n rally = utils.Rally()\n spec = copy.deepcopy(rally.env_spec)\n spec[\"existing@openstack\"][\"api_info\"] = {\n \"fakedummy\": {\n \"version\": \"2\",\n \"service_type\": \"dummyv2\"\n }\n }\n spec = utils.JsonTempFile(spec)\n rally(\"env create --name t_create_env_with_api_info\"\n \" --spec %s\" % spec.filename)\n plugings = \"tests/functional/extra/fake_dir/fake_plugin.py\"\n rally(\"--plugin-paths %s env check\" % plugings)\n\n def test_check_api_info_fail_1(self):\n rally = utils.Rally()\n spec = copy.deepcopy(rally.env_spec)\n spec[\"existing@openstack\"][\"api_info\"] = {\n \"fakedummy\": {\n \"version\": \"3\",\n \"service_type\": \"dummyv2\"\n }\n }\n spec = utils.JsonTempFile(spec)\n rally(\"env create --name t_create_env_with_api_info\"\n \" 
--spec %s\" % spec.filename)\n try:\n plugings = \"tests/functional/extra/fake_dir/fake_plugin.py\"\n rally(\"--plugin-paths %s env check\" % plugings)\n except utils.RallyCliError as e:\n self.assertIn(\"Invalid setting for 'fakedummy':\", e.output)\n\n def test_check_api_info_fail_2(self):\n rally = utils.Rally()\n spec = copy.deepcopy(rally.env_spec)\n spec[\"existing@openstack\"][\"api_info\"] = {\n \"noneclient\": {\n \"version\": \"1\",\n \"service_type\": \"none\"\n }\n }\n spec = utils.JsonTempFile(spec)\n rally(\"env create --name t_create_env_with_api_info\"\n \" --spec %s\" % spec.filename)\n try:\n plugings = \"tests/functional/extra/fake_dir/fake_plugin.py\"\n rally(\"--plugin-paths %s env check\" % plugings)\n except utils.RallyCliError as e:\n self.assertIn(\n \"Plugin [email protected]_health() method is broken\",\n e.output)\n\n def test_check_api_info_fail_3(self):\n rally = utils.Rally()\n spec = copy.deepcopy(rally.env_spec)\n spec[\"existing@openstack\"][\"api_info\"] = {\n \"faileddummy\": {\n \"version\": \"2\",\n \"service_type\": \"dummy\"\n }\n }\n spec = utils.JsonTempFile(spec)\n rally(\"env create --name t_create_env_with_api_info\"\n \" --spec %s\" % spec.filename)\n try:\n plugings = \"tests/functional/extra/fake_dir/fake_plugin.py\"\n rally(\"--plugin-paths %s env check\" % plugings)\n except utils.RallyCliError as e:\n self.assertIn(\"Can not create 'faileddummy' with 2 version\",\n e.output)\n\n def test_create_env_with_https_cert_https_key(self):\n rally = utils.Rally()\n fake_spec = copy.deepcopy(rally.env_spec)\n fake_spec[\"existing@openstack\"][\"https_cert\"] = \"\"\n fake_spec[\"existing@openstack\"][\"https_key\"] = \"\"\n spec = utils.JsonTempFile(fake_spec)\n rally(\"env create --name t_create_env --spec %s\" % spec.filename)\n config = rally(\"env show --only-spec\", getjson=True)\n self.assertIn(\"https_cert\", config[\"existing@openstack\"].keys())\n self.assertIn(\"https_key\", config[\"existing@openstack\"].keys())\n 
rally(\"env check\")\n rally(\"env info\")\n" }, { "alpha_fraction": 0.6321091055870056, "alphanum_fraction": 0.6337880492210388, "avg_line_length": 33.280574798583984, "blob_id": "27daffb3d4875364df8745b45a7448494dd60d49", "content_id": "be560e7a4ee050bc9cb9711e27a42522a10d1f3b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4765, "license_type": "permissive", "max_line_length": 78, "num_lines": 139, "path": "/rally_openstack/task/scenarios/mistral/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom rally.common import cfg\nfrom rally.task import atomic\nfrom rally.task import utils\nimport yaml\n\nfrom rally_openstack.task import scenario\n\n\nCONF = cfg.CONF\n\n\nclass MistralScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Mistral scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"mistral.list_workbooks\")\n def _list_workbooks(self):\n \"\"\"Gets list of existing workbooks.\n\n :returns: workbook list\n \"\"\"\n return self.clients(\"mistral\").workbooks.list()\n\n @atomic.action_timer(\"mistral.create_workbook\")\n def _create_workbook(self, definition, namespace=\"\"):\n \"\"\"Create a new workbook.\n\n :param definition: workbook description in string\n (yaml string) format\n :param namespace: the namespace where the workbook\n will be created in\n :returns: workbook object\n \"\"\"\n definition = yaml.safe_load(definition)\n definition[\"name\"] = self.generate_random_name()\n definition = yaml.safe_dump(definition)\n\n return self.clients(\"mistral\").workbooks.create(\n definition,\n namespace=namespace\n )\n\n @atomic.action_timer(\"mistral.delete_workbook\")\n def _delete_workbook(self, wb_name, namespace=\"\"):\n \"\"\"Delete the given workbook.\n\n :param wb_name: the name of workbook that would be deleted.\n :param namespace: the namespace of workbook that would be deleted.\n \"\"\"\n self.clients(\"mistral\").workbooks.delete(\n wb_name,\n namespace=namespace\n )\n\n @atomic.action_timer(\"mistral.create_workflow\")\n def _create_workflow(self, definition, namespace=\"\"):\n \"\"\"creates a workflow in the given namespace.\n\n :param definition: the definition of workflow\n :param namespace: the namespace of the workflow\n \"\"\"\n return self.clients(\"mistral\").workflows.create(\n definition,\n namespace=namespace\n )\n\n @atomic.action_timer(\"mistral.delete_workflow\")\n def 
_delete_workflow(self, workflow_identifier, namespace=\"\"):\n \"\"\"Delete the given workflow.\n\n :param workflow_identifier: the identifier of workflow\n :param namespace: the namespace of the workflow\n \"\"\"\n self.clients(\"mistral\").workflows.delete(\n workflow_identifier,\n namespace=namespace\n )\n\n @atomic.action_timer(\"mistral.list_executions\")\n def _list_executions(self, marker=\"\", limit=None, sort_keys=\"\",\n sort_dirs=\"\"):\n \"\"\"Gets list of existing executions.\n\n :returns: execution list\n \"\"\"\n\n return self.clients(\"mistral\").executions.list(\n marker=marker, limit=limit, sort_keys=sort_keys,\n sort_dirs=sort_dirs)\n\n @atomic.action_timer(\"mistral.create_execution\")\n def _create_execution(self, workflow_identifier, wf_input=None,\n namespace=\"\", **params):\n \"\"\"Create a new execution.\n\n :param workflow_identifier: name or id of the workflow to execute\n :param namespace: namespace of the workflow to execute\n :param input_: json string of mistral workflow input\n :param params: optional mistral params (this is the place to pass\n environment).\n :returns: executions object\n \"\"\"\n\n execution = self.clients(\"mistral\").executions.create(\n workflow_identifier,\n namespace=namespace,\n workflow_input=wf_input,\n **params\n )\n\n execution = utils.wait_for_status(\n execution, ready_statuses=[\"SUCCESS\"], failure_statuses=[\"ERROR\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.mistral_execution_timeout)\n\n return execution\n\n @atomic.action_timer(\"mistral.delete_execution\")\n def _delete_execution(self, execution):\n \"\"\"Delete the given execution.\n\n :param ex: the execution that would be deleted.\n \"\"\"\n self.clients(\"mistral\").executions.delete(execution.id)\n" }, { "alpha_fraction": 0.5402777791023254, "alphanum_fraction": 0.5438888669013977, "avg_line_length": 38.130435943603516, "blob_id": "cb01fc1fae266e75ef45b771966016bd32cad4fa", "content_id": 
"16f2bbf130748c7d32883d761c000c542f194083", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3600, "license_type": "permissive", "max_line_length": 78, "num_lines": 92, "path": "/rally_openstack/task/contexts/neutron/lbaas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.common.wrappers import network as network_wrapper\nfrom rally_openstack.task import context\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True,\n users=True)\[email protected](name=\"lbaas\", platform=\"openstack\", order=360)\nclass Lbaas(context.OpenStackContext):\n \"\"\"Creates a lb-pool for every subnet created in network context.\"\"\"\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"pool\": {\n \"type\": \"object\",\n \"additionalProperties\": True\n },\n \"lbaas_version\": {\n \"type\": \"integer\",\n \"minimum\": 1\n }\n },\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\n \"pool\": {\n \"lb_method\": \"ROUND_ROBIN\",\n \"protocol\": \"HTTP\"\n },\n \"lbaas_version\": 1\n }\n\n def setup(self):\n net_wrapper = 
network_wrapper.wrap(\n osclients.Clients(self.context[\"admin\"][\"credential\"]),\n self, config=self.config)\n\n use_lb, msg = net_wrapper.supports_extension(\"lbaas\")\n if not use_lb:\n LOG.info(msg)\n return\n\n # Creates a lb-pool for every subnet created in network context.\n for user, tenant_id in self._iterate_per_tenants():\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n for subnet in network.get(\"subnets\", []):\n if self.config[\"lbaas_version\"] == 1:\n network.setdefault(\"lb_pools\", []).append(\n net_wrapper.create_v1_pool(\n tenant_id,\n subnet,\n **self.config[\"pool\"]))\n else:\n raise NotImplementedError(\n \"Context for LBaaS version %s not implemented.\"\n % self.config[\"lbaas_version\"])\n\n def cleanup(self):\n net_wrapper = network_wrapper.wrap(\n osclients.Clients(self.context[\"admin\"][\"credential\"]),\n self, config=self.config)\n for tenant_id, tenant_ctx in self.context[\"tenants\"].items():\n for network in tenant_ctx.get(\"networks\", []):\n for pool in network.get(\"lb_pools\", []):\n with logging.ExceptionLogger(\n LOG,\n \"Failed to delete pool %(pool)s for tenant \"\n \"%(tenant)s\" % {\"pool\": pool[\"pool\"][\"id\"],\n \"tenant\": tenant_id}):\n if self.config[\"lbaas_version\"] == 1:\n net_wrapper.delete_v1_pool(pool[\"pool\"][\"id\"])\n" }, { "alpha_fraction": 0.682735025882721, "alphanum_fraction": 0.6861538290977478, "avg_line_length": 36.98701477050781, "blob_id": "943ae05f04f7e8ccea64fc91fba734eb25cfa9b6", "content_id": "37dbbe409b39b98c7e25d33d7077db0e1502b068", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2925, "license_type": "permissive", "max_line_length": 78, "num_lines": 77, "path": "/rally_openstack/task/scenarios/murano/environments.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, 
Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.murano import utils\n\n\n\"\"\"Scenarios for Murano environments.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.MURANO])\[email protected](name=\"MuranoEnvironments.list_environments\",\n platform=\"openstack\")\nclass ListEnvironments(utils.MuranoScenario):\n\n def run(self):\n \"\"\"List the murano environments.\n\n Run murano environment-list for listing all environments.\n \"\"\"\n self._list_environments()\n\n\[email protected](\"required_services\", services=[consts.Service.MURANO])\[email protected](context={\"cleanup@openstack\": [\"murano.environments\"]},\n name=\"MuranoEnvironments.create_and_delete_environment\",\n platform=\"openstack\")\nclass CreateAndDeleteEnvironment(utils.MuranoScenario):\n\n def run(self):\n \"\"\"Create environment, session and delete environment.\"\"\"\n environment = self._create_environment()\n\n self._create_session(environment.id)\n self._delete_environment(environment)\n\n\[email protected](\"required_services\", services=[consts.Service.MURANO])\[email protected](\"required_contexts\", contexts=(\"murano_packages\"))\[email protected](context={\"cleanup@openstack\": [\"murano\"],\n \"roles@openstack\": [\"admin\"]},\n name=\"MuranoEnvironments.create_and_deploy_environment\",\n platform=\"openstack\")\nclass 
CreateAndDeployEnvironment(utils.MuranoScenario):\n\n def run(self, packages_per_env=1):\n \"\"\"Create environment, session and deploy environment.\n\n Create environment, create session, add app to environment\n packages_per_env times, send environment to deploy.\n\n :param packages_per_env: number of packages per environment\n \"\"\"\n environment = self._create_environment()\n session = self._create_session(environment.id)\n package = self.context[\"tenant\"][\"packages\"][0]\n\n for i in range(packages_per_env):\n self._create_service(environment, session,\n package.fully_qualified_name)\n\n self._deploy_environment(environment, session)\n" }, { "alpha_fraction": 0.5887225270271301, "alphanum_fraction": 0.617075502872467, "avg_line_length": 35.929412841796875, "blob_id": "ebb91bdea646e065affd12d21b08b1282fefd59b", "content_id": "6b83b0a0927f481c3750e121a3c1d32e8b38327e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3139, "license_type": "permissive", "max_line_length": 78, "num_lines": 85, "path": "/tests/unit/task/scenarios/vm/workloads/test_siege.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport sys\nfrom unittest import mock\n\n\nfrom rally_openstack.task.scenarios.vm.workloads import siege\nfrom tests.unit import test\n\n\nPATH = \"rally_openstack.task.scenarios.vm.workloads.siege\"\n\nSIEGE_OUTPUT = \"\"\"\nTransactions: 522 hits\nAvailability: 100.00 %\nElapsed time: 3.69 secs\nData transferred: 1.06 MB\nResponse time: 0.10 secs\nTransaction rate: 141.46 trans/sec\nThroughput: 0.29 MB/sec\nConcurrency: 14.71\nSuccessful transactions: 522\nFailed transactions: 0\nLongest transaction: 0.26\nShortest transaction: 0.08\n\"\"\"\n\nOUTPUT = [\n {\"output_value\": \"curl\", \"descr\": \"\", \"output_key\": \"curl_cli\"},\n {\"output_value\": \"wp-net\", \"descr\": \"\", \"output_key\": \"net_name\"},\n {\"output_value\": [\"10.0.0.3\", \"172.16.0.159\"],\n \"description\": \"\",\n \"output_key\": \"gate_node\"},\n {\"output_value\": {\n \"1\": {\"wordpress-network\": [\"10.0.0.4\"]},\n \"0\": {\"wordpress-network\": [\"10.0.0.5\"]}},\n \"description\": \"No description given\", \"output_key\": \"wp_nodes\"}]\n\n\nclass SiegeTestCase(test.TestCase):\n\n @mock.patch(\"%s.json.load\" % PATH)\n def test_get_instances(self, mock_load):\n mock_load.return_value = OUTPUT\n instances = list(siege.get_instances())\n self.assertEqual([\"10.0.0.4\", \"10.0.0.5\"], instances)\n\n @mock.patch(\"%s.get_instances\" % PATH)\n @mock.patch(\"%s.generate_urls_list\" % PATH)\n @mock.patch(\"%s.subprocess.check_output\" % PATH)\n def test_run(self, mock_check_output, mock_generate_urls_list,\n mock_get_instances):\n mock_get_instances.return_value = [1, 2]\n mock_generate_urls_list.return_value = \"urls\"\n mock_check_output.return_value = SIEGE_OUTPUT\n mock_write = mock.MagicMock()\n mock_stdout = mock.MagicMock(write=mock_write)\n real_stdout = sys.stdout\n sys.stdout = mock_stdout\n siege.run()\n expected = [mock.call(\"Transaction rate:141.46\\n\"),\n 
mock.call(\"Throughput:0.29\\n\")]\n sys.stdout = real_stdout\n self.assertEqual(expected, mock_write.mock_calls)\n\n @mock.patch(\"%s.tempfile.NamedTemporaryFile\" % PATH)\n def test_generate_urls_list(self, mock_named_temporary_file):\n mock_urls = mock.MagicMock()\n mock_named_temporary_file.return_value = mock_urls\n name = siege.generate_urls_list([\"foo\", \"bar\"])\n self.assertEqual(mock_urls.name, name)\n" }, { "alpha_fraction": 0.6151953935623169, "alphanum_fraction": 0.6165307760238647, "avg_line_length": 40.97050094604492, "blob_id": "41d8dd6141745b6f0faf3e06ad8f1aa8c7b990c8", "content_id": "db5907b9fd2a34ac1395b422688df8ffc2ff3d37", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14228, "license_type": "permissive", "max_line_length": 79, "num_lines": 339, "path": "/rally_openstack/common/services/storage/cinder_v1.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.common import utils as rutils\nfrom rally.task import atomic\n\nfrom rally_openstack.common import service\nfrom rally_openstack.common.services.storage import block\nfrom rally_openstack.common.services.storage import cinder_common\n\n\nCONF = block.CONF\n\n\[email protected](\"cinder\", service_type=\"block-storage\", version=\"1\")\nclass CinderV1Service(service.Service, cinder_common.CinderMixin):\n\n @atomic.action_timer(\"cinder_v1.create_volume\")\n def create_volume(self, size, snapshot_id=None, source_volid=None,\n display_name=None, display_description=None,\n volume_type=None, user_id=None,\n project_id=None, availability_zone=None,\n metadata=None, imageRef=None):\n \"\"\"Creates a volume.\n\n :param size: Size of volume in GB\n :param snapshot_id: ID of the snapshot\n :param display_name: Name of the volume\n :param display_description: Description of the volume\n :param volume_type: Type of volume\n :param user_id: User id derived from context\n :param project_id: Project id derived from context\n :param availability_zone: Availability Zone to use\n :param metadata: Optional metadata to set on volume creation\n :param imageRef: reference to an image stored in glance\n\n :returns: Return a new volume.\n \"\"\"\n if isinstance(size, dict):\n size = random.randint(size[\"min\"], size[\"max\"])\n\n volume = self._get_client().volumes.create(\n size,\n display_name=(display_name or self.generate_random_name()),\n display_description=display_description,\n snapshot_id=snapshot_id,\n source_volid=source_volid,\n volume_type=volume_type,\n user_id=user_id,\n project_id=project_id,\n availability_zone=availability_zone,\n metadata=metadata,\n imageRef=imageRef\n )\n\n # NOTE(msdubov): It is reasonable to wait 5 secs before starting to\n # check whether the volume is ready => less API calls.\n rutils.interruptable_sleep(\n 
CONF.openstack.cinder_volume_create_prepoll_delay)\n\n return self._wait_available_volume(volume)\n\n @atomic.action_timer(\"cinder_v1.update_volume\")\n def update_volume(self, volume_id, display_name=None,\n display_description=None):\n \"\"\"Update the name or description for a volume.\n\n :param volume_id: The updated volume id.\n :param display_name: The volume name.\n :param display_description: The volume description.\n\n :returns: The updated volume.\n \"\"\"\n kwargs = {}\n if display_name is not None:\n kwargs[\"display_name\"] = display_name\n if display_description is not None:\n kwargs[\"display_description\"] = display_description\n updated_volume = self._get_client().volumes.update(\n volume_id, **kwargs)\n return updated_volume[\"volume\"]\n\n @atomic.action_timer(\"cinder_v1.list_volumes\")\n def list_volumes(self, detailed=True, search_opts=None, limit=None):\n \"\"\"List all volumes.\"\"\"\n return self._get_client().volumes.list(detailed=detailed,\n search_opts=search_opts,\n limit=limit)\n\n @atomic.action_timer(\"cinder_v1.list_types\")\n def list_types(self, search_opts=None):\n \"\"\"Lists all volume types.\"\"\"\n return (self._get_client()\n .volume_types.list(search_opts))\n\n @atomic.action_timer(\"cinder_v1.create_snapshot\")\n def create_snapshot(self, volume_id, force=False,\n display_name=None, display_description=None):\n \"\"\"Create one snapshot.\n\n Returns when the snapshot is actually created and is in the \"Available\"\n state.\n\n :param volume_id: volume uuid for creating snapshot\n :param force: flag to indicate whether to snapshot a volume even if\n it's attached to an instance\n :param display_name: Name of the snapshot\n :param display_description: Description of the snapshot\n :returns: Created snapshot object\n \"\"\"\n kwargs = {\"force\": force,\n \"display_name\": display_name or self.generate_random_name(),\n \"display_description\": display_description}\n\n snapshot = 
self._get_client().volume_snapshots.create(volume_id,\n **kwargs)\n rutils.interruptable_sleep(\n CONF.openstack.cinder_volume_create_prepoll_delay)\n snapshot = self._wait_available_volume(snapshot)\n return snapshot\n\n @atomic.action_timer(\"cinder_v1.create_backup\")\n def create_backup(self, volume_id, container=None,\n name=None, description=None):\n \"\"\"Create a volume backup of the given volume.\n\n :param volume_id: The ID of the volume to backup.\n :param container: The name of the backup service container.\n :param name: The name of the backup.\n :param description: The description of the backup.\n \"\"\"\n kwargs = {\"name\": name or self.generate_random_name(),\n \"description\": description,\n \"container\": container}\n backup = self._get_client().backups.create(volume_id, **kwargs)\n return self._wait_available_volume(backup)\n\n @atomic.action_timer(\"cinder_v1.create_volume_type\")\n def create_volume_type(self, name=None):\n \"\"\"create volume type.\n\n :param kwargs: Optional additional arguments for volume type creation\n :param name: Descriptive name of the volume type\n \"\"\"\n kwargs = {\"name\": name or self.generate_random_name()}\n return self._get_client().volume_types.create(**kwargs)\n\n\[email protected]_layer(CinderV1Service)\nclass UnifiedCinderV1Service(cinder_common.UnifiedCinderMixin,\n block.BlockStorage):\n\n @staticmethod\n def _unify_volume(volume):\n if isinstance(volume, dict):\n return block.Volume(id=volume[\"id\"], name=volume[\"display_name\"],\n size=volume[\"size\"], status=volume[\"status\"])\n else:\n return block.Volume(id=volume.id, name=volume.display_name,\n size=volume.size, status=volume.status)\n\n @staticmethod\n def _unify_snapshot(snapshot):\n return block.VolumeSnapshot(id=snapshot.id, name=snapshot.display_name,\n volume_id=snapshot.volume_id,\n status=snapshot.status)\n\n def create_volume(self, size, consistencygroup_id=None,\n group_id=None, snapshot_id=None, source_volid=None,\n name=None, 
description=None,\n volume_type=None, user_id=None,\n project_id=None, availability_zone=None,\n metadata=None, imageRef=None, scheduler_hints=None,\n backup_id=None):\n \"\"\"Creates a volume.\n\n :param size: Size of volume in GB\n :param consistencygroup_id: ID of the consistencygroup\n :param group_id: ID of the group\n :param snapshot_id: ID of the snapshot\n :param name: Name of the volume\n :param description: Description of the volume\n :param volume_type: Type of volume\n :param user_id: User id derived from context\n :param project_id: Project id derived from context\n :param availability_zone: Availability Zone to use\n :param metadata: Optional metadata to set on volume creation\n :param imageRef: reference to an image stored in glance\n :param source_volid: ID of source volume to clone from\n :param scheduler_hints: (optional extension) arbitrary key-value pairs\n specified by the client to help boot an instance\n :param backup_id: ID of the backup(IGNORED)\n\n :returns: Return a new volume.\n \"\"\"\n return self._unify_volume(self._impl.create_volume(\n size, snapshot_id=snapshot_id, source_volid=source_volid,\n display_name=name,\n display_description=description,\n volume_type=volume_type, user_id=user_id,\n project_id=project_id, availability_zone=availability_zone,\n metadata=metadata, imageRef=imageRef))\n\n def list_volumes(self, detailed=True, search_opts=None, marker=None,\n limit=None, sort=None):\n \"\"\"Lists all volumes.\n\n :param detailed: Whether to return detailed volume info.\n :param search_opts: Search options to filter out volumes.\n :param marker: Begin returning volumes that appear later in the volume\n list than that represented by this volume id.(IGNORED)\n :param limit: Maximum number of volumes to return.\n :param sort: Sort information(IGNORED)\n :returns: Return volumes list.\n \"\"\"\n return [self._unify_volume(volume)\n for volume in self._impl.list_volumes(detailed=detailed,\n search_opts=search_opts,\n 
limit=limit)]\n\n def get_volume(self, volume_id):\n \"\"\"Get a volume.\n\n :param volume_id: The ID of the volume to get.\n\n :returns: Return the volume.\n \"\"\"\n return self._unify_volume(self._impl.get_volume(volume_id))\n\n def extend_volume(self, volume, new_size):\n \"\"\"Extend the size of the specified volume.\"\"\"\n return self._unify_volume(\n self._impl.extend_volume(volume, new_size=new_size))\n\n def update_volume(self, volume_id,\n name=None, description=None):\n \"\"\"Update the name or description for a volume.\n\n :param volume_id: The updated volume id.\n :param name: The volume name.\n :param description: The volume description.\n\n :returns: The updated volume.\n \"\"\"\n return self._unify_volume(self._impl.update_volume(\n volume_id, display_name=name,\n display_description=description))\n\n def list_types(self, search_opts=None, is_public=None):\n \"\"\"Lists all volume types.\"\"\"\n return self._impl.list_types(search_opts=search_opts)\n\n def create_snapshot(self, volume_id, force=False,\n name=None, description=None, metadata=None):\n \"\"\"Create one snapshot.\n\n Returns when the snapshot is actually created and is in the \"Available\"\n state.\n\n :param volume_id: volume uuid for creating snapshot\n :param force: If force is True, create a snapshot even if the volume is\n attached to an instance. 
Default is False.\n :param name: Name of the snapshot\n :param description: Description of the snapshot\n :param metadata: Metadata of the snapshot\n :returns: Created snapshot object\n \"\"\"\n return self._unify_snapshot(self._impl.create_snapshot(\n volume_id, force=force, display_name=name,\n display_description=description))\n\n def list_snapshots(self, detailed=True):\n \"\"\"Get a list of all snapshots.\"\"\"\n return [self._unify_snapshot(snapshot)\n for snapshot in self._impl.list_snapshots(detailed=detailed)]\n\n def create_backup(self, volume_id, container=None,\n name=None, description=None,\n incremental=False, force=False,\n snapshot_id=None):\n \"\"\"Creates a volume backup.\n\n :param volume_id: The ID of the volume to backup.\n :param container: The name of the backup service container.\n :param name: The name of the backup.\n :param description: The description of the backup.\n :param incremental: Incremental backup.\n :param force: If True, allows an in-use volume to be backed up.\n :param snapshot_id: The ID of the snapshot to backup.\n\n :returns: The created backup object.\n \"\"\"\n return self._unify_backup(self._impl.create_backup(\n volume_id, container=container, name=name,\n description=description))\n\n def create_volume_type(self, name=None, description=None, is_public=True):\n \"\"\"Creates a volume type.\n\n :param name: Descriptive name of the volume type\n :param description: Description of the volume type\n :param is_public: Volume type visibility\n :returns: Return the created volume type.\n \"\"\"\n return self._impl.create_volume_type(name=name)\n\n def update_volume_type(self, volume_type, name=None,\n description=None, is_public=None):\n raise NotImplementedError(\"Cinder V1 doesn't support this method.\")\n\n def add_type_access(self, volume_type, project):\n raise NotImplementedError(\"Cinder V1 doesn't support this method.\")\n\n def list_type_access(self, volume_type):\n raise NotImplementedError(\"Cinder V1 doesn't 
support this method.\")\n\n def restore_backup(self, backup_id, volume_id=None):\n \"\"\"Restore the given backup.\n\n :param backup_id: The ID of the backup to restore.\n :param volume_id: The ID of the volume to restore the backup to.\n\n :returns: Return the restored backup.\n \"\"\"\n return self._unify_volume(self._impl.restore_backup(\n backup_id, volume_id=volume_id))\n" }, { "alpha_fraction": 0.5773788690567017, "alphanum_fraction": 0.5824329257011414, "avg_line_length": 38.84722137451172, "blob_id": "596ed62e1a6ea9f46eec27c3dc7664f1796feacf", "content_id": "e12766b1ab668665399129b58316939c99288070", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5738, "license_type": "permissive", "max_line_length": 79, "num_lines": 144, "path": "/tests/functional/test_task_samples.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# Copyright 2014: Catalyst IT Ltd.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport json\nimport os\nimport re\nimport traceback\nimport unittest\n\nfrom rally import api\nfrom rally.cli import yamlutils as yaml\nfrom rally.common import broker\nfrom rally import plugins\n\nimport rally_openstack as rally_openstack_module\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import credential\nfrom tests.functional import utils\n\n\nclass TestTaskSamples(unittest.TestCase):\n\n NUMBER_OF_THREADS = 20\n\n def _skip(self, validation_output):\n \"\"\"Help to decide do we want to skip this result or not.\n\n :param validation_output: string representation of the\n error that we want to check\n :return: True if we want to skip this error\n of task sample validation, otherwise False.\n \"\"\"\n\n skip_lst = [\"[Ss]ervice is not available\",\n \"is not installed. To install it run\",\n \"extension.* is not configured\"]\n for check_str in skip_lst:\n if re.search(check_str, validation_output) is not None:\n return True\n return False\n\n @plugins.ensure_plugins_are_loaded\n def test_task_samples_are_valid(self):\n from rally_openstack.task.contexts.keystone import users\n rally = utils.Rally(force_new_db=True)\n # let's use pre-created users to make TestTaskSamples quicker\n rapi = api.API(config_file=rally.config_filename)\n deployment = rapi.deployment._get(\"MAIN\")\n\n openstack_platform = deployment.env_obj.data[\"platforms\"][\"openstack\"]\n admin_creds = credential.OpenStackCredential(\n permission=consts.EndpointPermission.ADMIN,\n **openstack_platform[\"platform_data\"][\"admin\"])\n\n ctx = {\n \"env\": {\n \"platforms\": {\n \"openstack\": {\n \"admin\": admin_creds.to_dict(),\n \"users\": []}}},\n \"task\": {\"uuid\": self.__class__.__name__,\n \"deployment_uuid\": deployment[\"uuid\"]}}\n user_ctx = users.UserGenerator(ctx)\n user_ctx.setup()\n self.addCleanup(user_ctx.cleanup)\n\n os_creds = 
deployment[\"config\"][\"openstack\"]\n\n user = copy.copy(os_creds[\"admin\"])\n user[\"username\"] = ctx[\"users\"][0][\"credential\"].username\n user[\"password\"] = ctx[\"users\"][0][\"credential\"].password\n if \"project_name\" in os_creds[\"admin\"]:\n # it is Keystone\n user[\"project_name\"] = ctx[\"users\"][0][\"credential\"].tenant_name\n else:\n user[\"tenant_name\"] = ctx[\"users\"][0][\"credential\"].tenant_name\n os_creds[\"users\"] = [user]\n\n rally(\"deployment destroy MAIN\", write_report=False)\n deployment_cfg = os.path.join(rally.tmp_dir, \"new_deployment.json\")\n with open(deployment_cfg, \"w\") as f:\n f.write(json.dumps({\"openstack\": os_creds}))\n rally(\"deployment create --name MAIN --filename %s\" % deployment_cfg,\n write_report=False)\n\n # store all failures and print them at once\n failed_samples = {}\n\n def publisher(queue):\n \"\"\"List all samples and render task configs\"\"\"\n samples_path = os.path.join(\n os.path.dirname(rally_openstack_module.__file__), os.pardir,\n \"samples\", \"tasks\")\n\n for dirname, dirnames, filenames in os.walk(samples_path):\n # NOTE(rvasilets): Skip by suggest of boris-42 because in\n # future we don't what to maintain this dir\n if dirname.find(\"tempest-do-not-run-against-production\") != -1:\n continue\n for filename in filenames:\n full_path = os.path.join(dirname, filename)\n\n # NOTE(hughsaunders): Skip non config files\n # (bug https://bugs.launchpad.net/rally/+bug/1314369)\n if os.path.splitext(filename)[1] != \".json\":\n continue\n with open(full_path) as task_file:\n input_task = task_file.read()\n rendered_task = rapi.task.render_template(\n task_template=input_task)\n queue.append((full_path, rendered_task))\n\n def consumer(_cache, sample):\n \"\"\"Validate one sample\"\"\"\n full_path, rendered_task = sample\n task_config = yaml.safe_load(rendered_task)\n try:\n rapi.task.validate(deployment=\"MAIN\",\n config=task_config)\n except Exception as e:\n if not self._skip(str(e)):\n 
failed_samples[full_path] = traceback.format_exc()\n\n broker.run(publisher, consumer, self.NUMBER_OF_THREADS)\n\n if failed_samples:\n self.fail(\"Validation failed on the one or several samples. \"\n \"See details below:\\n%s\" %\n \"\".join([\"\\n======\\n%s\\n\\n%s\\n\" % (k, v)\n for k, v in failed_samples.items()]))\n" }, { "alpha_fraction": 0.6641837954521179, "alphanum_fraction": 0.6685745716094971, "avg_line_length": 43.28472137451172, "blob_id": "94e8b769ea3c3b1d667b6a2d9c4bb1a4cfc445e4", "content_id": "a3229f7aba71164000a4c5d981d21235072cb8d3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12754, "license_type": "permissive", "max_line_length": 79, "num_lines": 288, "path": "/rally_openstack/task/scenarios/neutron/loadbalancer_v1.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.neutron import utils\n\n\n\"\"\"Scenarios for Neutron Loadbalancer v1.\"\"\"\n\n\[email protected](\"restricted_parameters\", param_names=\"subnet_id\",\n subdict=\"pool_create_args\")\[email protected](\"required_neutron_extensions\", extensions=[\"lbaas\"])\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=(\"network\"))\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronLoadbalancerV1.create_and_list_pools\",\n platform=\"openstack\")\nclass CreateAndListPools(utils.NeutronScenario):\n\n def run(self, pool_create_args=None):\n \"\"\"Create a pool(v1) and then list pools(v1).\n\n Measure the \"neutron lb-pool-list\" command performance.\n The scenario creates a pool for every subnet and then lists pools.\n\n :param pool_create_args: dict, POST /lb/pools request options\n \"\"\"\n pool_create_args = pool_create_args or {}\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n self._create_v1_pools(networks, **pool_create_args)\n self._list_v1_pools()\n\n\[email protected](\"restricted_parameters\", param_names=\"subnet_id\",\n subdict=\"pool_create_args\")\[email protected](\"required_neutron_extensions\", extensions=[\"lbaas\"])\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=(\"network\"))\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronLoadbalancerV1.create_and_delete_pools\",\n 
platform=\"openstack\")\nclass CreateAndDeletePools(utils.NeutronScenario):\n\n def run(self, pool_create_args=None):\n \"\"\"Create pools(v1) and delete pools(v1).\n\n Measure the \"neutron lb-pool-create\" and \"neutron lb-pool-delete\"\n command performance. The scenario creates a pool for every subnet\n and then deletes those pools.\n\n :param pool_create_args: dict, POST /lb/pools request options\n \"\"\"\n pool_create_args = pool_create_args or {}\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n pools = self._create_v1_pools(networks, **pool_create_args)\n for pool in pools:\n self._delete_v1_pool(pool[\"pool\"])\n\n\[email protected](\"restricted_parameters\", param_names=\"subnet_id\",\n subdict=\"pool_create_args\")\[email protected](\"required_neutron_extensions\", extensions=[\"lbaas\"])\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=(\"network\"))\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronLoadbalancerV1.create_and_update_pools\",\n platform=\"openstack\")\nclass CreateAndUpdatePools(utils.NeutronScenario):\n\n def run(self, pool_update_args=None, pool_create_args=None):\n \"\"\"Create pools(v1) and update pools(v1).\n\n Measure the \"neutron lb-pool-create\" and \"neutron lb-pool-update\"\n command performance. 
The scenario creates a pool for every subnet\n and then update those pools.\n\n :param pool_create_args: dict, POST /lb/pools request options\n :param pool_update_args: dict, POST /lb/pools update options\n \"\"\"\n pool_create_args = pool_create_args or {}\n pool_update_args = pool_update_args or {}\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n pools = self._create_v1_pools(networks, **pool_create_args)\n for pool in pools:\n self._update_v1_pool(pool, **pool_update_args)\n\n\[email protected](\"restricted_parameters\", param_names=[\"pool_id\", \"subnet_id\"],\n subdict=\"vip_create_args\")\[email protected](\"required_neutron_extensions\", extensions=[\"lbaas\"])\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=(\"network\"))\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronLoadbalancerV1.create_and_list_vips\",\n platform=\"openstack\")\nclass CreateAndListVips(utils.NeutronScenario):\n\n def run(self, pool_create_args=None, vip_create_args=None):\n \"\"\"Create a vip(v1) and then list vips(v1).\n\n Measure the \"neutron lb-vip-create\" and \"neutron lb-vip-list\" command\n performance. 
The scenario creates a vip for every pool created and\n then lists vips.\n\n :param vip_create_args: dict, POST /lb/vips request options\n :param pool_create_args: dict, POST /lb/pools request options\n \"\"\"\n vip_create_args = vip_create_args or {}\n pool_create_args = pool_create_args or {}\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n pools = self._create_v1_pools(networks, **pool_create_args)\n for pool in pools:\n self._create_v1_vip(pool, **vip_create_args)\n self._list_v1_vips()\n\n\[email protected](\"restricted_parameters\", param_names=[\"pool_id\", \"subnet_id\"],\n subdict=\"vip_create_args\")\[email protected](\"required_neutron_extensions\", extensions=[\"lbaas\"])\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=(\"network\"))\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronLoadbalancerV1.create_and_delete_vips\",\n platform=\"openstack\")\nclass CreateAndDeleteVips(utils.NeutronScenario):\n\n def run(self, pool_create_args=None, vip_create_args=None):\n \"\"\"Create a vip(v1) and then delete vips(v1).\n\n Measure the \"neutron lb-vip-create\" and \"neutron lb-vip-delete\"\n command performance. 
The scenario creates a vip for pool and\n then deletes those vips.\n\n :param pool_create_args: dict, POST /lb/pools request options\n :param vip_create_args: dict, POST /lb/vips request options\n \"\"\"\n vips = []\n pool_create_args = pool_create_args or {}\n vip_create_args = vip_create_args or {}\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n pools = self._create_v1_pools(networks, **pool_create_args)\n for pool in pools:\n vips.append(self._create_v1_vip(pool, **vip_create_args))\n for vip in vips:\n self._delete_v1_vip(vip[\"vip\"])\n\n\[email protected](\"restricted_parameters\", param_names=[\"pool_id\", \"subnet_id\"],\n subdict=\"vip_create_args\")\[email protected](\"required_neutron_extensions\", extensions=[\"lbaas\"])\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=(\"network\"))\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronLoadbalancerV1.create_and_update_vips\",\n platform=\"openstack\")\nclass CreateAndUpdateVips(utils.NeutronScenario):\n\n def run(self, pool_create_args=None,\n vip_update_args=None, vip_create_args=None):\n \"\"\"Create vips(v1) and update vips(v1).\n\n Measure the \"neutron lb-vip-create\" and \"neutron lb-vip-update\"\n command performance. 
The scenario creates a pool for every subnet\n and then update those pools.\n\n :param pool_create_args: dict, POST /lb/pools request options\n :param vip_create_args: dict, POST /lb/vips request options\n :param vip_update_args: dict, POST /lb/vips update options\n \"\"\"\n vips = []\n pool_create_args = pool_create_args or {}\n vip_create_args = vip_create_args or {}\n vip_update_args = vip_update_args or {}\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n pools = self._create_v1_pools(networks, **pool_create_args)\n for pool in pools:\n vips.append(self._create_v1_vip(pool, **vip_create_args))\n for vip in vips:\n self._update_v1_vip(vip, **vip_update_args)\n\n\[email protected](\"required_neutron_extensions\", extensions=[\"lbaas\"])\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronLoadbalancerV1.create_and_list_healthmonitors\",\n platform=\"openstack\")\nclass CreateAndListHealthmonitors(utils.NeutronScenario):\n\n def run(self, healthmonitor_create_args=None):\n \"\"\"Create healthmonitors(v1) and list healthmonitors(v1).\n\n Measure the \"neutron lb-healthmonitor-list\" command performance. 
This\n scenario creates healthmonitors and lists them.\n\n :param healthmonitor_create_args: dict, POST /lb/healthmonitors request\n options\n \"\"\"\n healthmonitor_create_args = healthmonitor_create_args or {}\n self._create_v1_healthmonitor(**healthmonitor_create_args)\n self._list_v1_healthmonitors()\n\n\[email protected](\"required_neutron_extensions\", extensions=[\"lbaas\"])\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronLoadbalancerV1.create_and_delete_healthmonitors\",\n platform=\"openstack\")\nclass CreateAndDeleteHealthmonitors(utils.NeutronScenario):\n\n def run(self, healthmonitor_create_args=None):\n \"\"\"Create a healthmonitor(v1) and delete healthmonitors(v1).\n\n Measure the \"neutron lb-healthmonitor-create\" and \"neutron\n lb-healthmonitor-delete\" command performance. The scenario creates\n healthmonitors and deletes those healthmonitors.\n\n :param healthmonitor_create_args: dict, POST /lb/healthmonitors request\n options\n \"\"\"\n healthmonitor_create_args = healthmonitor_create_args or {}\n healthmonitor = self._create_v1_healthmonitor(\n **healthmonitor_create_args)\n self._delete_v1_healthmonitor(healthmonitor[\"health_monitor\"])\n\n\[email protected](\"required_neutron_extensions\", extensions=[\"lbaas\"])\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronLoadbalancerV1.create_and_update_healthmonitors\",\n platform=\"openstack\")\nclass CreateAndUpdateHealthmonitors(utils.NeutronScenario):\n\n def run(self, healthmonitor_create_args=None,\n healthmonitor_update_args=None):\n \"\"\"Create a healthmonitor(v1) and update healthmonitors(v1).\n\n Measure the 
\"neutron lb-healthmonitor-create\" and \"neutron\n lb-healthmonitor-update\" command performance. The scenario creates\n healthmonitors and then updates them.\n\n :param healthmonitor_create_args: dict, POST /lb/healthmonitors request\n options\n :param healthmonitor_update_args: dict, POST /lb/healthmonitors update\n options\n \"\"\"\n healthmonitor_create_args = healthmonitor_create_args or {}\n healthmonitor_update_args = healthmonitor_update_args or {\n \"max_retries\": random.choice(range(1, 10))}\n healthmonitor = self._create_v1_healthmonitor(\n **healthmonitor_create_args)\n self._update_v1_healthmonitor(healthmonitor,\n **healthmonitor_update_args)\n" }, { "alpha_fraction": 0.5111581087112427, "alphanum_fraction": 0.5135071873664856, "avg_line_length": 38.7850456237793, "blob_id": "4c99fc111356103f83de9bb871e914b1f9006452", "content_id": "fd31e4be929d8ad425b359591557182b942fe46b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4257, "license_type": "permissive", "max_line_length": 75, "num_lines": 107, "path": "/rally_openstack/task/contexts/watcher/audit_templates.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.watcher import utils as watcher_utils\nfrom rally_openstack.task import types\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"audit_templates\", platform=\"openstack\", order=550)\nclass AuditTemplateGenerator(context.OpenStackContext):\n \"\"\"Creates Watcher audit templates for tenants.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"audit_templates_per_admin\": {\"type\": \"integer\", \"minimum\": 1},\n \"fill_strategy\": {\"enum\": [\"round_robin\", \"random\", None]},\n \"params\": {\n \"type\": \"array\",\n \"minItems\": 1,\n \"uniqueItems\": True,\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"goal\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n }\n },\n \"additionalProperties\": False\n },\n \"strategy\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n }\n },\n \"additionalProperties\": False\n },\n },\n \"additionalProperties\": False,\n },\n }\n },\n \"additionalProperties\": False,\n \"required\": [\"params\"]\n }\n\n DEFAULT_CONFIG = {\n \"audit_templates_per_admin\": 1,\n \"fill_strategy\": \"round_robin\"\n }\n\n def setup(self):\n watcher_scenario = watcher_utils.WatcherScenario(\n {\"admin\": self.context[\"admin\"], \"task\": self.context[\"task\"],\n \"owner_id\": self.context[\"owner_id\"],\n \"config\": {\n \"api_versions\": self.context[\"config\"].get(\n \"api_versions\", [])}\n })\n\n self.context[\"audit_templates\"] = []\n for i in 
range(self.config[\"audit_templates_per_admin\"]):\n cfg_size = len(self.config[\"params\"])\n if self.config[\"fill_strategy\"] == \"round_robin\":\n audit_params = self.config[\"params\"][i % cfg_size]\n elif self.config[\"fill_strategy\"] == \"random\":\n audit_params = random.choice(self.config[\"params\"])\n\n goal_id = types.WatcherGoal(self.context).pre_process(\n resource_spec=audit_params[\"goal\"], config={})\n strategy_id = types.WatcherStrategy(self.context).pre_process(\n resource_spec=audit_params[\"strategy\"], config={})\n\n audit_template = watcher_scenario._create_audit_template(\n goal_id, strategy_id)\n self.context[\"audit_templates\"].append(audit_template.uuid)\n\n def cleanup(self):\n resource_manager.cleanup(names=[\"watcher.action_plan\",\n \"watcher.audit_template\"],\n admin=self.context.get(\"admin\", []),\n superclass=watcher_utils.WatcherScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.6039420962333679, "alphanum_fraction": 0.6045580506324768, "avg_line_length": 41.5836067199707, "blob_id": "4e8a84c6b3825f3d6e8c1b4bb48fe324976ccb33", "content_id": "9cdb3188b93b4c9d0497e1e5e4eb2980495bdcb3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12988, "license_type": "permissive", "max_line_length": 79, "num_lines": 305, "path": "/tests/unit/task/scenarios/cinder/test_volume_types.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions as rally_exceptions\nfrom rally_openstack.task.scenarios.cinder import volume_types\nfrom tests.unit import test\n\n\nclass CinderVolumeTypesTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(CinderVolumeTypesTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.storage.block.BlockStorage\")\n self.addCleanup(patch.stop)\n self.mock_cinder = patch.start()\n\n def _get_context(self):\n context = test.get_test_context()\n context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()},\n \"tenant\": {\"id\": \"fake\", \"name\": \"fake\"}})\n return context\n\n def test_create_and_get_volume_type(self):\n mock_service = self.mock_cinder.return_value\n scenario = volume_types.CreateAndGetVolumeType(self._get_context())\n description = \"rally tests creating types\"\n is_public = False\n scenario.run(description=description, is_public=is_public)\n mock_service.create_volume_type.assert_called_once_with(\n description=description, is_public=is_public)\n mock_service.get_volume_type.assert_called_once_with(\n mock_service.create_volume_type.return_value)\n\n def test_create_and_delete_volume_type(self):\n mock_service = self.mock_cinder.return_value\n scenario = volume_types.CreateAndDeleteVolumeType(self._get_context())\n description = \"rally tests creating types\"\n is_public = False\n scenario.run(description=description, is_public=is_public)\n mock_service.create_volume_type.assert_called_once_with(\n description=description, is_public=is_public)\n mock_service.delete_volume_type.assert_called_once_with(\n mock_service.create_volume_type.return_value)\n\n def test_create_and_delete_encryption_type(self):\n mock_service = self.mock_cinder.return_value\n 
context = self._get_context()\n context.update({\n \"volume_types\": [{\"id\": \"fake_id\",\n \"name\": \"fake_name\"}],\n \"iteration\": 1})\n scenario = volume_types.CreateAndDeleteEncryptionType(\n context)\n\n # case: create_specs is None\n specs = {\n \"provider\": \"prov\",\n \"cipher\": \"cip\",\n \"key_size\": \"ks\",\n \"control_location\": \"cl\"\n }\n scenario.run(create_specs=None, provider=\"prov\", cipher=\"cip\",\n key_size=\"ks\", control_location=\"cl\")\n mock_service.create_encryption_type.assert_called_once_with(\n \"fake_id\", specs=specs)\n mock_service.delete_encryption_type.assert_called_once_with(\n \"fake_id\")\n\n # case: create_specs is not None\n scenario.run(create_specs=\"fakecreatespecs\", provider=\"prov\",\n cipher=\"cip\", key_size=\"ks\", control_location=\"cl\")\n mock_service.create_encryption_type.assert_called_with(\n \"fake_id\", specs=\"fakecreatespecs\")\n mock_service.delete_encryption_type.assert_called_with(\n \"fake_id\")\n\n def test_create_get_and_delete_encryption_type(self):\n mock_service = self.mock_cinder.return_value\n context = self._get_context()\n context.update({\n \"volume_types\": [{\"id\": \"fake_id\",\n \"name\": \"fake_name\"}],\n \"iteration\": 1})\n scenario = volume_types.CreateGetAndDeleteEncryptionType(\n context)\n\n specs = {\n \"provider\": \"prov\",\n \"cipher\": \"cip\",\n \"key_size\": \"ks\",\n \"control_location\": \"cl\"\n }\n scenario.run(provider=\"prov\", cipher=\"cip\",\n key_size=\"ks\", control_location=\"cl\")\n mock_service.create_encryption_type.assert_called_once_with(\n \"fake_id\", specs=specs)\n mock_service.get_encryption_type.assert_called_once_with(\n \"fake_id\")\n mock_service.delete_encryption_type.assert_called_once_with(\n \"fake_id\")\n\n def test_create_and_list_volume_types(self):\n mock_service = self.mock_cinder.return_value\n fake_type = mock.Mock()\n pool_list = [mock.Mock(), mock.Mock(), fake_type]\n description = \"rally tests creating types\"\n is_public = 
False\n\n scenario = volume_types.CreateAndListVolumeTypes(self._get_context())\n mock_service.create_volume_type.return_value = fake_type\n mock_service.list_types.return_value = pool_list\n scenario.run(description=description, is_public=is_public)\n\n mock_service.create_volume_type.assert_called_once_with(\n description=description, is_public=is_public)\n mock_service.list_types.assert_called_once_with()\n\n def test_create_and_list_volume_types_with_fails(self):\n # Negative case: type isn't listed\n mock_service = self.mock_cinder.return_value\n fake_type = mock.Mock()\n pool_list = [mock.Mock(), mock.Mock(), mock.Mock()]\n description = \"rally tests creating types\"\n is_public = False\n\n scenario = volume_types.CreateAndListVolumeTypes(self._get_context())\n mock_service.create_volume_type.return_value = fake_type\n mock_service.list_types.return_value = pool_list\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n description=description, is_public=is_public)\n\n mock_service.create_volume_type.assert_called_once_with(\n description=description, is_public=is_public)\n mock_service.list_types.assert_called_once_with()\n\n def test_create_and_update_volume_type(self):\n mock_service = self.mock_cinder.return_value\n scenario = volume_types.CreateAndUpdateVolumeType(self._get_context())\n fake_type = mock.MagicMock()\n fake_type.name = \"any\"\n create_description = \"test create\"\n update_description = \"test update\"\n mock_service.create_volume_type.return_value = fake_type\n scenario.run(description=create_description,\n update_description=update_description)\n\n mock_service.create_volume_type.assert_called_once_with(\n description=create_description,\n is_public=True)\n mock_service.update_volume_type.assert_called_once_with(\n fake_type,\n description=update_description,\n # update_is_public and update_name are not specified, so should\n # not be used\n is_public=None, name=None\n )\n\n def 
test_create_volume_type_and_encryption_type(self):\n mock_service = self.mock_cinder.return_value\n scenario = volume_types.CreateVolumeTypeAndEncryptionType(\n self._get_context())\n description = \"rally tests creating types\"\n is_public = False\n # case: create_specs is None\n specs = {\n \"provider\": \"prov\",\n \"cipher\": \"cip\",\n \"key_size\": \"ks\",\n \"control_location\": \"cl\"\n }\n scenario.run(create_specs=None, provider=\"prov\", cipher=\"cip\",\n key_size=\"ks\", control_location=\"cl\",\n description=description, is_public=is_public)\n mock_service.create_volume_type.assert_called_once_with(\n description=description, is_public=is_public)\n mock_service.create_encryption_type.assert_called_once_with(\n mock_service.create_volume_type.return_value, specs=specs)\n\n # case: create_specs is not None\n scenario.run(create_specs=\"fakecreatespecs\", provider=\"prov\",\n cipher=\"cip\", key_size=\"ks\", control_location=\"cl\",\n description=description, is_public=is_public)\n mock_service.create_volume_type.assert_called_with(\n description=description, is_public=is_public)\n mock_service.create_encryption_type.assert_called_with(\n mock_service.create_volume_type.return_value,\n specs=\"fakecreatespecs\")\n\n def test_create_and_list_encryption_type(self):\n mock_service = self.mock_cinder.return_value\n context = self._get_context()\n context.update({\n \"volume_types\": [{\"id\": \"fake_id\",\n \"name\": \"fake_name\"}],\n \"iteration\": 1})\n scenario = volume_types.CreateAndListEncryptionType(\n context)\n\n # case: create_specs is None\n specs = {\n \"provider\": \"prov\",\n \"cipher\": \"cip\",\n \"key_size\": \"ks\",\n \"control_location\": \"cl\"\n }\n scenario.run(create_specs=None, provider=\"prov\", cipher=\"cip\",\n key_size=\"ks\", control_location=\"cl\",\n search_opts=\"fakeopts\")\n mock_service.create_encryption_type.assert_called_once_with(\n \"fake_id\", specs=specs)\n mock_service.list_encryption_type.assert_called_once_with(\n 
\"fakeopts\")\n\n # case: create_specs is not None\n scenario.run(create_specs=\"fakecreatespecs\", provider=\"prov\",\n cipher=\"cip\", key_size=\"ks\", control_location=\"cl\",\n search_opts=\"fakeopts\")\n mock_service.create_encryption_type.assert_called_with(\n \"fake_id\", specs=\"fakecreatespecs\")\n mock_service.list_encryption_type.assert_called_with(\n \"fakeopts\")\n\n def test_create_and_set_volume_type_keys(self):\n mock_service = self.mock_cinder.return_value\n volume_type_key = {\"volume_backend_name\": \"LVM_iSCSI\"}\n description = \"rally tests creating types\"\n is_public = False\n scenario = volume_types.CreateAndSetVolumeTypeKeys(\n self._get_context())\n scenario.run(volume_type_key, description=description,\n is_public=is_public)\n\n mock_service.create_volume_type.assert_called_once_with(\n description=description, is_public=is_public)\n mock_service.set_volume_type_keys.assert_called_once_with(\n mock_service.create_volume_type.return_value,\n metadata=volume_type_key)\n\n def test_create_and_update_encryption_type(self):\n mock_service = self.mock_cinder.return_value\n context = self._get_context()\n context.update({\n \"volume_types\": [{\"id\": \"fake_id\",\n \"name\": \"fake_name\"}],\n \"iteration\": 1})\n scenario = volume_types.CreateAndUpdateEncryptionType(\n context)\n\n create_specs = {\n \"provider\": \"create_prov\",\n \"cipher\": \"create_cip\",\n \"key_size\": \"create_ks\",\n \"control_location\": \"create_cl\"\n }\n update_specs = {\n \"provider\": \"update_prov\",\n \"cipher\": \"update_cip\",\n \"key_size\": \"update_ks\",\n \"control_location\": \"update_cl\"\n }\n scenario.run(create_provider=\"create_prov\", create_cipher=\"create_cip\",\n create_key_size=\"create_ks\",\n create_control_location=\"create_cl\",\n update_provider=\"update_prov\", update_cipher=\"update_cip\",\n update_key_size=\"update_ks\",\n update_control_location=\"update_cl\")\n mock_service.create_encryption_type.assert_called_once_with(\n 
\"fake_id\", specs=create_specs)\n mock_service.update_encryption_type.assert_called_once_with(\n \"fake_id\", specs=update_specs)\n\n def test_create_volume_type_add_and_list_type_access(self):\n mock_service = self.mock_cinder.return_value\n scenario = volume_types.CreateVolumeTypeAddAndListTypeAccess(\n self._get_context())\n fake_type = mock.Mock()\n mock_service.create_volume_type.return_value = fake_type\n\n scenario.run(description=None, is_public=False)\n mock_service.create_volume_type.assert_called_once_with(\n description=None, is_public=False)\n mock_service.add_type_access.assert_called_once_with(\n fake_type, project=\"fake\")\n mock_service.list_type_access.assert_called_once_with(fake_type)\n" }, { "alpha_fraction": 0.5681905746459961, "alphanum_fraction": 0.5829811692237854, "avg_line_length": 37.56296157836914, "blob_id": "eec888ef8b7a71f2e583cb5903f06da59fbb4570", "content_id": "5af4744756de7f85fe875acfdd37d344dfca6855", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5206, "license_type": "permissive", "max_line_length": 78, "num_lines": 135, "path": "/tests/unit/task/scenarios/cinder/test_qos_specs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions as rally_exceptions\nfrom rally_openstack.task.scenarios.cinder import qos_specs\nfrom tests.unit import test\n\n\nclass CinderQosTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(CinderQosTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.storage.block.BlockStorage\")\n self.addCleanup(patch.stop)\n self.mock_cinder = patch.start()\n\n def _get_context(self):\n context = test.get_test_context()\n context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()},\n \"tenant\": {\"id\": \"fake\", \"name\": \"fake\"}})\n return context\n\n def test_create_and_list_qos(self):\n mock_service = self.mock_cinder.return_value\n qos = mock.MagicMock()\n list_qos = [mock.MagicMock(),\n mock.MagicMock(),\n qos]\n\n specs = {\"consumer\": \"both\",\n \"write_iops_sec\": \"10\",\n \"read_iops_sec\": \"1000\"}\n\n scenario = qos_specs.CreateAndListQos(self._get_context())\n mock_service.create_qos.return_value = qos\n mock_service.list_qos.return_value = list_qos\n\n scenario.run(\"both\", \"10\", \"1000\")\n mock_service.create_qos.assert_called_once_with(specs)\n mock_service.list_qos.assert_called_once_with()\n\n def test_create_and_list_qos_with_fails(self):\n mock_service = self.mock_cinder.return_value\n qos = mock.MagicMock()\n list_qos = [mock.MagicMock(),\n mock.MagicMock(),\n mock.MagicMock()]\n specs = {\"consumer\": \"both\",\n \"write_iops_sec\": \"10\",\n \"read_iops_sec\": \"1000\"}\n\n scenario = qos_specs.CreateAndListQos(self._get_context())\n mock_service.create_qos.return_value = qos\n mock_service.list_qos.return_value = list_qos\n\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run, \"both\", \"10\", \"1000\")\n 
mock_service.create_qos.assert_called_once_with(specs)\n mock_service.list_qos.assert_called_once_with()\n\n def test_create_and_get_qos(self):\n mock_service = self.mock_cinder.return_value\n qos = mock.MagicMock()\n specs = {\"consumer\": \"both\",\n \"write_iops_sec\": \"10\",\n \"read_iops_sec\": \"1000\"}\n\n scenario = qos_specs.CreateAndGetQos(self._get_context())\n mock_service.create_qos.return_value = qos\n\n scenario.run(\"both\", \"10\", \"1000\")\n mock_service.create_qos.assert_called_once_with(specs)\n mock_service.get_qos.assert_called_once_with(qos.id)\n\n def test_create_and_set_qos(self):\n mock_service = self.mock_cinder.return_value\n qos = mock.MagicMock()\n create_specs_args = {\"consumer\": \"back-end\",\n \"write_iops_sec\": \"10\",\n \"read_iops_sec\": \"1000\"}\n\n set_specs_args = {\"consumer\": \"both\",\n \"write_iops_sec\": \"11\",\n \"read_iops_sec\": \"1001\"}\n scenario = qos_specs.CreateAndSetQos(self._get_context())\n mock_service.create_qos.return_value = qos\n\n scenario.run(\"back-end\", \"10\", \"1000\",\n \"both\", \"11\", \"1001\")\n mock_service.create_qos.assert_called_once_with(create_specs_args)\n mock_service.set_qos.assert_called_once_with(\n qos=qos, set_specs_args=set_specs_args)\n\n def test_create_qos_associate_and_disassociate_type(self):\n mock_service = self.mock_cinder.return_value\n context = self._get_context()\n context.update({\n \"volume_types\": [{\"id\": \"fake_id\",\n \"name\": \"fake_name\"}],\n \"iteration\": 1})\n\n qos = mock.MagicMock()\n specs = {\"consumer\": \"both\",\n \"write_iops_sec\": \"10\",\n \"read_iops_sec\": \"1000\"}\n\n scenario = qos_specs.CreateQosAssociateAndDisassociateType(context)\n mock_service.create_qos.return_value = qos\n\n scenario.run(\"both\", \"10\", \"1000\")\n mock_service.create_qos.assert_called_once_with(specs)\n mock_service.qos_associate_type.assert_called_once_with(\n qos_specs=qos, volume_type=\"fake_id\")\n 
mock_service.qos_disassociate_type.assert_called_once_with(\n qos_specs=qos, volume_type=\"fake_id\")\n" }, { "alpha_fraction": 0.5992729663848877, "alphanum_fraction": 0.6023311018943787, "avg_line_length": 45.215999603271484, "blob_id": "6066ecba14717922f7bf9385ed7815691ef9af7c", "content_id": "7a6dcdf61caae603a0b6ec1ed2f75284ec171ac1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17331, "license_type": "permissive", "max_line_length": 79, "num_lines": 375, "path": "/tests/unit/task/scenarios/neutron/test_security_groups.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport copy\nimport ddt\n\nfrom rally import exceptions as rally_exceptions\nfrom rally_openstack.task.scenarios.neutron import security_groups\nfrom tests.unit import test\n\n\[email protected]\nclass NeutronSecurityGroup(test.TestCase):\n\n @ddt.data(\n {},\n {\"security_group_create_args\": {}},\n {\"security_group_create_args\": {\"description\": \"fake-description\"}},\n )\n @ddt.unpack\n def test_create_and_list_security_groups(\n self, security_group_create_args=None):\n scenario = security_groups.CreateAndListSecurityGroups()\n\n security_group_data = security_group_create_args or {}\n scenario._create_security_group = mock.Mock()\n scenario._list_security_groups = mock.Mock()\n scenario.run(security_group_create_args=security_group_create_args)\n scenario._create_security_group.assert_called_once_with(\n **security_group_data)\n scenario._list_security_groups.assert_called_once_with()\n\n @ddt.data(\n {},\n {\"security_group_create_args\": {}},\n {\"security_group_create_args\": {\"description\": \"fake-description\"}},\n )\n @ddt.unpack\n def test_create_and_show_security_group(\n self, security_group_create_args=None):\n scenario = security_groups.CreateAndShowSecurityGroup()\n security_group = mock.Mock()\n security_group_data = security_group_create_args or {}\n scenario._create_security_group = mock.Mock()\n scenario._show_security_group = mock.Mock()\n\n # Positive case\n scenario._create_security_group.return_value = security_group\n scenario.run(security_group_create_args=security_group_create_args)\n scenario._create_security_group.assert_called_once_with(\n **security_group_data)\n scenario._show_security_group.assert_called_once_with(\n scenario._create_security_group.return_value)\n\n @ddt.data(\n {},\n {\"security_group_create_args\": {}},\n {\"security_group_create_args\": {\"description\": 
\"fake-description\"}},\n )\n @ddt.unpack\n def test_create_and_show_security_group_with_none_group(\n self, security_group_create_args=None):\n scenario = security_groups.CreateAndShowSecurityGroup()\n security_group_data = security_group_create_args or {}\n scenario._create_security_group = mock.Mock()\n scenario._show_security_group = mock.Mock()\n\n # Negative case: security_group isn't created\n scenario._create_security_group.return_value = None\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run, security_group_create_args)\n scenario._create_security_group.assert_called_with(\n **security_group_data)\n\n @ddt.data(\n {},\n {\"security_group_create_args\": {}},\n {\"security_group_create_args\": {\"description\": \"fake-description\"}},\n )\n @ddt.unpack\n def test_create_and_delete_security_groups(\n self, security_group_create_args=None):\n scenario = security_groups.CreateAndDeleteSecurityGroups()\n security_group_data = security_group_create_args or {}\n scenario._create_security_group = mock.Mock()\n scenario._delete_security_group = mock.Mock()\n scenario.run(security_group_create_args=security_group_create_args)\n scenario._create_security_group.assert_called_once_with(\n **security_group_data)\n scenario._delete_security_group.assert_called_once_with(\n scenario._create_security_group.return_value)\n\n @ddt.data(\n {},\n {\"security_group_create_args\": {}},\n {\"security_group_create_args\": {\"description\": \"fake-description\"}},\n {\"security_group_update_args\": {}},\n {\"security_group_update_args\": {\"description\": \"fake-updated-descr\"}},\n )\n @ddt.unpack\n def test_create_and_update_security_groups(\n self, security_group_create_args=None,\n security_group_update_args=None):\n scenario = security_groups.CreateAndUpdateSecurityGroups()\n security_group_data = security_group_create_args or {}\n security_group_update_data = security_group_update_args or {}\n scenario._create_security_group = mock.Mock()\n 
scenario._update_security_group = mock.Mock()\n scenario.run(security_group_create_args=security_group_create_args,\n security_group_update_args=security_group_update_args)\n scenario._create_security_group.assert_called_once_with(\n **security_group_data)\n scenario._update_security_group.assert_called_once_with(\n scenario._create_security_group.return_value,\n **security_group_update_data)\n\n @ddt.data(\n {},\n {\"security_group_args\": {}},\n {\"security_group_args\": {\"description\": \"fake-description\"}},\n {\"security_group_rule_args\": {}},\n {\"security_group_rule_args\": {\"description\": \"fake-rule-descr\"}},\n {\"security_group_rules_count\": 2},\n )\n @ddt.unpack\n def test_create_and_list_security_group_rules(\n self, security_group_rules_count=1,\n security_group_args=None,\n security_group_rule_args=None):\n scenario = security_groups.CreateAndListSecurityGroupRules()\n security_group_data = security_group_args or {}\n security_group_rule_data = security_group_rule_args or {}\n\n security_group = mock.MagicMock()\n scenario._create_security_group = mock.MagicMock()\n scenario._create_security_group_rule = mock.MagicMock()\n scenario._list_security_group_rules = mock.MagicMock()\n\n # Positive case\n scenario._create_security_group.return_value = security_group\n scenario._create_security_group_rule.side_effect = [\n {\"security_group_rule\": {\"id\": 1, \"name\": \"f1\",\n \"port_range_min\": 1,\n \"port_range_max\": 1}},\n {\"security_group_rule\": {\"id\": 2, \"name\": \"f2\",\n \"port_range_min\": 2,\n \"port_range_max\": 2}}]\n scenario._list_security_group_rules.return_value = {\n \"security_group_rules\": [{\"id\": 1, \"name\": \"f1\",\n \"port_range_min\": 1,\n \"port_range_max\": 1},\n {\"id\": 2, \"name\": \"f2\",\n \"port_range_min\": 2,\n \"port_range_max\": 2},\n {\"id\": 3, \"name\": \"f3\",\n \"port_range_min\": 3,\n \"port_range_max\": 3}]}\n scenario.run(security_group_rules_count,\n security_group_args=security_group_data,\n 
security_group_rule_args=security_group_rule_data)\n scenario._create_security_group.assert_called_once_with(\n **security_group_data)\n calls = []\n for i in range(security_group_rules_count):\n security_group_rule_data[\"port_range_min\"] = i + 1\n security_group_rule_data[\"port_range_max\"] = i + 1\n calls.append(mock.call(security_group[\"security_group\"][\"id\"],\n **security_group_rule_data))\n scenario._create_security_group_rule.assert_has_calls(calls)\n scenario._list_security_group_rules.assert_called_once_with()\n\n @ddt.data(\n {},\n {\"security_group_args\": {}},\n {\"security_group_args\": {\"description\": \"fake-description\"}},\n {\"security_group_rule_args\": {}},\n {\"security_group_rule_args\": {\"description\": \"fake-rule-descr\"}},\n {\"security_group_rules_count\": 2},\n )\n @ddt.unpack\n def test_create_and_list_security_group_rules_with_fails(\n self, security_group_rules_count=1,\n security_group_args=None, security_group_rule_args=None):\n scenario = security_groups.CreateAndListSecurityGroupRules()\n security_group_data = security_group_args or {}\n security_group_rule_data = security_group_rule_args or {}\n rule_expected = copy.deepcopy(security_group_rule_data)\n\n security_group = mock.MagicMock()\n security_group_rule = {\"security_group_rule\": {\"id\": 1, \"name\": \"f1\"}}\n scenario._create_security_group = mock.MagicMock()\n scenario._create_security_group_rule = mock.MagicMock()\n\n scenario._list_security_group_rules = mock.MagicMock()\n scenario._create_security_group_rule.return_value = security_group_rule\n scenario._list_security_group_rules.return_value = {\n \"security_group_rules\": [{\"id\": 1, \"name\": \"f1\"},\n {\"id\": 2, \"name\": \"f2\"},\n {\"id\": 3, \"name\": \"f3\"}]}\n\n # Negative case1: security_group isn't created\n scenario._create_security_group.return_value = None\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n security_group_rules_count,\n security_group_data,\n 
security_group_rule_data)\n scenario._create_security_group.assert_called_with(\n **security_group_data)\n\n # Negative case2: security_group_rule isn't created\n scenario._create_security_group.return_value = security_group\n scenario._create_security_group_rule.return_value = None\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n security_group_rules_count,\n security_group_data,\n security_group_rule_data)\n scenario._create_security_group.assert_called_with(\n **security_group_data)\n rule_expected[\"port_range_min\"] = 1\n rule_expected[\"port_range_max\"] = 1\n scenario._create_security_group_rule.assert_called_with(\n security_group[\"security_group\"][\"id\"],\n **rule_expected)\n\n # Negative case3: security_group_rule isn't listed\n scenario._create_security_group.return_value = security_group\n scenario._create_security_group_rule.reset_mock()\n scenario._create_security_group_rule.return_value = mock.MagicMock()\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n security_group_rules_count,\n security_group_data,\n security_group_rule_data)\n scenario._create_security_group.assert_called_with(\n **security_group_data)\n calls = []\n for i in range(security_group_rules_count):\n rule_expected[\"port_range_min\"] = i + 1\n rule_expected[\"port_range_max\"] = i + 1\n calls.append(mock.call(security_group[\"security_group\"][\"id\"],\n **rule_expected))\n scenario._create_security_group_rule.assert_has_calls(calls,\n any_order=True)\n scenario._list_security_group_rules.assert_called_with()\n\n @ddt.data(\n {},\n {\"security_group_args\": {}},\n {\"security_group_args\": {\"description\": \"fake-description\"}},\n {\"security_group_rule_args\": {}},\n {\"security_group_rule_args\": {\"description\": \"fake-rule-descr\"}}\n )\n @ddt.unpack\n def test_create_and_show_security_group_rule(\n self, security_group_args=None,\n security_group_rule_args=None):\n scenario = 
security_groups.CreateAndShowSecurityGroupRule()\n\n security_group_data = security_group_args or {}\n security_group_rule_data = security_group_rule_args or {}\n security_group = mock.MagicMock()\n security_group_rule = {\"security_group_rule\": {\"id\": 1, \"name\": \"f1\"}}\n scenario._create_security_group = mock.MagicMock()\n scenario._create_security_group_rule = mock.MagicMock()\n scenario._show_security_group_rule = mock.MagicMock()\n\n # Positive case\n scenario._create_security_group.return_value = security_group\n scenario._create_security_group_rule.return_value = security_group_rule\n scenario.run(security_group_args=security_group_data,\n security_group_rule_args=security_group_rule_data)\n\n scenario._create_security_group.assert_called_once_with(\n **security_group_data)\n scenario._create_security_group_rule.assert_called_once_with(\n security_group[\"security_group\"][\"id\"],\n **security_group_rule_data)\n scenario._show_security_group_rule.assert_called_once_with(\n security_group_rule[\"security_group_rule\"][\"id\"])\n\n @ddt.data(\n {},\n {\"security_group_args\": {}},\n {\"security_group_args\": {\"description\": \"fake-description\"}},\n {\"security_group_rule_args\": {}},\n {\"security_group_rule_args\": {\"description\": \"fake-rule-descr\"}}\n )\n @ddt.unpack\n def test_create_and_delete_security_group_rule(\n self, security_group_args=None,\n security_group_rule_args=None):\n scenario = security_groups.CreateAndDeleteSecurityGroupRule()\n\n security_group_data = security_group_args or {}\n security_group_rule_data = security_group_rule_args or {}\n security_group = mock.MagicMock()\n security_group_rule = {\"security_group_rule\": {\"id\": 1, \"name\": \"f1\"}}\n scenario._create_security_group = mock.MagicMock()\n scenario._create_security_group_rule = mock.MagicMock()\n scenario._delete_security_group_rule = mock.MagicMock()\n scenario._delete_security_group = mock.MagicMock()\n\n # Positive case\n 
scenario._create_security_group.return_value = security_group\n scenario._create_security_group_rule.return_value = security_group_rule\n scenario.run(security_group_args=security_group_data,\n security_group_rule_args=security_group_rule_data)\n\n scenario._create_security_group.assert_called_once_with(\n **security_group_data)\n scenario._create_security_group_rule.assert_called_once_with(\n security_group[\"security_group\"][\"id\"],\n **security_group_rule_data)\n scenario._delete_security_group_rule.assert_called_once_with(\n security_group_rule[\"security_group_rule\"][\"id\"])\n scenario._delete_security_group.assert_called_once_with(\n security_group)\n\n @ddt.data(\n {},\n {\"security_group_args\": {}},\n {\"security_group_args\": {\"description\": \"fake-description\"}},\n {\"security_group_rule_args\": {}},\n {\"security_group_rule_args\": {\"description\": \"fake-rule-descr\"}},\n )\n @ddt.unpack\n def test_create_and_show_security_group_rule_with_fails(\n self, security_group_args=None,\n security_group_rule_args=None):\n scenario = security_groups.CreateAndShowSecurityGroupRule()\n\n security_group_data = security_group_args or {}\n security_group_rule_data = security_group_rule_args or {}\n\n security_group = mock.MagicMock()\n security_group_rule = {\"security_group_rule\": {\"id\": 1, \"name\": \"f1\"}}\n scenario._create_security_group = mock.MagicMock()\n scenario._create_security_group_rule = mock.MagicMock()\n scenario._show_security_group_rule = mock.MagicMock()\n scenario._create_security_group_rule.return_value = security_group_rule\n\n # Negative case1: security_group isn't created\n scenario._create_security_group.return_value = None\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n security_group_data,\n security_group_rule_data)\n scenario._create_security_group.assert_called_with(\n **security_group_data)\n\n # Negative case2: security_group_rule isn't created\n scenario._create_security_group.return_value = 
security_group\n scenario._create_security_group_rule.return_value = None\n self.assertRaises(rally_exceptions.RallyAssertionError,\n scenario.run,\n security_group_data,\n security_group_rule_data)\n scenario._create_security_group.assert_called_with(\n **security_group_data)\n scenario._create_security_group_rule.assert_called_with(\n security_group[\"security_group\"][\"id\"],\n **security_group_rule_data)\n" }, { "alpha_fraction": 0.49270960688591003, "alphanum_fraction": 0.4969623386859894, "avg_line_length": 31.594058990478516, "blob_id": "a260545d5fa5153ced83ac3ad7b65a1e0c8c86e5", "content_id": "9025ba785d3a3b2a31b334d188cef04bedc048cf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3292, "license_type": "permissive", "max_line_length": 77, "num_lines": 101, "path": "/rally_openstack/task/contexts/monasca/metrics.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import utils as rutils\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.monasca import utils as monasca_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"monasca_metrics\", platform=\"openstack\", order=510)\nclass MonascaMetricGenerator(context.OpenStackContext):\n \"\"\"Creates Monasca Metrics.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"dimensions\": {\n \"type\": \"object\",\n \"properties\": {\n \"region\": {\n \"type\": \"string\"\n },\n \"service\": {\n \"type\": \"string\"\n },\n \"hostname\": {\n \"type\": \"string\"\n },\n \"url\": {\n \"type\": \"string\"\n }\n },\n \"additionalProperties\": False\n },\n \"metrics_per_tenant\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"value_meta\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value_meta_key\": {\n \"type\": \"string\"\n },\n \"value_meta_value\": {\n \"type\": \"string\"\n }\n },\n \"additionalProperties\": False\n }\n }\n },\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\n \"metrics_per_tenant\": 2\n }\n\n def setup(self):\n new_metric = {}\n\n if \"dimensions\" in self.config:\n new_metric = {\n \"dimensions\": self.config[\"dimensions\"]\n }\n\n for user, tenant_id in self._iterate_per_tenants():\n scenario = monasca_utils.MonascaScenario(\n context={\"user\": user, \"task\": self.context[\"task\"]}\n )\n for i in range(self.config[\"metrics_per_tenant\"]):\n scenario._create_metrics(**new_metric)\n rutils.interruptable_sleep(0.001)\n rutils.interruptable_sleep(\n monasca_utils.CONF.openstack.monasca_metric_create_prepoll_delay,\n 
atomic_delay=1)\n\n def cleanup(self):\n # We don't have API for removal of metrics\n pass\n" }, { "alpha_fraction": 0.5461409687995911, "alphanum_fraction": 0.548378050327301, "avg_line_length": 37.869564056396484, "blob_id": "15f4ada4a0c688817b74e0d2dd5ce45bf7becad3", "content_id": "4e52571ce0fa323c868cd5bd2547a45e7b2d6b92", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3576, "license_type": "permissive", "max_line_length": 77, "num_lines": 92, "path": "/tests/unit/task/contexts/watcher/test_audit_templates.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.watcher import audit_templates\nfrom rally_openstack.task.scenarios.watcher import utils as watcher_utils\nfrom tests.unit import test\n\n\nCTX = \"rally_openstack.task.contexts.watcher\"\nSCN = \"rally_openstack.task.scenarios.watcher\"\nTYP = \"rally_openstack.task.types\"\n\n\nclass AuditTemplateTestCase(test.ScenarioTestCase):\n\n @mock.patch(\"%s.utils.WatcherScenario._create_audit_template\" % SCN,\n return_value=mock.MagicMock())\n @mock.patch(\"%s.WatcherStrategy\" % TYP,)\n @mock.patch(\"%s.WatcherGoal\" % TYP)\n def test_setup(self, mock_watcher_goal, mock_watcher_strategy,\n mock_watcher_scenario__create_audit_template):\n\n users = [{\"id\": 1, \"tenant_id\": 1, \"credential\": mock.MagicMock()}]\n self.context.update({\n \"config\": {\n \"audit_templates\": {\n \"audit_templates_per_admin\": 1,\n \"fill_strategy\": \"random\",\n \"params\": [\n {\n \"goal\": {\n \"name\": \"workload_balancing\"\n },\n \"strategy\": {\n \"name\": \"workload_stabilization\"\n }\n },\n {\n \"goal\": {\n \"name\": \"workload_balancing\"\n },\n \"strategy\": {\n \"name\": \"workload_stabilization\"\n }\n }\n ]\n },\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"users\": users\n })\n audit_template = audit_templates.AuditTemplateGenerator(self.context)\n audit_template.setup()\n goal_id = mock_watcher_goal.return_value.pre_process.return_value\n strategy_id = (\n mock_watcher_strategy.return_value.pre_process.return_value)\n mock_calls = [mock.call(goal_id, strategy_id)]\n mock_watcher_scenario__create_audit_template.assert_has_calls(\n mock_calls)\n\n @mock.patch(\"%s.audit_templates.resource_manager.cleanup\" % CTX)\n def test_cleanup(self, mock_cleanup):\n audit_templates_mocks = [mock.Mock() for i in range(2)]\n self.context.update({\n \"admin\": {\n \"credential\": mock.MagicMock()\n 
},\n \"audit_templates\": audit_templates_mocks\n })\n audit_templates_ctx = audit_templates.AuditTemplateGenerator(\n self.context)\n audit_templates_ctx.cleanup()\n mock_cleanup.assert_called_once_with(\n names=[\"watcher.action_plan\", \"watcher.audit_template\"],\n admin=self.context[\"admin\"],\n superclass=watcher_utils.WatcherScenario,\n task_id=self.context[\"owner_id\"])\n" }, { "alpha_fraction": 0.594601571559906, "alphanum_fraction": 0.5959725975990295, "avg_line_length": 49.085838317871094, "blob_id": "59748ccb181726c581f2dcdeb8cbd6d4f49d42b5", "content_id": "5f5f8365f6f73dca223a8823985c1c371663656a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11670, "license_type": "permissive", "max_line_length": 79, "num_lines": 233, "path": "/rally_openstack/task/scenarios/sahara/clusters.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.sahara import utils\n\n\nLOG = logging.getLogger(__name__)\n\n\"\"\"Scenarios for Sahara clusters.\"\"\"\n\n\[email protected](flavor={\"type\": \"nova_flavor\"},\n master_flavor={\"type\": \"nova_flavor\"},\n worker_flavor={\"type\": \"nova_flavor\"},\n neutron_net={\"type\": \"neutron_network\"},\n floating_ip_pool={\"type\": \"neutron_network\"})\[email protected](\"flavor_exists\", param_name=\"master_flavor\")\[email protected](\"flavor_exists\", param_name=\"worker_flavor\")\[email protected](\"required_contexts\", contexts=[\"users\", \"sahara_image\"])\[email protected](\"number\", param_name=\"workers_count\", minval=1,\n integer_only=True)\[email protected](\"required_services\", services=[consts.Service.SAHARA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"sahara\"]},\n name=\"SaharaClusters.create_and_delete_cluster\",\n platform=\"openstack\")\nclass CreateAndDeleteCluster(utils.SaharaScenario):\n\n def run(self, workers_count, plugin_name, hadoop_version,\n master_flavor=None, worker_flavor=None, flavor=None,\n floating_ip_pool=None, volumes_per_node=None,\n volumes_size=None, auto_security_group=None,\n security_groups=None, node_configs=None,\n cluster_configs=None, enable_anti_affinity=False,\n enable_proxy=False, use_autoconfig=True):\n \"\"\"Launch and delete a Sahara Cluster.\n\n This scenario launches a Hadoop cluster, waits until it becomes\n 'Active' and deletes it.\n\n :param flavor: Nova flavor that will be for nodes in the\n created node groups. 
Deprecated.\n :param master_flavor: Nova flavor that will be used for the master\n instance of the cluster\n :param worker_flavor: Nova flavor that will be used for the workers of\n the cluster\n :param workers_count: number of worker instances in a cluster\n :param plugin_name: name of a provisioning plugin\n :param hadoop_version: version of Hadoop distribution supported by\n the specified plugin.\n :param floating_ip_pool: floating ip pool name from which Floating\n IPs will be allocated. Sahara will determine\n automatically how to treat this depending on\n its own configurations. Defaults to None\n because in some cases Sahara may work w/o\n Floating IPs.\n :param volumes_per_node: number of Cinder volumes that will be\n attached to every cluster node\n :param volumes_size: size of each Cinder volume in GB\n :param auto_security_group: boolean value. If set to True Sahara will\n create a Security Group for each Node Group\n in the Cluster automatically.\n :param security_groups: list of security groups that will be used\n while creating VMs. If auto_security_group\n is set to True, this list can be left empty.\n :param node_configs: config dict that will be passed to each Node\n Group\n :param cluster_configs: config dict that will be passed to the\n Cluster\n :param enable_anti_affinity: If set to true the vms will be scheduled\n one per compute node.\n :param enable_proxy: Use Master Node of a Cluster as a Proxy node and\n do not assign floating ips to workers.\n :param use_autoconfig: If True, instances of the node group will be\n automatically configured during cluster\n creation. 
If False, the configuration values\n should be specify manually\n \"\"\"\n\n image_id = self.context[\"tenant\"][\"sahara\"][\"image\"]\n\n LOG.debug(\"Using Image: %s\" % image_id)\n\n cluster = self._launch_cluster(\n flavor_id=flavor,\n master_flavor_id=master_flavor,\n worker_flavor_id=worker_flavor,\n image_id=image_id,\n workers_count=workers_count,\n plugin_name=plugin_name,\n hadoop_version=hadoop_version,\n floating_ip_pool=floating_ip_pool,\n volumes_per_node=volumes_per_node,\n volumes_size=volumes_size,\n auto_security_group=auto_security_group,\n security_groups=security_groups,\n node_configs=node_configs,\n cluster_configs=cluster_configs,\n enable_anti_affinity=enable_anti_affinity,\n enable_proxy=enable_proxy,\n use_autoconfig=use_autoconfig)\n\n self._delete_cluster(cluster)\n\n\[email protected](flavor={\"type\": \"nova_flavor\"},\n master_flavor={\"type\": \"nova_flavor\"},\n worker_flavor={\"type\": \"nova_flavor\"})\[email protected](\"flavor_exists\", param_name=\"master_flavor\")\[email protected](\"flavor_exists\", param_name=\"worker_flavor\")\[email protected](\"required_services\", services=[consts.Service.SAHARA])\[email protected](\"required_contexts\", contexts=[\"users\", \"sahara_image\"])\[email protected](\"number\", param_name=\"workers_count\", minval=1,\n integer_only=True)\[email protected](context={\"cleanup@openstack\": [\"sahara\"]},\n name=\"SaharaClusters.create_scale_delete_cluster\",\n platform=\"openstack\")\nclass CreateScaleDeleteCluster(utils.SaharaScenario):\n\n def run(self, master_flavor, worker_flavor, workers_count,\n plugin_name, hadoop_version, deltas, flavor=None,\n floating_ip_pool=None, volumes_per_node=None,\n volumes_size=None, auto_security_group=None,\n security_groups=None, node_configs=None,\n cluster_configs=None, enable_anti_affinity=False,\n enable_proxy=False, use_autoconfig=True):\n \"\"\"Launch, scale and delete a Sahara Cluster.\n\n This scenario launches a Hadoop cluster, waits until it 
becomes\n 'Active'. Then a series of scale operations is applied. The scaling\n happens according to numbers listed in :param deltas. Ex. if\n deltas is set to [2, -2] it means that the first scaling operation will\n add 2 worker nodes to the cluster and the second will remove two.\n\n :param flavor: Nova flavor that will be for nodes in the\n created node groups. Deprecated.\n :param master_flavor: Nova flavor that will be used for the master\n instance of the cluster\n :param worker_flavor: Nova flavor that will be used for the workers of\n the cluster\n :param workers_count: number of worker instances in a cluster\n :param plugin_name: name of a provisioning plugin\n :param hadoop_version: version of Hadoop distribution supported by\n the specified plugin.\n :param deltas: list of integers which will be used to add or\n remove worker nodes from the cluster\n :param floating_ip_pool: floating ip pool name from which Floating\n IPs will be allocated. Sahara will determine\n automatically how to treat this depending on\n its own configurations. Defaults to None\n because in some cases Sahara may work w/o\n Floating IPs.\n :param neutron_net_id: id of a Neutron network that will be used\n for fixed IPs. This parameter is ignored when\n Nova Network is set up.\n :param volumes_per_node: number of Cinder volumes that will be\n attached to every cluster node\n :param volumes_size: size of each Cinder volume in GB\n :param auto_security_group: boolean value. If set to True Sahara will\n create a Security Group for each Node Group\n in the Cluster automatically.\n :param security_groups: list of security groups that will be used\n while creating VMs. 
If auto_security_group\n is set to True this list can be left empty.\n :param node_configs: configs dict that will be passed to each Node\n Group\n :param cluster_configs: configs dict that will be passed to the\n Cluster\n :param enable_anti_affinity: If set to true the vms will be scheduled\n one per compute node.\n :param enable_proxy: Use Master Node of a Cluster as a Proxy node and\n do not assign floating ips to workers.\n :param use_autoconfig: If True, instances of the node group will be\n automatically configured during cluster\n creation. If False, the configuration values\n should be specify manually\n \"\"\"\n\n image_id = self.context[\"tenant\"][\"sahara\"][\"image\"]\n\n LOG.debug(\"Using Image: %s\" % image_id)\n\n cluster = self._launch_cluster(\n flavor_id=flavor,\n master_flavor_id=master_flavor,\n worker_flavor_id=worker_flavor,\n image_id=image_id,\n workers_count=workers_count,\n plugin_name=plugin_name,\n hadoop_version=hadoop_version,\n floating_ip_pool=floating_ip_pool,\n volumes_per_node=volumes_per_node,\n volumes_size=volumes_size,\n auto_security_group=auto_security_group,\n security_groups=security_groups,\n node_configs=node_configs,\n cluster_configs=cluster_configs,\n enable_anti_affinity=enable_anti_affinity,\n enable_proxy=enable_proxy,\n use_autoconfig=use_autoconfig)\n\n for delta in deltas:\n # The Cluster is fetched every time so that its node groups have\n # correct 'count' values.\n cluster = self.clients(\"sahara\").clusters.get(cluster.id)\n\n if delta == 0:\n # Zero scaling makes no sense.\n continue\n elif delta > 0:\n self._scale_cluster_up(cluster, delta)\n elif delta < 0:\n self._scale_cluster_down(cluster, delta)\n\n self._delete_cluster(cluster)\n" }, { "alpha_fraction": 0.6128131151199341, "alphanum_fraction": 0.6160290837287903, "avg_line_length": 48.233333587646484, "blob_id": "981d216d6e309271835e82dd15a81d93c57de28e", "content_id": "51b0b064f2dca5003e197c0021d4caea0d07a7e7", "detected_licenses": [ "Apache-2.0" 
], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11816, "license_type": "permissive", "max_line_length": 79, "num_lines": 240, "path": "/rally_openstack/task/scenarios/nova/flavors.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.nova import utils\n\n\n\"\"\"Scenarios for Nova flavors.\"\"\"\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"NovaFlavors.list_flavors\", platform=\"openstack\")\nclass ListFlavors(utils.NovaScenario):\n\n def run(self, detailed=True, is_public=True, marker=None, min_disk=None,\n min_ram=None, limit=None, sort_key=None, sort_dir=None):\n \"\"\"List all flavors.\n\n Measure the \"nova flavor-list\" command performance.\n\n :param detailed: Whether flavor needs to be return with details\n (optional).\n :param is_public: Filter flavors with provided access type (optional).\n None means give all flavors and only admin has query\n access to all flavor types.\n :param marker: Begin returning flavors that appear later in the 
flavor\n list than that represented by this flavor id (optional).\n :param min_disk: Filters the flavors by a minimum disk space, in GiB.\n :param min_ram: Filters the flavors by a minimum RAM, in MB.\n :param limit: maximum number of flavors to return (optional).\n :param sort_key: Flavors list sort key (optional).\n :param sort_dir: Flavors list sort direction (optional).\n \"\"\"\n self._list_flavors(detailed=detailed, is_public=is_public,\n marker=marker, min_disk=min_disk, min_ram=min_ram,\n limit=limit, sort_key=sort_key, sort_dir=sort_dir)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaFlavors.create_and_list_flavor_access\",\n platform=\"openstack\")\nclass CreateAndListFlavorAccess(utils.NovaScenario):\n\n def run(self, ram, vcpus, disk, flavorid=\"auto\",\n ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):\n \"\"\"Create a non-public flavor and list its access rules\n\n :param ram: Memory in MB for the flavor\n :param vcpus: Number of VCPUs for the flavor\n :param disk: Size of local disk in GB\n :param flavorid: ID for the flavor (optional). You can use the reserved\n value ``\"auto\"`` to have Nova generate a UUID for the\n flavor in cases where you cannot simply pass ``None``.\n :param ephemeral: Ephemeral space size in GB (default 0).\n :param swap: Swap space in MB\n :param rxtx_factor: RX/TX factor\n :param is_public: Make flavor accessible to the public (default true).\n \"\"\"\n # NOTE(pirsriva): access rules can be listed\n # only for non-public flavors\n if is_public:\n LOG.warning(\"is_public cannot be set to True for listing \"\n \"flavor access rules. 
Setting is_public to False\")\n is_public = False\n flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,\n ephemeral=ephemeral, swap=swap,\n rxtx_factor=rxtx_factor,\n is_public=is_public)\n self.assertTrue(flavor)\n\n self._list_flavor_access(flavor.id)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaFlavors.create_flavor_and_add_tenant_access\",\n platform=\"openstack\")\nclass CreateFlavorAndAddTenantAccess(utils.NovaScenario):\n\n def run(self, ram, vcpus, disk, flavorid=\"auto\",\n ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):\n \"\"\"Create a flavor and Add flavor access for the given tenant.\n\n :param ram: Memory in MB for the flavor\n :param vcpus: Number of VCPUs for the flavor\n :param disk: Size of local disk in GB\n :param flavorid: ID for the flavor (optional). You can use the reserved\n value ``\"auto\"`` to have Nova generate a UUID for the\n flavor in cases where you cannot simply pass ``None``.\n :param ephemeral: Ephemeral space size in GB (default 0).\n :param swap: Swap space in MB\n :param rxtx_factor: RX/TX factor\n :param is_public: Make flavor accessible to the public (default true).\n \"\"\"\n flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,\n ephemeral=ephemeral, swap=swap,\n rxtx_factor=rxtx_factor,\n is_public=is_public)\n self.assertTrue(flavor)\n self._add_tenant_access(flavor.id, self.context[\"tenant\"][\"id\"])\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaFlavors.create_flavor\", platform=\"openstack\")\nclass CreateFlavor(utils.NovaScenario):\n\n def run(self, ram, vcpus, disk, flavorid=\"auto\",\n ephemeral=0, swap=0, 
rxtx_factor=1.0, is_public=True):\n \"\"\"Create a flavor.\n\n :param ram: Memory in MB for the flavor\n :param vcpus: Number of VCPUs for the flavor\n :param disk: Size of local disk in GB\n :param flavorid: ID for the flavor (optional). You can use the reserved\n value ``\"auto\"`` to have Nova generate a UUID for the\n flavor in cases where you cannot simply pass ``None``.\n :param ephemeral: Ephemeral space size in GB (default 0).\n :param swap: Swap space in MB\n :param rxtx_factor: RX/TX factor\n :param is_public: Make flavor accessible to the public (default true).\n \"\"\"\n self._create_flavor(ram, vcpus, disk, flavorid=flavorid,\n ephemeral=ephemeral, swap=swap,\n rxtx_factor=rxtx_factor,\n is_public=is_public)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaFlavors.create_and_get_flavor\",\n platform=\"openstack\")\nclass CreateAndGetFlavor(utils.NovaScenario):\n \"\"\"Scenario for create and get flavor.\"\"\"\n\n def run(self, ram, vcpus, disk, flavorid=\"auto\",\n ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):\n \"\"\"Create flavor and get detailed information of the flavor.\n\n :param ram: Memory in MB for the flavor\n :param vcpus: Number of VCPUs for the flavor\n :param disk: Size of local disk in GB\n :param flavorid: ID for the flavor (optional). 
You can use the reserved\n value ``\"auto\"`` to have Nova generate a UUID for the\n flavor in cases where you cannot simply pass ``None``.\n :param ephemeral: Ephemeral space size in GB (default 0).\n :param swap: Swap space in MB\n :param rxtx_factor: RX/TX factor\n :param is_public: Make flavor accessible to the public (default true).\n \"\"\"\n flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,\n ephemeral=ephemeral, swap=swap,\n rxtx_factor=rxtx_factor,\n is_public=is_public)\n self._get_flavor(flavor.id)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaFlavors.create_and_delete_flavor\",\n platform=\"openstack\")\nclass CreateAndDeleteFlavor(utils.NovaScenario):\n def run(self, ram, vcpus, disk, flavorid=\"auto\",\n ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):\n \"\"\"Create flavor and delete the flavor.\n\n :param ram: Memory in MB for the flavor\n :param vcpus: Number of VCPUs for the flavor\n :param disk: Size of local disk in GB\n :param flavorid: ID for the flavor (optional). 
You can use the reserved\n value ``\"auto\"`` to have Nova generate a UUID for the\n flavor in cases where you cannot simply pass ``None``.\n :param ephemeral: Ephemeral space size in GB (default 0).\n :param swap: Swap space in MB\n :param rxtx_factor: RX/TX factor\n :param is_public: Make flavor accessible to the public (default true).\n \"\"\"\n flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,\n ephemeral=ephemeral, swap=swap,\n rxtx_factor=rxtx_factor,\n is_public=is_public)\n self._delete_flavor(flavor.id)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaFlavors.create_flavor_and_set_keys\",\n platform=\"openstack\")\nclass CreateFlavorAndSetKeys(utils.NovaScenario):\n def run(self, ram, vcpus, disk, extra_specs, flavorid=\"auto\",\n ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):\n \"\"\"Create flavor and set keys to the flavor.\n\n Measure the \"nova flavor-key\" command performance.\n the scenario first create a flavor,then add the extra specs to it.\n\n :param ram: Memory in MB for the flavor\n :param vcpus: Number of VCPUs for the flavor\n :param disk: Size of local disk in GB\n :param extra_specs: additional arguments for flavor set keys\n :param flavorid: ID for the flavor (optional). 
You can use the reserved\n value ``\"auto\"`` to have Nova generate a UUID for the\n flavor in cases where you cannot simply pass ``None``.\n :param ephemeral: Ephemeral space size in GB (default 0).\n :param swap: Swap space in MB\n :param rxtx_factor: RX/TX factor\n :param is_public: Make flavor accessible to the public (default true).\n \"\"\"\n flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,\n ephemeral=ephemeral, swap=swap,\n rxtx_factor=rxtx_factor,\n is_public=is_public)\n self._set_flavor_keys(flavor, extra_specs)\n" }, { "alpha_fraction": 0.5314533710479736, "alphanum_fraction": 0.5683296918869019, "avg_line_length": 18.20833396911621, "blob_id": "7bd670b3c2c789deb90529eb33ae5ca0b1e53b97", "content_id": "51c998bea6390e3d271c623464f21720fa0b72ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 461, "license_type": "permissive", "max_line_length": 71, "num_lines": 24, "path": "/rally-jobs/extra/install_benchmark.sh", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -e\n\nmain() {\n cat > ~/dd_test.sh <<'EOF'\n#!/bin/sh\ntime_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; }\nfile=/tmp/test.img\nc=1000 #1GB\nwrite_seq_1gb=$(time_seconds \"dd if=/dev/zero of=$file bs=1M count=$c\")\nread_seq_1gb=$(time_seconds \"dd if=$file of=/dev/null bs=1M count=$c\")\n[ -f $file ] && rm $file\n\necho \"{\n \\\"write_seq_1gb\\\": $write_seq_1gb,\n \\\"read_seq_1gb\\\": $read_seq_1gb\n }\"\nEOF\n\n chmod a+x ~/dd_test.sh\n}\n\nmain\n" }, { "alpha_fraction": 0.6679959893226624, "alphanum_fraction": 0.6739780902862549, "avg_line_length": 45.11494064331055, "blob_id": "124c5f3655d63072024a4227641a96fa3c16cff4", "content_id": "0d46ece04011371d37dedf269090f23e3725544e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4012, "license_type": "permissive", "max_line_length": 
79, "num_lines": 87, "path": "/rally_openstack/task/scenarios/ironic/nodes.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.ironic import utils\n\n\n\"\"\"Scenarios for ironic nodes.\"\"\"\n\n\[email protected]_deprecated_args(\"Useless arguments detected\", \"0.10.0\",\n (\"marker\", \"limit\", \"sort_key\"), once=True)\[email protected](\"required_services\", services=[consts.Service.IRONIC])\[email protected](\"restricted_parameters\", param_names=\"name\")\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"ironic\"]},\n name=\"IronicNodes.create_and_list_node\",\n platform=\"openstack\")\nclass CreateAndListNode(utils.IronicScenario):\n\n def run(self, driver, properties=None, associated=None, maintenance=None,\n detail=False, sort_dir=None, marker=None, limit=None,\n sort_key=None, **kwargs):\n \"\"\"Create and list nodes.\n\n :param driver: The name of the driver used to manage this Node.\n :param properties: Key/value pair describing the physical\n characteristics of the node.\n :param associated: Optional argument of list request. 
Either a Boolean\n or a string representation of a Boolean that indicates whether to\n return a list of associated (True or \"True\") or unassociated\n (False or \"False\") nodes.\n :param maintenance: Optional argument of list request. Either a Boolean\n or a string representation of a Boolean that indicates whether\n to return nodes in maintenance mode (True or \"True\"), or not in\n maintenance mode (False or \"False\").\n :param detail: Optional, boolean whether to return detailed\n information about nodes.\n :param sort_dir: Optional, direction of sorting, either 'asc' (the\n default) or 'desc'.\n :param marker: DEPRECATED since Rally 0.10.0\n :param limit: DEPRECATED since Rally 0.10.0\n :param sort_key: DEPRECATED since Rally 0.10.0\n :param kwargs: Optional additional arguments for node creation\n \"\"\"\n\n node = self._create_node(driver, properties, **kwargs)\n list_nodes = self._list_nodes(\n associated=associated, maintenance=maintenance, detail=detail,\n sort_dir=sort_dir)\n self.assertIn(node.name, [n.name for n in list_nodes])\n\n\[email protected](\"required_services\", services=[consts.Service.IRONIC])\[email protected](\"restricted_parameters\", param_names=\"name\")\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"ironic\"]},\n name=\"IronicNodes.create_and_delete_node\",\n platform=\"openstack\")\nclass CreateAndDeleteNode(utils.IronicScenario):\n\n def run(self, driver, properties=None, **kwargs):\n \"\"\"Create and delete node.\n\n :param driver: The name of the driver used to manage this Node.\n :param properties: Key/value pair describing the physical\n characteristics of the node.\n :param kwargs: Optional additional arguments for node creation\n \"\"\"\n node = self._create_node(driver, properties, **kwargs)\n self._delete_node(node)\n" }, { "alpha_fraction": 0.5590916872024536, "alphanum_fraction": 0.5607492327690125, "avg_line_length": 
40.32191848754883, "blob_id": "e1a658ef177f3a3b89a7b6d6f05d4b227994ac30", "content_id": "fe17aa9272905bc2ebe1a7c80e33ea9369151016", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6033, "license_type": "permissive", "max_line_length": 79, "num_lines": 146, "path": "/rally_openstack/task/contexts/swift/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Cisco Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport tempfile\n\nfrom rally.common import broker\n\nfrom rally_openstack.task.scenarios.swift import utils as swift_utils\n\n\nclass SwiftObjectMixin(object):\n \"\"\"Mix-in method for Swift Object Context.\"\"\"\n\n def _create_containers(self, containers_per_tenant, threads):\n \"\"\"Create containers and store results in Rally context.\n\n :param containers_per_tenant: int, number of containers to create\n per tenant\n :param threads: int, number of threads to use for broker pattern\n\n :returns: list of tuples containing (account, container)\n \"\"\"\n containers = []\n\n def publish(queue):\n for user, tenant_id in self._iterate_per_tenants():\n self.context[\"tenants\"][tenant_id][\"containers\"] = []\n for i in range(containers_per_tenant):\n args = (user,\n self.context[\"tenants\"][tenant_id][\"containers\"])\n queue.append(args)\n\n def consume(cache, args):\n user, tenant_containers = 
args\n if user[\"id\"] not in cache:\n cache[user[\"id\"]] = swift_utils.SwiftScenario(\n {\"user\": user, \"task\": self.context.get(\"task\", {})})\n container_name = cache[user[\"id\"]]._create_container()\n tenant_containers.append({\"user\": user,\n \"container\": container_name,\n \"objects\": []})\n containers.append((user[\"tenant_id\"], container_name))\n\n broker.run(publish, consume, threads)\n\n return containers\n\n def _create_objects(self, objects_per_container, object_size, threads):\n \"\"\"Create objects and store results in Rally context.\n\n :param objects_per_container: int, number of objects to create\n per container\n :param object_size: int, size of created swift objects in byte\n :param threads: int, number of threads to use for broker pattern\n\n :returns: list of tuples containing (account, container, object)\n \"\"\"\n objects = []\n\n with tempfile.TemporaryFile() as dummy_file:\n # set dummy file to specified object size\n dummy_file.truncate(object_size)\n\n def publish(queue):\n for tenant_id in self.context[\"tenants\"]:\n items = self.context[\"tenants\"][tenant_id][\"containers\"]\n for container in items:\n for i in range(objects_per_container):\n queue.append(container)\n\n def consume(cache, container):\n user = container[\"user\"]\n if user[\"id\"] not in cache:\n cache[user[\"id\"]] = swift_utils.SwiftScenario(\n {\"user\": user, \"task\": self.context.get(\"task\", {})})\n dummy_file.seek(0)\n object_name = cache[user[\"id\"]]._upload_object(\n container[\"container\"],\n dummy_file)[1]\n container[\"objects\"].append(object_name)\n objects.append((user[\"tenant_id\"], container[\"container\"],\n object_name))\n\n broker.run(publish, consume, threads)\n\n return objects\n\n def _delete_containers(self, threads):\n \"\"\"Delete containers created by Swift context and update Rally context.\n\n :param threads: int, number of threads to use for broker pattern\n \"\"\"\n def publish(queue):\n for tenant_id in 
self.context[\"tenants\"]:\n containers = self.context[\"tenants\"][tenant_id][\"containers\"]\n for container in containers[:]:\n args = container, containers\n queue.append(args)\n\n def consume(cache, args):\n container, tenant_containers = args\n user = container[\"user\"]\n if user[\"id\"] not in cache:\n cache[user[\"id\"]] = swift_utils.SwiftScenario(\n {\"user\": user, \"task\": self.context.get(\"task\", {})})\n cache[user[\"id\"]]._delete_container(container[\"container\"])\n tenant_containers.remove(container)\n\n broker.run(publish, consume, threads)\n\n def _delete_objects(self, threads):\n \"\"\"Delete objects created by Swift context and update Rally context.\n\n :param threads: int, number of threads to use for broker pattern\n \"\"\"\n def publish(queue):\n for tenant_id in self.context[\"tenants\"]:\n containers = self.context[\"tenants\"][tenant_id][\"containers\"]\n for container in containers:\n for object_name in container[\"objects\"][:]:\n args = object_name, container\n queue.append(args)\n\n def consume(cache, args):\n object_name, container = args\n user = container[\"user\"]\n if user[\"id\"] not in cache:\n cache[user[\"id\"]] = swift_utils.SwiftScenario(\n {\"user\": user, \"task\": self.context.get(\"task\", {})})\n cache[user[\"id\"]]._delete_object(container[\"container\"],\n object_name)\n container[\"objects\"].remove(object_name)\n\n broker.run(publish, consume, threads)\n" }, { "alpha_fraction": 0.6685994863510132, "alphanum_fraction": 0.6717216968536377, "avg_line_length": 36.36666488647461, "blob_id": "b060e70d1dee97776bc325ea15ba4ac880698fe0", "content_id": "8041b0739f1840be497de29cd3e3c7728228bad0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2242, "license_type": "permissive", "max_line_length": 75, "num_lines": 60, "path": "/rally_openstack/task/contexts/cinder/volume_types.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", 
"text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.common import utils\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.common.services.storage import block\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"volume_types\", platform=\"openstack\", order=410)\nclass VolumeTypeGenerator(context.OpenStackContext):\n \"\"\"Adds cinder volumes types.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"array\",\n \"$schema\": consts.JSON_SCHEMA,\n \"items\": {\"type\": \"string\"}\n }\n\n def setup(self):\n admin_clients = osclients.Clients(\n self.context.get(\"admin\", {}).get(\"credential\"))\n cinder_service = block.BlockStorage(\n admin_clients,\n name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n self.context[\"volume_types\"] = []\n for vtype_name in self.config:\n LOG.debug(\"Creating Cinder volume type %s\" % vtype_name)\n vtype = cinder_service.create_volume_type(vtype_name)\n self.context[\"volume_types\"].append({\"id\": vtype.id,\n \"name\": vtype_name})\n\n def cleanup(self):\n mather = utils.make_name_matcher(*self.config)\n resource_manager.cleanup(\n 
names=[\"cinder.volume_types\"],\n admin=self.context[\"admin\"],\n superclass=mather,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.6087579727172852, "alphanum_fraction": 0.6104307174682617, "avg_line_length": 39.82682800292969, "blob_id": "9f224a0974abc39b03fb7778b492336b472c5be4", "content_id": "544e2cf2cbaec916d247fd70fc9d44e63a8ca073", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16739, "license_type": "permissive", "max_line_length": 79, "num_lines": 410, "path": "/rally_openstack/task/scenarios/manila/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.common import cfg\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import utils\n\nfrom rally_openstack.task.contexts.manila import consts\nfrom rally_openstack.task import scenario\n\n\nCONF = cfg.CONF\n\n\nclass ManilaScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Manila scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"manila.create_share\")\n def _create_share(self, share_proto, size=1, **kwargs):\n \"\"\"Create a share.\n\n :param share_proto: share protocol for new share,\n available values are NFS, CIFS, GlusterFS, HDFS and CEPHFS.\n :param size: size of a share in GB\n :param snapshot_id: ID of the snapshot\n :param name: name of new share\n :param description: description of a share\n :param metadata: optional metadata to set on share creation\n :param share_network: either instance of ShareNetwork or str with ID\n :param share_type: either instance of ShareType or str with ID\n :param is_public: defines whether to set share as public or not.\n :returns: instance of :class:`Share`\n \"\"\"\n if self.context:\n share_networks = self.context.get(\"tenant\", {}).get(\n consts.SHARE_NETWORKS_CONTEXT_NAME, {}).get(\n \"share_networks\", [])\n if share_networks and not kwargs.get(\"share_network\"):\n kwargs[\"share_network\"] = share_networks[\n self.context[\"iteration\"] % len(share_networks)][\"id\"]\n\n if not kwargs.get(\"name\"):\n kwargs[\"name\"] = self.generate_random_name()\n\n share = self.clients(\"manila\").shares.create(\n share_proto, size, **kwargs)\n\n self.sleep_between(CONF.openstack.manila_share_create_prepoll_delay)\n share = utils.wait_for_status(\n share,\n ready_statuses=[\"available\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.manila_share_create_timeout,\n 
check_interval=CONF.openstack.manila_share_create_poll_interval,\n )\n return share\n\n @atomic.action_timer(\"manila.delete_share\")\n def _delete_share(self, share):\n \"\"\"Delete the given share.\n\n :param share: :class:`Share`\n \"\"\"\n share.delete()\n error_statuses = (\"error_deleting\", )\n utils.wait_for_status(\n share,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=utils.get_from_manager(error_statuses),\n timeout=CONF.openstack.manila_share_delete_timeout,\n check_interval=CONF.openstack.manila_share_delete_poll_interval)\n\n def _export_location(self, share):\n \"\"\"Export share location.\n\n :param share: :class:`Share`\n \"\"\"\n location = share.export_locations\n return location\n\n def _get_access_from_share(self, share, access_id):\n \"\"\"Get access from share\n\n :param share: :class: `Share`\n :param access_id: The id of the access we want to get\n :returns: The access object from the share\n :raises GetResourceNotFound: if the access is not in the share\n \"\"\"\n try:\n return next(access for access in share.access_list()\n if access.id == access_id)\n except StopIteration:\n raise exceptions.GetResourceNotFound(resource=access_id)\n\n def _update_resource_in_allow_access_share(self, share, access_id):\n \"\"\"Helper to update resource state in allow_access_share method\n\n :param share: :class:`Share`\n :param access_id: id of the access\n :returns: A function to be used in wait_for_status for the update\n resource\n \"\"\"\n def _is_created(_):\n return self._get_access_from_share(share, access_id)\n\n return _is_created\n\n @atomic.action_timer(\"manila.access_allow_share\")\n def _allow_access_share(self, share, access_type, access, access_level):\n \"\"\"Allow access to a share\n\n :param share: :class:`Share`\n :param access_type: represents the access type (e.g: 'ip', 'domain'...)\n :param access: represents the object (e.g: '127.0.0.1'...)\n :param access_level: access level to the share (e.g: 'rw', 
'ro')\n \"\"\"\n access_result = share.allow(access_type, access, access_level)\n # Get access from the list of accesses of the share\n access = next(access for access in share.access_list()\n if access.id == access_result[\"id\"])\n\n fn = self._update_resource_in_allow_access_share(share,\n access_result[\"id\"])\n\n # We check if the access in that access_list has the active state\n utils.wait_for_status(\n access,\n ready_statuses=[\"active\"],\n update_resource=fn,\n check_interval=CONF.openstack.manila_access_create_poll_interval,\n timeout=CONF.openstack.manila_access_create_timeout)\n\n return access_result\n\n def _update_resource_in_deny_access_share(self, share, access_id):\n \"\"\"Helper to update resource state in deny_access_share method\n\n :param share: :class:`Share`\n :param access_id: id of the access\n :returns: A function to be used in wait_for_status for the update\n resource\n \"\"\"\n def _is_deleted(_):\n access = self._get_access_from_share(share, access_id)\n return access\n\n return _is_deleted\n\n @atomic.action_timer(\"manila.access_deny_share\")\n def _deny_access_share(self, share, access_id):\n \"\"\"Deny access to a share\n\n :param share: :class:`Share`\n :param access_id: id of the access to delete\n \"\"\"\n # Get the access element that was created in the first place\n access = self._get_access_from_share(share, access_id)\n share.deny(access_id)\n\n fn = self._update_resource_in_deny_access_share(share,\n access_id)\n\n utils.wait_for_status(\n access,\n ready_statuses=[\"deleted\"],\n update_resource=fn,\n check_deletion=True,\n check_interval=CONF.openstack.manila_access_delete_poll_interval,\n timeout=CONF.openstack.manila_access_delete_timeout)\n\n @atomic.action_timer(\"manila.list_shares\")\n def _list_shares(self, detailed=True, search_opts=None):\n \"\"\"Returns user shares list.\n\n :param detailed: defines either to return detailed list of\n objects or not.\n :param search_opts: container of search opts such as\n 
\"name\", \"host\", \"share_type\", etc.\n \"\"\"\n return self.clients(\"manila\").shares.list(\n detailed=detailed, search_opts=search_opts)\n\n @atomic.action_timer(\"manila.extend_share\")\n def _extend_share(self, share, new_size):\n \"\"\"Extend the given share\n\n :param share: :class:`Share`\n :param new_size: new size of the share\n \"\"\"\n self.clients(\"manila\").shares.extend(share, new_size)\n utils.wait_for_status(\n share,\n ready_statuses=[\"available\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.manila_share_create_timeout,\n check_interval=CONF.openstack.manila_share_create_poll_interval)\n\n @atomic.action_timer(\"manila.shrink_share\")\n def _shrink_share(self, share, new_size):\n \"\"\"Shrink the given share\n\n :param share: :class:`Share`\n :param new_size: new size of the share\n \"\"\"\n share.shrink(new_size)\n utils.wait_for_status(\n share,\n ready_statuses=[\"available\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.manila_share_create_timeout,\n check_interval=CONF.openstack.manila_share_create_poll_interval)\n\n @atomic.action_timer(\"manila.create_share_network\")\n def _create_share_network(self, neutron_net_id=None,\n neutron_subnet_id=None,\n nova_net_id=None, description=None):\n \"\"\"Create share network.\n\n :param neutron_net_id: ID of Neutron network\n :param neutron_subnet_id: ID of Neutron subnet\n :param nova_net_id: ID of Nova network\n :param description: share network description\n :returns: instance of :class:`ShareNetwork`\n \"\"\"\n share_network = self.clients(\"manila\").share_networks.create(\n neutron_net_id=neutron_net_id,\n neutron_subnet_id=neutron_subnet_id,\n nova_net_id=nova_net_id,\n name=self.generate_random_name(),\n description=description)\n return share_network\n\n @atomic.action_timer(\"manila.delete_share_network\")\n def _delete_share_network(self, share_network):\n \"\"\"Delete share network.\n\n :param share_network: instance of 
:class:`ShareNetwork`.\n \"\"\"\n share_network.delete()\n utils.wait_for_status(\n share_network,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.manila_share_delete_timeout,\n check_interval=CONF.openstack.manila_share_delete_poll_interval)\n\n @atomic.action_timer(\"manila.list_share_networks\")\n def _list_share_networks(self, detailed=True, search_opts=None):\n \"\"\"List share networks.\n\n :param detailed: defines either to return detailed list of\n objects or not.\n :param search_opts: container of search opts such as\n \"project_id\" and \"name\".\n :returns: list of instances of :class:`ShareNetwork`\n \"\"\"\n share_networks = self.clients(\"manila\").share_networks.list(\n detailed=detailed, search_opts=search_opts)\n return share_networks\n\n @atomic.action_timer(\"manila.list_share_servers\")\n def _list_share_servers(self, search_opts=None):\n \"\"\"List share servers. Admin only.\n\n :param search_opts: set of key-value pairs to filter share servers by.\n Example: {\"share_network\": \"share_network_name_or_id\"}\n :returns: list of instances of :class:`ShareServer`\n \"\"\"\n share_servers = self.admin_clients(\"manila\").share_servers.list(\n search_opts=search_opts)\n return share_servers\n\n @atomic.action_timer(\"manila.create_security_service\")\n def _create_security_service(self, security_service_type, dns_ip=None,\n server=None, domain=None, user=None,\n password=None, description=None):\n \"\"\"Create security service.\n\n 'Security service' is data container in Manila that stores info\n about auth services 'Active Directory', 'Kerberos' and catalog\n service 'LDAP' that should be used for shares.\n\n :param security_service_type: security service type, permitted values\n are 'ldap', 'kerberos' or 'active_directory'.\n :param dns_ip: dns ip address used inside tenant's network\n :param server: security service server ip address or hostname\n :param domain: security 
service domain\n :param user: security identifier used by tenant\n :param password: password used by user\n :param description: security service description\n :returns: instance of :class:`SecurityService`\n \"\"\"\n security_service = self.clients(\"manila\").security_services.create(\n type=security_service_type,\n dns_ip=dns_ip,\n server=server,\n domain=domain,\n user=user,\n password=password,\n name=self.generate_random_name(),\n description=description)\n return security_service\n\n @atomic.action_timer(\"manila.delete_security_service\")\n def _delete_security_service(self, security_service):\n \"\"\"Delete security service.\n\n :param security_service: instance of :class:`SecurityService`.\n \"\"\"\n security_service.delete()\n utils.wait_for_status(\n security_service,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.manila_share_delete_timeout,\n check_interval=CONF.openstack.manila_share_delete_poll_interval)\n\n @atomic.action_timer(\"manila.add_security_service_to_share_network\")\n def _add_security_service_to_share_network(self, share_network,\n security_service):\n \"\"\"Associate given security service with a share network.\n\n :param share_network: ID or instance of :class:`ShareNetwork`.\n :param security_service: ID or instance of :class:`SecurityService`.\n :returns: instance of :class:`ShareNetwork`.\n \"\"\"\n share_network = self.clients(\n \"manila\").share_networks.add_security_service(\n share_network, security_service)\n return share_network\n\n @atomic.action_timer(\"manila.set_metadata\")\n def _set_metadata(self, share, sets=1, set_size=1,\n key_min_length=1, key_max_length=256,\n value_min_length=1, value_max_length=1024):\n \"\"\"Sets share metadata.\n\n :param share: the share to set metadata on\n :param sets: how many operations to perform\n :param set_size: number of metadata keys to set in each operation\n :param key_min_length: minimal size of metadata 
key to set\n :param key_max_length: maximum size of metadata key to set\n :param value_min_length: minimal size of metadata value to set\n :param value_max_length: maximum size of metadata value to set\n :returns: A list of keys that were set\n :raises exceptions.InvalidArgumentsException: if invalid arguments\n were provided.\n \"\"\"\n if not (key_min_length <= key_max_length\n and value_min_length <= value_max_length):\n raise exceptions.InvalidArgumentsException(\n \"Min length for keys and values of metadata can not be bigger \"\n \"than maximum length.\")\n\n keys = []\n for i in range(sets):\n metadata = {}\n for j in range(set_size):\n if key_min_length == key_max_length:\n key_length = key_min_length\n else:\n key_length = random.choice(\n range(key_min_length, key_max_length))\n if value_min_length == value_max_length:\n value_length = value_min_length\n else:\n value_length = random.choice(\n range(value_min_length, value_max_length))\n key = self._generate_random_part(length=key_length)\n keys.append(key)\n metadata[key] = self._generate_random_part(length=value_length)\n self.clients(\"manila\").shares.set_metadata(share[\"id\"], metadata)\n\n return keys\n\n @atomic.action_timer(\"manila.delete_metadata\")\n def _delete_metadata(self, share, keys, delete_size=3):\n \"\"\"Deletes share metadata.\n\n :param share: The share to delete metadata from.\n :param delete_size: number of metadata keys to delete using one single\n call.\n :param keys: a list or tuple of keys to choose deletion candidates from\n :raises exceptions.InvalidArgumentsException: if invalid arguments\n were provided.\n \"\"\"\n if not (isinstance(keys, list) and keys):\n raise exceptions.InvalidArgumentsException(\n \"Param 'keys' should be non-empty 'list'. 
keys = '%s'\" % keys)\n for i in range(0, len(keys), delete_size):\n self.clients(\"manila\").shares.delete_metadata(\n share[\"id\"], keys[i:i + delete_size])\n" }, { "alpha_fraction": 0.5896602272987366, "alphanum_fraction": 0.5989270210266113, "avg_line_length": 29.909547805786133, "blob_id": "c83d84d1e8e31802f6efa6f2076fc844984adcc6", "content_id": "7184f422d0842fe82eac2c975e8c4bfd41f77e37", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6151, "license_type": "permissive", "max_line_length": 79, "num_lines": 199, "path": "/rally_openstack/_compat.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport importlib\nimport importlib.abc\nimport importlib.machinery\nimport importlib.util\nimport sys\nimport warnings\n\n\nclass _MoveSpec(object):\n def __init__(self, deprecated, new, release):\n \"\"\"init moved module info\n\n :param deprecated: a module name that is deprecated\n :param new: a module name that should be used instead\n :param release: A release when the module was deprecated\n \"\"\"\n self.deprecated = deprecated\n self.new = new\n self.deprecated_path = self.deprecated.replace(\".\", \"/\")\n self.new_path = self.new.replace(\".\", \"/\")\n self.release = release\n\n def get_new_name(self, fullname):\n \"\"\"Get the new name for deprecated module.\"\"\"\n return fullname.replace(self.deprecated, self.new)\n\n def get_deprecated_path(self, path):\n \"\"\"Get a path to the deprecated module.\"\"\"\n return path.replace(self.new_path, self.deprecated_path)\n\n\n_MOVES = [\n _MoveSpec(\n deprecated=\"rally_openstack.embedcharts\",\n new=\"rally_openstack.task.ui.charts\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.cleanup\",\n new=\"rally_openstack.task.cleanup\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.contexts\",\n new=\"rally_openstack.task.contexts\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.hook\",\n new=\"rally_openstack.task.hooks\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.scenario\",\n new=\"rally_openstack.task.scenario\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.scenarios\",\n new=\"rally_openstack.task.scenarios\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.types\",\n new=\"rally_openstack.task.types\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.platforms\",\n new=\"rally_openstack.environment.platforms\",\n release=\"2.0.0\"\n ),\n 
_MoveSpec(\n deprecated=\"rally_openstack.service\",\n new=\"rally_openstack.common.service\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.services\",\n new=\"rally_openstack.common.services\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.validators\",\n new=\"rally_openstack.common.validators\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.wrappers\",\n new=\"rally_openstack.common.wrappers\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.credential\",\n new=\"rally_openstack.common.credential\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.osclients\",\n new=\"rally_openstack.common.osclients\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.consts\",\n new=\"rally_openstack.common.consts\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.exceptions\",\n new=\"rally_openstack.common.exceptions\",\n release=\"2.0.0\"\n ),\n _MoveSpec(\n deprecated=\"rally_openstack.cfg\",\n new=\"rally_openstack.common.cfg\",\n release=\"2.0.0\"\n ),\n]\n\n\nclass ModuleLoader(object):\n\n def __init__(self, move_spec):\n self.move_spec = move_spec\n\n def create_module(self, spec):\n # Python interpreter will use the default module creator in case of\n # None return value.\n return None\n\n def exec_module(self, module):\n \"\"\"Module executor.\"\"\"\n full_name = self.move_spec.get_new_name(module.__name__)\n\n original_module = importlib.import_module(full_name)\n\n if original_module.__file__.endswith(\"__init__.py\"):\n # NOTE(andreykurilin): In case we need to list submodules the\n # next code can be used:\n #\n # import pkgutil\n #\n # for m in pkgutil.iter_modules(original_module.__path__):\n # module.__dict__[m.name] = importlib.import_module(\n # f\"{full_name}.{m.name}\")\n\n module.__path__ = [\n self.move_spec.get_deprecated_path(original_module.__path__[0])\n ]\n for item in dir(original_module):\n if 
item.startswith(\"_\"):\n continue\n module.__dict__[item] = original_module.__dict__[item]\n module.__file__ = self.move_spec.get_deprecated_path(\n original_module.__file__)\n\n return module\n\n\nclass ModulesMovementsHandler(importlib.abc.MetaPathFinder):\n\n @classmethod\n def _process_spec(cls, fullname, spec):\n \"\"\"Make module spec and print warning message if needed.\"\"\"\n if spec.deprecated == fullname:\n warnings.warn(\n f\"Module {fullname} is deprecated since rally-openstack \"\n f\"{spec.release}. Use {spec.get_new_name(fullname)} instead.\",\n stacklevel=3\n )\n\n return importlib.machinery.ModuleSpec(fullname, ModuleLoader(spec))\n\n @classmethod\n def find_spec(cls, fullname, path=None, target=None):\n \"\"\"This functions is what gets executed by the loader.\"\"\"\n for spec in _MOVES:\n if spec.deprecated in fullname:\n return cls._process_spec(fullname, spec)\n\n\ndef init():\n \"\"\"Adds our custom module loader.\"\"\"\n\n sys.meta_path.append(ModulesMovementsHandler())\n" }, { "alpha_fraction": 0.7169747948646545, "alphanum_fraction": 0.7196638584136963, "avg_line_length": 40.31944274902344, "blob_id": "fede96f325d4d47f7a012b3b15809de7f7788109", "content_id": "502b6bdebbd7460f59282e2c237e23c39d25bf84", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2975, "license_type": "permissive", "max_line_length": 78, "num_lines": 72, "path": "/rally_openstack/task/scenarios/gnocchi/archive_policy_rule.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils\n\n\"\"\"Scenarios for Gnocchi archive policy rule.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"GnocchiArchivePolicyRule.list_archive_policy_rule\")\nclass ListArchivePolicyRule(gnocchiutils.GnocchiBase):\n\n def run(self):\n \"\"\"List archive policy rules.\"\"\"\n self.gnocchi.list_archive_policy_rule()\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\n context={\"admin_cleanup@openstack\": [\"gnocchi.archive_policy_rule\"]},\n name=\"GnocchiArchivePolicyRule.create_archive_policy_rule\")\nclass CreateArchivePolicyRule(gnocchiutils.GnocchiBase):\n\n def run(self, metric_pattern=\"cpu_*\", archive_policy_name=\"low\"):\n \"\"\"Create archive policy rule.\n\n :param metric_pattern: Pattern for matching metrics\n :param archive_policy_name: Archive policy name\n \"\"\"\n name = self.generate_random_name()\n self.admin_gnocchi.create_archive_policy_rule(\n name,\n metric_pattern=metric_pattern,\n archive_policy_name=archive_policy_name)\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", 
admin=True)\[email protected](\n context={\"admin_cleanup@openstack\": [\"gnocchi.archive_policy_rule\"]},\n name=\"GnocchiArchivePolicyRule.create_delete_archive_policy_rule\")\nclass CreateDeleteArchivePolicyRule(gnocchiutils.GnocchiBase):\n\n def run(self, metric_pattern=\"cpu_*\", archive_policy_name=\"low\"):\n \"\"\"Create archive policy rule and then delete it.\n\n :param metric_pattern: Pattern for matching metrics\n :param archive_policy_name: Archive policy name\n \"\"\"\n name = self.generate_random_name()\n self.admin_gnocchi.create_archive_policy_rule(\n name,\n metric_pattern=metric_pattern,\n archive_policy_name=archive_policy_name)\n self.admin_gnocchi.delete_archive_policy_rule(name)\n" }, { "alpha_fraction": 0.6265506744384766, "alphanum_fraction": 0.6296877264976501, "avg_line_length": 35.336788177490234, "blob_id": "34c9156928a702edfc2e7551b391da000bc08a5c", "content_id": "811b4c7fbd7caa2d9b16c50a22bfa3a51bd0a05e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7013, "license_type": "permissive", "max_line_length": 78, "num_lines": 193, "path": "/rally_openstack/common/services/identity/keystone_common.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import atomic\n\nfrom rally_openstack.common import osclients\nfrom rally_openstack.common.services.identity import identity\n\n\nclass UnifiedKeystoneMixin(object):\n @staticmethod\n def _unify_service(service):\n return identity.Service(id=service.id, name=service.name)\n\n @staticmethod\n def _unify_role(role):\n return identity.Role(id=role.id, name=role.name)\n\n def delete_user(self, user_id):\n \"\"\"Deletes user by its id.\"\"\"\n return self._impl.delete_user(user_id)\n\n def get_user(self, user_id):\n \"\"\"Get user.\"\"\"\n return self._unify_user(self._impl.get_user(user_id))\n\n def create_service(self, name=None, service_type=None, description=None):\n \"\"\"Creates keystone service.\"\"\"\n\n return self._unify_service(self._impl.create_service(\n name=name, service_type=service_type, description=description))\n\n def delete_service(self, service_id):\n \"\"\"Deletes service.\"\"\"\n return self._impl.delete_service(service_id)\n\n def get_service(self, service_id):\n \"\"\"Get service.\"\"\"\n return self._unify_service(self._impl.get_service(service_id))\n\n def get_service_by_name(self, name):\n \"\"\"List all services to find proper one.\"\"\"\n return self._unify_service(self._impl.get_service_by_name(name))\n\n def get_role(self, role_id):\n \"\"\"Get role.\"\"\"\n return self._unify_role(self._impl.get_role(role_id))\n\n def delete_role(self, role_id):\n \"\"\"Deletes role.\"\"\"\n return self._impl.delete_role(role_id)\n\n def list_ec2credentials(self, user_id):\n \"\"\"List of access/secret pairs for a user_id.\n\n :param user_id: List all ec2-credentials for User ID\n\n :returns: Return ec2-credentials list\n \"\"\"\n return self._impl.list_ec2credentials(user_id)\n\n def delete_ec2credential(self, user_id, access):\n \"\"\"Delete ec2credential.\n\n :param user_id: User ID for which to delete credential\n :param 
access: access key for ec2credential to delete\n \"\"\"\n return self._impl.delete_ec2credential(user_id=user_id, access=access)\n\n def fetch_token(self):\n \"\"\"Authenticate user token.\"\"\"\n return self._impl.fetch_token()\n\n def validate_token(self, token):\n \"\"\"Validate user token.\n\n :param token: Auth token to validate\n \"\"\"\n return self._impl.validate_token(token)\n\n\nclass KeystoneMixin(object):\n\n def list_users(self):\n aname = \"keystone_v%s.list_users\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._clients.keystone(self.version).users.list()\n\n def delete_user(self, user_id):\n \"\"\"Deletes user by its id.\"\"\"\n aname = \"keystone_v%s.delete_user\" % self.version\n with atomic.ActionTimer(self, aname):\n self._clients.keystone(self.version).users.delete(user_id)\n\n def get_user(self, user_id):\n \"\"\"Get user by its id.\"\"\"\n aname = \"keystone_v%s.get_user\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._clients.keystone(self.version).users.get(user_id)\n\n def delete_service(self, service_id):\n \"\"\"Deletes service.\"\"\"\n aname = \"keystone_v%s.delete_service\" % self.version\n with atomic.ActionTimer(self, aname):\n self._clients.keystone(self.version).services.delete(service_id)\n\n def list_services(self):\n \"\"\"List all services.\"\"\"\n aname = \"keystone_v%s.list_services\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._clients.keystone(self.version).services.list()\n\n def get_service(self, service_id):\n \"\"\"Get service.\"\"\"\n aname = \"keystone_v%s.get_services\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._clients.keystone(self.version).services.get(\n service_id)\n\n def get_service_by_name(self, name):\n \"\"\"List all services to find proper one.\"\"\"\n for s in self.list_services():\n if s.name == name:\n return s\n\n def delete_role(self, role_id):\n \"\"\"Deletes role.\"\"\"\n aname = 
\"keystone_v%s.delete_role\" % self.version\n with atomic.ActionTimer(self, aname):\n self._clients.keystone(self.version).roles.delete(role_id)\n\n def list_roles(self):\n \"\"\"List all roles.\"\"\"\n aname = \"keystone_v%s.list_roles\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._clients.keystone(self.version).roles.list()\n\n def get_role(self, role_id):\n \"\"\"Get role.\"\"\"\n aname = \"keystone_v%s.get_role\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._clients.keystone(self.version).roles.get(role_id)\n\n def list_ec2credentials(self, user_id):\n \"\"\"List of access/secret pairs for a user_id.\n\n :param user_id: List all ec2-credentials for User ID\n\n :returns: Return ec2-credentials list\n \"\"\"\n aname = \"keystone_v%s.list_ec2creds\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._clients.keystone(self.version).ec2.list(user_id)\n\n def delete_ec2credential(self, user_id, access):\n \"\"\"Delete ec2credential.\n\n :param user_id: User ID for which to delete credential\n :param access: access key for ec2credential to delete\n \"\"\"\n aname = \"keystone_v%s.delete_ec2creds\" % self.version\n with atomic.ActionTimer(self, aname):\n self._clients.keystone(self.version).ec2.delete(user_id=user_id,\n access=access)\n\n def fetch_token(self):\n \"\"\"Authenticate user token.\"\"\"\n aname = \"keystone_v%s.fetch_token\" % self.version\n with atomic.ActionTimer(self, aname):\n # use another instance of osclients.Clients to avoid usage of\n # cached keystone session\n clients = osclients.Clients(credential=self._clients.credential)\n return clients.keystone.auth_ref.auth_token\n\n def validate_token(self, token):\n \"\"\"Validate user token.\n\n :param token: Auth token to validate\n \"\"\"\n aname = \"keystone_v%s.validate_token\" % self.version\n with atomic.ActionTimer(self, aname):\n self._clients.keystone(self.version).tokens.validate(token)\n" }, { "alpha_fraction": 
0.6298513412475586, "alphanum_fraction": 0.6304094195365906, "avg_line_length": 39.14460372924805, "blob_id": "362a46c913f0926877992bee1e878d62491c5126", "content_id": "ac6e64954129296e96e6d8ceb4dcdcc5135f57e8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19711, "license_type": "permissive", "max_line_length": 79, "num_lines": 491, "path": "/rally_openstack/common/services/storage/block.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.task import service\n\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\nVolume = service.make_resource_cls(\n \"Volume\", properties=[\"id\", \"name\", \"size\", \"status\"])\nVolumeSnapshot = service.make_resource_cls(\n \"VolumeSnapshot\", properties=[\"id\", \"name\", \"volume_id\", \"status\"])\nVolumeBackup = service.make_resource_cls(\n \"VolumeBackup\", properties=[\"id\", \"name\", \"volume_id\", \"status\"])\nVolumeTransfer = service.make_resource_cls(\n \"VolumeTransfer\", properties=[\"id\", \"name\", \"volume_id\", \"auth_key\"])\nVolumeEncryptionType = service.make_resource_cls(\n \"VolumeEncryptionType\", properties=[\"id\", \"volume_type_id\"])\nQoSSpecs = service.make_resource_cls(\n \"QoSSpecs\", properties=[\"id\", \"name\", \"specs\"])\n\n\nclass BlockStorage(service.UnifiedService):\n\n @service.should_be_overridden\n def create_volume(self, size, consistencygroup_id=None,\n group_id=None, snapshot_id=None, source_volid=None,\n name=None, description=None,\n volume_type=None, user_id=None,\n project_id=None, availability_zone=None,\n metadata=None, imageRef=None, scheduler_hints=None,\n source_replica=None, backup_id=None):\n \"\"\"Creates a volume.\n\n :param size: Size of volume in GB\n :param consistencygroup_id: ID of the consistencygroup\n :param group_id: ID of the group\n :param snapshot_id: ID of the snapshot\n :param name: Name of the volume\n :param description: Description of the volume\n :param volume_type: Type of volume\n :param user_id: User id derived from context\n :param project_id: Project id derived from context\n :param availability_zone: Availability Zone to use\n :param metadata: Optional metadata to set on volume creation\n :param imageRef: reference to an image stored in glance\n :param source_volid: ID of source volume to clone 
from\n :param source_replica: ID of source volume to clone replica(IGNORED)\n :param scheduler_hints: (optional extension) arbitrary key-value pairs\n specified by the client to help boot an instance\n :param backup_id: ID of the backup\n\n :returns: Return a new volume.\n \"\"\"\n if source_replica:\n LOG.warning(\"The argument `source_replica` would be ignored\"\n \" because it was removed from cinder api.\")\n return self._impl.create_volume(\n size, consistencygroup_id=consistencygroup_id, group_id=group_id,\n snapshot_id=snapshot_id, source_volid=source_volid,\n name=name, description=description, volume_type=volume_type,\n user_id=user_id, project_id=project_id,\n availability_zone=availability_zone, metadata=metadata,\n imageRef=imageRef, scheduler_hints=scheduler_hints,\n backup_id=backup_id)\n\n @service.should_be_overridden\n def list_volumes(self, detailed=True, search_opts=None, marker=None,\n limit=None, sort=None):\n \"\"\"Lists all volumes.\n\n :param detailed: Whether to return detailed volume info.\n :param search_opts: Search options to filter out volumes.\n :param marker: Begin returning volumes that appear later in the volume\n list than that represented by this volume id.\n :param limit: Maximum number of volumes to return.\n :param sort: Sort information\n :returns: Return volumes list.\n \"\"\"\n return self._impl.list_volumes(\n detailed=detailed, search_opts=search_opts, marker=marker,\n limit=limit, sort=sort)\n\n @service.should_be_overridden\n def get_volume(self, volume_id):\n \"\"\"Get a volume.\n\n :param volume_id: The ID of the volume to get.\n\n :returns: Return the volume.\n \"\"\"\n return self._impl.get_volume(volume_id)\n\n @service.should_be_overridden\n def update_volume(self, volume_id,\n name=None, description=None):\n \"\"\"Update the name or description for a volume.\n\n :param volume_id: The updated volume id.\n :param name: The volume name.\n :param description: The volume description.\n\n :returns: The updated 
volume.\n \"\"\"\n return self._impl.update_volume(\n volume_id, name=name, description=description)\n\n @service.should_be_overridden\n def delete_volume(self, volume):\n \"\"\"Delete a volume.\"\"\"\n self._impl.delete_volume(volume)\n\n @service.should_be_overridden\n def extend_volume(self, volume, new_size):\n \"\"\"Extend the size of the specified volume.\"\"\"\n return self._impl.extend_volume(volume, new_size=new_size)\n\n @service.should_be_overridden\n def list_snapshots(self, detailed=True):\n \"\"\"Get a list of all snapshots.\"\"\"\n return self._impl.list_snapshots(detailed=detailed)\n\n @service.should_be_overridden\n def list_types(self, search_opts=None, is_public=None):\n \"\"\"Lists all volume types.\"\"\"\n return self._impl.list_types(search_opts=search_opts,\n is_public=is_public)\n\n @service.should_be_overridden\n def set_metadata(self, volume, sets=10, set_size=3):\n \"\"\"Update/Set a volume metadata.\n\n :param volume: The updated/setted volume.\n :param sets: how many operations to perform\n :param set_size: number of metadata keys to set in each operation\n :returns: A list of keys that were set\n \"\"\"\n return self._impl.set_metadata(volume, sets=sets, set_size=set_size)\n\n @service.should_be_overridden\n def delete_metadata(self, volume, keys, deletes=10, delete_size=3):\n \"\"\"Delete volume metadata keys.\n\n Note that ``len(keys)`` must be greater than or equal to\n ``deletes * delete_size``.\n\n :param volume: The volume to delete metadata from\n :param deletes: how many operations to perform\n :param delete_size: number of metadata keys to delete in each operation\n :param keys: a list of keys to choose deletion candidates from\n \"\"\"\n self._impl.delete_metadata(volume, keys, deletes=deletes,\n delete_size=delete_size)\n\n @service.should_be_overridden\n def update_readonly_flag(self, volume, read_only):\n \"\"\"Update the read-only access mode flag of the specified volume.\n\n :param volume: The UUID of the volume to 
update.\n :param read_only: The value to indicate whether to update volume to\n read-only access mode.\n :returns: A tuple of http Response and body\n \"\"\"\n return self._impl.update_readonly_flag(volume, read_only=read_only)\n\n @service.should_be_overridden\n def upload_volume_to_image(self, volume, force=False,\n container_format=\"bare\", disk_format=\"raw\"):\n \"\"\"Upload the given volume to image.\n\n Returns created image.\n\n :param volume: volume object\n :param force: flag to indicate whether to snapshot a volume even if\n it's attached to an instance\n :param container_format: container format of image. Acceptable\n formats: ami, ari, aki, bare, and ovf\n :param disk_format: disk format of image. Acceptable formats:\n ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso\n :returns: Returns created image object\n \"\"\"\n return self._impl.upload_volume_to_image(\n volume, force=force, container_format=container_format,\n disk_format=disk_format)\n\n @service.should_be_overridden\n def create_qos(self, specs):\n \"\"\"Create a qos specs.\n\n :param specs: A dict of key/value pairs to be set\n :rtype: :class:'QoSSpecs'\n \"\"\"\n return self._impl.create_qos(specs)\n\n @service.should_be_overridden\n def list_qos(self, search_opts=None):\n \"\"\"Get a list of all qos specs.\n\n :param search_opts: search options\n :rtype: list of :class: 'QoSpecs'\n \"\"\"\n return self._impl.list_qos(search_opts)\n\n @service.should_be_overridden\n def get_qos(self, qos_id):\n \"\"\"Get a specific qos specs.\n\n :param qos_id: The ID of the :class:`QoSSpecs` to get.\n :rtype: :class:`QoSSpecs`\n \"\"\"\n return self._impl.get_qos(qos_id)\n\n @service.should_be_overridden\n def set_qos(self, qos, set_specs_args):\n \"\"\"Add/Update keys in qos specs.\n\n :param qos: The instance of the :class:`QoSSpecs` to set\n :param set_specs_args: A dict of key/value pairs to be set\n :rtype: :class:`QoSSpecs`\n \"\"\"\n return self._impl.set_qos(qos=qos,\n 
set_specs_args=set_specs_args)\n\n @service.should_be_overridden\n def qos_associate_type(self, qos_specs, volume_type):\n \"\"\"Associate qos specs from volume type.\n\n :param qos_specs: The qos specs to be associated with\n :param volume_type: The volume type id to be associated with\n :rtype: :class:`QoSSpecs`\n \"\"\"\n return self._impl.qos_associate_type(qos_specs, volume_type)\n\n @service.should_be_overridden\n def qos_disassociate_type(self, qos_specs, volume_type):\n \"\"\"Disassociate qos specs from volume type.\n\n :param qos_specs: The qos specs to be associated with\n :param volume_type: The volume type id to be disassociated with\n :rtype: :class:`QoSSpecs`\n \"\"\"\n return self._impl.qos_disassociate_type(qos_specs, volume_type)\n\n @service.should_be_overridden\n def create_snapshot(self, volume_id, force=False,\n name=None, description=None, metadata=None):\n \"\"\"Create one snapshot.\n\n Returns when the snapshot is actually created and is in the \"Available\"\n state.\n\n :param volume_id: volume uuid for creating snapshot\n :param force: If force is True, create a snapshot even if the volume is\n attached to an instance. 
Default is False.\n :param name: Name of the snapshot\n :param description: Description of the snapshot\n :param metadata: Metadata of the snapshot\n :returns: Created snapshot object\n \"\"\"\n return self._impl.create_snapshot(\n volume_id, force=force, name=name,\n description=description, metadata=metadata)\n\n @service.should_be_overridden\n def delete_snapshot(self, snapshot):\n \"\"\"Delete the given snapshot.\n\n Returns when the snapshot is actually deleted.\n\n :param snapshot: snapshot instance\n \"\"\"\n self._impl.delete_snapshot(snapshot)\n\n @service.should_be_overridden\n def create_backup(self, volume_id, container=None,\n name=None, description=None,\n incremental=False, force=False,\n snapshot_id=None):\n \"\"\"Creates a volume backup.\n\n :param volume_id: The ID of the volume to backup.\n :param container: The name of the backup service container.\n :param name: The name of the backup.\n :param description: The description of the backup.\n :param incremental: Incremental backup.\n :param force: If True, allows an in-use volume to be backed up.\n :param snapshot_id: The ID of the snapshot to backup.\n\n :returns: The created backup object.\n \"\"\"\n return self._impl.create_backup(volume_id, container=container,\n name=name, description=description,\n incremental=incremental, force=force,\n snapshot_id=snapshot_id)\n\n @service.should_be_overridden\n def delete_backup(self, backup):\n \"\"\"Delete a volume backup.\"\"\"\n self._impl.delete_backup(backup)\n\n @service.should_be_overridden\n def restore_backup(self, backup_id, volume_id=None):\n \"\"\"Restore the given backup.\n\n :param backup_id: The ID of the backup to restore.\n :param volume_id: The ID of the volume to restore the backup to.\n\n :returns: Return the restored backup.\n \"\"\"\n return self._impl.restore_backup(backup_id, volume_id=volume_id)\n\n @service.should_be_overridden\n def list_backups(self, detailed=True):\n \"\"\"Return user volume backups list.\"\"\"\n return 
self._impl.list_backups(detailed=detailed)\n\n @service.should_be_overridden\n def list_transfers(self, detailed=True, search_opts=None):\n \"\"\"Get a list of all volume transfers.\n\n :param detailed: If True, detailed information about transfer\n should be listed\n :param search_opts: Search options to filter out volume transfers\n :returns: list of :class:`VolumeTransfer`\n \"\"\"\n return self._impl.list_transfers(detailed=detailed,\n search_opts=search_opts)\n\n @service.should_be_overridden\n def create_volume_type(self, name=None, description=None, is_public=True):\n \"\"\"Creates a volume type.\n\n :param name: Descriptive name of the volume type\n :param description: Description of the volume type\n :param is_public: Volume type visibility\n :returns: Return the created volume type.\n \"\"\"\n return self._impl.create_volume_type(name=name,\n description=description,\n is_public=is_public)\n\n @service.should_be_overridden\n def update_volume_type(self, volume_type, name=None,\n description=None, is_public=None):\n \"\"\"Update the name and/or description for a volume type.\n\n :param volume_type: The ID or an instance of the :class:`VolumeType`\n to update.\n :param name: if None, updates name by generating random name.\n else updates name with provided name\n :param description: Description of the volume type.\n :returns: Returns an updated volume type object.\n \"\"\"\n return self._impl.update_volume_type(\n volume_type=volume_type, name=name, description=description,\n is_public=is_public\n )\n\n @service.should_be_overridden\n def add_type_access(self, volume_type, project):\n \"\"\"Add a project to the given volume type access list.\n\n :param volume_type: Volume type name or ID to add access for the given\n project\n :param project: Project ID to add volume type access for\n :return: An instance of cinderclient.apiclient.base.TupleWithMeta\n \"\"\"\n return self._impl.add_type_access(\n volume_type=volume_type, project=project\n )\n\n 
@service.should_be_overridden\n def list_type_access(self, volume_type):\n \"\"\"Print access information about the given volume type\n\n :param volume_type: Filter results by volume type name or ID\n :return: VolumeTypeAccess of specific project\n \"\"\"\n return self._impl.list_type_access(volume_type)\n\n @service.should_be_overridden\n def get_volume_type(self, volume_type):\n \"\"\"get details of volume_type.\n\n :param volume_type: The ID of the :class:`VolumeType` to get\n :returns: :class:`VolumeType`\n \"\"\"\n return self._impl.get_volume_type(volume_type)\n\n @service.should_be_overridden\n def delete_volume_type(self, volume_type):\n \"\"\"delete a volume type.\n\n :param volume_type: Name or Id of the volume type\n :returns: base on client response return True if the request\n has been accepted or not\n \"\"\"\n return self._impl.delete_volume_type(volume_type)\n\n @service.should_be_overridden\n def set_volume_type_keys(self, volume_type, metadata):\n \"\"\"Set extra specs on a volume type.\n\n :param volume_type: The :class:`VolumeType` to set extra spec on\n :param metadata: A dict of key/value pairs to be set\n :returns: extra_specs if the request has been accepted\n \"\"\"\n return self._impl.set_volume_type_keys(volume_type, metadata)\n\n @service.should_be_overridden\n def transfer_create(self, volume_id, name=None):\n \"\"\"Creates a volume transfer.\n\n :param name: The name of created transfer\n :param volume_id: The ID of the volume to transfer.\n :returns: Return the created transfer.\n \"\"\"\n return self._impl.transfer_create(volume_id, name=name)\n\n @service.should_be_overridden\n def transfer_accept(self, transfer_id, auth_key):\n \"\"\"Accept a volume transfer.\n\n :param transfer_id: The ID of the transfer to accept.\n :param auth_key: The auth_key of the transfer.\n :returns: VolumeTransfer\n \"\"\"\n return self._impl.transfer_accept(transfer_id, auth_key=auth_key)\n\n @service.should_be_overridden\n def 
create_encryption_type(self, volume_type, specs):\n \"\"\"Create encryption type for a volume type. Default: admin only.\n\n :param volume_type: the volume type on which to add an encryption type\n :param specs: the encryption type specifications to add\n :return: an instance of :class: VolumeEncryptionType\n \"\"\"\n return self._impl.create_encryption_type(volume_type, specs=specs)\n\n @service.should_be_overridden\n def get_encryption_type(self, volume_type):\n \"\"\"Get the volume encryption type for the specified volume type.\n\n :param volume_type: the volume type to query\n :return: an instance of :class: VolumeEncryptionType\n \"\"\"\n return self._impl.get_encryption_type(volume_type)\n\n @service.should_be_overridden\n def list_encryption_type(self, search_opts=None):\n \"\"\"List all volume encryption types.\n\n :param search_opts: Options used when search for encryption types\n :return: a list of :class: VolumeEncryptionType instances\n \"\"\"\n return self._impl.list_encryption_type(search_opts=search_opts)\n\n @service.should_be_overridden\n def delete_encryption_type(self, volume_type):\n \"\"\"Delete the encryption type information for the specified volume type\n\n :param volume_type: the volume type whose encryption type information\n must be deleted\n \"\"\"\n self._impl.delete_encryption_type(volume_type)\n\n @service.should_be_overridden\n def update_encryption_type(self, volume_type, specs):\n \"\"\"Update the encryption type information for the specified volume type\n\n :param volume_type: the volume type whose encryption type information\n will be updated\n :param specs: the encryption type specifications to update\n :return: an instance of :class: VolumeEncryptionType\n \"\"\"\n return self._impl.update_encryption_type(volume_type, specs=specs)\n" }, { "alpha_fraction": 0.6673004031181335, "alphanum_fraction": 0.6768060922622681, "avg_line_length": 28.22222137451172, "blob_id": "c6e62e9315d54785f18fb86fe07fa4fc951d5c0a", "content_id": 
"9ff3da20ecde33c4b8261fa0fce0b9fc2425b573", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 526, "license_type": "permissive", "max_line_length": 95, "num_lines": 18, "path": "/tests/ci/rally_functional_job.sh", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nLOCAL_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\n\nDB_CONNECTION=\"$(rally db show)\"\n\nif [[ $DB_CONNECTION == sqlite* ]]; then\n CONCURRENCY=0\nelse\n # in case of not sqlite db backends we cannot launch tests in parallel due\n # to possible conflicts\n CONCURRENCY=1\n # currently, RCI_KEEP_DB variable is used to not create new databases per\n # each test\n export RCI_KEEP_DB=1\nfi\n\npython $LOCAL_DIR/pytest_launcher.py \"tests/functional\" --concurrency $CONCURRENCY --posargs=$1\n" }, { "alpha_fraction": 0.5822968482971191, "alphanum_fraction": 0.5891549587249756, "avg_line_length": 35.45338821411133, "blob_id": "ccb0d04aff5cc40ebeaadc8ffa3fce59a74b96a8", "content_id": "379833a0402531b15e15b8b55b61a188f474cbf6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17206, "license_type": "permissive", "max_line_length": 79, "num_lines": 472, "path": "/tests/unit/task/scenarios/manila/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally import exceptions\nfrom rally_openstack.task.contexts.manila import consts\nfrom rally_openstack.task.scenarios.manila import utils\nfrom tests.unit import test\n\nBM_UTILS = \"rally.task.utils.\"\n\n\[email protected]\nclass ManilaScenarioTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(ManilaScenarioTestCase, self).setUp()\n self.scenario = utils.ManilaScenario(self.context)\n\n def test__create_share(self):\n fake_share = mock.Mock()\n self.clients(\"manila\").shares.create.return_value = fake_share\n self.scenario.context = {\n \"tenant\": {\n consts.SHARE_NETWORKS_CONTEXT_NAME: {\n \"share_networks\": [{\"id\": \"sn_1_id\"}, {\"id\": \"sn_2_id\"}],\n }\n },\n \"iteration\": 0,\n }\n fake_random_name = \"fake_random_name_value\"\n self.scenario.generate_random_name = mock.Mock(\n return_value=fake_random_name)\n\n self.scenario._create_share(\"nfs\")\n\n self.clients(\"manila\").shares.create.assert_called_once_with(\n \"nfs\", 1, name=fake_random_name,\n share_network=self.scenario.context[\"tenant\"][\n consts.SHARE_NETWORKS_CONTEXT_NAME][\"share_networks\"][0][\"id\"])\n\n self.mock_wait_for_status.mock.assert_called_once_with(\n fake_share,\n ready_statuses=[\"available\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n timeout=300, check_interval=3)\n self.mock_get_from_manager.mock.assert_called_once_with()\n\n @mock.patch(BM_UTILS + \"wait_for_status\")\n def test__delete_share(self, mock_wait_for_status):\n fake_share = mock.MagicMock()\n\n self.scenario._delete_share(fake_share)\n\n fake_share.delete.assert_called_once_with()\n mock_wait_for_status.assert_called_once_with(\n fake_share,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self.mock_get_from_manager.mock.return_value,\n timeout=180, check_interval=2)\n 
self.mock_get_from_manager.mock.assert_called_once_with(\n (\"error_deleting\", ))\n\n def test_export_location(self):\n fake_share = mock.MagicMock()\n fake_share.export_locations = \"fake_location\"\n result = self.scenario._export_location(fake_share)\n self.assertEqual(result, \"fake_location\")\n\n @ddt.data(\n {},\n {\"detailed\": False, \"search_opts\": None},\n {\"detailed\": True, \"search_opts\": {\"name\": \"foo_sn\"}},\n {\"search_opts\": {\"project_id\": \"fake_project\"}},\n )\n def test__list_shares(self, params):\n fake_shares = [\"foo\", \"bar\"]\n self.clients(\"manila\").shares.list.return_value = fake_shares\n\n result = self.scenario._list_shares(**params)\n\n self.assertEqual(fake_shares, result)\n self.clients(\"manila\").shares.list.assert_called_once_with(\n detailed=params.get(\"detailed\", True),\n search_opts=params.get(\"search_opts\"))\n\n @ddt.data(\n {\"new_size\": 5},\n {\"new_size\": 10}\n )\n def test__extend_share(self, new_size):\n fake_share = mock.MagicMock()\n\n self.scenario._extend_share(fake_share, new_size)\n\n self.clients(\"manila\").shares.extend.assert_called_once_with(\n fake_share, new_size)\n\n self.mock_wait_for_status.mock.assert_called_once_with(\n fake_share,\n ready_statuses=[\"available\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n timeout=300, check_interval=3)\n self.mock_get_from_manager.mock.assert_called_once_with()\n\n @ddt.data(\n {\"new_size\": 5},\n {\"new_size\": 10}\n )\n def test__shrink_share(self, new_size):\n fake_share = mock.MagicMock()\n\n self.scenario._shrink_share(fake_share, new_size)\n\n fake_share.shrink.assert_called_with(new_size)\n\n self.mock_wait_for_status.mock.assert_called_once_with(\n fake_share,\n ready_statuses=[\"available\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n timeout=300, check_interval=3)\n self.mock_get_from_manager.mock.assert_called_once_with()\n\n @ddt.data(\n {\n \"access_type\": \"ip\",\n \"access\": 
\"1.2.3.4\",\n \"access_level\": \"rw\",\n \"access_id\": \"foo\"\n },\n {\n \"access_type\": \"domain\",\n \"access\": \"4.3.2.1\",\n \"access_level\": \"ro\",\n \"access_id\": \"bar\"\n }\n )\n @ddt.unpack\n def test__allow_access_share(self, access_type, access, access_level,\n access_id):\n fake_allow_result = {\"id\": access_id}\n fake_access = mock.MagicMock()\n fake_access.id = access_id\n fake_update = mock.MagicMock()\n self.scenario._update_resource_in_allow_access_share = mock.MagicMock(\n return_value=fake_update)\n\n fake_share = mock.MagicMock()\n fake_share.allow.return_value = fake_allow_result\n fake_share.access_list.return_value = [fake_access]\n\n self.assertEqual(self.scenario._allow_access_share(\n fake_share, access_type, access, access_level), fake_allow_result)\n\n self.scenario._update_resource_in_allow_access_share \\\n .assert_called_with(fake_share, access_id)\n self.mock_wait_for_status.mock.assert_called_once_with(\n fake_access,\n ready_statuses=[\"active\"],\n update_resource=fake_update,\n check_interval=3.0,\n timeout=300.0)\n\n def test__get_access_from_share_with_no_access_in_share(self):\n access_id = \"foo\"\n fake_share = mock.MagicMock()\n fake_access = mock.MagicMock()\n fake_access.id = access_id\n fake_share.access_list.return_value = []\n\n self.assertRaises(exceptions.GetResourceNotFound,\n self.scenario._get_access_from_share,\n fake_share, access_id)\n\n def test__get_access_from_share(self):\n access_id = \"foo\"\n fake_share = mock.MagicMock()\n fake_access = mock.MagicMock()\n fake_access.id = access_id\n fake_share.access_list.return_value = [fake_access]\n\n access = self.scenario._get_access_from_share(fake_share, access_id)\n\n self.assertEqual(access, fake_access)\n\n def test__update_resource_in_allow_access_share(self):\n access_id = \"foo\"\n fake_share = mock.MagicMock()\n fake_resource = mock.MagicMock()\n fake_access = mock.MagicMock()\n fake_access.id = access_id\n fake_share.access_list.return_value = 
[fake_access]\n\n fn = self.scenario._update_resource_in_allow_access_share(\n fake_share, access_id)\n\n self.assertEqual(fn(fake_resource), fake_access)\n\n def test__deny_access_share(self):\n access_id = \"foo\"\n fake_access = mock.MagicMock()\n fake_access.id = access_id\n fake_update = mock.MagicMock()\n self.scenario._update_resource_in_deny_access_share = mock.MagicMock(\n return_value=fake_update)\n\n fake_share = mock.MagicMock()\n fake_share.access_list.return_value = [fake_access]\n\n self.scenario._deny_access_share(fake_share, access_id)\n\n self.scenario._update_resource_in_deny_access_share \\\n .assert_called_with(fake_share, access_id)\n\n self.mock_wait_for_status.mock.assert_called_once_with(\n fake_access,\n check_deletion=True,\n ready_statuses=[\"deleted\"],\n update_resource=fake_update,\n check_interval=2.0,\n timeout=180.0)\n\n def test__update_resource_in_deny_access_share(self):\n access_id = \"foo\"\n fake_share = mock.MagicMock()\n fake_resource = mock.MagicMock()\n fake_access = mock.MagicMock()\n fake_access.id = access_id\n fake_share.access_list.return_value = [fake_access]\n\n fn = self.scenario._update_resource_in_deny_access_share(\n fake_share, access_id)\n\n assert fn(fake_resource) == fake_access\n\n def test__update_resource_in_deny_access_share_with_deleted_resource(self):\n access_id = \"foo\"\n fake_share = mock.MagicMock()\n fake_resource = mock.MagicMock()\n fake_access = mock.MagicMock()\n fake_access.access_id = access_id\n fake_share.access_list.return_value = []\n\n fn = self.scenario._update_resource_in_deny_access_share(\n fake_share, access_id)\n\n self.assertRaises(exceptions.GetResourceNotFound,\n fn, fake_resource)\n\n def test__create_share_network(self):\n fake_sn = mock.Mock()\n self.scenario.generate_random_name = mock.Mock()\n self.clients(\"manila\").share_networks.create.return_value = fake_sn\n data = {\n \"neutron_net_id\": \"fake_neutron_net_id\",\n \"neutron_subnet_id\": 
\"fake_neutron_subnet_id\",\n \"nova_net_id\": \"fake_nova_net_id\",\n \"description\": \"fake_description\",\n }\n expected = dict(data)\n expected[\"name\"] = self.scenario.generate_random_name.return_value\n\n result = self.scenario._create_share_network(**data)\n\n self.assertEqual(fake_sn, result)\n self.clients(\"manila\").share_networks.create.assert_called_once_with(\n **expected)\n\n @mock.patch(BM_UTILS + \"wait_for_status\")\n def test__delete_share_network(self, mock_wait_for_status):\n fake_sn = mock.MagicMock()\n\n self.scenario._delete_share_network(fake_sn)\n\n fake_sn.delete.assert_called_once_with()\n mock_wait_for_status.assert_called_once_with(\n fake_sn,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self.mock_get_from_manager.mock.return_value,\n timeout=180, check_interval=2)\n self.mock_get_from_manager.mock.assert_called_once_with()\n\n @ddt.data(\n {\"detailed\": True, \"search_opts\": {\"name\": \"foo_sn\"}},\n {\"detailed\": False, \"search_opts\": None},\n {},\n {\"search_opts\": {\"project_id\": \"fake_project\"}},\n )\n def test__list_share_networks(self, params):\n fake_share_networks = [\"foo\", \"bar\"]\n self.clients(\"manila\").share_networks.list.return_value = (\n fake_share_networks)\n\n result = self.scenario._list_share_networks(**params)\n\n self.assertEqual(fake_share_networks, result)\n self.clients(\"manila\").share_networks.list.assert_called_once_with(\n detailed=params.get(\"detailed\", True),\n search_opts=params.get(\"search_opts\"))\n\n @ddt.data(\n {},\n {\"search_opts\": None},\n {\"search_opts\": {\"project_id\": \"fake_project\"}},\n )\n def test__list_share_servers(self, params):\n fake_share_servers = [\"foo\", \"bar\"]\n self.admin_clients(\"manila\").share_servers.list.return_value = (\n fake_share_servers)\n\n result = self.scenario._list_share_servers(**params)\n\n self.assertEqual(fake_share_servers, result)\n self.admin_clients(\n 
\"manila\").share_servers.list.assert_called_once_with(\n search_opts=params.get(\"search_opts\"))\n\n @ddt.data(\"ldap\", \"kerberos\", \"active_directory\")\n def test__create_security_service(self, ss_type):\n fake_ss = mock.Mock()\n self.clients(\"manila\").security_services.create.return_value = fake_ss\n self.scenario.generate_random_name = mock.Mock()\n data = {\n \"security_service_type\": ss_type,\n \"dns_ip\": \"fake_dns_ip\",\n \"server\": \"fake_server\",\n \"domain\": \"fake_domain\",\n \"user\": \"fake_user\",\n \"password\": \"fake_password\",\n \"description\": \"fake_description\",\n }\n expected = dict(data)\n expected[\"type\"] = expected.pop(\"security_service_type\")\n expected[\"name\"] = self.scenario.generate_random_name.return_value\n\n result = self.scenario._create_security_service(**data)\n\n self.assertEqual(fake_ss, result)\n self.clients(\n \"manila\").security_services.create.assert_called_once_with(\n **expected)\n\n @mock.patch(BM_UTILS + \"wait_for_status\")\n def test__delete_security_service(self, mock_wait_for_status):\n fake_ss = mock.MagicMock()\n\n self.scenario._delete_security_service(fake_ss)\n\n fake_ss.delete.assert_called_once_with()\n mock_wait_for_status.assert_called_once_with(\n fake_ss,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self.mock_get_from_manager.mock.return_value,\n timeout=180, check_interval=2)\n self.mock_get_from_manager.mock.assert_called_once_with()\n\n def test__add_security_service_to_share_network(self):\n fake_sn = mock.MagicMock()\n fake_ss = mock.MagicMock()\n\n result = self.scenario._add_security_service_to_share_network(\n share_network=fake_sn, security_service=fake_ss)\n\n self.assertEqual(\n self.clients(\n \"manila\").share_networks.add_security_service.return_value,\n result)\n self.clients(\n \"manila\").share_networks.add_security_service.assert_has_calls([\n mock.call(fake_sn, fake_ss)])\n\n @ddt.data(\n {\"key_min_length\": 5, \"key_max_length\": 
4},\n {\"value_min_length\": 5, \"value_max_length\": 4},\n )\n def test__set_metadata_wrong_params(self, params):\n self.assertRaises(\n exceptions.InvalidArgumentsException,\n self.scenario._set_metadata,\n {\"id\": \"fake_share_id\"}, **params)\n\n @ddt.data(\n {},\n {\"sets\": 0, \"set_size\": 1},\n {\"sets\": 1, \"set_size\": 1},\n {\"sets\": 5, \"set_size\": 7},\n {\"sets\": 5, \"set_size\": 2},\n {\"key_min_length\": 1, \"key_max_length\": 1},\n {\"key_min_length\": 1, \"key_max_length\": 2},\n {\"key_min_length\": 256, \"key_max_length\": 256},\n {\"value_min_length\": 1, \"value_max_length\": 1},\n {\"value_min_length\": 1, \"value_max_length\": 2},\n {\"value_min_length\": 1024, \"value_max_length\": 1024},\n )\n def test__set_metadata(self, params):\n share = {\"id\": \"fake_share_id\"}\n sets = params.get(\"sets\", 1)\n set_size = params.get(\"set_size\", 1)\n gen_name_calls = sets * set_size * 2\n data = range(gen_name_calls)\n generator_data = iter(data)\n\n def fake_random_name(prefix=\"fake\", length=\"fake\"):\n return next(generator_data)\n\n scenario = self.scenario\n scenario.clients = mock.MagicMock()\n scenario._generate_random_part = mock.MagicMock(\n side_effect=fake_random_name)\n\n keys = scenario._set_metadata(share, **params)\n\n self.assertEqual(\n gen_name_calls,\n scenario._generate_random_part.call_count)\n self.assertEqual(\n params.get(\"sets\", 1),\n scenario.clients.return_value.shares.set_metadata.call_count)\n scenario.clients.return_value.shares.set_metadata.assert_has_calls([\n mock.call(\n share[\"id\"],\n dict([(j, j + 1) for j in data[\n i * set_size * 2: (i + 1) * set_size * 2: 2]])\n ) for i in range(sets)\n ])\n self.assertEqual([i for i in range(0, gen_name_calls, 2)], keys)\n\n @ddt.data(None, [], {\"fake_set\"}, {\"fake_key\": \"fake_value\"})\n def test__delete_metadata_wrong_params(self, keys):\n self.assertRaises(\n exceptions.InvalidArgumentsException,\n self.scenario._delete_metadata,\n \"fake_share\", 
keys=keys,\n )\n\n @ddt.data(\n {\"keys\": [i for i in range(30)]},\n {\"keys\": list(range(7)), \"delete_size\": 2},\n {\"keys\": list(range(7)), \"delete_size\": 3},\n {\"keys\": list(range(7)), \"delete_size\": 4},\n )\n def test__delete_metadata(self, params):\n share = {\"id\": \"fake_share_id\"}\n delete_size = params.get(\"delete_size\", 3)\n keys = params.get(\"keys\", [])\n scenario = self.scenario\n scenario.clients = mock.MagicMock()\n\n scenario._delete_metadata(share, **params)\n\n scenario.clients.return_value.shares.delete_metadata.assert_has_calls([\n mock.call(share[\"id\"], keys[i:i + delete_size])\n for i in range(0, len(keys), delete_size)\n ])\n" }, { "alpha_fraction": 0.5886562466621399, "alphanum_fraction": 0.5941060185432434, "avg_line_length": 45.13472366333008, "blob_id": "cc833f695abba8efb826395e6c3fa159997c9d1f", "content_id": "f63d4f671875e02d06700ec8dfbadaa4a7c3ef44", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48626, "license_type": "permissive", "max_line_length": 79, "num_lines": 1054, "path": "/tests/unit/common/test_osclients.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally.common import cfg\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import credential as oscredential\nfrom rally_openstack.common import osclients\nfrom tests.unit import fakes\nfrom tests.unit import test\n\nPATH = \"rally_openstack.common.osclients\"\n\n\[email protected](\"dummy\", supported_versions=(\"0.1\", \"1\"),\n default_service_type=\"bar\")\nclass DummyClient(osclients.OSClient):\n def create_client(self, *args, **kwargs):\n pass\n\n\nclass OSClientTestCaseUtils(object):\n\n def set_up_keystone_mocks(self):\n self.ksc_module = mock.MagicMock(__version__=\"2.0.0\")\n self.ksc_client = mock.MagicMock()\n self.ksa_identity_plugin = mock.MagicMock()\n self.ksa_password = mock.MagicMock(\n return_value=self.ksa_identity_plugin)\n self.ksa_identity = mock.MagicMock(Password=self.ksa_password)\n\n self.ksa_auth = mock.MagicMock()\n self.ksa_session = mock.MagicMock()\n self.patcher = mock.patch.dict(\"sys.modules\",\n {\"keystoneclient\": self.ksc_module,\n \"keystoneauth1\": self.ksa_auth})\n self.patcher.start()\n self.addCleanup(self.patcher.stop)\n self.ksc_module.client = self.ksc_client\n self.ksa_auth.identity = self.ksa_identity\n self.ksa_auth.session = self.ksa_session\n\n def make_auth_args(self):\n auth_kwargs = {\n \"auth_url\": \"http://auth_url/\", \"username\": \"user\",\n \"password\": \"password\", \"tenant_name\": \"tenant\",\n \"domain_name\": \"domain\", \"project_name\": \"project_name\",\n \"project_domain_name\": \"project_domain_name\",\n \"user_domain_name\": \"user_domain_name\",\n }\n kwargs = {\"https_insecure\": False, \"https_cacert\": None}\n kwargs.update(auth_kwargs)\n return auth_kwargs, kwargs\n\n\[email protected]\nclass OSClientTestCase(test.TestCase, OSClientTestCaseUtils):\n\n @ddt.data((0.1, True), (1, 
True), (\"0.1\", True), (\"1\", True),\n (0.2, False), (\"foo\", False))\n @ddt.unpack\n def test_validate_version(self, version, valid):\n if valid:\n DummyClient.validate_version(version)\n else:\n self.assertRaises(exceptions.ValidationError,\n DummyClient.validate_version, version)\n\n def test_choose_service_type(self):\n default_service_type = \"default_service_type\"\n\n @osclients.configure(self.id(),\n default_service_type=default_service_type)\n class FakeClient(osclients.OSClient):\n create_client = mock.MagicMock()\n\n fake_client = FakeClient({\"auth_url\": \"url\", \"username\": \"user\",\n \"password\": \"pass\"}, {})\n self.assertEqual(default_service_type,\n fake_client.choose_service_type())\n self.assertEqual(\"foo\",\n fake_client.choose_service_type(\"foo\"))\n\n @mock.patch(\"%s.Keystone.service_catalog\" % PATH)\n @ddt.data(\n {\"endpoint_type\": None, \"service_type\": None, \"region_name\": None},\n {\"endpoint_type\": \"et\", \"service_type\": \"st\", \"region_name\": \"rn\"}\n )\n @ddt.unpack\n def test__get_endpoint(self, mock_keystone_service_catalog, endpoint_type,\n service_type, region_name):\n credential = oscredential.OpenStackCredential(\n \"http://auth_url/v2.0\", \"user\", \"pass\",\n endpoint_type=endpoint_type,\n region_name=region_name)\n mock_choose_service_type = mock.MagicMock()\n osclient = osclients.OSClient(credential, mock.MagicMock())\n osclient.choose_service_type = mock_choose_service_type\n mock_url_for = mock_keystone_service_catalog.url_for\n self.assertEqual(mock_url_for.return_value,\n osclient._get_endpoint(service_type))\n call_args = {\n \"service_type\": mock_choose_service_type.return_value,\n \"region_name\": region_name}\n if endpoint_type:\n call_args[\"interface\"] = endpoint_type\n mock_url_for.assert_called_once_with(**call_args)\n mock_choose_service_type.assert_called_once_with(service_type)\n\n\nclass CachedTestCase(test.TestCase):\n\n def test_cached(self):\n clients = 
osclients.Clients({\"auth_url\": \"url\", \"username\": \"user\",\n \"password\": \"pass\"})\n\n @osclients.configure(self.id())\n class SomeClient(osclients.OSClient):\n pass\n\n fake_client = SomeClient(clients.credential, clients.cache)\n fake_client.create_client = mock.MagicMock()\n\n self.assertEqual({}, clients.cache)\n fake_client()\n self.assertEqual(\n {self.id(): fake_client.create_client.return_value},\n clients.cache)\n fake_client.create_client.assert_called_once_with()\n fake_client()\n fake_client.create_client.assert_called_once_with()\n fake_client(\"2\")\n self.assertEqual(\n {self.id(): fake_client.create_client.return_value,\n \"%s('2',)\" % self.id(): fake_client.create_client.return_value},\n clients.cache)\n clients.clear()\n self.assertEqual({}, clients.cache)\n\n\[email protected]\nclass TestCreateKeystoneClient(test.TestCase, OSClientTestCaseUtils):\n\n def setUp(self):\n super(TestCreateKeystoneClient, self).setUp()\n self.credential = oscredential.OpenStackCredential(\n \"http://auth_url/v2.0\", \"user\", \"pass\", \"tenant\")\n\n def test_create_client(self):\n # NOTE(bigjools): This is a very poor testing strategy as it\n # tightly couples the test implementation to the tested\n # function's implementation. Ideally, we'd use a fake keystone\n # but all that's happening here is that it's checking the right\n # parameters were passed to the various parts that create a\n # client. 
Hopefully one day we'll get a real fake from the\n # keystone guys.\n self.set_up_keystone_mocks()\n keystone = osclients.Keystone(self.credential, mock.MagicMock())\n keystone.get_session = mock.Mock(\n return_value=(self.ksa_session, self.ksa_identity_plugin,))\n client = keystone.create_client(version=3)\n\n kwargs_session = self.credential.to_dict()\n kwargs_session.update({\n \"auth_url\": \"http://auth_url/\",\n \"session\": self.ksa_session,\n \"timeout\": 180.0})\n keystone.get_session.assert_called_with()\n called_with = self.ksc_client.Client.call_args_list[0][1]\n self.assertEqual(\n {\"session\": self.ksa_session, \"timeout\": 180.0, \"version\": \"3\"},\n called_with)\n self.ksc_client.Client.assert_called_once_with(\n session=self.ksa_session, timeout=180.0, version=\"3\")\n self.assertIs(client, self.ksc_client.Client())\n\n def test_create_client_removes_url_path_if_version_specified(self):\n # If specifying a version on the client creation call, ensure\n # the auth_url is versionless and the version required is passed\n # into the Client() call.\n self.set_up_keystone_mocks()\n auth_kwargs, all_kwargs = self.make_auth_args()\n keystone = osclients.Keystone(\n self.credential, mock.MagicMock())\n keystone.get_session = mock.Mock(\n return_value=(self.ksa_session, self.ksa_identity_plugin,))\n client = keystone.create_client(version=\"3\")\n\n self.assertIs(client, self.ksc_client.Client())\n called_with = self.ksc_client.Client.call_args_list[0][1]\n self.assertEqual(\n {\"session\": self.ksa_session, \"timeout\": 180.0, \"version\": \"3\"},\n called_with)\n\n @ddt.data({\"original\": \"https://example.com/identity/foo/v3\",\n \"cropped\": \"https://example.com/identity/foo\"},\n {\"original\": \"https://example.com/identity/foo/v3/\",\n \"cropped\": \"https://example.com/identity/foo\"},\n {\"original\": \"https://example.com/identity/foo/v2.0\",\n \"cropped\": \"https://example.com/identity/foo\"},\n {\"original\": 
\"https://example.com/identity/foo/v2.0/\",\n \"cropped\": \"https://example.com/identity/foo\"},\n {\"original\": \"https://example.com/identity/foo\",\n \"cropped\": \"https://example.com/identity/foo\"})\n @ddt.unpack\n def test__remove_url_version(self, original, cropped):\n credential = oscredential.OpenStackCredential(\n original, \"user\", \"pass\", \"tenant\")\n keystone = osclients.Keystone(credential, {})\n self.assertEqual(cropped, keystone._remove_url_version())\n\n @ddt.data(\"http://auth_url/v2.0\", \"http://auth_url/v3\",\n \"http://auth_url/\", \"auth_url\")\n def test_keystone_get_session(self, auth_url):\n credential = oscredential.OpenStackCredential(\n auth_url, \"user\", \"pass\", \"tenant\")\n self.set_up_keystone_mocks()\n keystone = osclients.Keystone(credential, {})\n\n version_data = mock.Mock(return_value=[{\"version\": (1, 0)}])\n self.ksa_auth.discover.Discover.return_value = (\n mock.Mock(version_data=version_data))\n\n self.assertEqual((self.ksa_session.Session.return_value,\n self.ksa_identity_plugin),\n keystone.get_session())\n if auth_url.endswith(\"v2.0\"):\n self.ksa_password.assert_called_once_with(\n auth_url=auth_url, password=\"pass\",\n tenant_name=\"tenant\", username=\"user\")\n else:\n self.ksa_password.assert_called_once_with(\n auth_url=auth_url, password=\"pass\",\n tenant_name=\"tenant\", username=\"user\",\n domain_name=None, project_domain_name=None,\n user_domain_name=None)\n self.assertEqual(\n [mock.call(timeout=180.0, verify=True, cert=None),\n mock.call(auth=self.ksa_identity_plugin, timeout=180.0,\n verify=True, cert=None)],\n self.ksa_session.Session.call_args_list\n )\n\n def test_keystone_property(self):\n keystone = osclients.Keystone(self.credential, None)\n self.assertRaises(exceptions.RallyException, lambda: keystone.keystone)\n\n @mock.patch(\"%s.Keystone.get_session\" % PATH)\n def test_auth_ref(self, mock_keystone_get_session):\n session = mock.MagicMock()\n auth_plugin = mock.MagicMock()\n 
mock_keystone_get_session.return_value = (session, auth_plugin)\n cache = {}\n keystone = osclients.Keystone(self.credential, cache)\n\n self.assertEqual(auth_plugin.get_access.return_value,\n keystone.auth_ref)\n self.assertEqual(auth_plugin.get_access.return_value,\n cache[\"keystone_auth_ref\"])\n\n # check that auth_ref was cached.\n keystone.auth_ref\n mock_keystone_get_session.assert_called_once_with()\n\n @mock.patch(\"%s.LOG.exception\" % PATH)\n @mock.patch(\"%s.logging.is_debug\" % PATH)\n def test_auth_ref_fails(self, mock_is_debug, mock_log_exception):\n mock_is_debug.return_value = False\n keystone = osclients.Keystone(self.credential, {})\n session = mock.Mock()\n auth_plugin = mock.Mock()\n auth_plugin.get_access.side_effect = Exception\n keystone.get_session = mock.Mock(return_value=(session, auth_plugin))\n\n self.assertRaises(osclients.AuthenticationFailed,\n lambda: keystone.auth_ref)\n\n self.assertFalse(mock_log_exception.called)\n mock_is_debug.assert_called_once_with()\n auth_plugin.get_access.assert_called_once_with(session)\n\n @mock.patch(\"%s.LOG.exception\" % PATH)\n @mock.patch(\"%s.logging.is_debug\" % PATH)\n def test_auth_ref_fails_debug(self, mock_is_debug, mock_log_exception):\n mock_is_debug.return_value = True\n keystone = osclients.Keystone(self.credential, {})\n session = mock.Mock()\n auth_plugin = mock.Mock()\n auth_plugin.get_access.side_effect = Exception\n keystone.get_session = mock.Mock(return_value=(session, auth_plugin))\n\n self.assertRaises(osclients.AuthenticationFailed,\n lambda: keystone.auth_ref)\n\n mock_log_exception.assert_called_once_with(mock.ANY)\n mock_is_debug.assert_called_once_with()\n auth_plugin.get_access.assert_called_once_with(session)\n\n @mock.patch(\"%s.LOG.exception\" % PATH)\n @mock.patch(\"%s.logging.is_debug\" % PATH)\n def test_auth_ref_fails_debug_with_native_keystone_error(\n self, mock_is_debug, mock_log_exception):\n from keystoneauth1 import exceptions as ks_exc\n\n 
mock_is_debug.return_value = True\n keystone = osclients.Keystone(self.credential, {})\n session = mock.Mock()\n auth_plugin = mock.Mock()\n auth_plugin.get_access.side_effect = ks_exc.ConnectFailure(\"foo\")\n keystone.get_session = mock.Mock(return_value=(session, auth_plugin))\n\n self.assertRaises(osclients.AuthenticationFailed,\n lambda: keystone.auth_ref)\n\n self.assertFalse(mock_log_exception.called)\n mock_is_debug.assert_called_once_with()\n auth_plugin.get_access.assert_called_once_with(session)\n\n def test_authentication_failed_exception(self):\n from keystoneauth1 import exceptions as ks_exc\n\n original_e = KeyError(\"Oops\")\n e = osclients.AuthenticationFailed(\n url=\"https://example.com\", username=\"foo\", project=\"project\",\n error=original_e\n )\n self.assertEqual(\n \"Failed to authenticate to https://example.com for user 'foo' in \"\n \"project 'project': [KeyError] 'Oops'\",\n e.format_message())\n\n original_e = ks_exc.Unauthorized(\"The request you have made requires \"\n \"authentication.\", request_id=\"ID\")\n e = osclients.AuthenticationFailed(\n url=\"https://example.com\", username=\"foo\", project=\"project\",\n error=original_e\n )\n self.assertEqual(\n \"Failed to authenticate to https://example.com for user 'foo' in \"\n \"project 'project': The request you have made requires \"\n \"authentication.\",\n e.format_message())\n\n original_e = ks_exc.ConnectionError(\"Some user-friendly native error\")\n e = osclients.AuthenticationFailed(\n url=\"https://example.com\", username=\"foo\", project=\"project\",\n error=original_e\n )\n self.assertEqual(\"Some user-friendly native error\",\n e.format_message())\n\n original_e = ks_exc.ConnectionError(\n \"Unable to establish connection to https://example.com:500: \"\n \"HTTPSConnectionPool(host='example.com', port=500): Max retries \"\n \"exceeded with url: / (Caused by NewConnectionError('<urllib3.\"\n \"connection.VerifiedHTTPSConnection object at 0x7fb87a48e510>: \"\n \"Failed to 
establish a new connection: [Errno 101] Network \"\n \"is unreachable\")\n e = osclients.AuthenticationFailed(\n url=\"https://example.com\", username=\"foo\", project=\"project\",\n error=original_e\n )\n self.assertEqual(\n \"Unable to establish connection to https://example.com:500\",\n e.format_message())\n\n original_e = ks_exc.ConnectionError(\n \"Unable to establish connection to https://example.com:500: \"\n # another pool class\n \"HTTPConnectionPool(host='example.com', port=500): Max retries \"\n \"exceeded with url: / (Caused by NewConnectionError('<urllib3.\"\n \"connection.VerifiedHTTPSConnection object at 0x7fb87a48e510>: \"\n \"Failed to establish a new connection: [Errno 101] Network \"\n \"is unreachable\")\n e = osclients.AuthenticationFailed(\n url=\"https://example.com\", username=\"foo\", project=\"project\",\n error=original_e\n )\n self.assertEqual(\n \"Unable to establish connection to https://example.com:500\",\n e.format_message())\n\n\[email protected]\nclass OSClientsTestCase(test.TestCase):\n\n def setUp(self):\n super(OSClientsTestCase, self).setUp()\n self.credential = oscredential.OpenStackCredential(\n \"http://auth_url/v2.0\", \"user\", \"pass\", \"tenant\")\n self.clients = osclients.Clients(self.credential, {})\n\n self.fake_keystone = fakes.FakeKeystoneClient()\n\n keystone_patcher = mock.patch(\n \"%s.Keystone.create_client\" % PATH,\n return_value=self.fake_keystone)\n self.mock_create_keystone_client = keystone_patcher.start()\n\n self.auth_ref_patcher = mock.patch(\"%s.Keystone.auth_ref\" % PATH)\n self.auth_ref = self.auth_ref_patcher.start()\n\n self.service_catalog = self.auth_ref.service_catalog\n self.service_catalog.url_for = mock.MagicMock()\n\n def test_create_from_env(self):\n with mock.patch.dict(\"os.environ\",\n {\"OS_AUTH_URL\": \"foo_auth_url\",\n \"OS_USERNAME\": \"foo_username\",\n \"OS_PASSWORD\": \"foo_password\",\n \"OS_TENANT_NAME\": \"foo_tenant_name\",\n \"OS_REGION_NAME\": \"foo_region_name\"}):\n 
clients = osclients.Clients.create_from_env()\n\n self.assertEqual(\"foo_auth_url\", clients.credential.auth_url)\n self.assertEqual(\"foo_username\", clients.credential.username)\n self.assertEqual(\"foo_password\", clients.credential.password)\n self.assertEqual(\"foo_tenant_name\", clients.credential.tenant_name)\n self.assertEqual(\"foo_region_name\", clients.credential.region_name)\n\n def test_keystone(self):\n self.assertNotIn(\"keystone\", self.clients.cache)\n client = self.clients.keystone()\n self.assertEqual(self.fake_keystone, client)\n credential = {\"timeout\": cfg.CONF.openstack_client_http_timeout,\n \"insecure\": False, \"cacert\": None}\n kwargs = self.credential.to_dict()\n kwargs.update(credential)\n self.mock_create_keystone_client.assert_called_once_with()\n self.assertEqual(self.fake_keystone, self.clients.cache[\"keystone\"])\n\n def test_keystone_versions(self):\n self.clients.keystone.validate_version(2)\n self.clients.keystone.validate_version(3)\n\n def test_keysonte_service_type(self):\n self.assertRaises(exceptions.RallyException,\n self.clients.keystone.is_service_type_configurable)\n\n def test_verified_keystone(self):\n self.auth_ref.role_names = [\"admin\"]\n self.assertEqual(self.mock_create_keystone_client.return_value,\n self.clients.verified_keystone())\n\n def test_verified_keystone_user_not_admin(self):\n self.auth_ref.role_names = [\"notadmin\"]\n self.assertRaises(exceptions.InvalidAdminException,\n self.clients.verified_keystone)\n\n @mock.patch(\"%s.Keystone.get_session\" % PATH)\n def test_verified_keystone_authentication_fails(self,\n mock_keystone_get_session):\n self.auth_ref_patcher.stop()\n mock_keystone_get_session.side_effect = (\n exceptions.AuthenticationFailed(\n username=self.credential.username,\n project=self.credential.tenant_name,\n url=self.credential.auth_url,\n etype=KeyError,\n error=\"oops\")\n )\n self.assertRaises(exceptions.AuthenticationFailed,\n self.clients.verified_keystone)\n\n 
@mock.patch(\"%s.Nova._get_endpoint\" % PATH)\n def test_nova(self, mock_nova__get_endpoint):\n fake_nova = fakes.FakeNovaClient()\n mock_nova__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_nova = mock.MagicMock()\n mock_nova.client.Client.return_value = fake_nova\n mock_keystoneauth1 = mock.MagicMock()\n self.assertNotIn(\"nova\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"novaclient\": mock_nova,\n \"keystoneauth1\": mock_keystoneauth1}):\n mock_keystoneauth1.discover.Discover.return_value = (\n mock.Mock(version_data=mock.Mock(return_value=[\n {\"version\": (2, 0)}]))\n )\n client = self.clients.nova()\n self.assertEqual(fake_nova, client)\n kw = {\n \"version\": \"2\",\n \"session\": mock_keystoneauth1.session.Session(),\n \"endpoint_override\": mock_nova__get_endpoint.return_value}\n mock_nova.client.Client.assert_called_once_with(**kw)\n self.assertEqual(fake_nova, self.clients.cache[\"nova\"])\n\n def test_nova_validate_version(self):\n osclients.Nova.validate_version(\"2\")\n self.assertRaises(exceptions.RallyException,\n osclients.Nova.validate_version, \"foo\")\n\n def test_nova_service_type(self):\n self.clients.nova.is_service_type_configurable()\n\n @mock.patch(\"%s.Neutron._get_endpoint\" % PATH)\n def test_neutron(self, mock_neutron__get_endpoint):\n fake_neutron = fakes.FakeNeutronClient()\n mock_neutron__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_neutron = mock.MagicMock()\n mock_keystoneauth1 = mock.MagicMock()\n mock_neutron.client.Client.return_value = fake_neutron\n self.assertNotIn(\"neutron\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"neutronclient.neutron\": mock_neutron,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.neutron()\n self.assertEqual(fake_neutron, client)\n kw = {\n \"session\": mock_keystoneauth1.session.Session(),\n \"endpoint_override\": mock_neutron__get_endpoint.return_value}\n 
mock_neutron.client.Client.assert_called_once_with(\"2.0\", **kw)\n self.assertEqual(fake_neutron, self.clients.cache[\"neutron\"])\n\n @mock.patch(\"%s.Neutron._get_endpoint\" % PATH)\n def test_neutron_endpoint_type(self, mock_neutron__get_endpoint):\n fake_neutron = fakes.FakeNeutronClient()\n mock_neutron__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_neutron = mock.MagicMock()\n mock_keystoneauth1 = mock.MagicMock()\n mock_neutron.client.Client.return_value = fake_neutron\n self.assertNotIn(\"neutron\", self.clients.cache)\n self.credential[\"endpoint_type\"] = \"internal\"\n with mock.patch.dict(\"sys.modules\",\n {\"neutronclient.neutron\": mock_neutron,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.neutron()\n self.assertEqual(fake_neutron, client)\n kw = {\n \"session\": mock_keystoneauth1.session.Session(),\n \"endpoint_override\": mock_neutron__get_endpoint.return_value,\n \"endpoint_type\": \"internal\"}\n mock_neutron.client.Client.assert_called_once_with(\"2.0\", **kw)\n self.assertEqual(fake_neutron, self.clients.cache[\"neutron\"])\n\n @mock.patch(\"%s.Octavia._get_endpoint\" % PATH)\n def test_octavia(self, mock_octavia__get_endpoint):\n fake_octavia = fakes.FakeOctaviaClient()\n mock_octavia__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_octavia = mock.MagicMock()\n mock_keystoneauth1 = mock.MagicMock()\n mock_octavia.octavia.OctaviaAPI.return_value = fake_octavia\n self.assertNotIn(\"octavia\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"octaviaclient.api.v2\": mock_octavia,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.octavia()\n self.assertEqual(fake_octavia, client)\n kw = {\"endpoint\": mock_octavia__get_endpoint.return_value,\n \"session\": mock_keystoneauth1.session.Session()}\n mock_octavia.octavia.OctaviaAPI.assert_called_once_with(**kw)\n self.assertEqual(fake_octavia, self.clients.cache[\"octavia\"])\n\n @mock.patch(\"%s.Heat._get_endpoint\" % 
PATH)\n def test_heat(self, mock_heat__get_endpoint):\n fake_heat = fakes.FakeHeatClient()\n mock_heat__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_heat = mock.MagicMock()\n mock_keystoneauth1 = mock.MagicMock()\n mock_heat.client.Client.return_value = fake_heat\n self.assertNotIn(\"heat\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"heatclient\": mock_heat,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.heat()\n self.assertEqual(fake_heat, client)\n kw = {\n \"session\": mock_keystoneauth1.session.Session(),\n \"endpoint_override\": mock_heat__get_endpoint.return_value}\n mock_heat.client.Client.assert_called_once_with(\"1\", **kw)\n self.assertEqual(fake_heat, self.clients.cache[\"heat\"])\n\n @mock.patch(\"%s.Heat._get_endpoint\" % PATH)\n def test_heat_endpoint_type_interface(self, mock_heat__get_endpoint):\n fake_heat = fakes.FakeHeatClient()\n mock_heat__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_heat = mock.MagicMock()\n mock_keystoneauth1 = mock.MagicMock()\n mock_heat.client.Client.return_value = fake_heat\n self.assertNotIn(\"heat\", self.clients.cache)\n self.credential[\"endpoint_type\"] = \"internal\"\n with mock.patch.dict(\"sys.modules\",\n {\"heatclient\": mock_heat,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.heat()\n self.assertEqual(fake_heat, client)\n kw = {\n \"session\": mock_keystoneauth1.session.Session(),\n \"endpoint_override\": mock_heat__get_endpoint.return_value,\n \"interface\": \"internal\"}\n mock_heat.client.Client.assert_called_once_with(\"1\", **kw)\n self.assertEqual(fake_heat, self.clients.cache[\"heat\"])\n\n @mock.patch(\"%s.Glance._get_endpoint\" % PATH)\n def test_glance(self, mock_glance__get_endpoint):\n fake_glance = fakes.FakeGlanceClient()\n mock_glance = mock.MagicMock()\n mock_glance__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_keystoneauth1 = mock.MagicMock()\n mock_glance.Client = 
mock.MagicMock(return_value=fake_glance)\n with mock.patch.dict(\"sys.modules\",\n {\"glanceclient\": mock_glance,\n \"keystoneauth1\": mock_keystoneauth1}):\n self.assertNotIn(\"glance\", self.clients.cache)\n client = self.clients.glance()\n self.assertEqual(fake_glance, client)\n kw = {\n \"version\": \"2\",\n \"session\": mock_keystoneauth1.session.Session(),\n \"endpoint_override\": mock_glance__get_endpoint.return_value}\n mock_glance.Client.assert_called_once_with(**kw)\n self.assertEqual(fake_glance, self.clients.cache[\"glance\"])\n\n @mock.patch(\"%s.Cinder._get_endpoint\" % PATH)\n def test_cinder(self, mock_cinder__get_endpoint):\n fake_cinder = mock.MagicMock(client=fakes.FakeCinderClient())\n mock_cinder = mock.MagicMock()\n mock_cinder.client.Client.return_value = fake_cinder\n mock_cinder__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_keystoneauth1 = mock.MagicMock()\n self.assertNotIn(\"cinder\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"cinderclient\": mock_cinder,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.cinder()\n self.assertEqual(fake_cinder, client)\n kw = {\n \"session\": mock_keystoneauth1.session.Session(),\n \"endpoint_override\": mock_cinder__get_endpoint.return_value}\n mock_cinder.client.Client.assert_called_once_with(\n \"3\", **kw)\n self.assertEqual(fake_cinder, self.clients.cache[\"cinder\"])\n\n @mock.patch(\"%s.Manila._get_endpoint\" % PATH)\n def test_manila(self, mock_manila__get_endpoint):\n mock_manila = mock.MagicMock()\n mock_manila__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_keystoneauth1 = mock.MagicMock()\n self.assertNotIn(\"manila\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"manilaclient\": mock_manila,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.manila()\n self.assertEqual(mock_manila.client.Client.return_value, client)\n kw = {\n \"insecure\": False,\n \"session\": 
mock_keystoneauth1.session.Session(),\n \"service_catalog_url\": mock_manila__get_endpoint.return_value\n }\n mock_manila.client.Client.assert_called_once_with(\"1\", **kw)\n self.assertEqual(\n mock_manila.client.Client.return_value,\n self.clients.cache[\"manila\"])\n\n def test_manila_validate_version(self):\n osclients.Manila.validate_version(\"2.0\")\n osclients.Manila.validate_version(\"2.32\")\n self.assertRaises(exceptions.RallyException,\n osclients.Manila.validate_version, \"foo\")\n\n def test_gnocchi(self):\n fake_gnocchi = fakes.FakeGnocchiClient()\n mock_gnocchi = mock.MagicMock()\n mock_gnocchi.client.Client.return_value = fake_gnocchi\n mock_keystoneauth1 = mock.MagicMock()\n self.assertNotIn(\"gnocchi\", self.clients.cache)\n self.credential[\"endpoint_type\"] = \"internal\"\n with mock.patch.dict(\"sys.modules\",\n {\"gnocchiclient\": mock_gnocchi,\n \"keystoneauth1\": mock_keystoneauth1}):\n mock_keystoneauth1.discover.Discover.return_value = (\n mock.Mock(version_data=mock.Mock(return_value=[\n {\"version\": (1, 0)}]))\n )\n client = self.clients.gnocchi()\n\n self.assertEqual(fake_gnocchi, client)\n kw = {\"version\": \"1\",\n \"session\": mock_keystoneauth1.session.Session(),\n \"adapter_options\": {\"service_type\": \"metric\",\n \"interface\": \"internal\"}}\n mock_gnocchi.client.Client.assert_called_once_with(**kw)\n self.assertEqual(fake_gnocchi, self.clients.cache[\"gnocchi\"])\n\n def test_monasca(self):\n fake_monasca = fakes.FakeMonascaClient()\n mock_monasca = mock.MagicMock()\n mock_monasca.client.Client.return_value = fake_monasca\n self.assertNotIn(\"monasca\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"monascaclient\": mock_monasca}):\n client = self.clients.monasca()\n self.assertEqual(fake_monasca, client)\n self.service_catalog.url_for.assert_called_once_with(\n service_type=\"monitoring\",\n region_name=self.credential.region_name)\n os_endpoint = self.service_catalog.url_for.return_value\n kw = 
{\"token\": self.auth_ref.auth_token,\n \"timeout\": cfg.CONF.openstack_client_http_timeout,\n \"insecure\": False, \"cacert\": None,\n \"username\": self.credential.username,\n \"password\": self.credential.password,\n \"tenant_name\": self.credential.tenant_name,\n \"auth_url\": self.credential.auth_url\n }\n mock_monasca.client.Client.assert_called_once_with(\"2_0\",\n os_endpoint,\n **kw)\n self.assertEqual(mock_monasca.client.Client.return_value,\n self.clients.cache[\"monasca\"])\n\n @mock.patch(\"%s.Ironic._get_endpoint\" % PATH)\n def test_ironic(self, mock_ironic__get_endpoint):\n fake_ironic = fakes.FakeIronicClient()\n mock_ironic = mock.MagicMock()\n mock_ironic.client.get_client = mock.MagicMock(\n return_value=fake_ironic)\n mock_ironic__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_keystoneauth1 = mock.MagicMock()\n self.assertNotIn(\"ironic\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"ironicclient\": mock_ironic,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.ironic()\n self.assertEqual(fake_ironic, client)\n kw = {\n \"session\": mock_keystoneauth1.session.Session(),\n \"endpoint\": mock_ironic__get_endpoint.return_value}\n mock_ironic.client.get_client.assert_called_once_with(\"1\", **kw)\n self.assertEqual(fake_ironic, self.clients.cache[\"ironic\"])\n\n @mock.patch(\"%s.Sahara._get_endpoint\" % PATH)\n def test_sahara(self, mock_sahara__get_endpoint):\n fake_sahara = fakes.FakeSaharaClient()\n mock_sahara = mock.MagicMock()\n mock_sahara.client.Client = mock.MagicMock(return_value=fake_sahara)\n mock_sahara__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_keystoneauth1 = mock.MagicMock()\n self.assertNotIn(\"sahara\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"saharaclient\": mock_sahara,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.sahara()\n self.assertEqual(fake_sahara, client)\n kw = {\n \"session\": 
mock_keystoneauth1.session.Session(),\n \"sahara_url\": mock_sahara__get_endpoint.return_value}\n mock_sahara.client.Client.assert_called_once_with(1.1, **kw)\n self.assertEqual(fake_sahara, self.clients.cache[\"sahara\"])\n\n def test_zaqar(self):\n fake_zaqar = fakes.FakeZaqarClient()\n mock_zaqar = mock.MagicMock()\n mock_zaqar.client.Client = mock.MagicMock(return_value=fake_zaqar)\n self.assertNotIn(\"zaqar\", self.clients.cache)\n mock_keystoneauth1 = mock.MagicMock()\n with mock.patch.dict(\"sys.modules\", {\"zaqarclient.queues\":\n mock_zaqar,\n \"keystoneauth1\":\n mock_keystoneauth1}):\n client = self.clients.zaqar()\n self.assertEqual(fake_zaqar, client)\n self.service_catalog.url_for.assert_called_once_with(\n service_type=\"messaging\",\n region_name=self.credential.region_name)\n fake_zaqar_url = self.service_catalog.url_for.return_value\n mock_zaqar.client.Client.assert_called_once_with(\n url=fake_zaqar_url, version=1.1,\n session=mock_keystoneauth1.session.Session())\n self.assertEqual(fake_zaqar, self.clients.cache[\"zaqar\"],\n mock_keystoneauth1.session.Session())\n\n @mock.patch(\"%s.Trove._get_endpoint\" % PATH)\n def test_trove(self, mock_trove__get_endpoint):\n fake_trove = fakes.FakeTroveClient()\n mock_trove = mock.MagicMock()\n mock_trove.client.Client = mock.MagicMock(return_value=fake_trove)\n mock_trove__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_keystoneauth1 = mock.MagicMock()\n self.assertNotIn(\"trove\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"troveclient\": mock_trove,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.trove()\n self.assertEqual(fake_trove, client)\n kw = {\n \"session\": mock_keystoneauth1.session.Session(),\n \"endpoint\": mock_trove__get_endpoint.return_value}\n mock_trove.client.Client.assert_called_once_with(\"1.0\", **kw)\n self.assertEqual(fake_trove, self.clients.cache[\"trove\"])\n\n def test_mistral(self):\n fake_mistral = 
fakes.FakeMistralClient()\n mock_mistral = mock.Mock()\n mock_mistral.client.client.return_value = fake_mistral\n\n self.assertNotIn(\"mistral\", self.clients.cache)\n with mock.patch.dict(\n \"sys.modules\", {\"mistralclient\": mock_mistral,\n \"mistralclient.api\": mock_mistral}):\n client = self.clients.mistral()\n self.assertEqual(fake_mistral, client)\n self.service_catalog.url_for.assert_called_once_with(\n service_type=\"workflowv2\",\n region_name=self.credential.region_name\n )\n fake_mistral_url = self.service_catalog.url_for.return_value\n mock_mistral.client.client.assert_called_once_with(\n mistral_url=fake_mistral_url,\n service_type=\"workflowv2\",\n auth_token=self.auth_ref.auth_token\n )\n self.assertEqual(fake_mistral, self.clients.cache[\"mistral\"])\n\n def test_swift(self):\n fake_swift = fakes.FakeSwiftClient()\n mock_swift = mock.MagicMock()\n mock_swift.client.Connection = mock.MagicMock(return_value=fake_swift)\n self.assertNotIn(\"swift\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\", {\"swiftclient\": mock_swift}):\n client = self.clients.swift()\n self.assertEqual(fake_swift, client)\n self.service_catalog.url_for.assert_called_once_with(\n service_type=\"object-store\",\n region_name=self.credential.region_name)\n kw = {\"retries\": 1,\n \"preauthurl\": self.service_catalog.url_for.return_value,\n \"preauthtoken\": self.auth_ref.auth_token,\n \"insecure\": False,\n \"cacert\": None,\n \"user\": self.credential.username,\n \"tenant_name\": self.credential.tenant_name,\n }\n mock_swift.client.Connection.assert_called_once_with(**kw)\n self.assertEqual(fake_swift, self.clients.cache[\"swift\"])\n\n @mock.patch(\"%s.Keystone.service_catalog\" % PATH)\n def test_services(self, mock_keystone_service_catalog):\n available_services = {consts.ServiceType.IDENTITY: {},\n consts.ServiceType.COMPUTE: {},\n \"some_service\": {}}\n mock_get_endpoints = mock_keystone_service_catalog.get_endpoints\n mock_get_endpoints.return_value = 
available_services\n clients = osclients.Clients(self.credential)\n\n self.assertEqual(\n {consts.ServiceType.IDENTITY: consts.Service.KEYSTONE,\n consts.ServiceType.COMPUTE: consts.Service.NOVA,\n \"some_service\": \"__unknown__\"},\n clients.services())\n\n def test_murano(self):\n fake_murano = fakes.FakeMuranoClient()\n mock_murano = mock.Mock()\n mock_murano.client.Client.return_value = fake_murano\n self.assertNotIn(\"murano\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\", {\"muranoclient\": mock_murano}):\n client = self.clients.murano()\n self.assertEqual(fake_murano, client)\n self.service_catalog.url_for.assert_called_once_with(\n service_type=\"application-catalog\",\n region_name=self.credential.region_name\n )\n kw = {\"endpoint\": self.service_catalog.url_for.return_value,\n \"token\": self.auth_ref.auth_token}\n mock_murano.client.Client.assert_called_once_with(\"1\", **kw)\n self.assertEqual(fake_murano, self.clients.cache[\"murano\"])\n\n @mock.patch(\"%s.Keystone.get_session\" % PATH)\n @ddt.data(\n {},\n {\"version\": \"2\"},\n {\"version\": None}\n )\n @ddt.unpack\n def test_designate(self, mock_keystone_get_session, version=None):\n fake_designate = fakes.FakeDesignateClient()\n mock_designate = mock.Mock()\n mock_designate.client.Client.return_value = fake_designate\n\n mock_keystone_get_session.return_value = (\"fake_session\",\n \"fake_auth_plugin\")\n\n self.assertNotIn(\"designate\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"designateclient\": mock_designate}):\n if version is not None:\n client = self.clients.designate(version=version)\n else:\n client = self.clients.designate()\n self.assertEqual(fake_designate, client)\n self.service_catalog.url_for.assert_called_once_with(\n service_type=\"dns\",\n region_name=self.credential.region_name\n )\n\n default = version or \"2\"\n\n # Check that we append /v<version>\n url = self.service_catalog.url_for.return_value\n 
url.__iadd__.assert_called_once_with(\"/v%s\" % default)\n\n mock_keystone_get_session.assert_called_once_with()\n\n mock_designate.client.Client.assert_called_once_with(\n default,\n endpoint_override=url.__iadd__.return_value,\n session=\"fake_session\")\n\n key = \"designate\"\n if version is not None:\n key += \"%s\" % {\"version\": version}\n self.assertEqual(fake_designate, self.clients.cache[key])\n\n def test_senlin(self):\n mock_senlin = mock.MagicMock()\n self.assertNotIn(\"senlin\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\", {\"senlinclient\": mock_senlin}):\n client = self.clients.senlin()\n self.assertEqual(mock_senlin.client.Client.return_value, client)\n mock_senlin.client.Client.assert_called_once_with(\n \"1\",\n username=self.credential.username,\n password=self.credential.password,\n project_name=self.credential.tenant_name,\n cert=self.credential.cacert,\n auth_url=self.credential.auth_url)\n self.assertEqual(\n mock_senlin.client.Client.return_value,\n self.clients.cache[\"senlin\"])\n\n @mock.patch(\"%s.Magnum._get_endpoint\" % PATH)\n def test_magnum(self, mock_magnum__get_endpoint):\n fake_magnum = fakes.FakeMagnumClient()\n mock_magnum = mock.MagicMock()\n mock_magnum.client.Client.return_value = fake_magnum\n\n mock_magnum__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_keystoneauth1 = mock.MagicMock()\n\n self.assertNotIn(\"magnum\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"magnumclient\": mock_magnum,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.magnum()\n\n self.assertEqual(fake_magnum, client)\n kw = {\n \"interface\": self.credential.endpoint_type,\n \"session\": mock_keystoneauth1.session.Session(),\n \"magnum_url\": mock_magnum__get_endpoint.return_value}\n\n mock_magnum.client.Client.assert_called_once_with(**kw)\n self.assertEqual(fake_magnum, self.clients.cache[\"magnum\"])\n\n @mock.patch(\"%s.Watcher._get_endpoint\" % PATH)\n def test_watcher(self, 
mock_watcher__get_endpoint):\n fake_watcher = fakes.FakeWatcherClient()\n mock_watcher = mock.MagicMock()\n mock_watcher__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_keystoneauth1 = mock.MagicMock()\n mock_watcher.client.Client.return_value = fake_watcher\n self.assertNotIn(\"watcher\", self.clients.cache)\n with mock.patch.dict(\"sys.modules\",\n {\"watcherclient\": mock_watcher,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.watcher()\n\n self.assertEqual(fake_watcher, client)\n kw = {\n \"session\": mock_keystoneauth1.session.Session(),\n \"endpoint\": mock_watcher__get_endpoint.return_value}\n\n mock_watcher.client.Client.assert_called_once_with(\"1\", **kw)\n self.assertEqual(fake_watcher, self.clients.cache[\"watcher\"])\n\n @mock.patch(\"%s.Barbican._get_endpoint\" % PATH)\n def test_barbican(self, mock_barbican__get_endpoint):\n fake_barbican = fakes.FakeBarbicanClient()\n mock_barbican = mock.MagicMock()\n mock_barbican__get_endpoint.return_value = \"http://fake.to:2/fake\"\n mock_keystoneauth1 = mock.MagicMock()\n mock_barbican.client.Client.return_value = fake_barbican\n with mock.patch.dict(\"sys.modules\",\n {\"barbicanclient\": mock_barbican,\n \"keystoneauth1\": mock_keystoneauth1}):\n client = self.clients.barbican()\n\n self.assertEqual(fake_barbican, client)\n kw = {\n \"session\": mock_keystoneauth1.session.Session(),\n \"version\": \"v1\"\n }\n mock_barbican.client.Client.assert_called_once_with(**kw)\n self.assertEqual(fake_barbican, self.clients.cache[\"barbican\"])\n\n\nclass AuthenticationFailedTestCase(test.TestCase):\n def test_init(self):\n from keystoneauth1 import exceptions as ks_exc\n\n actual_exc = ks_exc.ConnectionError(\"Something\")\n exc = osclients.AuthenticationFailed(\n error=actual_exc, url=\"https://example.com\", username=\"user\",\n project=\"project\")\n # only original exc should be used\n self.assertEqual(\"Something\", exc.format_message())\n\n actual_exc = 
Exception(\"Something\")\n exc = osclients.AuthenticationFailed(\n error=actual_exc, url=\"https://example.com\", username=\"user\",\n project=\"project\")\n # additional info should be added\n self.assertEqual(\"Failed to authenticate to https://example.com for \"\n \"user 'user' in project 'project': \"\n \"[Exception] Something\", exc.format_message())\n\n # check cutting message\n actual_exc = ks_exc.DiscoveryFailure(\n \"Could not find versioned identity endpoints when attempting to \"\n \"authenticate. Please check that your auth_url is correct. \"\n \"Unable to establish connection to https://example.com: \"\n \"HTTPConnectionPool(host='example.com', port=80): Max retries \"\n \"exceeded with url: / (Caused by NewConnectionError('\"\n \"<urllib3.connection.HTTPConnection object at 0x7f32ab9809d0>: \"\n \"Failed to establish a new connection: [Errno -2] Name or service\"\n \" not known',))\")\n exc = osclients.AuthenticationFailed(\n error=actual_exc, url=\"https://example.com\", username=\"user\",\n project=\"project\")\n # original message should be simplified\n self.assertEqual(\n \"Could not find versioned identity endpoints when attempting to \"\n \"authenticate. Please check that your auth_url is correct. 
\"\n \"Unable to establish connection to https://example.com\",\n exc.format_message())\n" }, { "alpha_fraction": 0.6744566559791565, "alphanum_fraction": 0.6760925650596619, "avg_line_length": 41.790000915527344, "blob_id": "4cb8558b69851a52f2ea0e97059abbfe68f0fa4e", "content_id": "95e78f0f8ef70948c89cec77f97a936a30e45606", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8558, "license_type": "permissive", "max_line_length": 79, "num_lines": 200, "path": "/rally_openstack/task/scenarios/nova/aggregates.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016 IBM Corp.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally import exceptions\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.nova import utils\n\n\n\"\"\"Scenarios for Nova aggregates.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"NovaAggregates.list_aggregates\",\n platform=\"openstack\")\nclass ListAggregates(utils.NovaScenario):\n\n def run(self):\n \"\"\"List all nova aggregates.\n\n Measure the \"nova aggregate-list\" command performance.\n \"\"\"\n self._list_aggregates()\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaAggregates.create_and_list_aggregates\",\n platform=\"openstack\")\nclass CreateAndListAggregates(utils.NovaScenario):\n \"\"\"scenario for create and list aggregate.\"\"\"\n\n def run(self, availability_zone):\n \"\"\"Create a aggregate and then list all aggregates.\n\n This scenario creates a aggregate and then lists all aggregates.\n :param availability_zone: The availability zone of the aggregate\n \"\"\"\n aggregate = self._create_aggregate(availability_zone)\n msg = \"Aggregate isn't created\"\n self.assertTrue(aggregate, err_msg=msg)\n all_aggregates = self._list_aggregates()\n msg = (\"Created aggregate is not in the\"\n \" list of all available aggregates\")\n self.assertIn(aggregate, all_aggregates, err_msg=msg)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email 
protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaAggregates.create_and_delete_aggregate\",\n platform=\"openstack\")\nclass CreateAndDeleteAggregate(utils.NovaScenario):\n \"\"\"Scenario for create and delete aggregate.\"\"\"\n\n def run(self, availability_zone):\n \"\"\"Create an aggregate and then delete it.\n\n This scenario first creates an aggregate and then delete it.\n :param availability_zone: The availability zone of the aggregate\n \"\"\"\n aggregate = self._create_aggregate(availability_zone)\n self._delete_aggregate(aggregate)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaAggregates.create_and_update_aggregate\",\n platform=\"openstack\")\nclass CreateAndUpdateAggregate(utils.NovaScenario):\n \"\"\"Scenario for create and update aggregate.\"\"\"\n\n def run(self, availability_zone):\n \"\"\"Create an aggregate and then update its name and availability_zone\n\n This scenario first creates an aggregate and then update its name and\n availability_zone\n :param availability_zone: The availability zone of the aggregate\n \"\"\"\n aggregate = self._create_aggregate(availability_zone)\n self._update_aggregate(aggregate)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaAggregates.create_aggregate_add_and_remove_host\",\n platform=\"openstack\")\nclass CreateAggregateAddAndRemoveHost(utils.NovaScenario):\n \"\"\"Scenario for add a host to and remove the host from an aggregate.\"\"\"\n\n def run(self, availability_zone):\n \"\"\"Create an aggregate, add a host to and remove the host from it\n\n Measure \"nova aggregate-add-host\" and \"nova aggregate-remove-host\"\n 
command performance.\n :param availability_zone: The availability zone of the aggregate\n \"\"\"\n aggregate = self._create_aggregate(availability_zone)\n hosts = self._list_hypervisors()\n host_name = hosts[0].service[\"host\"]\n self._aggregate_add_host(aggregate, host_name)\n self._aggregate_remove_host(aggregate, host_name)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova\"]},\n name=\"NovaAggregates.create_and_get_aggregate_details\",\n platform=\"openstack\")\nclass CreateAndGetAggregateDetails(utils.NovaScenario):\n \"\"\"Scenario for create and get aggregate details.\"\"\"\n\n def run(self, availability_zone):\n \"\"\"Create an aggregate and then get its details.\n\n This scenario first creates an aggregate and then get details of it.\n :param availability_zone: The availability zone of the aggregate\n \"\"\"\n aggregate = self._create_aggregate(availability_zone)\n self._get_aggregate_details(aggregate)\n\n\[email protected](image={\"type\": \"glance_image\"})\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](\n context={\"admin_cleanup@openstack\": [\"nova\"],\n \"cleanup@openstack\": [\"nova\"]},\n name=\"NovaAggregates.create_aggregate_add_host_and_boot_server\",\n platform=\"openstack\")\nclass CreateAggregateAddHostAndBootServer(utils.NovaScenario):\n \"\"\"Scenario to verify an aggregate.\"\"\"\n\n def run(self, image, metadata, availability_zone=None, ram=512, vcpus=1,\n disk=1, boot_server_kwargs=None):\n \"\"\"Scenario to create and verify an aggregate\n\n This scenario creates an aggregate, adds a compute host and metadata\n to the aggregate, adds the same metadata to the flavor and creates an\n instance. 
Verifies that instance host is one of the hosts in the\n aggregate.\n\n :param image: The image ID to boot from\n :param metadata: The metadata to be set as flavor extra specs\n :param availability_zone: The availability zone of the aggregate\n :param ram: Memory in MB for the flavor\n :param vcpus: Number of VCPUs for the flavor\n :param disk: Size of local disk in GB\n :param boot_server_kwargs: Optional additional arguments to verify host\n aggregates\n :raises RallyException: if instance and aggregate hosts do not match\n \"\"\"\n\n boot_server_kwargs = boot_server_kwargs or {}\n\n aggregate = self._create_aggregate(availability_zone)\n hosts = self._list_hypervisors()\n\n host_name = None\n for i in range(len(hosts)):\n if hosts[i].state == \"up\" and hosts[i].status == \"enabled\":\n host_name = hosts[i].service[\"host\"]\n break\n if not host_name:\n raise exceptions.RallyException(\"Could not find an available host\")\n\n self._aggregate_set_metadata(aggregate, metadata)\n self._aggregate_add_host(aggregate, host_name)\n flavor = self._create_flavor(ram, vcpus, disk)\n flavor.set_keys(metadata)\n\n server = self._boot_server(image, flavor.id, **boot_server_kwargs)\n # NOTE: we need to get server object by admin user to obtain\n # \"hypervisor_hostname\" attribute\n server = self.admin_clients(\"nova\").servers.get(server.id)\n instance_hostname = getattr(server,\n \"OS-EXT-SRV-ATTR:hypervisor_hostname\")\n if instance_hostname != host_name:\n raise exceptions.RallyException(\"Instance host and aggregate \"\n \"host are different\")\n" }, { "alpha_fraction": 0.6728590726852417, "alphanum_fraction": 0.6782963275909424, "avg_line_length": 43.13999938964844, "blob_id": "dca705adb9150bde5d5c2ff75a9b0caeb5ed183c", "content_id": "a848d07bd3dbbdaf73f2b6de0329bf765a15820f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2207, "license_type": "permissive", "max_line_length": 79, "num_lines": 50, 
"path": "/rally_openstack/task/scenarios/senlin/clusters.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.senlin import utils\n\n\n\"\"\"Scenarios for Senlin clusters.\"\"\"\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\"required_services\", services=[consts.Service.SENLIN])\[email protected](\"required_contexts\", contexts=(\"profiles\"))\[email protected](context={\"admin_cleanup@openstack\": [\"senlin\"]},\n name=\"SenlinClusters.create_and_delete_cluster\",\n platform=\"openstack\")\nclass CreateAndDeleteCluster(utils.SenlinScenario):\n\n def run(self, desired_capacity=0, min_size=0,\n max_size=-1, timeout=3600, metadata=None):\n \"\"\"Create a cluster and then delete it.\n\n Measure the \"senlin cluster-create\" and \"senlin cluster-delete\"\n commands performance.\n\n :param desired_capacity: The capacity or initial number of nodes\n owned by the cluster\n :param min_size: The minimum number of nodes owned by the cluster\n :param max_size: The maximum number of nodes owned by the cluster.\n -1 means no limit\n :param timeout: The timeout value in seconds for cluster creation\n :param metadata: A set of key value pairs to associate with the cluster\n \"\"\"\n\n profile_id = 
self.context[\"tenant\"][\"profile\"]\n cluster = self._create_cluster(profile_id, desired_capacity,\n min_size, max_size, timeout, metadata)\n self._delete_cluster(cluster)\n" }, { "alpha_fraction": 0.6575935482978821, "alphanum_fraction": 0.6600680351257324, "avg_line_length": 38.42683029174805, "blob_id": "0292d059f85b1e71fe34991ea6bdfdca51fa066d", "content_id": "78b94909c7e8fba3b473ae051c2af663a9889e08", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3233, "license_type": "permissive", "max_line_length": 78, "num_lines": 82, "path": "/tests/unit/task/scenarios/barbican/test_orders.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat Inc\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.barbican import orders\nfrom tests.unit import test\n\n\nclass BarbicanOrdersTestCase(test.ScenarioTestCase):\n\n def get_test_context(self):\n context = super(BarbicanOrdersTestCase, self).get_test_context()\n context.update({\n \"admin\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake\"}\n })\n return context\n\n def setUp(self):\n super(BarbicanOrdersTestCase, self).setUp()\n m = \"rally_openstack.common.services.key_manager.barbican\"\n patch = mock.patch(\"%s.BarbicanService\" % m)\n self.addCleanup(patch.stop)\n self.mock_secrets = patch.start()\n\n def test_list_orders(self):\n barbican_service = self.mock_secrets.return_value\n scenario = orders.BarbicanOrdersList(self.context)\n scenario.run()\n barbican_service.orders_list.assert_called_once_with()\n\n def test_key_create_and_delete(self):\n keys = {\"order_ref\": \"fake-key\"}\n barbican_service = self.mock_secrets.return_value\n scenario = orders.BarbicanOrdersCreateKeyAndDelete(self.context)\n scenario.run()\n keys = barbican_service.create_key.return_value\n barbican_service.create_key.assert_called_once_with()\n barbican_service.orders_delete.assert_called_once_with(\n keys.order_ref)\n\n def test_certificate_create_and_delete(self):\n certificate = {\"order_ref\": \"fake-certificate\"}\n barbican_service = self.mock_secrets.return_value\n scenario = orders.BarbicanOrdersCreateCertificateAndDelete(\n self.context)\n scenario.run()\n certificate = barbican_service.create_certificate.return_value\n barbican_service.create_certificate.assert_called_once_with()\n barbican_service.orders_delete.assert_called_once_with(\n certificate.order_ref)\n\n def test_asymmetric_create_and_delete(self):\n certificate = 
{\"order_ref\": \"fake-certificate\"}\n barbican_service = self.mock_secrets.return_value\n scenario = orders.BarbicanOrdersCreateAsymmetricAndDelete(\n self.context)\n scenario.run()\n certificate = barbican_service.create_asymmetric.return_value\n barbican_service.create_asymmetric.assert_called_once_with()\n barbican_service.orders_delete.assert_called_once_with(\n certificate.order_ref)\n" }, { "alpha_fraction": 0.6353557705879211, "alphanum_fraction": 0.6383793354034424, "avg_line_length": 43.693695068359375, "blob_id": "5e03f07d196e526177daa6a2c2f1ebeec72f792e", "content_id": "6cd8ae744aa8839da999ed6d9bee023afae6f3c0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4961, "license_type": "permissive", "max_line_length": 79, "num_lines": 111, "path": "/rally_openstack/task/scenarios/elasticsearch/logging.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport requests\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.common import utils as commonutils\nfrom rally.task import atomic\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.nova import utils as nova_utils\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\"\"\"Scenario for Elasticsearch logging system.\"\"\"\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"ElasticsearchLogging.log_instance\",\n platform=\"openstack\")\nclass ElasticsearchLogInstanceName(nova_utils.NovaScenario):\n \"\"\"Test logging instance in conjunction with Elasticsearch system.\n\n Let OpenStack platform already has logging agent (for example, Filebeat),\n which sends nova logs to Elasticsearch through data processing pipeline\n (e.g. Logstash). The test verifies Openstack nova logs stored in logging\n system. 
It creates nova instance with random name and after instance\n becomes available, checks it's name in Elasticsearch indices by querying.\n \"\"\"\n\n @atomic.action_timer(\"elasticsearch.check_server_log_indexed\")\n def _check_server_name(self, server_id, logging_vip, elasticsearch_port,\n sleep_time, retries_total, additional_query=None):\n request_data = {\n \"query\": {\n \"bool\": {\n \"must\": [{\"match_phrase\": {\"Payload\": server_id}}]\n }\n }\n }\n if additional_query:\n request_data[\"query\"][\"bool\"].update(additional_query)\n\n LOG.info(\"Check server ID %s in elasticsearch\" % server_id)\n i = 0\n while i < retries_total:\n LOG.debug(\"Attempt number %s\" % (i + 1))\n resp = requests.get(\"http://%(ip)s:%(port)s/_search\" % {\n \"ip\": logging_vip, \"port\": elasticsearch_port},\n data=json.dumps(request_data))\n result = resp.json()\n if result[\"hits\"][\"total\"] < 1 and i + 1 >= retries_total:\n LOG.debug(\"No instance data found in Elasticsearch\")\n self.assertGreater(result[\"hits\"][\"total\"], 0)\n elif result[\"hits\"][\"total\"] < 1:\n i += 1\n commonutils.interruptable_sleep(sleep_time)\n else:\n LOG.debug(\"Instance data found in Elasticsearch\")\n self.assertGreater(result[\"hits\"][\"total\"], 0)\n break\n\n def run(self, image, flavor, logging_vip, elasticsearch_port, sleep_time=5,\n retries_total=30, boot_server_kwargs=None, force_delete=False,\n query_by_name=False, additional_query=None):\n \"\"\"Create nova instance and check it indexed in elasticsearch.\n\n :param image: image for server\n :param flavor: flavor for server\n :param logging_vip: logging system IP to check server name in\n elasticsearch index\n :param boot_server_kwargs: special server kwargs for boot\n :param force_delete: force delete server or not\n :param elasticsearch_port: elasticsearch port to use for check server\n :param additional_query: map of additional arguments for scenario\n elasticsearch query to check nova info in els index.\n :param 
query_by_name: query nova server by name if True otherwise by id\n :param sleep_time: sleep time in seconds between elasticsearch request\n :param retries_total: total number of retries to check server name in\n elasticsearch\n \"\"\"\n server = self._boot_server(image, flavor, **(boot_server_kwargs or {}))\n if query_by_name:\n server_id = server.name\n else:\n server_id = server.id\n self._check_server_name(server_id, logging_vip, elasticsearch_port,\n sleep_time, retries_total,\n additional_query=additional_query)\n self._delete_server(server, force=force_delete)\n" }, { "alpha_fraction": 0.4666123688220978, "alphanum_fraction": 0.4873778522014618, "avg_line_length": 71.23529052734375, "blob_id": "8b426ea900d1667a2291944c0e7f603e4b45b3f2", "content_id": "e4123ac49a13b34e1b6a96e6dd9e81c29e72d037", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2456, "license_type": "permissive", "max_line_length": 84, "num_lines": 34, "path": "/requirements.txt", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# of appearance. 
Changing the order has an impact on the overall integration\n# process, which may cause wedges in the gate later.\n\nrequests!=2.20.0,!=2.24.0 # Apache License, Version 2.0\n\nrally>=3.4.0 # Apache License, Version 2.0\n\n# OpenStack related\ngnocchiclient # Apache Software License\nkeystoneauth1 # Apache Software License\nkubernetes # Apache License Version 2.0\nos-faults>=0.2.0 # Apache Software License\nosprofiler # Apache Software License\npython-barbicanclient # Apache Software License\npython-cinderclient!=4.0.0 # Apache Software License\npython-designateclient # Apache License, Version 2.0\npython-heatclient # Apache Software License\npython-glanceclient # Apache License, Version 2.0\npython-ironicclient!=2.5.2,!=2.7.1,!=3.0.0 # Apache Software License\npython-keystoneclient!=2.1.0 # Apache Software License\npython-magnumclient # Apache Software License\npython-manilaclient # Apache Software License\npython-mistralclient!=3.2.0 # Apache Software License\npython-muranoclient # Apache License, Version 2.0\npython-monascaclient # Apache Software License\npython-neutronclient # Apache Software License\npython-novaclient # Apache License, Version 2.0\npython-octaviaclient # Apache Software License\npython-saharaclient # Apache License, Version 2.0\npython-senlinclient # Apache Software License\npython-swiftclient # Apache License, Version 2.0\npython-troveclient # Apache Software License\npython-watcherclient # Apache Software License\npython-zaqarclient # Apache Software License\n" }, { "alpha_fraction": 0.5021283626556396, "alphanum_fraction": 0.5085134506225586, "avg_line_length": 34.511627197265625, "blob_id": "b939460adc96d54e03ebcc4bfea63633c3be3f70", "content_id": "ce084edd8b1c04cb4ae41f805b890deab6861410", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6108, "license_type": "permissive", "max_line_length": 79, "num_lines": 172, "path": 
"/tests/unit/task/contexts/nova/test_servers.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport copy\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.nova import servers\nfrom rally_openstack.task.scenarios.nova import utils as nova_utils\nfrom tests.unit import fakes\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.nova\"\nSCN = \"rally_openstack.task.scenarios\"\nTYP = \"rally_openstack.task.types\"\n\n\nclass ServerGeneratorTestCase(test.ScenarioTestCase):\n\n def _gen_tenants(self, count):\n tenants = {}\n for id_ in range(count):\n tenants[str(id_)] = {\"name\": str(id_)}\n return tenants\n\n def test_init(self):\n tenants_count = 2\n servers_per_tenant = 5\n self.context.update({\n \"config\": {\n \"servers\": {\n \"servers_per_tenant\": servers_per_tenant,\n }\n },\n \"tenants\": self._gen_tenants(tenants_count)})\n\n inst = servers.ServerGenerator(self.context)\n self.assertEqual({\"auto_assign_nic\": False, \"servers_per_tenant\": 5},\n inst.config)\n\n @mock.patch(\"%s.nova.utils.NovaScenario._boot_servers\" % SCN,\n return_value=[\n fakes.FakeServer(id=\"uuid\"),\n fakes.FakeServer(id=\"uuid\"),\n fakes.FakeServer(id=\"uuid\"),\n fakes.FakeServer(id=\"uuid\"),\n fakes.FakeServer(id=\"uuid\")\n ])\n @mock.patch(\"%s.GlanceImage\" % TYP)\n @mock.patch(\"%s.Flavor\" % TYP)\n def test_setup(self, mock_flavor, 
mock_glance_image,\n mock_nova_scenario__boot_servers):\n\n tenants_count = 2\n users_per_tenant = 5\n servers_per_tenant = 5\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for id_ in tenants.keys():\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": id_,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": 2,\n \"users_per_tenant\": 5,\n \"concurrent\": 10,\n },\n \"servers\": {\n \"auto_assign_nic\": True,\n \"servers_per_tenant\": 5,\n \"image\": {\n \"name\": \"cirros-0.5.2-x86_64-uec\",\n },\n \"flavor\": {\n \"name\": \"m1.tiny\",\n },\n \"nics\": [\"foo\", \"bar\"]\n },\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n new_context = copy.deepcopy(self.context)\n for id_ in new_context[\"tenants\"]:\n new_context[\"tenants\"][id_].setdefault(\"servers\", [])\n for i in range(servers_per_tenant):\n new_context[\"tenants\"][id_][\"servers\"].append(\"uuid\")\n\n servers_ctx = servers.ServerGenerator(self.context)\n servers_ctx.setup()\n self.assertEqual(new_context, self.context)\n image_id = mock_glance_image.return_value.pre_process.return_value\n flavor_id = mock_flavor.return_value.pre_process.return_value\n servers_ctx_config = self.context[\"config\"][\"servers\"]\n expected_auto_nic = servers_ctx_config.get(\"auto_assign_nic\", False)\n expected_requests = servers_ctx_config.get(\"servers_per_tenant\", False)\n called_times = len(tenants)\n mock_calls = [mock.call(image_id, flavor_id,\n auto_assign_nic=expected_auto_nic,\n nics=[{\"net-id\": \"foo\"}, {\"net-id\": \"bar\"}],\n requests=expected_requests)\n for i in range(called_times)]\n mock_nova_scenario__boot_servers.assert_has_calls(mock_calls)\n\n @mock.patch(\"%s.servers.resource_manager.cleanup\" % CTX)\n def test_cleanup(self, mock_cleanup):\n\n tenants_count = 2\n users_per_tenant = 5\n servers_per_tenant = 5\n\n tenants = 
self._gen_tenants(tenants_count)\n users = []\n for id_ in tenants.keys():\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": id_,\n \"credential\": \"credential\"})\n tenants[id_].setdefault(\"servers\", [])\n for j in range(servers_per_tenant):\n tenants[id_][\"servers\"].append(\"uuid\")\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": 2,\n \"users_per_tenant\": 5,\n \"concurrent\": 10,\n },\n \"servers\": {\n \"servers_per_tenant\": 5,\n \"image\": {\n \"name\": \"cirros-0.5.2-x86_64-uec\",\n },\n \"flavor\": {\n \"name\": \"m1.tiny\",\n },\n },\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n servers_ctx = servers.ServerGenerator(self.context)\n servers_ctx.cleanup()\n\n mock_cleanup.assert_called_once_with(\n names=[\"nova.servers\"],\n users=self.context[\"users\"],\n superclass=nova_utils.NovaScenario,\n task_id=self.context[\"owner_id\"])\n" }, { "alpha_fraction": 0.6217858791351318, "alphanum_fraction": 0.6320710778236389, "avg_line_length": 35.879310607910156, "blob_id": "f933ba9bf8f936598a842813bfc6447f93f75c60", "content_id": "6bcb7b44a0050637807fcedfa91939b75c5a1e60", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2139, "license_type": "permissive", "max_line_length": 78, "num_lines": 58, "path": "/tests/unit/task/contexts/quotas/test_neutron_quotas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.quotas import neutron_quotas\nfrom tests.unit import test\n\n\nclass NeutronQuotasTestCase(test.TestCase):\n def setUp(self):\n super(NeutronQuotasTestCase, self).setUp()\n self.quotas = {\n \"network\": 20,\n \"subnet\": 20,\n \"port\": 100,\n \"router\": 20,\n \"floatingip\": 100,\n \"security_group\": 100,\n \"security_group_rule\": 100\n }\n\n def test_update(self):\n clients = mock.MagicMock()\n neutron_quo = neutron_quotas.NeutronQuotas(clients)\n tenant_id = mock.MagicMock()\n neutron_quo.update(tenant_id, **self.quotas)\n body = {\"quota\": self.quotas}\n clients.neutron().update_quota.assert_called_once_with(tenant_id,\n body=body)\n\n def test_delete(self):\n clients = mock.MagicMock()\n neutron_quo = neutron_quotas.NeutronQuotas(clients)\n tenant_id = mock.MagicMock()\n neutron_quo.delete(tenant_id)\n clients.neutron().delete_quota.assert_called_once_with(tenant_id)\n\n def test_get(self):\n tenant_id = \"tenant_id\"\n clients = mock.MagicMock()\n clients.neutron.return_value.show_quota.return_value = {\n \"quota\": self.quotas}\n neutron_quo = neutron_quotas.NeutronQuotas(clients)\n\n self.assertEqual(self.quotas, neutron_quo.get(tenant_id))\n clients.neutron().show_quota.assert_called_once_with(tenant_id)\n" }, { "alpha_fraction": 0.6063522696495056, "alphanum_fraction": 0.6102021336555481, "avg_line_length": 35.24418640136719, "blob_id": "344c4dc045266d1526e57bef28f70960d2f3490f", "content_id": "6dd227e3d698ca49184e479c57d3b7bd870f31ac", "detected_licenses": [ "Apache-2.0" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3117, "license_type": "permissive", "max_line_length": 79, "num_lines": 86, "path": "/tests/unit/task/contexts/cleanup/test_admin.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally.common import utils\nfrom rally.task import context\n\nfrom rally_openstack.task.contexts.cleanup import admin\nfrom rally_openstack.task import scenario\nfrom tests.unit import test\n\n\nADMIN = \"rally_openstack.task.contexts.cleanup.admin\"\nBASE = \"rally_openstack.task.contexts.cleanup.base\"\n\n\[email protected]\nclass AdminCleanupTestCase(test.TestCase):\n\n @mock.patch(\"%s.manager\" % BASE)\n @ddt.data(([\"a\", \"b\"], True),\n ([\"a\", \"e\"], False),\n (3, False))\n @ddt.unpack\n def test_validate(self, config, valid, mock_manager):\n mock_manager.list_resource_names.return_value = {\"a\", \"b\", \"c\"}\n results = context.Context.validate(\n \"admin_cleanup\", None, None, config, allow_hidden=True)\n if valid:\n self.assertEqual([], results)\n else:\n self.assertGreater(len(results), 0)\n\n @mock.patch(\"rally.common.plugin.discover.itersubclasses\")\n @mock.patch(\"%s.manager.find_resource_managers\" % ADMIN,\n return_value=[mock.MagicMock(), mock.MagicMock()])\n @mock.patch(\"%s.manager.SeekAndDestroy\" % 
ADMIN)\n def test_cleanup(self, mock_seek_and_destroy, mock_find_resource_managers,\n mock_itersubclasses):\n class ResourceClass(utils.RandomNameGeneratorMixin):\n pass\n\n mock_itersubclasses.return_value = [ResourceClass]\n\n ctx = {\n \"config\": {\"admin_cleanup\": [\"a\", \"b\"]},\n \"admin\": mock.MagicMock(),\n \"users\": mock.MagicMock(),\n \"task\": {\"uuid\": \"task_id\"}\n }\n\n admin_cleanup = admin.AdminCleanup(ctx)\n admin_cleanup.setup()\n admin_cleanup.cleanup()\n\n mock_itersubclasses.assert_called_once_with(scenario.OpenStackScenario)\n mock_find_resource_managers.assert_called_once_with((\"a\", \"b\"), True)\n mock_seek_and_destroy.assert_has_calls([\n mock.call(mock_find_resource_managers.return_value[0],\n ctx[\"admin\"],\n ctx[\"users\"],\n resource_classes=[ResourceClass],\n task_id=\"task_id\"),\n mock.call().exterminate(),\n mock.call(mock_find_resource_managers.return_value[1],\n ctx[\"admin\"],\n ctx[\"users\"],\n resource_classes=[ResourceClass],\n task_id=\"task_id\"),\n mock.call().exterminate()\n ])\n" }, { "alpha_fraction": 0.7320234179496765, "alphanum_fraction": 0.7353678941726685, "avg_line_length": 40.24137878417969, "blob_id": "039eb1e3bc73240d2e1102b40e5bf5e399ccbd99", "content_id": "9c122caf65515d9e3bde51da62fac365c2fc1a84", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2392, "license_type": "permissive", "max_line_length": 78, "num_lines": 58, "path": "/rally_openstack/common/cfg/opts.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally_openstack.common.cfg import cinder\nfrom rally_openstack.common.cfg import glance\nfrom rally_openstack.common.cfg import heat\nfrom rally_openstack.common.cfg import ironic\nfrom rally_openstack.common.cfg import magnum\nfrom rally_openstack.common.cfg import manila\nfrom rally_openstack.common.cfg import mistral\nfrom rally_openstack.common.cfg import monasca\nfrom rally_openstack.common.cfg import murano\nfrom rally_openstack.common.cfg import neutron\nfrom rally_openstack.common.cfg import nova\nfrom rally_openstack.common.cfg import octavia\nfrom rally_openstack.common.cfg import osclients\nfrom rally_openstack.common.cfg import profiler\nfrom rally_openstack.common.cfg import sahara\nfrom rally_openstack.common.cfg import senlin\nfrom rally_openstack.common.cfg import vm\nfrom rally_openstack.common.cfg import watcher\n\nfrom rally_openstack.common.cfg import tempest\n\nfrom rally_openstack.common.cfg import keystone_roles\nfrom rally_openstack.common.cfg import keystone_users\n\nfrom rally_openstack.common.cfg import cleanup\n\nfrom rally_openstack.task.ui.charts import osprofilerchart\n\n\ndef list_opts():\n\n opts = {}\n for l_opts in (cinder.OPTS, heat.OPTS, ironic.OPTS, magnum.OPTS,\n manila.OPTS, mistral.OPTS, monasca.OPTS, murano.OPTS,\n nova.OPTS, osclients.OPTS, profiler.OPTS, sahara.OPTS,\n vm.OPTS, glance.OPTS, watcher.OPTS, tempest.OPTS,\n keystone_roles.OPTS, keystone_users.OPTS, cleanup.OPTS,\n senlin.OPTS, neutron.OPTS, octavia.OPTS,\n osprofilerchart.OPTS):\n for category, opt in l_opts.items():\n 
opts.setdefault(category, [])\n opts[category].extend(opt)\n return [(k, v) for k, v in opts.items()]\n" }, { "alpha_fraction": 0.5945847630500793, "alphanum_fraction": 0.5973452925682068, "avg_line_length": 45.96894454956055, "blob_id": "b75e65612469c76262be6050601aaef0e3276c85", "content_id": "d214d55eab2930035b1aefe26c6c947a63889527", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60496, "license_type": "permissive", "max_line_length": 79, "num_lines": 1288, "path": "/tests/unit/task/scenarios/nova/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally.common import cfg\nfrom rally import exceptions as rally_exceptions\nfrom rally_openstack.task.scenarios.nova import utils\nfrom tests.unit import fakes\nfrom tests.unit import test\n\nBM_UTILS = \"rally.task.utils\"\nNOVA_UTILS = \"rally_openstack.task.scenarios.nova.utils\"\nCONF = cfg.CONF\n\n\[email protected]\nclass NovaScenarioTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(NovaScenarioTestCase, self).setUp()\n self.server = mock.Mock()\n self.server1 = mock.Mock()\n self.volume = mock.Mock()\n self.floating_ip = mock.Mock()\n self.image = mock.Mock()\n self.context.update(\n {\"user\": {\"id\": \"fake_user_id\", \"credential\": mock.MagicMock()},\n \"tenant\": {\"id\": \"fake_tenant\"}})\n\n def _context_with_secgroup(self, secgroup):\n retval = {\"user\": {\"secgroup\": secgroup,\n \"credential\": mock.MagicMock()}}\n retval.update(self.context)\n return retval\n\n def test__list_servers(self):\n servers_list = []\n self.clients(\"nova\").servers.list.return_value = servers_list\n nova_scenario = utils.NovaScenario(self.context)\n return_servers_list = nova_scenario._list_servers(True)\n self.assertEqual(servers_list, return_servers_list)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_servers\")\n\n def test__pick_random_nic(self):\n context = {\"tenant\": {\"networks\": [{\"id\": \"net_id_1\"},\n {\"id\": \"net_id_2\"}]},\n \"iteration\": 0}\n nova_scenario = utils.NovaScenario(context=context)\n nic1 = nova_scenario._pick_random_nic()\n self.assertEqual(nic1, [{\"net-id\": \"net_id_1\"}])\n\n context[\"iteration\"] = 1\n nova_scenario = utils.NovaScenario(context=context)\n nic2 = nova_scenario._pick_random_nic()\n # balance to net 2\n self.assertEqual(nic2, [{\"net-id\": \"net_id_2\"}])\n\n context[\"iteration\"] = 2\n nova_scenario = 
utils.NovaScenario(context=context)\n nic3 = nova_scenario._pick_random_nic()\n # balance again, get net 1\n self.assertEqual(nic3, [{\"net-id\": \"net_id_1\"}])\n\n def test__get_network_id(self):\n networks = {\"networks\": [{\"name\": \"foo1\", \"id\": 1},\n {\"name\": \"foo2\", \"id\": 2}]}\n self.clients(\"neutron\").list_networks.return_value = networks\n scenario = utils.NovaScenario(self.context)\n self.assertEqual(1, scenario._get_network_id(\"foo1\"))\n self.assertEqual(2, scenario._get_network_id(\"foo2\"))\n self.clients(\"neutron\").list_networks.assert_called_once_with()\n self.assertRaises(rally_exceptions.NotFoundException,\n scenario._get_network_id, \"foo\")\n\n @ddt.data(\n {},\n {\"kwargs\": {\"auto_assign_nic\": True}},\n {\"kwargs\": {\"auto_assign_nic\": True, \"nics\": [{\"net-id\": \"baz_id\"}]}},\n {\"context\": {\"user\": {\"secgroup\": {\"name\": \"test\"}}}},\n {\"context\": {\"user\": {\"secgroup\": {\"name\": \"new8\"}}},\n \"kwargs\": {\"security_groups\": [\"test8\"]}},\n {\"context\": {\"user\": {\"secgroup\": {\"name\": \"test1\"}}},\n \"kwargs\": {\"security_groups\": [\"test1\"]}},\n {\"kwargs\": {\"auto_assign_nic\": False,\n \"nics\": [{\"net-name\": \"foo_name\"}]}}\n )\n @ddt.unpack\n def test__boot_server(self, context=None, kwargs=None):\n self.clients(\"nova\").servers.create.return_value = self.server\n\n if context is None:\n context = self.context\n context.setdefault(\"user\", {}).setdefault(\"credential\",\n mock.MagicMock())\n context.setdefault(\"config\", {})\n\n nova_scenario = utils.NovaScenario(context=context)\n nova_scenario.generate_random_name = mock.Mock()\n nova_scenario._pick_random_nic = mock.Mock(\n return_value=[{\"net-id\": \"foo\"}])\n nova_scenario._get_network_id = mock.Mock(return_value=\"foo\")\n\n if kwargs is None:\n kwargs = {}\n kwargs[\"fakearg\"] = \"fakearg\"\n return_server = nova_scenario._boot_server(\"image_id\", \"flavor_id\",\n **kwargs)\n 
self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_boot_poll_interval,\n timeout=CONF.openstack.nova_server_boot_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self.assertEqual(self.mock_wait_for_status.mock.return_value,\n return_server)\n\n expected_kwargs = {\"fakearg\": \"fakearg\"}\n if \"nics\" in kwargs:\n expected_kwargs[\"nics\"] = kwargs[\"nics\"]\n elif \"auto_assign_nic\" in kwargs:\n expected_kwargs[\"nics\"] = (nova_scenario._pick_random_nic.\n return_value)\n\n expected_secgroups = set()\n if \"security_groups\" in kwargs:\n expected_secgroups.update(kwargs[\"security_groups\"])\n if \"secgroup\" in context[\"user\"]:\n expected_secgroups.add(context[\"user\"][\"secgroup\"][\"name\"])\n if expected_secgroups:\n expected_kwargs[\"security_groups\"] = list(expected_secgroups)\n\n self.clients(\"nova\").servers.create.assert_called_once_with(\n nova_scenario.generate_random_name.return_value,\n \"image_id\", \"flavor_id\", **expected_kwargs)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.boot_server\")\n\n def test__boot_server_with_network_exception(self):\n self.context.update({\"tenant\": {\"networks\": None}})\n\n self.clients(\"nova\").servers.create.return_value = self.server\n\n nova_scenario = utils.NovaScenario(\n context=self.context)\n self.assertRaises(TypeError, nova_scenario._boot_server,\n \"image_id\", \"flavor_id\",\n auto_assign_nic=True)\n\n def test__suspend_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._suspend_server(self.server)\n self.server.suspend.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"SUSPENDED\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n 
check_interval=CONF.openstack.nova_server_suspend_poll_interval,\n timeout=CONF.openstack.nova_server_suspend_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.suspend_server\")\n\n def test__resume_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._resume_server(self.server)\n self.server.resume.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_resume_poll_interval,\n timeout=CONF.openstack.nova_server_resume_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.resume_server\")\n\n def test__pause_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._pause_server(self.server)\n self.server.pause.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"PAUSED\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_pause_poll_interval,\n timeout=CONF.openstack.nova_server_pause_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.pause_server\")\n\n def test__unpause_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._unpause_server(self.server)\n self.server.unpause.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_unpause_poll_interval,\n timeout=CONF.openstack.nova_server_unpause_timeout)\n 
self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.unpause_server\")\n\n def test__shelve_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._shelve_server(self.server)\n self.server.shelve.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_has_calls([\n mock.call(\n self.server,\n ready_statuses=[\"SHELVED_OFFLOADED\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_shelve_poll_interval,\n timeout=CONF.openstack.nova_server_shelve_timeout\n ),\n mock.call(\n self.server,\n ready_statuses=[\"None\"],\n status_attr=\"OS-EXT-STS:task_state\",\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_shelve_poll_interval,\n timeout=CONF.openstack.nova_server_shelve_timeout)]\n )\n self.assertEqual(2, self.mock_get_from_manager.mock.call_count)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.shelve_server\")\n\n def test__unshelve_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._unshelve_server(self.server)\n self.server.unshelve.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_unshelve_poll_interval,\n timeout=CONF.openstack.nova_server_unshelve_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.unshelve_server\")\n\n @mock.patch(\"rally_openstack.task.scenarios.nova.utils.image_service\")\n def test__create_image(self, mock_image_service):\n glance = mock_image_service.Image.return_value\n glance.get_image.return_value = self.image\n nova_scenario = 
utils.NovaScenario(context=self.context)\n return_image = nova_scenario._create_image(self.server)\n self.mock_wait_for_status.mock.assert_has_calls([\n mock.call(\n self.image,\n ready_statuses=[\"ACTIVE\"],\n update_resource=glance.get_image,\n check_interval=CONF.openstack.\n nova_server_image_create_poll_interval,\n timeout=CONF.openstack.nova_server_image_create_timeout),\n mock.call(\n self.server,\n ready_statuses=[\"None\"],\n status_attr=\"OS-EXT-STS:task_state\",\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.\n nova_server_image_create_poll_interval,\n timeout=CONF.openstack.nova_server_image_create_timeout)\n ])\n self.assertEqual(self.mock_wait_for_status.mock.return_value,\n return_image)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.snapshot_server\")\n\n def test__default_delete_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._delete_server(self.server)\n self.server.delete.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_delete_poll_interval,\n timeout=CONF.openstack.nova_server_delete_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.delete_server\")\n\n def test__force_delete_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._delete_server(self.server, force=True)\n self.server.force_delete.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_delete_poll_interval,\n 
timeout=CONF.openstack.nova_server_delete_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.force_delete_server\")\n\n def test__reboot_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._reboot_server(self.server)\n self.server.reboot.assert_called_once_with(reboot_type=\"HARD\")\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_reboot_poll_interval,\n timeout=CONF.openstack.nova_server_reboot_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.reboot_server\")\n\n def test__soft_reboot_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._soft_reboot_server(self.server)\n self.server.reboot.assert_called_once_with(reboot_type=\"SOFT\")\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_reboot_poll_interval,\n timeout=CONF.openstack.nova_server_reboot_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.soft_reboot_server\")\n\n def test__rebuild_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._rebuild_server(self.server, \"img\", fakearg=\"fakearg\")\n self.server.rebuild.assert_called_once_with(\"img\", fakearg=\"fakearg\")\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_rebuild_poll_interval,\n 
timeout=CONF.openstack.nova_server_rebuild_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.rebuild_server\")\n\n def test__start_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._start_server(self.server)\n self.server.start.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_start_poll_interval,\n timeout=CONF.openstack.nova_server_start_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.start_server\")\n\n def test__stop_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._stop_server(self.server)\n self.server.stop.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"SHUTOFF\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_stop_poll_interval,\n timeout=CONF.openstack.nova_server_stop_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.stop_server\")\n\n def test__rescue_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._rescue_server(self.server)\n self.server.rescue.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"RESCUE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_rescue_poll_interval,\n timeout=CONF.openstack.nova_server_rescue_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n 
self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.rescue_server\")\n\n def test__unrescue_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._unrescue_server(self.server)\n self.server.unrescue.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_unrescue_poll_interval,\n timeout=CONF.openstack.nova_server_unrescue_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.unrescue_server\")\n\n def _test_delete_servers(self, force=False):\n servers = [self.server, self.server1]\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._delete_servers(servers, force=force)\n check_interval = CONF.openstack.nova_server_delete_poll_interval\n expected = []\n for server in servers:\n expected.append(mock.call(\n server,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=check_interval,\n timeout=CONF.openstack.nova_server_delete_timeout))\n if force:\n server.force_delete.assert_called_once_with()\n self.assertFalse(server.delete.called)\n else:\n server.delete.assert_called_once_with()\n self.assertFalse(server.force_delete.called)\n\n self.mock_wait_for_status.mock.assert_has_calls(expected)\n timer_name = \"nova.%sdelete_servers\" % (\"force_\" if force else \"\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n timer_name)\n\n def test__default_delete_servers(self):\n self._test_delete_servers()\n\n def test__force_delete_servers(self):\n self._test_delete_servers(force=True)\n\n @mock.patch(\"rally_openstack.task.scenarios.nova.utils.image_service\")\n def test__delete_image(self, mock_image_service):\n glance = 
mock_image_service.Image.return_value\n nova_scenario = utils.NovaScenario(context=self.context,\n clients=mock.Mock())\n nova_scenario._delete_image(self.image)\n glance.delete_image.assert_called_once_with(self.image.id)\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.image,\n ready_statuses=[\"deleted\", \"pending_delete\"],\n check_deletion=True,\n update_resource=glance.get_image,\n check_interval=CONF.openstack.\n nova_server_image_delete_poll_interval,\n timeout=CONF.openstack.nova_server_image_delete_timeout)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.delete_image\")\n\n @ddt.data(\n {\"requests\": 1},\n {\"requests\": 25},\n {\"requests\": 2, \"instances_amount\": 100, \"auto_assign_nic\": True,\n \"fakearg\": \"fake\"},\n {\"auto_assign_nic\": True, \"nics\": [{\"net-id\": \"foo\"}]},\n {\"auto_assign_nic\": False, \"nics\": [{\"net-id\": \"foo\"}]},\n {\"auto_assign_nic\": False, \"nics\": [{\"net-name\": \"foo_name\"}]})\n @ddt.unpack\n def test__boot_servers(self, image_id=\"image\", flavor_id=\"flavor\",\n requests=1, instances_amount=1,\n auto_assign_nic=False, **kwargs):\n servers = [mock.Mock() for i in range(instances_amount)]\n self.clients(\"nova\").servers.list.return_value = servers\n scenario = utils.NovaScenario(context=self.context)\n scenario.generate_random_name = mock.Mock()\n scenario._pick_random_nic = mock.Mock(\n return_value=[{\"net-id\": \"foo\"}])\n scenario._get_network_id = mock.Mock(return_value=\"foo\")\n\n scenario._boot_servers(image_id, flavor_id, requests,\n instances_amount=instances_amount,\n auto_assign_nic=auto_assign_nic,\n **kwargs)\n\n expected_kwargs = dict(kwargs)\n if auto_assign_nic and \"nics\" not in kwargs:\n expected_kwargs[\"nics\"] = scenario._pick_random_nic.return_value\n\n create_calls = [\n mock.call(\n \"%s_%d\" % (scenario.generate_random_name.return_value, i),\n image_id, flavor_id,\n min_count=instances_amount, max_count=instances_amount,\n 
**expected_kwargs)\n for i in range(requests)]\n self.clients(\"nova\").servers.create.assert_has_calls(create_calls)\n\n wait_for_status_calls = [\n mock.call(\n servers[i],\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_boot_poll_interval,\n timeout=CONF.openstack.nova_server_boot_timeout)\n for i in range(instances_amount)]\n self.mock_wait_for_status.mock.assert_has_calls(wait_for_status_calls)\n\n self.mock_get_from_manager.mock.assert_has_calls(\n [mock.call() for i in range(instances_amount)])\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"nova.boot_servers\")\n\n def test__show_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._show_server(self.server)\n self.clients(\"nova\").servers.get.assert_called_once_with(\n self.server\n )\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.show_server\")\n\n def test__get_console_server(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._get_server_console_output(self.server)\n self.clients(\n \"nova\").servers.get_console_output.assert_called_once_with(\n self.server, length=None)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.get_console_output_server\")\n\n def test__get_console_url(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._get_console_url_server(self.server, \"foo\")\n self.clients(\n \"nova\").servers.get_console_url.assert_called_once_with(\n self.server, \"foo\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.get_console_url_server\")\n\n def test__associate_floating_ip(self):\n clients = mock.MagicMock()\n nova_scenario = utils.NovaScenario(context=self.context,\n clients=clients)\n neutronclient = clients.neutron.return_value\n neutronclient.list_ports.return_value = {\"ports\": [{\"id\": \"p1\"},\n {\"id\": 
\"p2\"}]}\n\n fip_ip = \"172.168.0.1\"\n fip_id = \"some\"\n # case #1- an object from neutronclient\n floating_ip = {\"floating_ip_address\": fip_ip, \"id\": fip_id}\n\n nova_scenario._associate_floating_ip(self.server, floating_ip)\n\n neutronclient.update_floatingip.assert_called_once_with(\n fip_id, {\"floatingip\": {\"port_id\": \"p1\"}}\n )\n # case #2 - an object from network wrapper\n neutronclient.update_floatingip.reset_mock()\n floating_ip = {\"ip\": fip_ip, \"id\": fip_id}\n\n nova_scenario._associate_floating_ip(self.server, floating_ip)\n\n neutronclient.update_floatingip.assert_called_once_with(\n fip_id, {\"floatingip\": {\"port_id\": \"p1\"}}\n )\n\n # these should not be called in both cases\n self.assertFalse(neutronclient.list_floatingips.called)\n # it is an old behavior. let's check that it was not called\n self.assertFalse(self.server.add_floating_ip.called)\n\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.associate_floating_ip\", count=2)\n\n def test__associate_floating_ip_deprecated_behavior(self):\n clients = mock.MagicMock()\n nova_scenario = utils.NovaScenario(context=self.context,\n clients=clients)\n neutronclient = clients.neutron.return_value\n neutronclient.list_ports.return_value = {\"ports\": [{\"id\": \"p1\"},\n {\"id\": \"p2\"}]}\n\n fip_id = \"fip1\"\n fip_ip = \"172.168.0.1\"\n neutronclient.list_floatingips.return_value = {\n \"floatingips\": [\n {\"id\": fip_id, \"floating_ip_address\": fip_ip},\n {\"id\": \"fip2\", \"floating_ip_address\": \"127.0.0.1\"}]}\n\n nova_scenario._associate_floating_ip(self.server, fip_ip)\n\n neutronclient.update_floatingip.assert_called_once_with(\n fip_id, {\"floatingip\": {\"port_id\": \"p1\"}}\n )\n\n neutronclient.list_floatingips.assert_called_once_with(\n floating_ip_address=fip_ip)\n\n # it is an old behavior. 
let's check that it was not called\n self.assertFalse(self.server.add_floating_ip.called)\n\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.associate_floating_ip\")\n\n def test__dissociate_floating_ip(self):\n clients = mock.MagicMock()\n nova_scenario = utils.NovaScenario(context=self.context,\n clients=clients)\n neutronclient = clients.neutron.return_value\n\n fip_ip = \"172.168.0.1\"\n fip_id = \"some\"\n # case #1- an object from neutronclient\n floating_ip = {\"floating_ip_address\": fip_ip, \"id\": fip_id}\n\n nova_scenario._dissociate_floating_ip(self.server, floating_ip)\n\n neutronclient.update_floatingip.assert_called_once_with(\n fip_id, {\"floatingip\": {\"port_id\": None}}\n )\n # case #2 - an object from network wrapper\n neutronclient.update_floatingip.reset_mock()\n floating_ip = {\"ip\": fip_ip, \"id\": fip_id}\n\n nova_scenario._dissociate_floating_ip(self.server, floating_ip)\n\n neutronclient.update_floatingip.assert_called_once_with(\n fip_id, {\"floatingip\": {\"port_id\": None}}\n )\n\n # these should not be called in both cases\n self.assertFalse(neutronclient.list_floatingips.called)\n # it is an old behavior. 
let's check that it was not called\n self.assertFalse(self.server.add_floating_ip.called)\n\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.dissociate_floating_ip\", count=2)\n\n def test__disassociate_floating_ip_deprecated_behavior(self):\n clients = mock.MagicMock()\n nova_scenario = utils.NovaScenario(context=self.context,\n clients=clients)\n neutronclient = clients.neutron.return_value\n\n fip_id = \"fip1\"\n fip_ip = \"172.168.0.1\"\n neutronclient.list_floatingips.return_value = {\n \"floatingips\": [\n {\"id\": fip_id, \"floating_ip_address\": fip_ip},\n {\"id\": \"fip2\", \"floating_ip_address\": \"127.0.0.1\"}]}\n\n nova_scenario._dissociate_floating_ip(self.server, fip_ip)\n\n neutronclient.update_floatingip.assert_called_once_with(\n fip_id, {\"floatingip\": {\"port_id\": None}}\n )\n\n neutronclient.list_floatingips.assert_called_once_with(\n floating_ip_address=fip_ip)\n\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.dissociate_floating_ip\")\n\n def test__check_ip_address(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n fake_server = fakes.FakeServerManager().create(\"test_server\",\n \"image_id_01\",\n \"flavor_id_01\")\n fake_server.addresses = {\n \"private\": [\n {\"version\": 4, \"addr\": \"1.2.3.4\"},\n ]}\n floating_ip = fakes.FakeFloatingIP()\n floating_ip.ip = \"10.20.30.40\"\n\n # Also test function check_ip_address accept a string as attr\n self.assertFalse(\n nova_scenario.check_ip_address(floating_ip.ip)(fake_server))\n self.assertTrue(\n nova_scenario.check_ip_address(floating_ip.ip, must_exist=False)\n (fake_server))\n\n fake_server.addresses[\"private\"].append(\n {\"version\": 4, \"addr\": floating_ip.ip}\n )\n # Also test function check_ip_address accept an object with attr ip\n self.assertTrue(\n nova_scenario.check_ip_address(floating_ip)\n (fake_server))\n self.assertFalse(\n nova_scenario.check_ip_address(floating_ip, must_exist=False)\n (fake_server))\n\n 
def test__resize(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n to_flavor = mock.Mock()\n nova_scenario._resize(self.server, to_flavor)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.resize\")\n\n def test__resize_confirm(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._resize_confirm(self.server)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.resize_confirm\")\n\n @ddt.data({},\n {\"status\": \"SHUTOFF\"})\n @ddt.unpack\n def test__resize_revert(self, status=None):\n nova_scenario = utils.NovaScenario(context=self.context)\n if status is None:\n nova_scenario._resize_revert(self.server)\n status = \"ACTIVE\"\n else:\n nova_scenario._resize_revert(self.server, status=status)\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[status],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.\n nova_server_resize_revert_poll_interval,\n timeout=CONF.openstack.nova_server_resize_revert_timeout)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.resize_revert\")\n\n @mock.patch(\"rally_openstack.common.services.storage.block.BlockStorage\")\n def test__update_volume_resource(self, mock_block_storage):\n volume = fakes.FakeVolume(id=1)\n cinder = mock_block_storage.return_value\n cinder.get_volume = mock.MagicMock()\n nova_scenario = utils.NovaScenario(context=self.context)\n self.assertEqual(cinder.get_volume.return_value,\n nova_scenario._update_volume_resource(volume))\n\n def test__attach_volume(self):\n expect_attach = mock.MagicMock()\n device = None\n (self.clients(\"nova\").volumes.create_server_volume\n .return_value) = expect_attach\n nova_scenario = utils.NovaScenario(context=self.context)\n attach = nova_scenario._attach_volume(self.server, self.volume, device)\n (self.clients(\"nova\").volumes.create_server_volume\n 
.assert_called_once_with(self.server.id, self.volume.id, device))\n self.assertEqual(expect_attach, attach)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.attach_volume\")\n\n def test__list_attachments(self):\n expect_attachments = [mock.MagicMock()]\n (self.clients(\"nova\").volumes.get_server_volumes\n .return_value) = expect_attachments\n nova_scenario = utils.NovaScenario(context=self.context)\n list_attachments = nova_scenario._list_attachments(self.server.id)\n self.assertEqual(expect_attachments, list_attachments)\n (self.clients(\"nova\").volumes.get_server_volumes\n .assert_called_once_with(self.server.id))\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_attachments\")\n\n def test__detach_volume(self):\n attach = mock.MagicMock(id=\"attach_id\")\n self.clients(\"nova\").volumes.delete_server_volume.return_value = None\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._detach_volume(self.server, self.volume, attach)\n (self.clients(\"nova\").volumes.delete_server_volume\n .assert_called_once_with(self.server.id, self.volume.id))\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.detach_volume\")\n\n def test__detach_volume_no_attach(self):\n self.clients(\"nova\").volumes.delete_server_volume.return_value = None\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._detach_volume(self.server, self.volume, None)\n (self.clients(\"nova\").volumes.delete_server_volume\n .assert_called_once_with(self.server.id, self.volume.id))\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.detach_volume\")\n\n def test__live_migrate_server(self):\n self.admin_clients(\"nova\").servers.get(return_value=self.server)\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._live_migrate(self.server,\n block_migration=False,\n disk_over_commit=False,\n skip_compute_nodes_check=True,\n skip_host_check=True)\n\n 
self.mock_wait_for_status.mock.assert_called_once_with(\n self.server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.\n nova_server_live_migrate_poll_interval,\n timeout=CONF.openstack.nova_server_live_migrate_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.live_migrate\")\n\n def test__migrate_server(self):\n fake_server = self.server\n setattr(fake_server, \"OS-EXT-SRV-ATTR:host\", \"a1\")\n self.clients(\"nova\").servers.get(return_value=fake_server)\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._migrate(fake_server, skip_compute_nodes_check=True,\n skip_host_check=True)\n\n self.mock_wait_for_status.mock.assert_called_once_with(\n fake_server,\n ready_statuses=[\"VERIFY_RESIZE\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.nova_server_migrate_poll_interval,\n timeout=CONF.openstack.nova_server_migrate_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.migrate\")\n\n self.assertRaises(rally_exceptions.RallyException,\n nova_scenario._migrate,\n fake_server, skip_compute_nodes_check=True,\n skip_host_check=False)\n self.assertRaises(rally_exceptions.RallyException,\n nova_scenario._migrate,\n fake_server, skip_compute_nodes_check=False,\n skip_host_check=True)\n\n def test__add_server_secgroups(self):\n server = mock.Mock()\n fake_secgroups = [fakes.FakeSecurityGroup(None, None, 1, \"uuid1\")]\n\n nova_scenario = utils.NovaScenario()\n security_group = fake_secgroups[0]\n result = nova_scenario._add_server_secgroups(server,\n security_group.name)\n self.assertEqual(\n self.clients(\"nova\").servers.add_security_group.return_value,\n result)\n (self.clients(\"nova\").servers.add_security_group.\n 
assert_called_once_with(server, security_group.name))\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.add_server_secgroups\")\n\n def test__list_keypairs(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._list_keypairs()\n self.assertEqual(self.clients(\"nova\").keypairs.list.return_value,\n result)\n self.clients(\"nova\").keypairs.list.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_keypairs\")\n\n def test__create_keypair(self):\n nova_scenario = utils.NovaScenario()\n nova_scenario.generate_random_name = mock.Mock(\n return_value=\"rally_nova_keypair_fake\")\n result = nova_scenario._create_keypair(fakeargs=\"fakeargs\")\n self.assertEqual(\n self.clients(\"nova\").keypairs.create.return_value.name,\n result)\n self.clients(\"nova\").keypairs.create.assert_called_once_with(\n \"rally_nova_keypair_fake\", fakeargs=\"fakeargs\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.create_keypair\")\n\n def test__get_server_group(self):\n nova_scenario = utils.NovaScenario()\n fakeid = 12345\n result = nova_scenario._get_server_group(fakeid)\n self.assertEqual(\n self.clients(\"nova\").server_groups.get.return_value,\n result)\n self.clients(\"nova\").server_groups.get.assert_called_once_with(\n fakeid)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.get_server_group\")\n\n def test__create_server_group(self):\n nova_scenario = utils.NovaScenario()\n nova_scenario.generate_random_name = mock.Mock(\n return_value=\"random_name\")\n result = nova_scenario._create_server_group(fakeargs=\"fakeargs\")\n self.assertEqual(\n self.clients(\"nova\").server_groups.create.return_value,\n result)\n self.clients(\"nova\").server_groups.create.assert_called_once_with(\n name=\"random_name\", fakeargs=\"fakeargs\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.create_server_group\")\n\n def 
test__delete_server_group(self):\n nova_scenario = utils.NovaScenario()\n fakeid = 12345\n result = nova_scenario._delete_server_group(fakeid)\n self.assertEqual(\n self.clients(\"nova\").server_groups.delete.return_value,\n result)\n self.clients(\"nova\").server_groups.delete.assert_called_once_with(\n fakeid)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.delete_server_group\")\n\n def test__list_server_groups(self):\n nova_scenario = utils.NovaScenario()\n result1 = nova_scenario._list_server_groups(all_projects=False)\n result2 = nova_scenario._list_server_groups(all_projects=True)\n self.assertEqual(self.clients(\"nova\").server_groups.list.return_value,\n result1)\n admcli = self.admin_clients(\"nova\")\n self.assertEqual(admcli.server_groups.list.return_value, result2)\n self.clients(\"nova\").server_groups.list.assert_called_once_with(\n False)\n self.admin_clients(\"nova\").server_groups.list.assert_called_once_with(\n True)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_server_groups\", count=2)\n\n def test__delete_keypair(self):\n nova_scenario = utils.NovaScenario()\n nova_scenario._delete_keypair(\"fake_keypair\")\n self.clients(\"nova\").keypairs.delete.assert_called_once_with(\n \"fake_keypair\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.delete_keypair\")\n\n def test__get_keypair(self):\n nova_scenario = utils.NovaScenario()\n nova_scenario._get_keypair(\"fake_keypair\")\n self.clients(\"nova\").keypairs.get.assert_called_once_with(\n \"fake_keypair\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.get_keypair\")\n\n def test__list_hypervisors(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._list_hypervisors(detailed=False)\n self.assertEqual(\n self.admin_clients(\"nova\").hypervisors.list.return_value, result)\n self.admin_clients(\"nova\").hypervisors.list.assert_called_once_with(\n False)\n 
self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_hypervisors\")\n\n def test__statistics_hypervisors(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._statistics_hypervisors()\n self.assertEqual(\n self.admin_clients(\"nova\").hypervisors.statistics.return_value,\n result)\n (self.admin_clients(\"nova\").hypervisors.statistics.\n assert_called_once_with())\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.statistics_hypervisors\")\n\n def test__get_hypervisor(self):\n hypervisor = mock.Mock()\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._get_hypervisor(hypervisor)\n self.assertEqual(\n self.admin_clients(\"nova\").hypervisors.get.return_value,\n result)\n self.admin_clients(\"nova\").hypervisors.get.assert_called_once_with(\n hypervisor)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.get_hypervisor\")\n\n def test__search_hypervisors(self):\n nova_scenario = utils.NovaScenario()\n nova_scenario._search_hypervisors(\"fake_hostname\", servers=False)\n\n self.admin_clients(\"nova\").hypervisors.search.assert_called_once_with(\n \"fake_hostname\", servers=False)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.search_hypervisors\")\n\n def test__list_interfaces(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._list_interfaces(\"server\")\n self.assertEqual(\n self.clients(\"nova\").servers.interface_list.return_value,\n result)\n self.clients(\"nova\").servers.interface_list.assert_called_once_with(\n \"server\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_interfaces\")\n\n def test__lock_server(self):\n server = mock.Mock()\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._lock_server(server)\n server.lock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.lock_server\")\n\n def 
test__unlock_server(self):\n server = mock.Mock()\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario._unlock_server(server)\n server.unlock.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.unlock_server\")\n\n def test__delete_network(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._delete_network(\"fake_net_id\")\n self.assertEqual(\n self.admin_clients(\"nova\").networks.delete.return_value,\n result)\n self.admin_clients(\"nova\").networks.delete.assert_called_once_with(\n \"fake_net_id\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.delete_network\")\n\n def test__list_flavors(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._list_flavors(detailed=True, fakearg=\"fakearg\")\n self.assertEqual(self.clients(\"nova\").flavors.list.return_value,\n result)\n self.clients(\"nova\").flavors.list.assert_called_once_with(\n True, fakearg=\"fakearg\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_flavors\")\n\n def test__set_flavor_keys(self):\n flavor = mock.MagicMock()\n nova_scenario = utils.NovaScenario()\n extra_specs = {\"fakeargs\": \"foo\"}\n flavor.set_keys = mock.MagicMock()\n\n result = nova_scenario._set_flavor_keys(flavor, extra_specs)\n self.assertEqual(flavor.set_keys.return_value, result)\n flavor.set_keys.assert_called_once_with(extra_specs)\n\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.set_flavor_keys\")\n\n @ddt.data({},\n {\"hypervisor\": \"foo_hypervisor\"})\n @ddt.unpack\n def test__list_agents(self, hypervisor=None):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._list_agents(hypervisor)\n self.assertEqual(\n self.admin_clients(\"nova\").agents.list.return_value, result)\n self.admin_clients(\"nova\").agents.list.assert_called_once_with(\n hypervisor)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n 
\"nova.list_agents\")\n\n def test__list_aggregates(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._list_aggregates()\n self.assertEqual(\n self.admin_clients(\"nova\").aggregates.list.return_value, result)\n self.admin_clients(\"nova\").aggregates.list.assert_called_once_with()\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_aggregates\")\n\n def test__list_availability_zones(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._list_availability_zones(detailed=True)\n self.assertEqual(\n self.admin_clients(\"nova\").availability_zones.list.return_value,\n result)\n avail_zones_client = self.admin_clients(\"nova\").availability_zones\n avail_zones_client.list.assert_called_once_with(True)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_availability_zones\")\n\n @ddt.data({},\n {\"host\": \"foo_host\"},\n {\"binary\": \"foo_binary\"},\n {\"host\": \"foo_host\", \"binary\": \"foo_binary\"})\n @ddt.unpack\n def test__list_services(self, host=None, binary=None):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._list_services(host=host, binary=binary)\n self.assertEqual(self.admin_clients(\"nova\").services.list.return_value,\n result)\n self.admin_clients(\"nova\").services.list.assert_called_once_with(\n host, binary)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_services\")\n\n def test__list_flavor_access(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._list_flavor_access(\"foo_id\")\n self.assertEqual(\n self.admin_clients(\"nova\").flavor_access.list.return_value,\n result)\n self.admin_clients(\"nova\").flavor_access.list.assert_called_once_with(\n flavor=\"foo_id\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.list_flavor_access\")\n\n def test__add_tenant_access(self):\n tenant = mock.Mock()\n flavor = mock.Mock()\n nova_scenario = utils.NovaScenario()\n 
admin_clients = self.admin_clients(\"nova\")\n result = nova_scenario._add_tenant_access(flavor.id, tenant.id)\n self.assertEqual(\n admin_clients.flavor_access.add_tenant_access.return_value,\n result)\n admin_clients.flavor_access.add_tenant_access.assert_called_once_with(\n flavor.id, tenant.id)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.add_tenant_access\")\n\n def test__create_flavor(self):\n nova_scenario = utils.NovaScenario()\n random_name = \"random_name\"\n nova_scenario.generate_random_name = mock.Mock(\n return_value=random_name)\n result = nova_scenario._create_flavor(500, 1, 1,\n fakearg=\"fakearg\")\n self.assertEqual(\n self.admin_clients(\"nova\").flavors.create.return_value,\n result)\n self.admin_clients(\"nova\").flavors.create.assert_called_once_with(\n random_name, 500, 1, 1, fakearg=\"fakearg\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.create_flavor\")\n\n def test__get_flavor(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._get_flavor(\"foo_flavor_id\")\n self.assertEqual(\n self.admin_clients(\"nova\").flavors.get.return_value,\n result)\n self.admin_clients(\"nova\").flavors.get.assert_called_once_with(\n \"foo_flavor_id\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.get_flavor\")\n\n def test__delete_flavor(self):\n nova_scenario = utils.NovaScenario()\n result = nova_scenario._delete_flavor(\"foo_flavor_id\")\n self.assertEqual(\n self.admin_clients(\"nova\").flavors.delete.return_value,\n result)\n self.admin_clients(\"nova\").flavors.delete.assert_called_once_with(\n \"foo_flavor_id\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.delete_flavor\")\n\n def test__update_server(self):\n server = mock.Mock()\n nova_scenario = utils.NovaScenario()\n nova_scenario.generate_random_name = mock.Mock(\n return_value=\"new_name\")\n server.update = mock.Mock()\n\n result = 
nova_scenario._update_server(server)\n self.assertEqual(result, server.update.return_value)\n nova_scenario.generate_random_name.assert_called_once_with()\n server.update.assert_called_once_with(name=\"new_name\")\n\n nova_scenario.generate_random_name.reset_mock()\n server.update.reset_mock()\n\n result = nova_scenario._update_server(server,\n description=\"desp\")\n self.assertEqual(result, server.update.return_value)\n nova_scenario.generate_random_name.assert_called_once_with()\n server.update.assert_called_once_with(name=\"new_name\",\n description=\"desp\")\n\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.update_server\", count=2)\n\n def test_create_aggregate(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n random_name = \"random_name\"\n nova_scenario.generate_random_name = mock.Mock(\n return_value=random_name)\n result = nova_scenario._create_aggregate(\"nova\")\n self.assertEqual(\n self.admin_clients(\"nova\").aggregates.create.return_value,\n result)\n self.admin_clients(\"nova\").aggregates.create.assert_called_once_with(\n random_name, \"nova\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.create_aggregate\")\n\n def test_delete_aggregate(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n result = nova_scenario._delete_aggregate(\"fake_aggregate\")\n self.assertEqual(\n self.admin_clients(\"nova\").aggregates.delete.return_value,\n result)\n self.admin_clients(\"nova\").aggregates.delete.assert_called_once_with(\n \"fake_aggregate\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.delete_aggregate\")\n\n def test_get_aggregate_details(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n result = nova_scenario._get_aggregate_details(\"fake_aggregate\")\n self.assertEqual(\n self.admin_clients(\"nova\").aggregates.get_details.return_value,\n result)\n self.admin_clients(\n 
\"nova\").aggregates.get_details.assert_called_once_with(\n \"fake_aggregate\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.get_aggregate_details\")\n\n def test_update_aggregate(self):\n aggregate = mock.Mock()\n nova_scenario = utils.NovaScenario(context=self.context)\n nova_scenario.generate_random_name = mock.Mock(\n return_value=\"random_name\")\n values = {\"name\": \"random_name\",\n \"availability_zone\": \"random_name\"}\n result = nova_scenario._update_aggregate(aggregate=aggregate)\n self.assertEqual(\n self.admin_clients(\"nova\").aggregates.update.return_value,\n result)\n self.admin_clients(\"nova\").aggregates.update.assert_called_once_with(\n aggregate, values)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.update_aggregate\")\n\n def test_aggregate_add_host(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n result = nova_scenario._aggregate_add_host(\"fake_agg\", \"fake_host\")\n self.assertEqual(\n self.admin_clients(\"nova\").aggregates.add_host.return_value,\n result)\n self.admin_clients(\"nova\").aggregates.add_host.assert_called_once_with(\n \"fake_agg\", \"fake_host\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.aggregate_add_host\")\n\n def test_aggregate_remove_host(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n result = nova_scenario._aggregate_remove_host(\"fake_agg\", \"fake_host\")\n self.assertEqual(\n self.admin_clients(\"nova\").aggregates.remove_host.return_value,\n result)\n self.admin_clients(\n \"nova\").aggregates.remove_host.assert_called_once_with(\n \"fake_agg\", \"fake_host\")\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.aggregate_remove_host\")\n\n def test__uptime_hypervisor(self):\n nova_scenario = utils.NovaScenario()\n nova_scenario._uptime_hypervisor(\"fake_hostname\")\n\n self.admin_clients(\"nova\").hypervisors.uptime.assert_called_once_with(\n \"fake_hostname\")\n 
self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.uptime_hypervisor\")\n\n def test__attach_interface(self):\n fake_server = mock.Mock()\n nova_scenario = utils.NovaScenario()\n\n result = nova_scenario._attach_interface(fake_server, net_id=\"id\")\n self.assertEqual(\n self.clients(\"nova\").servers.interface_attach.return_value,\n result)\n self.clients(\"nova\").servers.interface_attach.assert_called_once_with(\n fake_server, None, \"id\", None)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.attach_interface\")\n\n def test_aggregate_set_metadata(self):\n nova_scenario = utils.NovaScenario(context=self.context)\n fake_metadata = {\"test_metadata\": \"true\"}\n result = nova_scenario._aggregate_set_metadata(\"fake_aggregate\",\n fake_metadata)\n self.assertEqual(\n self.admin_clients(\"nova\").aggregates.set_metadata.return_value,\n result)\n self.admin_clients(\n \"nova\").aggregates.set_metadata.assert_called_once_with(\n \"fake_aggregate\", fake_metadata)\n self._test_atomic_action_timer(nova_scenario.atomic_actions(),\n \"nova.aggregate_set_metadata\")\n" }, { "alpha_fraction": 0.5727957487106323, "alphanum_fraction": 0.5747971534729004, "avg_line_length": 41.4744987487793, "blob_id": "db361004e779ecc079a0b6afc0e3b4e183229e68", "content_id": "77dcda3985706ee12d39bcf8b5b020f3ba2a6887", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27481, "license_type": "permissive", "max_line_length": 79, "num_lines": 647, "path": "/rally_openstack/common/validators.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport inspect\nimport os\nimport re\n\nimport yaml\n\nfrom rally.common import logging\nfrom rally.common import validation\nfrom rally import exceptions\nfrom rally.plugins.common import validators\nfrom rally.task import types\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.contexts.keystone import roles\nfrom rally_openstack.task.contexts.nova import flavors as flavors_ctx\nfrom rally_openstack.task import types as openstack_types\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_platform\", platform=\"openstack\")\nclass RequiredOpenStackValidator(validation.RequiredPlatformValidator):\n def __init__(self, admin=False, users=False):\n \"\"\"Validates credentials for OpenStack platform.\n\n This allows us to create 3 kind of tests cases:\n 1) requires platform with admin\n 2) requires platform with admin + users\n 3) requires platform with users\n\n :param admin: requires admin credential\n :param users: requires user credentials\n \"\"\"\n super(RequiredOpenStackValidator, self).__init__(platform=\"openstack\")\n self.admin = admin\n self.users = users\n\n def validate(self, context, config, plugin_cls, plugin_cfg):\n if not (self.admin or self.users):\n self.fail(\"You should specify admin=True or users=True or both.\")\n\n context = context[\"platforms\"].get(self.platform, {})\n\n if self.admin and context.get(\"admin\") is None:\n self.fail(\"No admin credentials for openstack\")\n if self.users and len(context.get(\"users\", ())) == 0:\n if context.get(\"admin\") is None:\n self.fail(\"No user 
credentials for openstack\")\n else:\n # NOTE(andreykurilin): It is a case when the plugin requires\n # 'users' for launching, but there are no specified users in\n # deployment. Let's assume that 'users' context can create\n # them via admin user and do not fail.\"\n pass\n\n\ndef with_roles_ctx():\n \"\"\"Add roles to users for validate\n\n \"\"\"\n def decorator(func):\n def wrapper(*args, **kw):\n func_type = inspect.getcallargs(func, *args, **kw)\n config = func_type.get(\"config\", {})\n context = func_type.get(\"context\", {})\n if config.get(\"contexts\", {}).get(\"roles\") \\\n and context.get(\"admin\", {}):\n context[\"config\"] = config[\"contexts\"]\n rolegenerator = roles.RoleGenerator(context)\n with rolegenerator:\n rolegenerator.setup()\n func(*args, **kw)\n else:\n func(*args, **kw)\n return wrapper\n return decorator\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"image_exists\", platform=\"openstack\")\nclass ImageExistsValidator(validation.Validator):\n\n def __init__(self, param_name, nullable):\n \"\"\"Validator checks existed image or not\n\n :param param_name: defines which variable should be used\n to get image id value.\n :param nullable: defines image id param is required\n \"\"\"\n super(ImageExistsValidator, self).__init__()\n self.param_name = param_name\n self.nullable = nullable\n\n @with_roles_ctx()\n def validate(self, context, config, plugin_cls, plugin_cfg):\n\n from glanceclient import exc as glance_exc\n\n image_args = config.get(\"args\", {}).get(self.param_name)\n\n if not image_args and self.nullable:\n return\n\n image_context = config.get(\"contexts\", {}).get(\"images\", {})\n image_ctx_name = image_context.get(\"image_name\")\n\n if not image_args:\n self.fail(\"Parameter %s is not specified.\" % self.param_name)\n\n if \"image_name\" in image_context:\n # NOTE(rvasilets) check string is \"exactly equal to\" a regex\n # or image name from context equal to image 
name from args\n if \"regex\" in image_args:\n match = re.match(image_args.get(\"regex\"), image_ctx_name)\n if image_ctx_name == image_args.get(\"name\") or (\n \"regex\" in image_args and match):\n return\n try:\n for user in context[\"users\"]:\n image_processor = openstack_types.GlanceImage(\n context={\"admin\": {\"credential\": user[\"credential\"]}})\n image_id = image_processor.pre_process(image_args, config={})\n user[\"credential\"].clients().glance().images.get(image_id)\n except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument):\n self.fail(\"Image '%s' not found\" % image_args)\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"external_network_exists\", platform=\"openstack\")\nclass ExternalNetworkExistsValidator(validation.Validator):\n\n def __init__(self, param_name):\n \"\"\"Validator checks that external network with given name exists.\n\n :param param_name: name of validated network\n \"\"\"\n super(ExternalNetworkExistsValidator, self).__init__()\n self.param_name = param_name\n\n @with_roles_ctx()\n def validate(self, context, config, plugin_cls, plugin_cfg):\n\n ext_network = config.get(\"args\", {}).get(self.param_name)\n if not ext_network:\n return\n\n result = []\n for user in context[\"users\"]:\n creds = user[\"credential\"]\n\n networks = creds.clients().neutron().list_networks()[\"networks\"]\n external_networks = [net[\"name\"] for net in networks if\n net.get(\"router:external\", False)]\n if ext_network not in external_networks:\n message = (\"External (floating) network with name {1} \"\n \"not found by user {0}. 
\"\n \"Available networks: {2}\").format(creds.username,\n ext_network,\n networks)\n result.append(message)\n if result:\n self.fail(\"\\n\".join(result))\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"required_neutron_extensions\", platform=\"openstack\")\nclass RequiredNeutronExtensionsValidator(validation.Validator):\n\n def __init__(self, extensions, *args):\n \"\"\"Validator checks if the specified Neutron extension is available\n\n :param extensions: list of Neutron extensions\n \"\"\"\n super(RequiredNeutronExtensionsValidator, self).__init__()\n if isinstance(extensions, (list, tuple)):\n # services argument is a list, so it is a new way of validators\n # usage, args in this case should not be provided\n self.req_ext = extensions\n if args:\n LOG.warning(\"Positional argument is not what \"\n \"'required_neutron_extensions' decorator expects. \"\n \"Use `extensions` argument instead\")\n else:\n # it is old way validator\n self.req_ext = [extensions]\n self.req_ext.extend(args)\n\n @with_roles_ctx()\n def validate(self, context, config, plugin_cls, plugin_cfg):\n clients = context[\"users\"][0][\"credential\"].clients()\n extensions = clients.neutron().list_extensions()[\"extensions\"]\n aliases = [x[\"alias\"] for x in extensions]\n for extension in self.req_ext:\n if extension not in aliases:\n self.fail(\"Neutron extension %s is not configured\" % extension)\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"flavor_exists\", platform=\"openstack\")\nclass FlavorExistsValidator(validation.Validator):\n\n def __init__(self, param_name):\n \"\"\"Returns validator for flavor\n\n :param param_name: defines which variable should be used\n to get flavor id value.\n \"\"\"\n super(FlavorExistsValidator, self).__init__()\n\n self.param_name = param_name\n\n def _get_flavor_from_context(self, config, flavor_value):\n if \"flavors\" not in 
config.get(\"contexts\", {}):\n self.fail(\"No flavors context\")\n\n flavors = [flavors_ctx.FlavorConfig(**f)\n for f in config[\"contexts\"][\"flavors\"]]\n resource = types.obj_from_name(resource_config=flavor_value,\n resources=flavors, typename=\"flavor\")\n flavor = flavors_ctx.FlavorConfig(**resource)\n flavor.id = \"<context flavor: %s>\" % flavor.name\n return flavor\n\n def _get_validated_flavor(self, config, clients, param_name):\n\n from novaclient import exceptions as nova_exc\n\n flavor_value = config.get(\"args\", {}).get(param_name)\n if not flavor_value:\n self.fail(\"Parameter %s is not specified.\" % param_name)\n try:\n flavor_processor = openstack_types.Flavor(\n context={\"admin\": {\"credential\": clients.credential}})\n flavor_id = flavor_processor.pre_process(flavor_value, config={})\n flavor = clients.nova().flavors.get(flavor=flavor_id)\n return flavor\n except (nova_exc.NotFound, exceptions.InvalidScenarioArgument):\n try:\n return self._get_flavor_from_context(config, flavor_value)\n except validation.ValidationError:\n pass\n self.fail(\"Flavor '%s' not found\" % flavor_value)\n\n @with_roles_ctx()\n def validate(self, context, config, plugin_cls, plugin_cfg):\n # flavors do not depend on user or tenant, so checking for one user\n # should be enough\n clients = context[\"users\"][0][\"credential\"].clients()\n self._get_validated_flavor(config=config,\n clients=clients,\n param_name=self.param_name)\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"image_valid_on_flavor\", platform=\"openstack\")\nclass ImageValidOnFlavorValidator(FlavorExistsValidator):\n\n def __init__(self, flavor_param, image_param,\n fail_on_404_image=True, validate_disk=True):\n \"\"\"Returns validator for image could be used for current flavor\n\n :param flavor_param: defines which variable should be used\n to get flavor id value.\n :param image_param: defines which variable should be used\n to get image id 
value.\n :param validate_disk: flag to indicate whether to validate flavor's\n disk. Should be True if instance is booted from\n image. Should be False if instance is booted\n from volume. Default value is True.\n :param fail_on_404_image: flag what indicate whether to validate image\n or not.\n \"\"\"\n super(ImageValidOnFlavorValidator, self).__init__(flavor_param)\n self.image_name = image_param\n self.fail_on_404_image = fail_on_404_image\n self.validate_disk = validate_disk\n\n def _get_validated_image(self, config, clients, param_name):\n\n from glanceclient import exc as glance_exc\n\n image_context = config.get(\"contexts\", {}).get(\"images\", {})\n image_args = config.get(\"args\", {}).get(param_name)\n image_ctx_name = image_context.get(\"image_name\")\n\n if not image_args:\n self.fail(\"Parameter %s is not specified.\" % param_name)\n\n if \"image_name\" in image_context:\n # NOTE(rvasilets) check string is \"exactly equal to\" a regex\n # or image name from context equal to image name from args\n if \"regex\" in image_args:\n match = re.match(image_args.get(\"regex\"), image_ctx_name)\n if image_ctx_name == image_args.get(\"name\") or (\"regex\"\n in image_args\n and match):\n image = {\n \"size\": image_context.get(\"min_disk\", 0),\n \"min_ram\": image_context.get(\"min_ram\", 0),\n \"min_disk\": image_context.get(\"min_disk\", 0)\n }\n return image\n try:\n image_processor = openstack_types.GlanceImage(\n context={\"admin\": {\"credential\": clients.credential}})\n image_id = image_processor.pre_process(image_args, config={})\n image = clients.glance().images.get(image_id)\n if hasattr(image, \"to_dict\"):\n # NOTE(stpierre): Glance v1 images are objects that can be\n # converted to dicts; Glance v2 images are already\n # dict-like\n image = image.to_dict()\n if not image.get(\"size\"):\n image[\"size\"] = 0\n if not image.get(\"min_ram\"):\n image[\"min_ram\"] = 0\n if not image.get(\"min_disk\"):\n image[\"min_disk\"] = 0\n return image\n except 
(glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument):\n self.fail(\"Image '%s' not found\" % image_args)\n\n @with_roles_ctx()\n def validate(self, context, config, plugin_cls, plugin_cfg):\n\n flavor = None\n for user in context[\"users\"]:\n clients = user[\"credential\"].clients()\n\n if not flavor:\n flavor = self._get_validated_flavor(\n config, clients, self.param_name)\n\n try:\n image = self._get_validated_image(config, clients,\n self.image_name)\n except validation.ValidationError:\n if not self.fail_on_404_image:\n return\n raise\n\n if flavor.ram < image[\"min_ram\"]:\n self.fail(\"The memory size for flavor '%s' is too small \"\n \"for requested image '%s'.\" %\n (flavor.id, image[\"id\"]))\n\n if flavor.disk and self.validate_disk:\n if flavor.disk * (1024 ** 3) < image[\"size\"]:\n self.fail(\"The disk size for flavor '%s' is too small \"\n \"for requested image '%s'.\" %\n (flavor.id, image[\"id\"]))\n\n if flavor.disk < image[\"min_disk\"]:\n self.fail(\"The minimal disk size for flavor '%s' is \"\n \"too small for requested image '%s'.\" %\n (flavor.id, image[\"id\"]))\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"required_services\", platform=\"openstack\")\nclass RequiredServicesValidator(validation.Validator):\n\n def __init__(self, services, *args):\n \"\"\"Validator checks if specified OpenStack services are available.\n\n :param services: list with names of required services\n \"\"\"\n\n super(RequiredServicesValidator, self).__init__()\n if isinstance(services, (list, tuple)):\n # services argument is a list, so it is a new way of validators\n # usage, args in this case should not be provided\n self.services = services\n if args:\n LOG.warning(\"Positional argument is not what \"\n \"'required_services' decorator expects. 
\"\n \"Use `services` argument instead\")\n else:\n # it is old way validator\n self.services = [services]\n self.services.extend(args)\n\n def validate(self, context, config, plugin_cls, plugin_cfg):\n if consts.Service.NOVA_NET in self.services:\n self.fail(\"We are sorry, but Nova-network was deprecated for a \"\n \"long time and latest novaclient doesn't support it, so \"\n \"we too.\")\n\n creds = (context.get(\"admin\", {}).get(\"credential\", None)\n or context[\"users\"][0][\"credential\"])\n\n if \"api_versions\" in config.get(\"contexts\", {}):\n api_versions = config[\"contexts\"][\"api_versions\"]\n else:\n api_versions = config.get(\"contexts\", {}).get(\n \"api_versions@openstack\", {})\n\n available_services = creds.clients().services().values()\n\n for service in self.services:\n service_config = api_versions.get(service, {})\n if (\"service_type\" in service_config\n or \"service_name\" in service_config):\n # NOTE(andreykurilin): validator should ignore services\n # configured via api_versions@openstack since the context\n # plugin itself should perform a proper validation\n continue\n\n if service not in available_services:\n self.fail(\n (\"'{0}' service is not available. 
Hint: If '{0}' \"\n \"service has non-default service_type, try to setup \"\n \"it via 'api_versions@openstack' context.\"\n ).format(service))\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"validate_heat_template\", platform=\"openstack\")\nclass ValidateHeatTemplateValidator(validation.Validator):\n\n def __init__(self, params, *args):\n \"\"\"Validates heat template.\n\n :param params: list of parameters to be validated.\n \"\"\"\n super(ValidateHeatTemplateValidator, self).__init__()\n if isinstance(params, (list, tuple)):\n # services argument is a list, so it is a new way of validators\n # usage, args in this case should not be provided\n self.params = params\n if args:\n LOG.warning(\"Positional argument is not what \"\n \"'validate_heat_template' decorator expects. \"\n \"Use `params` argument instead\")\n else:\n # it is old way validator\n self.params = [params]\n self.params.extend(args)\n\n @with_roles_ctx()\n def validate(self, context, config, plugin_cls, plugin_cfg):\n\n for param_name in self.params:\n template_path = config.get(\"args\", {}).get(param_name)\n if not template_path:\n msg = (\"Path to heat template is not specified. Its needed \"\n \"for heat template validation. Please check the \"\n \"content of `{}` scenario argument.\")\n\n return self.fail(msg.format(param_name))\n template_path = os.path.expanduser(template_path)\n if not os.path.exists(template_path):\n self.fail(\"No file found by the given path %s\" % template_path)\n with open(template_path, \"r\") as f:\n try:\n for user in context[\"users\"]:\n clients = user[\"credential\"].clients()\n clients.heat().stacks.validate(template=f.read())\n except Exception as e:\n self.fail(\"Heat template validation failed on %(path)s. 
\"\n \"Original error message: %(msg)s.\" %\n {\"path\": template_path, \"msg\": str(e)})\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"required_cinder_services\", platform=\"openstack\")\nclass RequiredCinderServicesValidator(validation.Validator):\n\n def __init__(self, services):\n \"\"\"Validator checks that specified Cinder service is available.\n\n It uses Cinder client with admin permissions to call\n 'cinder service-list' call\n\n :param services: Cinder service name\n \"\"\"\n super(RequiredCinderServicesValidator, self).__init__()\n self.services = services\n\n @with_roles_ctx()\n def validate(self, context, config, plugin_cls, plugin_cfg):\n\n clients = context[\"admin\"][\"credential\"].clients()\n for service in clients.cinder().services.list():\n if (service.binary == str(self.services)\n and service.state == str(\"up\")):\n return\n\n self.fail(\"%s service is not available\" % self.services)\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"required_api_versions\", platform=\"openstack\")\nclass RequiredAPIVersionsValidator(validation.Validator):\n\n def __init__(self, component, versions):\n \"\"\"Validator checks component API versions.\n\n :param component: name of required component\n :param versions: version of required component\n \"\"\"\n super(RequiredAPIVersionsValidator, self).__init__()\n self.component = component\n self.versions = versions\n\n def validate(self, context, config, plugin_cls, plugin_cfg):\n versions = [str(v) for v in self.versions]\n versions_str = \", \".join(versions)\n msg = (\"Task was designed to be used with %(component)s \"\n \"V%(version)s, but V%(found_version)s is \"\n \"selected.\")\n for user in context[\"users\"]:\n clients = user[\"credential\"].clients()\n if self.component == \"keystone\":\n if \"2.0\" not in versions and hasattr(\n clients.keystone(), \"tenants\"):\n self.fail(msg % 
{\"component\": self.component,\n \"version\": versions_str,\n \"found_version\": \"2.0\"})\n if \"3\" not in versions and hasattr(\n clients.keystone(), \"projects\"):\n self.fail(msg % {\"component\": self.component,\n \"version\": versions_str,\n \"found_version\": \"3\"})\n else:\n av_ctx = config.get(\"contexts\", {}).get(\n \"api_versions@openstack\", {})\n default_version = getattr(clients,\n self.component).choose_version()\n used_version = av_ctx.get(self.component, {}).get(\n \"version\", default_version)\n if not used_version:\n self.fail(\"Unable to determine the API version.\")\n if str(used_version) not in versions:\n self.fail(msg % {\"component\": self.component,\n \"version\": versions_str,\n \"found_version\": used_version})\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"volume_type_exists\", platform=\"openstack\")\nclass VolumeTypeExistsValidator(validation.Validator):\n\n def __init__(self, param_name, nullable=True):\n \"\"\"Returns validator for volume types.\n\n :param param_name: defines variable to be used as the flag to\n determine if volume types should be checked for\n existence.\n :param nullable: defines volume_type param is required\n \"\"\"\n super(VolumeTypeExistsValidator, self).__init__()\n self.param = param_name\n self.nullable = nullable\n\n @with_roles_ctx()\n def validate(self, context, config, plugin_cls, plugin_cfg):\n volume_type = config.get(\"args\", {}).get(self.param, False)\n\n if not volume_type:\n if self.nullable:\n return\n\n self.fail(\"The parameter '%s' is required and should not be empty.\"\n % self.param)\n\n for user in context[\"users\"]:\n clients = user[\"credential\"].clients()\n vt_names = [vt.name for vt in\n clients.cinder().volume_types.list()]\n ctx = config.get(\"contexts\", {}).get(\"volume_types\", [])\n vt_names += ctx\n if volume_type not in vt_names:\n self.fail(\"Specified volume type %s not found for user %s.\"\n \" List of 
available types: %s\" %\n (volume_type, user, vt_names))\n\n\[email protected](name=\"workbook_contains_workflow\", platform=\"openstack\")\nclass WorkbookContainsWorkflowValidator(validators.FileExistsValidator):\n\n def __init__(self, workbook_param, workflow_param):\n \"\"\"Validate that workflow exist in workbook when workflow is passed\n\n :param workbook_param: parameter containing the workbook definition\n :param workflow_param: parameter containing the workflow name\n \"\"\"\n super(WorkbookContainsWorkflowValidator, self).__init__(workflow_param)\n self.workbook = workbook_param\n self.workflow = workflow_param\n\n def validate(self, context, config, plugin_cls, plugin_cfg):\n wf_name = config.get(\"args\", {}).get(self.workflow)\n if wf_name:\n wb_path = config.get(\"args\", {}).get(self.workbook)\n wb_path = os.path.expanduser(wb_path)\n self._file_access_ok(wb_path, mode=os.R_OK,\n param_name=self.workbook)\n\n with open(wb_path, \"r\") as wb_def:\n wb_def = yaml.safe_load(wb_def)\n if wf_name not in wb_def[\"workflows\"]:\n self.fail(\"workflow '%s' not found in the definition '%s'\"\n % (wf_name, wb_def))\n\n\[email protected](name=\"required_context_config\", platform=\"openstack\")\nclass RequiredContextConfigValidator(validation.Validator):\n\n def __init__(self, context_name, context_config):\n \"\"\"Validate that context is configured according to requirements.\n\n :param context_name: string efining context name\n :param context_config: dictionary of required key/value pairs\n \"\"\"\n super(RequiredContextConfigValidator, self).__init__()\n self.context_name = context_name\n self.context_config = context_config\n\n def validate(self, context, config, plugin_cls, plugin_cfg):\n if self.context_name not in config.get(\"contexts\", {}):\n # fail silently. 
if it is required context,\n # `required_contexts` validator should raise proper error\n return\n ctx_config = config[\"contexts\"].get(self.context_name)\n\n for key, value in self.context_config.items():\n if key not in ctx_config or ctx_config[key] != value:\n self.fail(\n f\"The '{self.context_name}' context \"\n f\"expects '{self.context_config}'\")\n" }, { "alpha_fraction": 0.586358368396759, "alphanum_fraction": 0.5979190468788147, "avg_line_length": 40.58654022216797, "blob_id": "413edadc2f229a7932176562b57a52cbf7bd9f8a", "content_id": "c74ec18271cbffd222c482fd3c0e9d1d5fccb142", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4325, "license_type": "permissive", "max_line_length": 78, "num_lines": 104, "path": "/tests/unit/task/contexts/dataplane/test_heat.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport functools\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.dataplane import heat as heat_dataplane\nfrom tests.unit import test\n\nMOD = \"rally_openstack.task.contexts.dataplane.heat.\"\n\n\nclass TestHeatWorkload(test.ScenarioTestCase):\n\n @mock.patch(MOD + \"pkgutil\")\n def test_get_data_resource(self, mock_pkgutil):\n mock_pkgutil.get_data.return_value = \"fake_data\"\n data = heat_dataplane.get_data([1, 2])\n self.assertEqual(\"fake_data\", data)\n mock_pkgutil.get_data.assert_called_once_with(1, 2)\n\n @mock.patch(MOD + \"open\")\n def test_get_data_file(self, mock_open):\n data = heat_dataplane.get_data(1)\n self.assertEqual(mock_open.return_value.read.return_value, data)\n mock_open.assert_called_once_with(1)\n\n def test__get_context_parameter(self):\n user = [1, 2]\n tenant = [3, 4, {\"one\": 1}]\n self.context[\"tenants\"] = {1: tenant}\n ctx = heat_dataplane.HeatDataplane(self.context)\n gcp = functools.partial(ctx._get_context_parameter, user, 1)\n self.assertEqual(1, gcp(\"user.0\"))\n self.assertEqual(2, gcp(\"user.1\"))\n self.assertEqual(3, gcp(\"tenant.0\"))\n self.assertEqual(1, gcp(\"tenant.2.one\"))\n\n @mock.patch(MOD + \"osclients.Clients\")\n def test__get_public_network_id(self, mock_clients):\n fake_net = {\"id\": \"fake_id\"}\n fake_nc = mock.Mock(name=\"fake_neutronclient\")\n fake_nc.list_networks.return_value = {\"networks\": [fake_net]}\n mock_clients.neutron.return_value = fake_nc\n mock_clients.return_value = mock.Mock(\n neutron=mock.Mock(return_value=fake_nc))\n self.context[\"admin\"] = {\"credential\": \"fake_credential\"}\n ctx = heat_dataplane.HeatDataplane(self.context)\n network_id = ctx._get_public_network_id()\n self.assertEqual(\"fake_id\", network_id)\n mock_clients.assert_called_once_with(\"fake_credential\")\n\n @mock.patch(MOD + \"get_data\")\n @mock.patch(MOD + 
\"HeatDataplane._get_context_parameter\")\n @mock.patch(MOD + \"heat_utils\")\n def test_setup(self,\n mock_heat_utils,\n mock_heat_dataplane__get_context_parameter,\n mock_get_data):\n self.context.update({\n \"config\": {\n \"heat_dataplane\": {\n \"stacks_per_tenant\": 1,\n \"template\": \"tpl.yaml\",\n \"files\": {\"file1\": \"f1.yaml\", \"file2\": \"f2.yaml\"},\n \"parameters\": {\"key\": \"value\"},\n \"context_parameters\": {\"ctx.key\": \"ctx.value\"},\n }\n },\n \"users\": [{\"tenant_id\": \"t1\", \"keypair\": {\"name\": \"kp1\"}}, ],\n \"tenants\": {\"t1\": {\"networks\": [{\"router_id\": \"rid\"}]}},\n })\n mock_heat_dataplane__get_context_parameter.return_value = \"gcp\"\n mock_get_data.side_effect = [\"tpl\", \"sf1\", \"sf2\"]\n ctx = heat_dataplane.HeatDataplane(self.context)\n ctx._get_public_network_id = mock.Mock(return_value=\"fake_net\")\n ctx.setup()\n workloads = self.context[\"tenants\"][\"t1\"][\"stack_dataplane\"]\n self.assertEqual(1, len(workloads))\n wl = workloads[0]\n fake_scenario = mock_heat_utils.HeatScenario.return_value\n self.assertEqual(fake_scenario._create_stack.return_value.id, wl[0])\n self.assertEqual(\"tpl\", wl[1])\n self.assertIn(\"sf1\", wl[2].values())\n self.assertIn(\"sf2\", wl[2].values())\n expected = {\n \"ctx.key\": \"gcp\",\n \"key\": \"value\",\n \"key_name\": \"kp1\",\n \"network_id\": \"fake_net\",\n \"router_id\": \"rid\"}\n self.assertEqual(expected, wl[3])\n" }, { "alpha_fraction": 0.5550521016120911, "alphanum_fraction": 0.5586769580841064, "avg_line_length": 39.87036895751953, "blob_id": "3e5a445a6542e3b29d6caa108fc80f093d003f6f", "content_id": "a8338a7a53d5fbcd672c29204f77072a75c37101", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2207, "license_type": "permissive", "max_line_length": 78, "num_lines": 54, "path": "/tests/unit/common/test_credential.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# 
Copyright 2017: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.common import credential\nfrom tests.unit import test\n\n\nclass OpenStackCredentialTestCase(test.TestCase):\n\n def setUp(self):\n super(OpenStackCredentialTestCase, self).setUp()\n self.credential = credential.OpenStackCredential(\n \"foo_url\", \"foo_user\", \"foo_password\",\n tenant_name=\"foo_tenant\")\n\n def test_to_dict(self):\n self.assertEqual({\"auth_url\": \"foo_url\",\n \"username\": \"foo_user\",\n \"password\": \"foo_password\",\n \"tenant_name\": \"foo_tenant\",\n \"region_name\": None,\n \"domain_name\": None,\n \"permission\": None,\n \"endpoint\": None,\n \"endpoint_type\": None,\n \"https_insecure\": False,\n \"https_cacert\": None,\n \"https_cert\": None,\n \"project_domain_name\": None,\n \"user_domain_name\": None,\n \"profiler_hmac_key\": None,\n \"profiler_conn_str\": None,\n \"api_info\": {}},\n self.credential.to_dict())\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_clients(self, mock_clients):\n clients = self.credential.clients()\n mock_clients.assert_called_once_with(self.credential, cache={})\n self.assertIs(mock_clients.return_value, clients)\n" }, { "alpha_fraction": 0.6059423685073853, "alphanum_fraction": 0.6131452322006226, "avg_line_length": 37.298851013183594, "blob_id": "79c17a5e5738df83d9bff529f98e79b95198c5b2", "content_id": 
"afefacb63dcd6c17e455763e2b731f45fa29d26a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3332, "license_type": "permissive", "max_line_length": 79, "num_lines": 87, "path": "/rally_openstack/task/contexts/heat/stacks.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.heat import utils as heat_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"stacks\", platform=\"openstack\", order=435)\nclass StackGenerator(context.OpenStackContext):\n \"\"\"Context class for create temporary stacks with resources.\n\n Stack generator allows to generate arbitrary number of stacks for\n each tenant before test scenarios. In addition, it allows to define\n number of resources (namely OS::Heat::RandomString) that will be created\n inside each stack. 
After test execution the stacks will be\n automatically removed from heat.\n \"\"\"\n\n # The schema of the context configuration format\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n\n \"properties\": {\n \"stacks_per_tenant\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"resources_per_stack\": {\n \"type\": \"integer\",\n \"minimum\": 1\n }\n },\n \"additionalProperties\": False\n }\n\n DEFAULT_CONFIG = {\n \"stacks_per_tenant\": 2,\n \"resources_per_stack\": 10\n }\n\n @staticmethod\n def _prepare_stack_template(res_num):\n template = {\n \"heat_template_version\": \"2014-10-16\",\n \"description\": \"Test template for rally\",\n \"resources\": {}\n }\n rand_string = {\"type\": \"OS::Heat::RandomString\"}\n for i in range(res_num):\n template[\"resources\"][\"TestResource%d\" % i] = rand_string\n return template\n\n def setup(self):\n template = self._prepare_stack_template(\n self.config[\"resources_per_stack\"])\n for user, tenant_id in self._iterate_per_tenants():\n heat_scenario = heat_utils.HeatScenario(\n {\"user\": user, \"task\": self.context[\"task\"],\n \"owner_id\": self.context[\"owner_id\"]})\n self.context[\"tenants\"][tenant_id][\"stacks\"] = []\n for i in range(self.config[\"stacks_per_tenant\"]):\n stack = heat_scenario._create_stack(template)\n self.context[\"tenants\"][tenant_id][\"stacks\"].append(stack.id)\n\n def cleanup(self):\n resource_manager.cleanup(names=[\"heat.stacks\"],\n users=self.context.get(\"users\", []),\n superclass=heat_utils.HeatScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.6367133259773254, "alphanum_fraction": 0.6409366726875305, "avg_line_length": 47.12885665893555, "blob_id": "5403d8f38f4b4dbfed6d9b3c085514b7a8617ff9", "content_id": "66696e0911d55a6fdfa7eb24ed45ac90a924133e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26519, "license_type": "permissive", "max_line_length": 79, 
"num_lines": 551, "path": "/tests/unit/task/scenarios/cinder/test_volumes.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013 Huawei Technologies Co.,LTD.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.scenarios.cinder import volumes\nfrom tests.unit import test\n\nCINDER_VOLUMES = (\"rally_openstack.task.scenarios.cinder.volumes\")\n\n\[email protected]\nclass CinderServersTestCase(test.ScenarioTestCase):\n\n def _get_context(self):\n context = test.get_test_context()\n context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()},\n \"tenant\": {\"id\": \"fake\", \"name\": \"fake\",\n \"volumes\": [{\"id\": \"uuid\", \"size\": 1}],\n \"servers\": [1]}})\n return context\n\n def setUp(self):\n super(CinderServersTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.storage.block.BlockStorage\")\n self.addCleanup(patch.stop)\n self.mock_cinder = patch.start()\n\n def test_create_and_list_volume(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateAndListVolume(self._get_context())\n scenario.run(1, True, fakearg=\"f\")\n\n mock_service.create_volume.assert_called_once_with(1, fakearg=\"f\")\n mock_service.list_volumes.assert_called_once_with(True)\n\n def 
test_create_and_get_volume(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateAndGetVolume(self._get_context())\n scenario.run(1, fakearg=\"f\")\n mock_service.create_volume.assert_called_once_with(1, fakearg=\"f\")\n mock_service.get_volume.assert_called_once_with(\n mock_service.create_volume.return_value.id)\n\n def test_list_volumes(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.ListVolumes(self._get_context())\n scenario.run(True)\n mock_service.list_volumes.assert_called_once_with(\n True, limit=None, marker=None, search_opts=None, sort=None)\n\n def test_list_types(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.ListTypes(self._get_context())\n scenario.run(None, is_public=None)\n mock_service.list_types.assert_called_once_with(None,\n is_public=None)\n\n def test_list_transfers(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.ListTransfers(self._get_context())\n scenario._list_transfers = mock.MagicMock()\n scenario.run(True, search_opts=None)\n mock_service.list_transfers.assert_called_once_with(\n True, search_opts=None)\n\n @ddt.data({\"update_args\": {\"description\": \"desp\"},\n \"expected\": {\"description\": \"desp\"}},\n {\"update_args\": {\"update_name\": True, \"description\": \"desp\"},\n \"expected\": {\"name\": \"new_name\", \"description\": \"desp\"}})\n @ddt.unpack\n def test_create_and_update_volume(self, update_args, expected):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateAndUpdateVolume(self._get_context())\n scenario.generate_random_name = mock.MagicMock()\n scenario.generate_random_name.return_value = \"new_name\"\n scenario.run(1, update_volume_kwargs=update_args)\n mock_service.create_volume.assert_called_once_with(1)\n mock_service.update_volume.assert_called_once_with(\n mock_service.create_volume.return_value, **expected)\n if update_args.get(\"update_name\", False):\n 
scenario.generate_random_name.assert_called_once_with()\n\n def test_create_volume_and_update_readonly_flag(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateVolumeAndUpdateReadonlyFlag(\n self._get_context())\n scenario.run(1, image=None, read_only=True, fakearg=\"f\")\n mock_service.create_volume.assert_called_once_with(1, fakearg=\"f\")\n mock_service.update_readonly_flag.assert_called_once_with(\n mock_service.create_volume.return_value.id, read_only=True)\n\n def test_create_and_delete_volume(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateAndDeleteVolume(self._get_context())\n scenario.sleep_between = mock.MagicMock()\n scenario.run(size=1, min_sleep=10, max_sleep=20, fakearg=\"f\")\n\n mock_service.create_volume.assert_called_once_with(1, fakearg=\"f\")\n scenario.sleep_between.assert_called_once_with(10, 20)\n mock_service.delete_volume.assert_called_once_with(\n mock_service.create_volume.return_value)\n\n def test_create_volume(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateVolume(self._get_context())\n scenario.run(1, fakearg=\"f\")\n mock_service.create_volume.assert_called_once_with(1, fakearg=\"f\")\n\n def test_create_volume_and_modify_metadata(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.ModifyVolumeMetadata(self._get_context())\n scenario.run(sets=5, set_size=4, deletes=3, delete_size=2)\n mock_service.set_metadata.assert_called_once_with(\n \"uuid\", set_size=4, sets=5)\n mock_service.delete_metadata.assert_called_once_with(\n \"uuid\",\n keys=mock_service.set_metadata.return_value,\n deletes=3, delete_size=2)\n\n def test_create_and_extend_volume(self):\n mock_service = self.mock_cinder.return_value\n\n scenario = volumes.CreateAndExtendVolume(self._get_context())\n scenario.sleep_between = mock.MagicMock()\n\n scenario.run(1, 2, 10, 20, fakearg=\"f\")\n mock_service.create_volume.assert_called_once_with(1, 
fakearg=\"f\")\n mock_service.extend_volume.assert_called_once_with(\n mock_service.create_volume.return_value, new_size=2)\n scenario.sleep_between.assert_called_once_with(10, 20)\n mock_service.delete_volume.assert_called_once_with(\n mock_service.create_volume.return_value)\n\n def test_create_from_image_and_delete_volume(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateAndDeleteVolume(self._get_context())\n scenario.run(1, image=\"fake_image\")\n mock_service.create_volume.assert_called_once_with(\n 1, imageRef=\"fake_image\")\n mock_service.delete_volume.assert_called_once_with(\n mock_service.create_volume.return_value)\n\n def test_create_volume_from_image(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateVolume(self._get_context())\n scenario.run(1, image=\"fake_image\")\n mock_service.create_volume.assert_called_once_with(\n 1, imageRef=\"fake_image\")\n\n def test_create_volume_from_image_and_list(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateAndListVolume(self._get_context())\n scenario.run(1, True, \"fake_image\")\n mock_service.create_volume.assert_called_once_with(\n 1, imageRef=\"fake_image\")\n mock_service.list_volumes.assert_called_once_with(True)\n\n def test_create_from_volume_and_delete_volume(self):\n mock_service = self.mock_cinder.return_value\n vol_size = 1\n scenario = volumes.CreateFromVolumeAndDeleteVolume(self._get_context())\n scenario.run(vol_size)\n mock_service.create_volume.assert_called_once_with(\n 1, source_volid=\"uuid\")\n mock_service.delete_volume.assert_called_once_with(\n mock_service.create_volume.return_value)\n\n @mock.patch(\"%s.CreateAndDeleteSnapshot.sleep_between\" % CINDER_VOLUMES)\n def test_create_and_delete_snapshot(self, mock_sleep_between):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateAndDeleteSnapshot(self._get_context())\n scenario.run(False, 10, 20, fakearg=\"f\")\n\n 
mock_service.create_snapshot.assert_called_once_with(\"uuid\",\n force=False,\n fakearg=\"f\")\n mock_sleep_between.assert_called_once_with(10, 20)\n mock_service.delete_snapshot.assert_called_once_with(\n mock_service.create_snapshot.return_value)\n\n def test_create_and_list_snapshots(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateAndListSnapshots(self._get_context())\n scenario.run(False, True, fakearg=\"f\")\n mock_service.create_snapshot.assert_called_once_with(\"uuid\",\n force=False,\n fakearg=\"f\")\n mock_service.list_snapshots.assert_called_once_with(True)\n\n def test_create_and_attach_volume(self):\n fake_server = mock.MagicMock()\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateAndAttachVolume(self._get_context())\n\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n scenario._delete_server = mock.MagicMock()\n scenario._attach_volume = mock.MagicMock()\n scenario._detach_volume = mock.MagicMock()\n\n volume_args = {\"some_key\": \"some_val\"}\n vm_args = {\"some_key\": \"some_val\"}\n\n scenario.run(10, \"img\", \"0\",\n create_volume_params=volume_args,\n create_vm_params=vm_args)\n\n mock_service.create_volume.assert_called_once_with(\n 10, **volume_args)\n scenario._attach_volume.assert_called_once_with(\n fake_server, mock_service.create_volume.return_value)\n scenario._detach_volume.assert_called_once_with(\n fake_server, mock_service.create_volume.return_value)\n\n mock_service.delete_volume.assert_called_once_with(\n mock_service.create_volume.return_value)\n scenario._delete_server.assert_called_once_with(fake_server)\n\n @mock.patch(\"rally_openstack.common.services.image.image.Image\")\n def test_create_and_upload_volume_to_image(self, mock_image):\n mock_volume_service = self.mock_cinder.return_value\n mock_image_service = mock_image.return_value\n scenario = volumes.CreateAndUploadVolumeToImage(self._get_context())\n\n scenario.run(2, image=\"img\", 
container_format=\"fake\",\n disk_format=\"disk\", do_delete=False, fakeargs=\"fakeargs\")\n\n mock_volume_service.create_volume.assert_called_once_with(\n 2, imageRef=\"img\", fakeargs=\"fakeargs\")\n mock_volume_service.upload_volume_to_image.assert_called_once_with(\n mock_volume_service.create_volume.return_value,\n container_format=\"fake\", disk_format=\"disk\", force=False)\n\n mock_volume_service.create_volume.reset_mock()\n mock_volume_service.upload_volume_to_image.reset_mock()\n\n scenario.run(1, image=None, do_delete=True, fakeargs=\"fakeargs\")\n\n mock_volume_service.create_volume.assert_called_once_with(\n 1, fakeargs=\"fakeargs\")\n mock_volume_service.upload_volume_to_image.assert_called_once_with(\n mock_volume_service.create_volume.return_value,\n container_format=\"bare\", disk_format=\"raw\", force=False)\n mock_volume_service.delete_volume.assert_called_once_with(\n mock_volume_service.create_volume.return_value)\n mock_image_service.delete_image.assert_called_once_with(\n mock_volume_service.upload_volume_to_image.return_value.id)\n\n def test_create_snapshot_and_attach_volume(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateSnapshotAndAttachVolume(self._get_context())\n scenario._boot_server = mock.MagicMock()\n scenario._attach_volume = mock.MagicMock()\n scenario._detach_volume = mock.MagicMock()\n scenario.run(\"img\", \"flavor\")\n\n self.assertTrue(mock_service.create_volume.called)\n volume = mock_service.create_volume.return_value\n snapshot = mock_service.create_snapshot.return_value\n mock_service.create_snapshot.assert_called_once_with(volume.id,\n force=False)\n mock_service.delete_snapshot.assert_called_once_with(snapshot)\n scenario._attach_volume.assert_called_once_with(\n scenario._boot_server.return_value, volume)\n scenario._detach_volume.assert_called_once_with(\n scenario._boot_server.return_value, volume)\n mock_service.delete_volume.assert_called_once_with(volume)\n\n 
@mock.patch(\"random.choice\")\n def test_create_snapshot_and_attach_volume_use_volume_type_with_name(\n self, mock_choice):\n mock_service = self.mock_cinder.return_value\n\n scenario = volumes.CreateSnapshotAndAttachVolume(self._get_context())\n scenario._boot_server = mock.MagicMock()\n scenario._attach_volume = mock.MagicMock()\n scenario._detach_volume = mock.MagicMock()\n scenario.run(\"img\", \"flavor\", volume_type=\"type\")\n\n fake_volume = mock_service.create_volume.return_value\n fake_server = scenario._boot_server.return_value\n fake_snapshot = mock_service.create_snapshot.return_value\n\n mock_service.create_volume.assert_called_once_with(\n {\"min\": 1, \"max\": 5}, volume_type=\"type\")\n mock_service.create_snapshot.assert_called_once_with(fake_volume.id,\n force=False)\n mock_service.delete_snapshot.assert_called_once_with(fake_snapshot)\n scenario._attach_volume.assert_called_once_with(fake_server,\n fake_volume)\n scenario._detach_volume.assert_called_once_with(fake_server,\n fake_volume)\n mock_service.delete_volume.assert_called_once_with(fake_volume)\n\n @mock.patch(\"random.randint\")\n def test_create_nested_snapshots_and_attach_volume(self, mock_randint):\n mock_service = self.mock_cinder.return_value\n mock_randint.return_value = 2\n volume_kwargs = {\"volume_type\": \"type1\"}\n snapshot_kwargs = {\"name\": \"snapshot1\", \"description\": \"snaphot one\"}\n\n scenario = volumes.CreateNestedSnapshotsAndAttachVolume(\n context=self._get_context())\n scenario._boot_server = mock.MagicMock()\n scenario._attach_volume = mock.MagicMock()\n scenario._detach_volume = mock.MagicMock()\n scenario.run(\"img\", \"flavor\", create_volume_kwargs=volume_kwargs,\n create_snapshot_kwargs=snapshot_kwargs)\n\n mock_service.create_volume.assert_called_once_with(\n mock_randint.return_value, **volume_kwargs)\n mock_service.create_snapshot.assert_called_once_with(\n mock_service.create_volume.return_value.id, force=False,\n **snapshot_kwargs)\n 
scenario._attach_volume(scenario._boot_server.return_value,\n mock_service.create_volume.return_value)\n mock_service.delete_volume.assert_called_once_with(\n mock_service.create_volume.return_value)\n mock_service.delete_snapshot.assert_called_once_with(\n mock_service.create_snapshot.return_value)\n scenario._detach_volume.assert_called_once_with(\n scenario._boot_server.return_value,\n mock_service.create_volume.return_value)\n\n @mock.patch(\"random.randint\")\n def test_create_nested_snapshots_and_attach_volume_2(self, mock_randint):\n mock_service = self.mock_cinder.return_value\n mock_randint.return_value = 2\n nested_level = 3\n volume_size = mock_randint.return_value\n fake_volumes = [mock.Mock(size=volume_size)\n for i in range(nested_level)]\n fake_snapshots = [mock.Mock()\n for i in range(nested_level)]\n mock_service.create_volume.side_effect = fake_volumes\n mock_service.create_snapshot.side_effect = fake_snapshots\n\n scenario = volumes.CreateNestedSnapshotsAndAttachVolume(\n context=self._get_context())\n scenario._boot_server = mock.MagicMock()\n scenario._attach_volume = mock.MagicMock()\n scenario._detach_volume = mock.MagicMock()\n scenario.run(\"img\", \"flavor\", nested_level=nested_level)\n\n expected_volumes = [mock.call(volume_size)]\n expected_snapshots = [mock.call(fake_volumes[0].id, force=False)]\n expected_attachs = [mock.call(scenario._boot_server.return_value,\n fake_volumes[0])]\n for i in range(nested_level - 1):\n expected_volumes.append(\n mock.call(volume_size, snapshot_id=fake_snapshots[i].id))\n expected_snapshots.append(\n mock.call(fake_volumes[i + 1].id, force=False))\n expected_attachs.append(\n mock.call(scenario._boot_server.return_value,\n fake_volumes[i + 1]))\n\n mock_service.create_volume.assert_has_calls(expected_volumes)\n mock_service.create_snapshot.assert_has_calls(expected_snapshots)\n scenario._attach_volume.assert_has_calls(expected_attachs)\n fake_volumes.reverse()\n fake_snapshots.reverse()\n 
mock_service.delete_volume.assert_has_calls(\n [mock.call(volume) for volume in fake_volumes])\n mock_service.delete_snapshot.assert_has_calls(\n [mock.call(snapshot) for snapshot in fake_snapshots])\n scenario._detach_volume.assert_has_calls(\n [mock.call(scenario._boot_server.return_value,\n fake_volumes[i])\n for i in range(len(fake_volumes))])\n\n def test_create_volume_backup(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateVolumeBackup(self._get_context())\n\n volume_kwargs = {\"some_var\": \"zaq\"}\n scenario.run(1, do_delete=True, create_volume_kwargs=volume_kwargs)\n mock_service.create_volume.assert_called_once_with(1, **volume_kwargs)\n mock_service.create_backup.assert_called_once_with(\n mock_service.create_volume.return_value.id)\n mock_service.delete_volume.assert_called_once_with(\n mock_service.create_volume.return_value)\n mock_service.delete_backup.assert_called_once_with(\n mock_service.create_backup.return_value)\n\n def test_create_volume_backup_no_delete(self):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateVolumeBackup(self._get_context())\n\n volume_kwargs = {\"some_var\": \"zaq\"}\n scenario.run(1, do_delete=False, create_volume_kwargs=volume_kwargs)\n mock_service.create_volume.assert_called_once_with(1, **volume_kwargs)\n mock_service.create_backup.assert_called_once_with(\n mock_service.create_volume.return_value.id)\n self.assertFalse(mock_service.delete_volume.called)\n self.assertFalse(mock_service.delete_backup.called)\n\n def test_create_and_restore_volume_backup(self):\n mock_service = self.mock_cinder.return_value\n volume_kwargs = {\"some_var\": \"zaq\"}\n\n scenario = volumes.CreateAndRestoreVolumeBackup(self._get_context())\n scenario.run(1, do_delete=True, create_volume_kwargs=volume_kwargs)\n\n fake_volume = mock_service.create_volume.return_value\n fake_backup = mock_service.create_backup.return_value\n mock_service.create_volume.assert_called_once_with(1, 
**volume_kwargs)\n mock_service.create_backup.assert_called_once_with(fake_volume.id)\n mock_service.restore_backup.assert_called_once_with(fake_backup.id)\n mock_service.delete_volume.assert_called_once_with(fake_volume)\n mock_service.delete_backup.assert_called_once_with(fake_backup)\n\n def test_create_and_restore_volume_backup_no_delete(self):\n mock_service = self.mock_cinder.return_value\n volume_kwargs = {\"some_var\": \"zaq\"}\n scenario = volumes.CreateAndRestoreVolumeBackup(self._get_context())\n scenario.run(1, do_delete=False, create_volume_kwargs=volume_kwargs)\n\n fake_volume = mock_service.create_volume.return_value\n fake_backup = mock_service.create_backup.return_value\n mock_service.create_volume.assert_called_once_with(1, **volume_kwargs)\n mock_service.create_backup.assert_called_once_with(fake_volume.id)\n mock_service.restore_backup.assert_called_once_with(fake_backup.id)\n self.assertFalse(mock_service.delete_volume.called)\n self.assertFalse(mock_service.delete_backup.called)\n\n def test_create_and_list_volume_backups(self):\n mock_service = self.mock_cinder.return_value\n volume_kwargs = {\"some_var\": \"zaq\"}\n scenario = volumes.CreateAndListVolumeBackups(self._get_context())\n scenario.run(1, detailed=True, do_delete=True,\n create_volume_kwargs=volume_kwargs)\n\n fake_volume = mock_service.create_volume.return_value\n fake_backup = mock_service.create_backup.return_value\n mock_service.create_volume.assert_called_once_with(1, **volume_kwargs)\n mock_service.create_backup.assert_called_once_with(fake_volume.id)\n mock_service.list_backups.assert_called_once_with(True)\n mock_service.delete_volume.assert_called_once_with(fake_volume)\n mock_service.delete_backup.assert_called_once_with(fake_backup)\n\n def test_create_and_list_volume_backups_no_delete(self):\n mock_service = self.mock_cinder.return_value\n volume_kwargs = {\"some_var\": \"zaq\"}\n scenario = volumes.CreateAndListVolumeBackups(self._get_context())\n scenario.run(1, 
detailed=True, do_delete=False,\n create_volume_kwargs=volume_kwargs)\n\n fake_volume = mock_service.create_volume.return_value\n mock_service.create_volume.assert_called_once_with(1, **volume_kwargs)\n mock_service.create_backup.assert_called_once_with(fake_volume.id)\n mock_service.list_backups.assert_called_once_with(True)\n self.assertFalse(mock_service.delete_volume.called)\n self.assertFalse(mock_service.delete_backup.called)\n\n @ddt.data({},\n {\"nested_level\": 2},\n {\"image\": \"img\"})\n @ddt.unpack\n def test_create_volume_and_clone(self, nested_level=1,\n image=None):\n create_volumes_count = nested_level + 1\n fake_volumes = [mock.Mock(size=1)\n for i in range(create_volumes_count)]\n mock_service = self.mock_cinder.return_value\n mock_service.create_volume.side_effect = fake_volumes\n\n scenario = volumes.CreateVolumeAndClone(self._get_context())\n scenario.run(1, image=image, nested_level=nested_level,\n fakearg=\"fake\")\n\n expected = [mock.call(1, imageRef=image, fakearg=\"fake\")\n if image else mock.call(1, fakearg=\"fake\")]\n for i in range(nested_level):\n expected.append(mock.call(fake_volumes[i].size,\n source_volid=fake_volumes[i].id,\n fakearg=\"fake\")\n )\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"cinder.clone_volume\",\n count=nested_level)\n mock_service.create_volume.assert_has_calls(expected)\n\n def test_create_volume_from_snapshot(self):\n mock_service = self.mock_cinder.return_value\n create_snapshot_args = {\"force\": False}\n\n scenario = volumes.CreateVolumeFromSnapshot(self._get_context())\n scenario.run(fakearg=\"f\")\n\n fake_snapshot = mock_service.create_snapshot.return_value\n fake_volume = mock_service.create_volume.return_value\n mock_service.create_snapshot.assert_called_once_with(\"uuid\")\n mock_service.create_volume.assert_called_once_with(\n 1, snapshot_id=fake_snapshot.id, fakearg=\"f\")\n mock_service.delete_snapshot.assert_called_once_with(fake_snapshot)\n 
mock_service.delete_volume.assert_called_once_with(fake_volume)\n\n mock_service.create_snapshot.reset_mock()\n mock_service.create_volume.reset_mock()\n mock_service.delete_snapshot.reset_mock()\n mock_service.delete_volume.reset_mock()\n\n scenario.run(do_delete=False,\n create_snapshot_kwargs=create_snapshot_args,\n fakearg=\"f\")\n\n mock_service.create_snapshot.assert_called_once_with(\n \"uuid\", **create_snapshot_args)\n mock_service.create_volume.assert_called_once_with(\n 1, snapshot_id=fake_snapshot.id, fakearg=\"f\")\n self.assertFalse(mock_service.delete_snapshot.called)\n self.assertFalse(mock_service.delete_volume.called)\n\n @ddt.data({},\n {\"image\": \"img\"})\n @ddt.unpack\n def test_create_and_accept_transfer(self, image=None):\n mock_service = self.mock_cinder.return_value\n scenario = volumes.CreateAndAcceptTransfer(self._get_context())\n scenario.run(1, image=image, fakearg=\"fake\")\n\n expected = [mock.call(1, imageRef=image, fakearg=\"fake\")\n if image else mock.call(1, fakearg=\"fake\")]\n mock_service.create_volume.assert_has_calls(expected)\n mock_service.transfer_create.assert_called_once_with(\n mock_service.create_volume.return_value.id)\n mock_service.transfer_accept.assert_called_once_with(\n mock_service.transfer_create.return_value.id,\n auth_key=mock_service.transfer_create.return_value.auth_key)\n" }, { "alpha_fraction": 0.6197466254234314, "alphanum_fraction": 0.6205509901046753, "avg_line_length": 42.622806549072266, "blob_id": "a9d777d4a8163a73d46d3287b4ce02f5fb7f7264", "content_id": "f1a03cda9b8b52e5b6e4feafc6c629d50d09f964", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9946, "license_type": "permissive", "max_line_length": 79, "num_lines": 228, "path": "/tests/unit/task/scenarios/murano/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed 
under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally.common import cfg\n\nfrom rally_openstack.task.scenarios.murano import utils\nfrom tests.unit import test\n\nMRN_UTILS = \"rally_openstack.task.scenarios.murano.utils\"\nCONF = cfg.CONF\n\n\nclass MuranoScenarioTestCase(test.ScenarioTestCase):\n\n def test_list_environments(self):\n self.clients(\"murano\").environments.list.return_value = []\n scenario = utils.MuranoScenario(context=self.context)\n return_environments_list = scenario._list_environments()\n self.assertEqual([], return_environments_list)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"murano.list_environments\")\n\n def test_create_environments(self):\n self.clients(\"murano\").environments.create = mock.Mock()\n scenario = utils.MuranoScenario(context=self.context)\n scenario.generate_random_name = mock.Mock()\n\n create_env = scenario._create_environment()\n self.assertEqual(\n create_env,\n self.clients(\"murano\").environments.create.return_value)\n self.clients(\"murano\").environments.create.assert_called_once_with(\n {\"name\": scenario.generate_random_name.return_value})\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"murano.create_environment\")\n\n def test_delete_environment(self):\n environment = mock.Mock(id=\"id\")\n self.clients(\"murano\").environments.delete.return_value = \"ok\"\n scenario = utils.MuranoScenario(context=self.context)\n 
scenario._delete_environment(environment)\n self.clients(\"murano\").environments.delete.assert_called_once_with(\n environment.id\n )\n\n def test_create_session(self):\n self.clients(\"murano\").sessions.configure.return_value = \"sess\"\n scenario = utils.MuranoScenario(context=self.context)\n create_sess = scenario._create_session(\"id\")\n self.assertEqual(\"sess\", create_sess)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"murano.create_session\")\n\n def test__create_service(self,):\n self.clients(\"murano\").services.post.return_value = \"app\"\n mock_env = mock.Mock(id=\"ip\")\n mock_sess = mock.Mock(id=\"ip\")\n scenario = utils.MuranoScenario(context=self.context)\n\n create_app = scenario._create_service(mock_env, mock_sess,\n \"fake_full_name\")\n\n self.assertEqual(\"app\", create_app)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"murano.create_service\")\n\n def test_deploy_environment(self):\n environment = mock.Mock(id=\"id\")\n session = mock.Mock(id=\"id\")\n self.clients(\"murano\").sessions.deploy.return_value = \"ok\"\n scenario = utils.MuranoScenario(context=self.context)\n scenario._deploy_environment(environment, session)\n\n self.clients(\"murano\").sessions.deploy.assert_called_once_with(\n environment.id, session.id\n )\n\n config = CONF.openstack\n self.mock_wait_for_status.mock.assert_called_once_with(\n environment,\n update_resource=self.mock_get_from_manager.mock.return_value,\n ready_statuses=[\"READY\"],\n check_interval=config.murano_deploy_environment_check_interval,\n timeout=config.murano_deploy_environment_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with(\n [\"DEPLOY FAILURE\"])\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"murano.deploy_environment\")\n\n @mock.patch(MRN_UTILS + \".open\",\n side_effect=mock.mock_open(read_data=\"Key: value\"),\n create=True)\n def test_read_from_file(self, mock_open):\n utility = utils.MuranoPackageManager({\"uuid\": 
\"fake_task_id\"})\n data = utility._read_from_file(\"filename\")\n expected_data = {\"Key\": \"value\"}\n self.assertEqual(expected_data, data)\n\n @mock.patch(MRN_UTILS + \".MuranoPackageManager._read_from_file\")\n @mock.patch(MRN_UTILS + \".MuranoPackageManager._write_to_file\")\n def test_change_app_fullname(\n self, mock_murano_package_manager__write_to_file,\n mock_murano_package_manager__read_from_file):\n manifest = {\"FullName\": \"app.name_abc\",\n \"Classes\": {\"app.name_abc\": \"app_class.yaml\"}}\n mock_murano_package_manager__read_from_file.side_effect = (\n [manifest])\n utility = utils.MuranoPackageManager({\"uuid\": \"fake_task_id\"})\n utility._change_app_fullname(\"tmp/tmpfile/\")\n mock_murano_package_manager__read_from_file.assert_has_calls(\n [mock.call(\"tmp/tmpfile/manifest.yaml\")]\n )\n mock_murano_package_manager__write_to_file.assert_has_calls(\n [mock.call(manifest, \"tmp/tmpfile/manifest.yaml\")]\n )\n\n @mock.patch(\"zipfile.is_zipfile\")\n @mock.patch(\"tempfile.mkdtemp\")\n @mock.patch(\"shutil.copytree\")\n @mock.patch(MRN_UTILS + \".MuranoPackageManager._change_app_fullname\")\n @mock.patch(\"%s.pack_dir\" % MRN_UTILS)\n @mock.patch(\"shutil.rmtree\")\n def test_prepare_zip_if_not_zip(\n self, mock_shutil_rmtree, mock_pack_dir,\n mock_murano_package_manager__change_app_fullname,\n mock_shutil_copytree, mock_tempfile_mkdtemp,\n mock_zipfile_is_zipfile):\n utility = utils.MuranoPackageManager({\"uuid\": \"fake_task_id\"})\n package_path = \"tmp/tmpfile\"\n\n mock_zipfile_is_zipfile.return_value = False\n mock_tempfile_mkdtemp.return_value = \"tmp/tmpfile\"\n mock_pack_dir.return_value = \"tmp/tmpzipfile\"\n\n zip_file = utility._prepare_package(package_path)\n\n self.assertEqual(\"tmp/tmpzipfile\", zip_file)\n mock_tempfile_mkdtemp.assert_called_once_with()\n mock_shutil_copytree.assert_called_once_with(\n \"tmp/tmpfile\",\n \"tmp/tmpfile/package/\"\n )\n (mock_murano_package_manager__change_app_fullname.\n 
assert_called_once_with(\"tmp/tmpfile/package/\"))\n mock_shutil_rmtree.assert_called_once_with(\"tmp/tmpfile\")\n\n @mock.patch(\"zipfile.is_zipfile\")\n def test_prepare_zip_if_zip(self, mock_zipfile_is_zipfile):\n utility = utils.MuranoPackageManager({\"uuid\": \"fake_task_id\"})\n package_path = \"tmp/tmpfile.zip\"\n mock_zipfile_is_zipfile.return_value = True\n zip_file = utility._prepare_package(package_path)\n self.assertEqual(\"tmp/tmpfile.zip\", zip_file)\n\n def test_list_packages(self):\n scenario = utils.MuranoScenario()\n self.assertEqual(self.clients(\"murano\").packages.list.return_value,\n scenario._list_packages())\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"murano.list_packages\")\n\n @mock.patch(MRN_UTILS + \".open\", create=True)\n def test_import_package(self, mock_open):\n self.clients(\"murano\").packages.create.return_value = (\n \"created_foo_package\"\n )\n scenario = utils.MuranoScenario()\n mock_open.return_value = \"opened_foo_package.zip\"\n imp_package = scenario._import_package(\"foo_package.zip\")\n self.assertEqual(\"created_foo_package\", imp_package)\n self.clients(\"murano\").packages.create.assert_called_once_with(\n {}, {\"file\": \"opened_foo_package.zip\"})\n mock_open.assert_called_once_with(\"foo_package.zip\")\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"murano.import_package\")\n\n def test_delete_package(self):\n package = mock.Mock(id=\"package_id\")\n scenario = utils.MuranoScenario()\n scenario._delete_package(package)\n self.clients(\"murano\").packages.delete.assert_called_once_with(\n \"package_id\"\n )\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"murano.delete_package\")\n\n def test_update_package(self):\n package = mock.Mock(id=\"package_id\")\n self.clients(\"murano\").packages.update.return_value = \"updated_package\"\n scenario = utils.MuranoScenario()\n upd_package = scenario._update_package(\n package, {\"tags\": [\"tag\"]}, \"add\"\n )\n 
self.assertEqual(\"updated_package\", upd_package)\n self.clients(\"murano\").packages.update.assert_called_once_with(\n \"package_id\",\n {\"tags\": [\"tag\"]},\n \"add\"\n )\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"murano.update_package\")\n\n def test_filter_packages(self):\n self.clients(\"murano\").packages.filter.return_value = []\n scenario = utils.MuranoScenario()\n return_apps_list = scenario._filter_applications(\n {\"category\": \"Web\"}\n )\n self.assertEqual([], return_apps_list)\n self.clients(\"murano\").packages.filter.assert_called_once_with(\n category=\"Web\"\n )\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"murano.filter_applications\")\n" }, { "alpha_fraction": 0.7158836722373962, "alphanum_fraction": 0.7188665270805359, "avg_line_length": 35.24324417114258, "blob_id": "27cc2f5a617de01666e34d0e96feadea2c673657", "content_id": "7bec1463ba55e295f0ba7eac5ff453cd06ee91ab", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1341, "license_type": "permissive", "max_line_length": 78, "num_lines": 37, "path": "/rally_openstack/task/scenarios/monasca/metrics.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.monasca import utils as monascautils\n\n\n\"\"\"Scenarios for monasca Metrics API.\"\"\"\n\n\[email protected](\"required_services\",\n services=[consts.Service.MONASCA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"MonascaMetrics.list_metrics\", platform=\"openstack\")\nclass ListMetrics(monascautils.MonascaScenario):\n\n def run(self, **kwargs):\n \"\"\"Fetch user's metrics.\n\n :param kwargs: optional arguments for list query:\n name, dimensions, start_time, etc\n \"\"\"\n self._list_metrics(**kwargs)\n" }, { "alpha_fraction": 0.6124250292778015, "alphanum_fraction": 0.6154563426971436, "avg_line_length": 40.578948974609375, "blob_id": "e3015e5c35e31444d869543f8feb214cddba56c5", "content_id": "e6b1bf2f134ba35deb5209f3081ec3e1b59e35c0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30020, "license_type": "permissive", "max_line_length": 79, "num_lines": 722, "path": "/tests/unit/common/services/storage/test_cinder_common.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\nimport uuid\n\nimport ddt\n\nfrom rally.common import cfg\nfrom rally import exceptions\n\nfrom rally_openstack.common import service\nfrom rally_openstack.common.services.storage import block\nfrom rally_openstack.common.services.storage import cinder_common\nfrom tests.unit import fakes\nfrom tests.unit import test\n\nBASE_PATH = \"rally_openstack.common.services.storage\"\nCONF = cfg.CONF\n\n\nclass FullCinder(service.Service, cinder_common.CinderMixin):\n \"\"\"Implementation of CinderMixin with Service base class.\"\"\"\n pass\n\n\[email protected]\nclass CinderMixinTestCase(test.ScenarioTestCase):\n def setUp(self):\n super(CinderMixinTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.cinder = self.clients.cinder.return_value\n self.name_generator = uuid.uuid1\n self.version = \"some\"\n self.service = FullCinder(\n clients=self.clients, name_generator=self.name_generator)\n self.service.version = self.version\n\n def atomic_actions(self):\n return self.service._atomic_actions\n\n def test__get_client(self):\n self.assertEqual(self.cinder,\n self.service._get_client())\n\n def test__update_resource_with_manage(self):\n resource = mock.MagicMock(id=1, manager=mock.MagicMock())\n self.assertEqual(resource.manager.get.return_value,\n self.service._update_resource(resource))\n resource.manager.get.assert_called_once_with(\n resource.id)\n\n @ddt.data({\"resource\": block.Volume(id=1, name=\"vol\",\n size=1, status=\"st\"),\n \"attr\": \"volumes\"},\n {\"resource\": block.VolumeSnapshot(id=2, name=\"snapshot\",\n volume_id=1, status=\"st\"),\n \"attr\": \"volume_snapshots\"},\n {\"resource\": block.VolumeBackup(id=3, name=\"backup\",\n volume_id=1, status=\"st\"),\n \"attr\": \"backups\"})\n @ddt.unpack\n def test__update_resource_with_no_manage(self, resource, attr):\n self.assertEqual(getattr(self.cinder, 
attr).get.return_value,\n self.service._update_resource(resource))\n getattr(self.cinder, attr).get.assert_called_once_with(\n resource.id)\n\n def test__update_resource_with_not_found(self):\n manager = mock.MagicMock()\n resource = fakes.FakeResource(manager=manager, status=\"ERROR\")\n\n class NotFoundException(Exception):\n http_status = 404\n\n manager.get = mock.MagicMock(side_effect=NotFoundException)\n self.assertRaises(exceptions.GetResourceNotFound,\n self.service._update_resource, resource)\n\n def test__update_resource_with_http_exception(self):\n manager = mock.MagicMock()\n resource = fakes.FakeResource(manager=manager, status=\"ERROR\")\n\n class HTTPException(Exception):\n pass\n\n manager.get = mock.MagicMock(side_effect=HTTPException)\n self.assertRaises(exceptions.GetResourceFailure,\n self.service._update_resource, resource)\n\n def test__wait_available_volume(self):\n volume = fakes.FakeVolume()\n self.assertEqual(self.mock_wait_for_status.mock.return_value,\n self.service._wait_available_volume(volume))\n\n self.mock_wait_for_status.mock.assert_called_once_with(\n volume,\n ready_statuses=[\"available\"],\n update_resource=self.service._update_resource,\n timeout=CONF.openstack.cinder_volume_create_timeout,\n check_interval=CONF.openstack.cinder_volume_create_poll_interval\n )\n\n def test_get_volume(self):\n self.assertEqual(self.cinder.volumes.get.return_value,\n self.service.get_volume(1))\n self.cinder.volumes.get.assert_called_once_with(1)\n\n @mock.patch(\"%s.block.BlockStorage.create_volume\" % BASE_PATH)\n def test_delete_volume(self, mock_create_volume):\n volume = mock_create_volume.return_value\n self.service.delete_volume(volume)\n\n self.cinder.volumes.delete.assert_called_once_with(volume)\n self.mock_wait_for_status.mock.assert_called_once_with(\n volume,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self.service._update_resource,\n timeout=CONF.openstack.cinder_volume_delete_timeout,\n 
check_interval=CONF.openstack.cinder_volume_delete_poll_interval\n )\n\n @mock.patch(\"%s.block.BlockStorage.create_volume\" % BASE_PATH)\n def test_extend_volume(self, mock_create_volume):\n volume = mock_create_volume.return_value\n self.service._wait_available_volume = mock.MagicMock()\n self.service._wait_available_volume.return_value = fakes.FakeVolume()\n\n self.assertEqual(self.service._wait_available_volume.return_value,\n self.service.extend_volume(volume, 1))\n\n self.cinder.volumes.extend.assert_called_once_with(volume, 1)\n self.service._wait_available_volume.assert_called_once_with(volume)\n\n def test_list_snapshots(self):\n self.assertEqual(self.cinder.volume_snapshots.list.return_value,\n self.service.list_snapshots())\n self.cinder.volume_snapshots.list.assert_called_once_with(True)\n\n def test_set_metadata(self):\n volume = fakes.FakeVolume()\n\n self.service.set_metadata(volume, sets=2, set_size=4)\n calls = self.cinder.volumes.set_metadata.call_args_list\n self.assertEqual(2, len(calls))\n for call in calls:\n call_volume, metadata = call[0]\n self.assertEqual(volume, call_volume)\n self.assertEqual(4, len(metadata))\n\n def test_delete_metadata(self):\n volume = fakes.FakeVolume()\n\n keys = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\"]\n self.service.delete_metadata(volume, keys, deletes=3, delete_size=4)\n calls = self.cinder.volumes.delete_metadata.call_args_list\n self.assertEqual(3, len(calls))\n all_deleted = []\n for call in calls:\n call_volume, del_keys = call[0]\n self.assertEqual(volume, call_volume)\n self.assertEqual(4, len(del_keys))\n for key in del_keys:\n self.assertIn(key, keys)\n self.assertNotIn(key, all_deleted)\n all_deleted.append(key)\n\n def test_delete_metadata_not_enough_keys(self):\n volume = fakes.FakeVolume()\n\n keys = [\"a\", \"b\", \"c\", \"d\", \"e\"]\n self.assertRaises(exceptions.InvalidArgumentsException,\n self.service.delete_metadata,\n volume, keys, deletes=2, 
delete_size=3)\n\n def test_update_readonly_flag(self):\n fake_volume = mock.MagicMock()\n self.service.update_readonly_flag(fake_volume, \"fake_flag\")\n self.cinder.volumes.update_readonly_flag.assert_called_once_with(\n fake_volume, \"fake_flag\")\n\n @mock.patch(\"rally_openstack.common.services.image.image.Image\")\n def test_upload_volume_to_image(self, mock_image):\n volume = mock.Mock()\n image = {\"os-volume_upload_image\": {\"image_id\": 1}}\n self.cinder.volumes.upload_to_image.return_value = (None, image)\n glance = mock_image.return_value\n\n self.service.generate_random_name = mock.Mock(\n return_value=\"test_vol\")\n self.service.upload_volume_to_image(volume, False,\n \"container\", \"disk\")\n\n self.cinder.volumes.upload_to_image.assert_called_once_with(\n volume, False, \"test_vol\", \"container\", \"disk\")\n self.mock_wait_for_status.mock.assert_has_calls([\n mock.call(\n volume,\n ready_statuses=[\"available\"],\n update_resource=self.service._update_resource,\n timeout=CONF.openstack.cinder_volume_create_timeout,\n check_interval=CONF.openstack.\n cinder_volume_create_poll_interval),\n mock.call(\n glance.get_image.return_value,\n ready_statuses=[\"active\"],\n update_resource=glance.get_image,\n timeout=CONF.openstack.glance_image_create_timeout,\n check_interval=CONF.openstack.\n glance_image_create_poll_interval)\n ])\n glance.get_image.assert_called_once_with(1)\n\n def test_create_qos(self):\n specs = {\"consumer\": \"both\",\n \"write_iops_sec\": \"10\",\n \"read_iops_sec\": \"1000\"}\n random_name = \"random_name\"\n self.service.generate_random_name = mock.MagicMock(\n return_value=random_name)\n\n result = self.service.create_qos(specs)\n self.assertEqual(\n self.cinder.qos_specs.create.return_value,\n result\n )\n self.cinder.qos_specs.create.assert_called_once_with(random_name,\n specs)\n\n def test_list_qos(self):\n result = self.service.list_qos(True)\n self.assertEqual(\n self.cinder.qos_specs.list.return_value,\n result\n )\n 
self.cinder.qos_specs.list.assert_called_once_with(True)\n\n def test_get_qos(self):\n result = self.service.get_qos(\"qos\")\n self.assertEqual(\n self.cinder.qos_specs.get.return_value,\n result)\n self.cinder.qos_specs.get.assert_called_once_with(\"qos\")\n\n def test_set_qos(self):\n set_specs_args = {\"test\": \"foo\"}\n result = self.service.set_qos(\"qos\", set_specs_args)\n self.assertEqual(\n self.cinder.qos_specs.set_keys.return_value,\n result)\n self.cinder.qos_specs.set_keys.assert_called_once_with(\"qos\",\n set_specs_args)\n\n def test_qos_associate_type(self):\n self.service.qos_associate_type(\"qos\", \"type_id\")\n self.cinder.qos_specs.associate.assert_called_once_with(\n \"qos\", \"type_id\")\n\n def test_qos_disassociate_type(self):\n self.service.qos_disassociate_type(\"qos\", \"type_id\")\n self.cinder.qos_specs.disassociate.assert_called_once_with(\n \"qos\", \"type_id\")\n\n def test_delete_snapshot(self):\n snapshot = mock.Mock()\n self.service.delete_snapshot(snapshot)\n self.cinder.volume_snapshots.delete.assert_called_once_with(snapshot)\n self.mock_wait_for_status.mock.assert_called_once_with(\n snapshot,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self.service._update_resource,\n timeout=cfg.CONF.openstack.cinder_volume_create_timeout,\n check_interval=cfg.CONF.openstack\n .cinder_volume_create_poll_interval)\n\n def test_delete_backup(self):\n backup = mock.Mock()\n self.service.delete_backup(backup)\n self.cinder.backups.delete.assert_called_once_with(backup)\n self.mock_wait_for_status.mock.assert_called_once_with(\n backup,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self.service._update_resource,\n timeout=cfg.CONF.openstack.cinder_volume_create_timeout,\n check_interval=cfg.CONF.openstack\n .cinder_volume_create_poll_interval)\n\n def test_restore_backup(self):\n backup = mock.Mock()\n self.service._wait_available_volume = mock.MagicMock()\n 
self.service._wait_available_volume.return_value = mock.Mock()\n\n return_restore = self.service.restore_backup(backup.id, None)\n\n self.cinder.restores.restore.assert_called_once_with(backup.id, None)\n self.cinder.volumes.get.assert_called_once_with(\n self.cinder.restores.restore.return_value.volume_id)\n self.service._wait_available_volume.assert_called_once_with(\n self.cinder.volumes.get.return_value)\n self.assertEqual(self.service._wait_available_volume.return_value,\n return_restore)\n\n def test_list_backups(self):\n return_backups_list = self.service.list_backups()\n self.assertEqual(\n self.cinder.backups.list.return_value,\n return_backups_list)\n\n def test_list_transfers(self):\n return_transfers_list = self.service.list_transfers()\n self.assertEqual(\n self.cinder.transfers.list.return_value,\n return_transfers_list)\n\n def test_get_volume_type(self):\n self.assertEqual(self.cinder.volume_types.get.return_value,\n self.service.get_volume_type(\"volume_type\"))\n self.cinder.volume_types.get.assert_called_once_with(\n \"volume_type\")\n\n def test_delete_volume_type(self):\n volume_type = mock.Mock()\n self.service.delete_volume_type(volume_type)\n self.cinder.volume_types.delete.assert_called_once_with(\n volume_type)\n\n def test_set_volume_type_keys(self):\n volume_type = mock.Mock()\n self.assertEqual(volume_type.set_keys.return_value,\n self.service.set_volume_type_keys(\n volume_type, metadata=\"metadata\"))\n\n volume_type.set_keys.assert_called_once_with(\"metadata\")\n\n def test_transfer_create(self):\n fake_volume = mock.MagicMock()\n random_name = \"random_name\"\n self.service.generate_random_name = mock.MagicMock(\n return_value=random_name)\n result = self.service.transfer_create(fake_volume.id)\n self.assertEqual(\n self.cinder.transfers.create.return_value,\n result)\n self.cinder.transfers.create.assert_called_once_with(\n fake_volume.id, name=random_name)\n\n def test_transfer_create_with_name(self):\n fake_volume = 
mock.MagicMock()\n result = self.service.transfer_create(fake_volume.id, name=\"t\")\n self.assertEqual(\n self.cinder.transfers.create.return_value,\n result)\n self.cinder.transfers.create.assert_called_once_with(\n fake_volume.id, name=\"t\")\n\n def test_transfer_accept(self):\n fake_transfer = mock.MagicMock()\n result = self.service.transfer_accept(fake_transfer.id, \"fake_key\")\n self.assertEqual(\n self.cinder.transfers.accept.return_value,\n result)\n self.cinder.transfers.accept.assert_called_once_with(\n fake_transfer.id, \"fake_key\")\n\n def test_create_encryption_type(self):\n volume_type = mock.Mock()\n specs = {\n \"provider\": \"foo_pro\",\n \"cipher\": \"foo_cip\",\n \"key_size\": 512,\n \"control_location\": \"foo_con\"\n }\n result = self.service.create_encryption_type(volume_type, specs)\n\n self.assertEqual(\n self.cinder.volume_encryption_types.create.return_value, result)\n self.cinder.volume_encryption_types.create.assert_called_once_with(\n volume_type, specs)\n\n def test_get_encryption_type(self):\n volume_type = mock.Mock()\n result = self.service.get_encryption_type(volume_type)\n\n self.assertEqual(\n self.cinder.volume_encryption_types.get.return_value, result)\n self.cinder.volume_encryption_types.get.assert_called_once_with(\n volume_type)\n\n def test_list_encryption_type(self):\n return_encryption_types_list = self.service.list_encryption_type()\n self.assertEqual(self.cinder.volume_encryption_types.list.return_value,\n return_encryption_types_list)\n\n def test_delete_encryption_type(self):\n resp = mock.MagicMock(status_code=202)\n self.cinder.volume_encryption_types.delete.return_value = [resp]\n self.service.delete_encryption_type(\"type\")\n self.cinder.volume_encryption_types.delete.assert_called_once_with(\n \"type\")\n\n def test_delete_encryption_type_raise(self):\n resp = mock.MagicMock(status_code=404)\n self.cinder.volume_encryption_types.delete.return_value = [resp]\n self.assertRaises(exceptions.RallyException,\n 
self.service.delete_encryption_type, \"type\")\n self.cinder.volume_encryption_types.delete.assert_called_once_with(\n \"type\")\n\n def test_update_encryption_type(self):\n volume_type = mock.Mock()\n specs = {\n \"provider\": \"foo_pro\",\n \"cipher\": \"foo_cip\",\n \"key_size\": 512,\n \"control_location\": \"foo_con\"\n }\n result = self.service.update_encryption_type(volume_type, specs)\n\n self.assertEqual(\n self.cinder.volume_encryption_types.update.return_value, result)\n self.cinder.volume_encryption_types.update.assert_called_once_with(\n volume_type, specs)\n\n\nclass FullUnifiedCinder(cinder_common.UnifiedCinderMixin,\n service.Service):\n \"\"\"Implementation of UnifiedCinderMixin with Service base class.\"\"\"\n pass\n\n\nclass UnifiedCinderMixinTestCase(test.TestCase):\n def setUp(self):\n super(UnifiedCinderMixinTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.name_generator = mock.MagicMock()\n self.impl = mock.MagicMock()\n self.version = \"some\"\n self.service = FullUnifiedCinder(\n clients=self.clients, name_generator=self.name_generator)\n self.service._impl = self.impl\n self.service.version = self.version\n\n def test__unify_backup(self):\n class SomeBackup(object):\n id = 1\n name = \"backup\"\n volume_id = \"volume\"\n status = \"st\"\n backup = self.service._unify_backup(SomeBackup())\n self.assertEqual(1, backup.id)\n self.assertEqual(\"backup\", backup.name)\n self.assertEqual(\"volume\", backup.volume_id)\n self.assertEqual(\"st\", backup.status)\n\n def test__unify_transfer(self):\n class SomeTransfer(object):\n id = 1\n name = \"transfer\"\n volume_id = \"volume\"\n status = \"st\"\n transfer = self.service._unify_backup(SomeTransfer())\n self.assertEqual(1, transfer.id)\n self.assertEqual(\"transfer\", transfer.name)\n self.assertEqual(\"volume\", transfer.volume_id)\n self.assertEqual(\"st\", transfer.status)\n\n def test__unify_qos(self):\n class Qos(object):\n id = 1\n name = \"qos\"\n specs = {\"key1\": 
\"value1\"}\n qos = self.service._unify_qos(Qos())\n self.assertEqual(1, qos.id)\n self.assertEqual(\"qos\", qos.name)\n self.assertEqual({\"key1\": \"value1\"}, qos.specs)\n\n def test__unify_encryption_type(self):\n class SomeEncryptionType(object):\n encryption_id = 1\n volume_type_id = \"volume_type\"\n encryption_type = self.service._unify_encryption_type(\n SomeEncryptionType())\n self.assertEqual(1, encryption_type.id)\n self.assertEqual(\"volume_type\", encryption_type.volume_type_id)\n\n def test_delete_volume(self):\n self.service.delete_volume(\"volume\")\n self.service._impl.delete_volume.assert_called_once_with(\"volume\")\n\n def test_set_metadata(self):\n self.assertEqual(\n self.service._impl.set_metadata.return_value,\n self.service.set_metadata(\"volume\", sets=10, set_size=3))\n self.service._impl.set_metadata.assert_called_once_with(\n \"volume\", set_size=3, sets=10)\n\n def test_delete_metadata(self):\n keys = [\"a\", \"b\"]\n self.service.delete_metadata(\"volume\", keys=keys, deletes=10,\n delete_size=3)\n self.service._impl.delete_metadata.assert_called_once_with(\n \"volume\", keys=keys, delete_size=3, deletes=10)\n\n def test_update_readonly_flag(self):\n self.assertEqual(\n self.service._impl.update_readonly_flag.return_value,\n self.service.update_readonly_flag(\"volume\", read_only=True))\n self.service._impl.update_readonly_flag.assert_called_once_with(\n \"volume\", read_only=True)\n\n def test_upload_volume_to_image(self):\n self.assertEqual(\n self.service._impl.upload_volume_to_image.return_value,\n self.service.upload_volume_to_image(\"volume\",\n force=False,\n container_format=\"bare\",\n disk_format=\"raw\"))\n self.service._impl.upload_volume_to_image.assert_called_once_with(\n \"volume\", container_format=\"bare\", disk_format=\"raw\", force=False)\n\n def test_create_qos(self):\n specs = {\"consumer\": \"both\",\n \"write_iops_sec\": \"10\",\n \"read_iops_sec\": \"1000\"}\n self.service._unify_qos = mock.MagicMock()\n 
self.assertEqual(\n self.service._unify_qos.return_value,\n self.service.create_qos(specs)\n )\n self.service._impl.create_qos.assert_called_once_with(specs)\n self.service._unify_qos.assert_called_once_with(\n self.service._impl.create_qos.return_value\n )\n\n def test_list_qos(self):\n self.service._unify_qos = mock.MagicMock()\n self.service._impl.list_qos.return_value = [\"qos\"]\n self.assertEqual(\n [self.service._unify_qos.return_value],\n self.service.list_qos(True)\n )\n self.service._impl.list_qos.assert_called_once_with(True)\n self.service._unify_qos.assert_called_once_with(\"qos\")\n\n def test_get_qos(self):\n self.service._unify_qos = mock.MagicMock()\n self.assertEqual(\n self.service._unify_qos.return_value,\n self.service.get_qos(\"qos\"))\n self.service._impl.get_qos.assert_called_once_with(\"qos\")\n self.service._unify_qos.assert_called_once_with(\n self.service._impl.get_qos.return_value\n )\n\n def test_set_qos(self):\n set_specs_args = {\"test\": \"foo\"}\n self.service._unify_qos = mock.MagicMock()\n qos = mock.MagicMock()\n self.assertEqual(\n self.service._unify_qos.return_value,\n self.service.set_qos(qos, set_specs_args))\n self.service._impl.set_qos.assert_called_once_with(qos.id,\n set_specs_args)\n self.service._unify_qos.assert_called_once_with(qos)\n\n def test_qos_associate_type(self):\n self.service._unify_qos = mock.MagicMock()\n self.assertEqual(\n self.service._unify_qos.return_value,\n self.service.qos_associate_type(\"qos\", \"type_id\"))\n self.service._impl.qos_associate_type.assert_called_once_with(\n \"qos\", \"type_id\")\n self.service._unify_qos.assert_called_once_with(\"qos\")\n\n def test_qos_disassociate_type(self):\n self.service._unify_qos = mock.MagicMock()\n self.assertEqual(\n self.service._unify_qos.return_value,\n self.service.qos_disassociate_type(\"qos\", \"type_id\"))\n self.service._impl.qos_disassociate_type.assert_called_once_with(\n \"qos\", \"type_id\")\n 
self.service._unify_qos.assert_called_once_with(\"qos\")\n\n def test_delete_snapshot(self):\n self.service.delete_snapshot(\"snapshot\")\n self.service._impl.delete_snapshot.assert_called_once_with(\"snapshot\")\n\n def test_delete_backup(self):\n self.service.delete_backup(\"backup\")\n self.service._impl.delete_backup.assert_called_once_with(\"backup\")\n\n def test_list_backups(self):\n self.service._unify_backup = mock.MagicMock()\n self.service._impl.list_backups.return_value = [\"backup\"]\n self.assertEqual([self.service._unify_backup.return_value],\n self.service.list_backups(detailed=True))\n self.service._impl.list_backups.assert_called_once_with(detailed=True)\n self.service._unify_backup.assert_called_once_with(\n \"backup\")\n\n def test_list_transfers(self):\n self.service._unify_transfer = mock.MagicMock()\n self.service._impl.list_transfers.return_value = [\"transfer\"]\n self.assertEqual(\n [self.service._unify_transfer.return_value],\n self.service.list_transfers(detailed=True, search_opts=None))\n self.service._impl.list_transfers.assert_called_once_with(\n detailed=True, search_opts=None)\n self.service._unify_transfer.assert_called_once_with(\n \"transfer\")\n\n def test_update_volume_type(self):\n self.assertEqual(self.service._impl.update_volume_type.return_value,\n self.service.update_volume_type(\"volume_type\"))\n self.service._impl.update_volume_type.assert_called_once_with(\n volume_type=\"volume_type\", name=None, description=None,\n is_public=None\n )\n\n def test_get_volume_type(self):\n self.assertEqual(self.service._impl.get_volume_type.return_value,\n self.service.get_volume_type(\"volume_type\"))\n self.service._impl.get_volume_type.assert_called_once_with(\n \"volume_type\")\n\n def test_delete_volume_type(self):\n self.assertEqual(self.service._impl.delete_volume_type.return_value,\n self.service.delete_volume_type(\"volume_type\"))\n self.service._impl.delete_volume_type.assert_called_once_with(\n \"volume_type\")\n\n def 
test_add_type_access(self):\n self.assertEqual(self.service._impl.add_type_access.return_value,\n self.service.add_type_access(volume_type=\"some_type\",\n project=\"some_project\"))\n self.service._impl.add_type_access.assert_called_once_with(\n volume_type=\"some_type\", project=\"some_project\")\n\n def test_list_type_access(self):\n self.assertEqual(self.service._impl.list_type_access.return_value,\n self.service.list_type_access(\"some_type\"))\n self.service._impl.list_type_access.assert_called_once_with(\n \"some_type\")\n\n def test_set_volume_type_keys(self):\n self.assertEqual(self.service._impl.set_volume_type_keys.return_value,\n self.service.set_volume_type_keys(\n \"volume_type\", metadata=\"metadata\"))\n self.service._impl.set_volume_type_keys.assert_called_once_with(\n \"volume_type\", \"metadata\")\n\n def test_transfer_create(self):\n self.service._unify_transfer = mock.MagicMock()\n self.assertEqual(self.service._unify_transfer.return_value,\n self.service.transfer_create(1))\n self.service._impl.transfer_create.assert_called_once_with(\n 1, name=None)\n self.service._unify_transfer.assert_called_once_with(\n self.service._impl.transfer_create.return_value)\n\n def test_transfer_accept(self):\n self.service._unify_transfer = mock.MagicMock()\n self.assertEqual(self.service._unify_transfer.return_value,\n self.service.transfer_accept(1, auth_key=2))\n self.service._impl.transfer_accept.assert_called_once_with(\n 1, auth_key=2)\n self.service._unify_transfer.assert_called_once_with(\n self.service._impl.transfer_accept.return_value)\n\n def test_create_encryption_type(self):\n self.service._unify_encryption_type = mock.MagicMock()\n self.assertEqual(\n self.service._unify_encryption_type.return_value,\n self.service.create_encryption_type(\"type\", specs=2))\n self.service._impl.create_encryption_type.assert_called_once_with(\n \"type\", specs=2)\n self.service._unify_encryption_type.assert_called_once_with(\n 
self.service._impl.create_encryption_type.return_value)\n\n def test_get_encryption_type(self):\n self.service._unify_encryption_type = mock.MagicMock()\n self.assertEqual(\n self.service._unify_encryption_type.return_value,\n self.service.get_encryption_type(\"type\"))\n self.service._impl.get_encryption_type.assert_called_once_with(\n \"type\")\n self.service._unify_encryption_type.assert_called_once_with(\n self.service._impl.get_encryption_type.return_value)\n\n def test_list_encryption_type(self):\n self.service._unify_encryption_type = mock.MagicMock()\n self.service._impl.list_encryption_type.return_value = [\"encryption\"]\n self.assertEqual([self.service._unify_encryption_type.return_value],\n self.service.list_encryption_type(search_opts=None))\n self.service._impl.list_encryption_type.assert_called_once_with(\n search_opts=None)\n self.service._unify_encryption_type.assert_called_once_with(\n \"encryption\")\n\n def test_delete_encryption_type(self):\n self.service.delete_encryption_type(\"type\")\n self.service._impl.delete_encryption_type.assert_called_once_with(\n \"type\")\n\n def test_update_encryption_type(self):\n self.service.update_encryption_type(\"type\", specs=3)\n self.service._impl.update_encryption_type.assert_called_once_with(\n \"type\", specs=3)\n" }, { "alpha_fraction": 0.6805292963981628, "alphanum_fraction": 0.6857277750968933, "avg_line_length": 41.31999969482422, "blob_id": "f2927126374078ecde453d3d520342fe1e5e3185", "content_id": "4fa410bc435581079b86f43e9e2e32d41f8e2b68", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2116, "license_type": "permissive", "max_line_length": 79, "num_lines": 50, "path": "/rally_openstack/task/scenarios/neutron/loadbalancer_v2.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with 
the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.neutron import utils\n\n\n\"\"\"Scenarios for Neutron Loadbalancer v2.\"\"\"\n\n\[email protected](\"required_neutron_extensions\", extensions=[\"lbaasv2\"])\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=(\"network\"))\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronLoadbalancerV2.create_and_list_loadbalancers\",\n platform=\"openstack\")\nclass CreateAndListLoadbalancers(utils.NeutronScenario):\n\n def run(self, lb_create_args=None):\n \"\"\"Create a loadbalancer(v2) and then list loadbalancers(v2).\n\n Measure the \"neutron lbaas-loadbalancer-list\" command performance.\n The scenario creates a loadbalancer for every subnet and then lists\n loadbalancers.\n\n :param lb_create_args: dict, POST /lbaas/loadbalancers\n request options\n \"\"\"\n lb_create_args = lb_create_args or {}\n subnets = []\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet_id in subnets:\n self._create_lbaasv2_loadbalancer(subnet_id, **lb_create_args)\n self._list_lbaasv2_loadbalancers()\n" }, { "alpha_fraction": 0.5992449522018433, "alphanum_fraction": 0.599784255027771, "avg_line_length": 38.326786041259766, 
"blob_id": "e16562621ed9f570273c6278c72f63275a46b3aa", "content_id": "ba62d5bb2b2d9cb2e4e0bf8826ae2373c4a11e9d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50063, "license_type": "permissive", "max_line_length": 79, "num_lines": 1273, "path": "/rally_openstack/task/scenarios/nova/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import utils\n\nfrom rally_openstack.common.services.image import image as image_service\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.cinder import utils as cinder_utils\nfrom rally_openstack.task.scenarios.neutron import utils as neutron_utils\n\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__file__)\n\n\nclass NovaScenario(neutron_utils.NeutronBaseScenario,\n scenario.OpenStackScenario):\n \"\"\"Base class for Nova scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"nova.list_servers\")\n def _list_servers(self, detailed=True):\n \"\"\"Returns user servers list.\"\"\"\n return self.clients(\"nova\").servers.list(detailed)\n\n def _pick_random_nic(self):\n \"\"\"Choose one network from existing ones.\"\"\"\n ctxt = self.context\n 
nets = [net[\"id\"]\n for net in ctxt.get(\"tenant\", {}).get(\"networks\", [])]\n if nets:\n # NOTE(amaretskiy): Balance servers among networks.\n net_idx = self.context[\"iteration\"] % len(nets)\n return [{\"net-id\": nets[net_idx]}]\n\n def _get_network_id(self, net_name):\n networks = getattr(self, \"existed_networks\", [])\n if not networks:\n networks = self.clients(\"neutron\").list_networks()[\"networks\"]\n self.existed_networks = networks\n\n for net in networks:\n if net[\"name\"] == net_name:\n return net[\"id\"]\n raise exceptions.NotFoundException(\n message=\"Network %s not found.\" % net_name)\n\n def _boot_server(self, image, flavor,\n auto_assign_nic=False, **kwargs):\n \"\"\"Boot a server.\n\n Returns when the server is actually booted and in \"ACTIVE\" state.\n\n If multiple networks created by Network context are present, the first\n network found that isn't associated with a floating IP pool is used.\n\n :param image: image ID or instance for server creation\n :param flavor: int, flavor ID or instance for server creation\n :param auto_assign_nic: bool, whether or not to auto assign NICs\n :param kwargs: other optional parameters to initialize the server\n :returns: nova Server instance\n \"\"\"\n server_name = self.generate_random_name()\n secgroup = self.context.get(\"user\", {}).get(\"secgroup\")\n if secgroup:\n if \"security_groups\" not in kwargs:\n kwargs[\"security_groups\"] = [secgroup[\"name\"]]\n elif secgroup[\"name\"] not in kwargs[\"security_groups\"]:\n kwargs[\"security_groups\"].append(secgroup[\"name\"])\n\n if auto_assign_nic and not kwargs.get(\"nics\", False):\n nic = self._pick_random_nic()\n if nic:\n kwargs[\"nics\"] = nic\n\n if \"nics\" not in kwargs and\\\n \"tenant\" in self.context and\\\n \"networks\" in self.context[\"tenant\"]:\n kwargs[\"nics\"] = [\n {\"net-id\": self.context[\"tenant\"][\"networks\"][0][\"id\"]}]\n\n for nic in kwargs.get(\"nics\", []):\n if not nic.get(\"net-id\") and 
nic.get(\"net-name\"):\n nic[\"net-id\"] = self._get_network_id(nic[\"net-name\"])\n\n with atomic.ActionTimer(self, \"nova.boot_server\"):\n server = self.clients(\"nova\").servers.create(\n server_name, image, flavor, **kwargs)\n\n self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay)\n server = utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_boot_timeout,\n check_interval=CONF.openstack.nova_server_boot_poll_interval\n )\n return server\n\n def _do_server_reboot(self, server, reboottype):\n server.reboot(reboot_type=reboottype)\n self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_reboot_timeout,\n check_interval=CONF.openstack.nova_server_reboot_poll_interval\n )\n\n @atomic.action_timer(\"nova.soft_reboot_server\")\n def _soft_reboot_server(self, server):\n \"\"\"Reboot a server with soft reboot.\n\n A soft reboot will be issued on the given server upon which time\n this method will wait for the server to become active.\n\n :param server: The server to reboot.\n \"\"\"\n self._do_server_reboot(server, \"SOFT\")\n\n @atomic.action_timer(\"nova.show_server\")\n def _show_server(self, server):\n \"\"\"Show server details.\n\n :param server: The server to get details for.\n\n :returns: Server details\n \"\"\"\n return self.clients(\"nova\").servers.get(server)\n\n @atomic.action_timer(\"nova.get_console_output_server\")\n def _get_server_console_output(self, server, length=None):\n \"\"\"Get text of a console log output from a server.\n\n :param server: The server whose console output to retrieve\n :param length: The number of tail log lines you would like to retrieve.\n\n :returns: Text console output from server\n \"\"\"\n return self.clients(\"nova\").servers.get_console_output(server,\n 
length=length)\n\n @atomic.action_timer(\"nova.get_console_url_server\")\n def _get_console_url_server(self, server, console_type):\n \"\"\"Retrieve a console url of a server.\n\n :param server: server to get console url for\n :param console_type: type can be novnc/xvpvnc for protocol vnc;\n spice-html5 for protocol spice; rdp-html5 for\n protocol rdp; serial for protocol serial.\n webmks for protocol mks (since version 2.8).\n\n :returns: An instance of novaclient.base.DictWithMeta\n \"\"\"\n return self.clients(\"nova\").servers.get_console_url(server,\n console_type)\n\n @atomic.action_timer(\"nova.reboot_server\")\n def _reboot_server(self, server):\n \"\"\"Reboot a server with hard reboot.\n\n A reboot will be issued on the given server upon which time\n this method will wait for the server to become active.\n\n :param server: The server to reboot.\n \"\"\"\n self._do_server_reboot(server, \"HARD\")\n\n @atomic.action_timer(\"nova.rebuild_server\")\n def _rebuild_server(self, server, image, **kwargs):\n \"\"\"Rebuild a server with a new image.\n\n :param server: The server to rebuild.\n :param image: The new image to rebuild the server with.\n :param kwargs: Optional additional arguments to pass to the rebuild\n \"\"\"\n server.rebuild(image, **kwargs)\n self.sleep_between(CONF.openstack.nova_server_rebuild_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_rebuild_timeout,\n check_interval=CONF.openstack.nova_server_rebuild_poll_interval\n )\n\n @atomic.action_timer(\"nova.start_server\")\n def _start_server(self, server):\n \"\"\"Start the given server.\n\n A start will be issued for the given server upon which time\n this method will wait for it to become ACTIVE.\n\n :param server: The server to start and wait to become ACTIVE.\n \"\"\"\n server.start()\n utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n 
update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_start_timeout,\n check_interval=CONF.openstack.nova_server_start_poll_interval\n )\n\n @atomic.action_timer(\"nova.stop_server\")\n def _stop_server(self, server):\n \"\"\"Stop the given server.\n\n Issues a stop on the given server and waits for the server\n to become SHUTOFF.\n\n :param server: The server to stop.\n \"\"\"\n server.stop()\n utils.wait_for_status(\n server,\n ready_statuses=[\"SHUTOFF\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_stop_timeout,\n check_interval=CONF.openstack.nova_server_stop_poll_interval\n )\n\n @atomic.action_timer(\"nova.rescue_server\")\n def _rescue_server(self, server):\n \"\"\"Rescue the given server.\n\n Returns when the server is actually rescue and is in the \"Rescue\"\n state.\n\n :param server: Server object\n \"\"\"\n server.rescue()\n self.sleep_between(CONF.openstack.nova_server_rescue_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"RESCUE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_rescue_timeout,\n check_interval=CONF.openstack.nova_server_rescue_poll_interval\n )\n\n @atomic.action_timer(\"nova.unrescue_server\")\n def _unrescue_server(self, server):\n \"\"\"Unrescue the given server.\n\n Returns when the server is unrescue and waits to become ACTIVE\n\n :param server: Server object\n \"\"\"\n server.unrescue()\n self.sleep_between(CONF.openstack.nova_server_unrescue_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_unrescue_timeout,\n check_interval=CONF.openstack.nova_server_unrescue_poll_interval\n )\n\n @atomic.action_timer(\"nova.suspend_server\")\n def _suspend_server(self, server):\n \"\"\"Suspends the given server.\n\n Returns when the server is actually suspended and is in the \"Suspended\"\n state.\n\n :param 
server: Server object\n \"\"\"\n server.suspend()\n self.sleep_between(CONF.openstack.nova_server_suspend_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"SUSPENDED\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_suspend_timeout,\n check_interval=CONF.openstack.nova_server_suspend_poll_interval\n )\n\n @atomic.action_timer(\"nova.resume_server\")\n def _resume_server(self, server):\n \"\"\"Resumes the suspended server.\n\n Returns when the server is actually resumed and is in the \"ACTIVE\"\n state.\n\n :param server: Server object\n \"\"\"\n server.resume()\n self.sleep_between(CONF.openstack.nova_server_resume_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_resume_timeout,\n check_interval=CONF.openstack.nova_server_resume_poll_interval\n )\n\n @atomic.action_timer(\"nova.pause_server\")\n def _pause_server(self, server):\n \"\"\"Pause the live server.\n\n Returns when the server is actually paused and is in the \"PAUSED\"\n state.\n\n :param server: Server object\n \"\"\"\n server.pause()\n self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"PAUSED\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_pause_timeout,\n check_interval=CONF.openstack.nova_server_pause_poll_interval\n )\n\n @atomic.action_timer(\"nova.unpause_server\")\n def _unpause_server(self, server):\n \"\"\"Unpause the paused server.\n\n Returns when the server is actually unpaused and is in the \"ACTIVE\"\n state.\n\n :param server: Server object\n \"\"\"\n server.unpause()\n self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_unpause_timeout,\n 
check_interval=CONF.openstack.nova_server_unpause_poll_interval\n )\n\n @atomic.action_timer(\"nova.shelve_server\")\n def _shelve_server(self, server):\n \"\"\"Shelve the given server.\n\n Returns when the server is actually shelved and is in the\n \"SHELVED_OFFLOADED\" state.\n\n :param server: Server object\n \"\"\"\n server.shelve()\n self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"SHELVED_OFFLOADED\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_shelve_timeout,\n check_interval=CONF.openstack.nova_server_shelve_poll_interval\n )\n utils.wait_for_status(\n server,\n ready_statuses=[\"None\"],\n status_attr=\"OS-EXT-STS:task_state\",\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_shelve_timeout,\n check_interval=CONF.openstack.nova_server_shelve_poll_interval\n )\n\n @atomic.action_timer(\"nova.unshelve_server\")\n def _unshelve_server(self, server):\n \"\"\"Unshelve the given server.\n\n Returns when the server is unshelved and is in the \"ACTIVE\" state.\n\n :param server: Server object\n \"\"\"\n server.unshelve()\n\n self.sleep_between(CONF.openstack. 
nova_server_unshelve_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_unshelve_timeout,\n check_interval=CONF.openstack.nova_server_unshelve_poll_interval\n )\n\n def _delete_server(self, server, force=False):\n \"\"\"Delete the given server.\n\n Returns when the server is actually deleted.\n\n :param server: Server object\n :param force: If True, force_delete will be used instead of delete.\n \"\"\"\n atomic_name = (\"nova.%sdelete_server\") % (force and \"force_\" or \"\")\n with atomic.ActionTimer(self, atomic_name):\n if force:\n server.force_delete()\n else:\n server.delete()\n\n utils.wait_for_status(\n server,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_delete_timeout,\n check_interval=CONF.openstack.nova_server_delete_poll_interval\n )\n\n def _delete_servers(self, servers, force=False):\n \"\"\"Delete multiple servers.\n\n :param servers: A list of servers to delete\n :param force: If True, force_delete will be used instead of delete.\n \"\"\"\n atomic_name = (\"nova.%sdelete_servers\") % (force and \"force_\" or \"\")\n with atomic.ActionTimer(self, atomic_name):\n for server in servers:\n if force:\n server.force_delete()\n else:\n server.delete()\n\n for server in servers:\n utils.wait_for_status(\n server,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_delete_timeout,\n check_interval=(\n CONF.openstack.nova_server_delete_poll_interval)\n )\n\n @atomic.action_timer(\"nova.create_server_group\")\n def _create_server_group(self, **kwargs):\n \"\"\"Create (allocate) a server group.\n\n :param kwargs: Optional additional arguments for Server group creating\n\n :returns: Nova server group\n \"\"\"\n group_name = self.generate_random_name()\n return 
self.clients(\"nova\").server_groups.create(name=group_name,\n **kwargs)\n\n @atomic.action_timer(\"nova.get_server_group\")\n def _get_server_group(self, id):\n \"\"\"Get a specific server group.\n\n :param id: Unique ID of the server group to get\n\n :rtype: :class:`ServerGroup`\n \"\"\"\n return self.clients(\"nova\").server_groups.get(id)\n\n @atomic.action_timer(\"nova.list_server_groups\")\n def _list_server_groups(self, all_projects=False):\n \"\"\"Get a list of all server groups.\n\n :param all_projects: If True, display server groups from all\n projects(Admin only)\n\n :rtype: list of :class:`ServerGroup`.\n \"\"\"\n if all_projects:\n return self.admin_clients(\"nova\").server_groups.list(all_projects)\n else:\n return self.clients(\"nova\").server_groups.list(all_projects)\n\n @atomic.action_timer(\"nova.delete_server_group\")\n def _delete_server_group(self, group_id):\n \"\"\"Delete a specific server group.\n\n :param id: The ID of the :class:`ServerGroup` to delete\n\n :returns: An instance of novaclient.base.TupleWithMeta\n \"\"\"\n return self.clients(\"nova\").server_groups.delete(group_id)\n\n @atomic.action_timer(\"nova.delete_image\")\n def _delete_image(self, image):\n \"\"\"Delete the given image.\n\n Returns when the image is actually deleted.\n\n :param image: Image object\n \"\"\"\n LOG.warning(\"Method '_delete_image' of NovaScenario class is \"\n \"deprecated since Rally 0.10.0. 
Use GlanceUtils instead.\")\n glance = image_service.Image(self._clients,\n atomic_inst=self.atomic_actions())\n glance.delete_image(image.id)\n check_interval = CONF.openstack.nova_server_image_delete_poll_interval\n with atomic.ActionTimer(self, \"glance.wait_for_delete\"):\n utils.wait_for_status(\n image,\n ready_statuses=[\"deleted\", \"pending_delete\"],\n check_deletion=True,\n update_resource=glance.get_image,\n timeout=CONF.openstack.nova_server_image_delete_timeout,\n check_interval=check_interval\n )\n\n @atomic.action_timer(\"nova.snapshot_server\")\n def _create_image(self, server):\n \"\"\"Create an image from the given server\n\n Uses the server name to name the created image. Returns when the image\n is actually created and is in the \"Active\" state.\n\n :param server: Server object for which the image will be created\n\n :returns: Created image object\n \"\"\"\n image_uuid = self.clients(\"nova\").servers.create_image(server,\n server.name)\n glance = image_service.Image(self._clients,\n atomic_inst=self.atomic_actions())\n image = glance.get_image(image_uuid)\n check_interval = CONF.openstack.nova_server_image_create_poll_interval\n with atomic.ActionTimer(self, \"glance.wait_for_image\"):\n image = utils.wait_for_status(\n image,\n ready_statuses=[\"ACTIVE\"],\n update_resource=glance.get_image,\n timeout=CONF.openstack.nova_server_image_create_timeout,\n check_interval=check_interval\n )\n with atomic.ActionTimer(self, \"nova.wait_for_server\"):\n utils.wait_for_status(\n server,\n ready_statuses=[\"None\"],\n status_attr=\"OS-EXT-STS:task_state\",\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_image_create_timeout,\n check_interval=check_interval\n )\n return image\n\n @atomic.action_timer(\"nova.get_keypair\")\n def _get_keypair(self, keypair):\n \"\"\"Get a keypair.\n\n :param keypair: The ID of the keypair to get.\n :rtype: :class:`Keypair`\n \"\"\"\n return self.clients(\"nova\").keypairs.get(keypair)\n\n 
@atomic.action_timer(\"nova.create_keypair\")\n def _create_keypair(self, **kwargs):\n \"\"\"Create a keypair\n\n :returns: Created keypair name\n \"\"\"\n keypair_name = self.generate_random_name()\n keypair = self.clients(\"nova\").keypairs.create(keypair_name, **kwargs)\n return keypair.name\n\n @atomic.action_timer(\"nova.list_keypairs\")\n def _list_keypairs(self):\n \"\"\"Return user keypairs list.\"\"\"\n return self.clients(\"nova\").keypairs.list()\n\n @atomic.action_timer(\"nova.delete_keypair\")\n def _delete_keypair(self, keypair_name):\n \"\"\"Delete keypair\n\n :param keypair_name: The keypair name to delete.\n \"\"\"\n self.clients(\"nova\").keypairs.delete(keypair_name)\n\n def _boot_servers(self, image_id, flavor_id, requests, instances_amount=1,\n auto_assign_nic=False, **kwargs):\n \"\"\"Boot multiple servers.\n\n Returns when all the servers are actually booted and are in the\n \"Active\" state.\n\n :param image_id: ID of the image to be used for server creation\n :param flavor_id: ID of the flavor to be used for server creation\n :param requests: Number of booting requests to perform\n :param instances_amount: Number of instances to boot per each request\n :param auto_assign_nic: bool, whether or not to auto assign NICs\n :param kwargs: other optional parameters to initialize the servers\n\n :returns: List of created server objects\n \"\"\"\n if auto_assign_nic and not kwargs.get(\"nics\", False):\n nic = self._pick_random_nic()\n if nic:\n kwargs[\"nics\"] = nic\n\n for nic in kwargs.get(\"nics\", []):\n if not nic.get(\"net-id\") and nic.get(\"net-name\"):\n nic[\"net-id\"] = self._get_network_id(nic[\"net-name\"])\n\n name_prefix = self.generate_random_name()\n with atomic.ActionTimer(self, \"nova.boot_servers\"):\n for i in range(requests):\n self.clients(\"nova\").servers.create(\n \"%s_%d\" % (name_prefix, i),\n image_id, flavor_id,\n min_count=instances_amount,\n max_count=instances_amount,\n **kwargs)\n # NOTE(msdubov): Nova python 
client returns only one server even\n # when min_count > 1, so we have to rediscover\n # all the created servers manually.\n servers = [s for s in self.clients(\"nova\").servers.list()\n if s.name.startswith(name_prefix)]\n self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay)\n servers = [utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=utils.\n get_from_manager(),\n timeout=CONF.openstack.nova_server_boot_timeout,\n check_interval=CONF.openstack.nova_server_boot_poll_interval\n ) for server in servers]\n return servers\n\n @atomic.action_timer(\"nova.associate_floating_ip\")\n def _associate_floating_ip(self, server, address, fixed_address=None):\n \"\"\"Add floating IP to an instance\n\n :param server: The :class:`Server` to add an IP to.\n :param address: The dict-like representation of FloatingIP to add\n to the instance\n :param fixed_address: The fixedIP address the FloatingIP is to be\n associated with (optional)\n \"\"\"\n if isinstance(address, dict):\n floating_ip = self.neutron.associate_floatingip(\n device_id=server.id, fixed_ip_address=fixed_address,\n floatingip_id=address[\"id\"])\n else:\n floating_ip = self.neutron.associate_floatingip(\n device_id=server.id, fixed_ip_address=fixed_address,\n floating_ip_address=address)\n\n utils.wait_for(server,\n is_ready=self.check_ip_address(\n floating_ip[\"floating_ip_address\"]),\n update_resource=utils.get_from_manager())\n # Update server data\n server.addresses = server.manager.get(server.id).addresses\n\n @atomic.action_timer(\"nova.dissociate_floating_ip\")\n def _dissociate_floating_ip(self, server, address):\n \"\"\"Remove floating IP from an instance\n\n :param server: The :class:`Server` to add an IP to.\n :param address: The dict-like representation of FloatingIP to remove\n \"\"\"\n if isinstance(address, dict):\n floating_ip = self.neutron.dissociate_floatingip(\n floatingip_id=address[\"id\"]\n )\n else:\n floating_ip = 
self.neutron.dissociate_floatingip(\n floating_ip_address=address\n )\n\n utils.wait_for(\n server,\n is_ready=self.check_ip_address(\n floating_ip[\"floating_ip_address\"], must_exist=False),\n update_resource=utils.get_from_manager()\n )\n # Update server data\n server.addresses = server.manager.get(server.id).addresses\n\n @staticmethod\n def check_ip_address(address, must_exist=True):\n ip_to_check = getattr(address, \"ip\", address)\n\n def _check_addr(resource):\n for network, addr_list in resource.addresses.items():\n for addr in addr_list:\n if ip_to_check == addr[\"addr\"]:\n return must_exist\n return not must_exist\n return _check_addr\n\n @atomic.action_timer(\"nova.resize\")\n def _resize(self, server, flavor):\n server.resize(flavor)\n utils.wait_for_status(\n server,\n ready_statuses=[\"VERIFY_RESIZE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_resize_timeout,\n check_interval=CONF.openstack.nova_server_resize_poll_interval\n )\n\n @atomic.action_timer(\"nova.resize_confirm\")\n def _resize_confirm(self, server, status=\"ACTIVE\"):\n server.confirm_resize()\n utils.wait_for_status(\n server,\n ready_statuses=[status],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_resize_confirm_timeout,\n check_interval=(\n CONF.openstack.nova_server_resize_confirm_poll_interval)\n )\n\n @atomic.action_timer(\"nova.resize_revert\")\n def _resize_revert(self, server, status=\"ACTIVE\"):\n server.revert_resize()\n utils.wait_for_status(\n server,\n ready_statuses=[status],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_resize_revert_timeout,\n check_interval=(\n CONF.openstack.nova_server_resize_revert_poll_interval)\n )\n\n def _update_volume_resource(self, resource):\n cinder_service = cinder_utils.CinderBasic(self.context)\n return cinder_service.cinder.get_volume(resource.id)\n\n @atomic.action_timer(\"nova.attach_volume\")\n def _attach_volume(self, server, 
volume, device=None):\n server_id = server.id\n volume_id = volume.id\n attachment = self.clients(\"nova\").volumes.create_server_volume(\n server_id, volume_id, device)\n utils.wait_for_status(\n volume,\n ready_statuses=[\"in-use\"],\n update_resource=self._update_volume_resource,\n timeout=CONF.openstack.nova_server_resize_revert_timeout,\n check_interval=(\n CONF.openstack.nova_server_resize_revert_poll_interval)\n )\n return attachment\n\n @atomic.action_timer(\"nova.list_attachments\")\n def _list_attachments(self, server_id):\n \"\"\"Get a list of all the attached volumes for the given server ID.\n\n :param server_id: The ID of the server\n :rtype: list of :class:`Volume`\n \"\"\"\n return self.clients(\"nova\").volumes.get_server_volumes(server_id)\n\n @atomic.action_timer(\"nova.detach_volume\")\n def _detach_volume(self, server, volume, attachment=None):\n \"\"\"Detach volume from the server.\n\n :param server: A server object to detach volume from.\n :param volume: A volume object to detach from the server.\n :param attachment: DEPRECATED\n \"\"\"\n if attachment:\n LOG.warning(\"An argument `attachment` of `_detach_volume` is \"\n \"deprecated in favor of `volume` argument since \"\n \"Rally 0.10.0\")\n\n server_id = server.id\n\n self.clients(\"nova\").volumes.delete_server_volume(server_id,\n volume.id)\n utils.wait_for_status(\n volume,\n ready_statuses=[\"available\"],\n update_resource=self._update_volume_resource,\n timeout=CONF.openstack.nova_detach_volume_timeout,\n check_interval=CONF.openstack.nova_detach_volume_poll_interval\n )\n\n @atomic.action_timer(\"nova.live_migrate\")\n def _live_migrate(self, server, block_migration=False,\n disk_over_commit=False, skip_compute_nodes_check=False,\n skip_host_check=False):\n \"\"\"Run live migration of the given server.\n\n :param server: Server object\n :param block_migration: Specifies the migration type\n :param disk_over_commit: Specifies whether to overcommit migrated\n instance or not\n :param 
skip_compute_nodes_check: Specifies whether to verify the number\n of compute nodes\n :param skip_host_check: Specifies whether to verify the targeted host\n availability\n \"\"\"\n if not skip_compute_nodes_check:\n compute_nodes = len(self._list_hypervisors())\n if compute_nodes < 2:\n raise exceptions.RallyException(\"Less than 2 compute nodes,\"\n \" skipping Live Migration\")\n\n server_admin = self.admin_clients(\"nova\").servers.get(server.id)\n host_pre_migrate = getattr(server_admin, \"OS-EXT-SRV-ATTR:host\")\n server_admin.live_migrate(block_migration=block_migration,\n disk_over_commit=disk_over_commit)\n utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_live_migrate_timeout,\n check_interval=(\n CONF.openstack.nova_server_live_migrate_poll_interval)\n )\n if not skip_host_check:\n server_admin = self.admin_clients(\"nova\").servers.get(server.id)\n host_after_migrate = getattr(server_admin, \"OS-EXT-SRV-ATTR:host\")\n if host_pre_migrate == host_after_migrate:\n raise exceptions.RallyException(\n \"Live Migration failed: Migration complete \"\n \"but instance did not change host: %s\" % host_pre_migrate)\n\n @atomic.action_timer(\"nova.migrate\")\n def _migrate(self, server, skip_compute_nodes_check=False,\n skip_host_check=False):\n \"\"\"Run migration of the given server.\n\n :param server: Server object\n :param skip_compute_nodes_check: Specifies whether to verify the number\n of compute nodes\n :param skip_host_check: Specifies whether to verify the targeted host\n availability\n \"\"\"\n if not skip_compute_nodes_check:\n compute_nodes = len(self._list_hypervisors())\n if compute_nodes < 2:\n raise exceptions.RallyException(\"Less than 2 compute nodes,\"\n \" skipping Migration\")\n\n server_admin = self.admin_clients(\"nova\").servers.get(server.id)\n host_pre_migrate = getattr(server_admin, \"OS-EXT-SRV-ATTR:host\")\n server_admin.migrate()\n 
utils.wait_for_status(\n server,\n ready_statuses=[\"VERIFY_RESIZE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_migrate_timeout,\n check_interval=(\n CONF.openstack.nova_server_migrate_poll_interval)\n )\n if not skip_host_check:\n server_admin = self.admin_clients(\"nova\").servers.get(server.id)\n host_after_migrate = getattr(server_admin, \"OS-EXT-SRV-ATTR:host\")\n if host_pre_migrate == host_after_migrate:\n raise exceptions.RallyException(\n \"Migration failed: Migration complete but instance\"\n \" did not change host: %s\" % host_pre_migrate)\n\n @atomic.action_timer(\"nova.add_server_secgroups\")\n def _add_server_secgroups(self, server, security_group,\n atomic_action=False):\n \"\"\"add security group to a server.\n\n :param server: Server object\n :returns: An instance of novaclient.base.DictWithMeta\n \"\"\"\n return self.clients(\"nova\").servers.add_security_group(server,\n security_group)\n\n @atomic.action_timer(\"nova.list_hypervisors\")\n def _list_hypervisors(self, detailed=True):\n \"\"\"List hypervisors.\"\"\"\n return self.admin_clients(\"nova\").hypervisors.list(detailed)\n\n @atomic.action_timer(\"nova.statistics_hypervisors\")\n def _statistics_hypervisors(self):\n \"\"\"Get hypervisor statistics over all compute nodes.\n\n :returns: Hypervisor statistics\n \"\"\"\n return self.admin_clients(\"nova\").hypervisors.statistics()\n\n @atomic.action_timer(\"nova.get_hypervisor\")\n def _get_hypervisor(self, hypervisor):\n \"\"\"Get a specific hypervisor.\n\n :param hypervisor: Hypervisor to get.\n :returns: Hypervisor object\n \"\"\"\n return self.admin_clients(\"nova\").hypervisors.get(hypervisor)\n\n @atomic.action_timer(\"nova.search_hypervisors\")\n def _search_hypervisors(self, hypervisor_match, servers=False):\n \"\"\"List all servers belonging to specific hypervisor.\n\n :param hypervisor_match: Hypervisor's host name.\n :param servers: If True, server information is also retrieved.\n :returns: 
Hypervisor object\n \"\"\"\n return self.admin_clients(\"nova\").hypervisors.search(hypervisor_match,\n servers=servers)\n\n @atomic.action_timer(\"nova.lock_server\")\n def _lock_server(self, server):\n \"\"\"Lock the given server.\n\n :param server: Server to lock\n \"\"\"\n server.lock()\n\n @atomic.action_timer(\"nova.uptime_hypervisor\")\n def _uptime_hypervisor(self, hypervisor):\n \"\"\"Display the uptime of the specified hypervisor.\n\n :param hypervisor: Hypervisor to get.\n :returns: Hypervisor object\n \"\"\"\n return self.admin_clients(\"nova\").hypervisors.uptime(hypervisor)\n\n @atomic.action_timer(\"nova.unlock_server\")\n def _unlock_server(self, server):\n \"\"\"Unlock the given server.\n\n :param server: Server to unlock\n \"\"\"\n server.unlock()\n\n @atomic.action_timer(\"nova.delete_network\")\n def _delete_network(self, net_id):\n \"\"\"Delete nova network.\n\n :param net_id: The nova-network ID to delete\n \"\"\"\n return self.admin_clients(\"nova\").networks.delete(net_id)\n\n @atomic.action_timer(\"nova.list_flavors\")\n def _list_flavors(self, detailed=True, **kwargs):\n \"\"\"List all flavors.\n\n :param kwargs: Optional additional arguments for flavor listing\n :param detailed: True if the image listing\n should contain detailed information\n :returns: flavors list\n \"\"\"\n return self.clients(\"nova\").flavors.list(detailed, **kwargs)\n\n @atomic.action_timer(\"nova.set_flavor_keys\")\n def _set_flavor_keys(self, flavor, extra_specs):\n \"\"\"set flavor keys\n\n :param flavor: flavor to set keys\n :param extra_specs: additional arguments for flavor set keys\n \"\"\"\n return flavor.set_keys(extra_specs)\n\n @atomic.action_timer(\"nova.list_agents\")\n def _list_agents(self, hypervisor=None):\n \"\"\"List all nova-agent builds.\n\n :param hypervisor: The nova-hypervisor ID on which we need to list all\n the builds\n :returns: Nova-agent build list\n \"\"\"\n return self.admin_clients(\"nova\").agents.list(hypervisor)\n\n 
@atomic.action_timer(\"nova.list_aggregates\")\n def _list_aggregates(self):\n \"\"\"Returns list of all os-aggregates.\"\"\"\n return self.admin_clients(\"nova\").aggregates.list()\n\n @atomic.action_timer(\"nova.list_availability_zones\")\n def _list_availability_zones(self, detailed=True):\n \"\"\"List availability-zones.\n\n :param detailed: True if the availability-zone listing should contain\n detailed information\n :returns: Availability-zone list\n \"\"\"\n return self.admin_clients(\"nova\").availability_zones.list(detailed)\n\n @atomic.action_timer(\"nova.list_interfaces\")\n def _list_interfaces(self, server):\n \"\"\"List interfaces attached to a server.\n\n :param server:Instance or ID of server.\n :returns: Server interface list\n \"\"\"\n return self.clients(\"nova\").servers.interface_list(server)\n\n @atomic.action_timer(\"nova.list_services\")\n def _list_services(self, host=None, binary=None):\n \"\"\"return all nova service details\n\n :param host: List all nova services on host\n :param binary: List all nova services matching given binary\n \"\"\"\n return self.admin_clients(\"nova\").services.list(host, binary)\n\n @atomic.action_timer(\"nova.create_flavor\")\n def _create_flavor(self, ram, vcpus, disk, **kwargs):\n \"\"\"Create a flavor\n\n :param ram: Memory in MB for the flavor\n :param vcpus: Number of VCPUs for the flavor\n :param disk: Size of local disk in GB\n :param kwargs: Optional additional arguments for flavor creation\n \"\"\"\n name = self.generate_random_name()\n return self.admin_clients(\"nova\").flavors.create(name, ram, vcpus,\n disk, **kwargs)\n\n @atomic.action_timer(\"nova.delete_flavor\")\n def _delete_flavor(self, flavor):\n \"\"\"Delete a flavor\n\n :param flavor: The ID of the :class:`Flavor`\n :returns: An instance of novaclient.base.TupleWithMeta\n \"\"\"\n return self.admin_clients(\"nova\").flavors.delete(flavor)\n\n @atomic.action_timer(\"nova.list_flavor_access\")\n def _list_flavor_access(self, flavor):\n 
\"\"\"List access-rules for non-public flavor.\n\n :param flavor: List access rules for flavor instance or flavor ID\n \"\"\"\n return self.admin_clients(\"nova\").flavor_access.list(flavor=flavor)\n\n @atomic.action_timer(\"nova.add_tenant_access\")\n def _add_tenant_access(self, flavor, tenant):\n \"\"\"Add a tenant to the given flavor access list.\n\n :param flavor: name or id of the object flavor\n :param tenant: id of the object tenant\n :returns: access rules for flavor instance or flavor ID\n \"\"\"\n return self.admin_clients(\"nova\").flavor_access.add_tenant_access(\n flavor, tenant)\n\n @atomic.action_timer(\"nova.update_server\")\n def _update_server(self, server, description=None):\n \"\"\"update the server's name and description.\n\n :param server: Server object\n :param description: update the server description\n :returns: The updated server\n \"\"\"\n new_name = self.generate_random_name()\n if description:\n return server.update(name=new_name,\n description=description)\n else:\n return server.update(name=new_name)\n\n @atomic.action_timer(\"nova.get_flavor\")\n def _get_flavor(self, flavor_id):\n \"\"\"Show a flavor\n\n :param flavor_id: The flavor ID to get\n \"\"\"\n return self.admin_clients(\"nova\").flavors.get(flavor_id)\n\n @atomic.action_timer(\"nova.create_aggregate\")\n def _create_aggregate(self, availability_zone):\n \"\"\"Create a new aggregate.\n\n :param availability_zone: The availability zone of the aggregate\n :returns: The created aggregate\n \"\"\"\n aggregate_name = self.generate_random_name()\n return self.admin_clients(\"nova\").aggregates.create(aggregate_name,\n availability_zone)\n\n @atomic.action_timer(\"nova.get_aggregate_details\")\n def _get_aggregate_details(self, aggregate):\n \"\"\"Get details of the specified aggregate.\n\n :param aggregate: The aggregate to get details\n :returns: Detailed information of aggregate\n \"\"\"\n return self.admin_clients(\"nova\").aggregates.get_details(aggregate)\n\n 
@atomic.action_timer(\"nova.delete_aggregate\")\n def _delete_aggregate(self, aggregate):\n \"\"\"Delete the specified aggregate.\n\n :param aggregate: The aggregate to delete\n :returns: An instance of novaclient.base.TupleWithMeta\n \"\"\"\n return self.admin_clients(\"nova\").aggregates.delete(aggregate)\n\n def _bind_actions(self):\n actions = [\"hard_reboot\", \"soft_reboot\", \"stop_start\",\n \"rescue_unrescue\", \"pause_unpause\", \"suspend_resume\",\n \"lock_unlock\", \"shelve_unshelve\"]\n action_builder = utils.ActionBuilder(actions)\n action_builder.bind_action(\"hard_reboot\", self._reboot_server)\n action_builder.bind_action(\"soft_reboot\", self._soft_reboot_server)\n action_builder.bind_action(\"stop_start\",\n self._stop_and_start_server)\n action_builder.bind_action(\"rescue_unrescue\",\n self._rescue_and_unrescue_server)\n action_builder.bind_action(\"pause_unpause\",\n self._pause_and_unpause_server)\n action_builder.bind_action(\"suspend_resume\",\n self._suspend_and_resume_server)\n action_builder.bind_action(\"lock_unlock\",\n self._lock_and_unlock_server)\n action_builder.bind_action(\"shelve_unshelve\",\n self._shelve_and_unshelve_server)\n\n return action_builder\n\n @atomic.action_timer(\"nova.stop_and_start_server\")\n def _stop_and_start_server(self, server):\n \"\"\"Stop and then start the given server.\n\n A stop will be issued on the given server upon which time\n this method will wait for the server to become 'SHUTOFF'.\n Once the server is SHUTOFF a start will be issued and this\n method will wait for the server to become 'ACTIVE' again.\n\n :param server: The server to stop and then start.\n\n \"\"\"\n self._stop_server(server)\n self._start_server(server)\n\n @atomic.action_timer(\"nova.rescue_and_unrescue_server\")\n def _rescue_and_unrescue_server(self, server):\n \"\"\"Rescue and then unrescue the given server.\n\n A rescue will be issued on the given server upon which time\n this method will wait for the server to become 
'RESCUE'.\n Once the server is RESCUE an unrescue will be issued and\n this method will wait for the server to become 'ACTIVE'\n again.\n\n :param server: The server to rescue and then unrescue.\n\n \"\"\"\n self._rescue_server(server)\n self._unrescue_server(server)\n\n @atomic.action_timer(\"nova.pause_and_unpause_server\")\n def _pause_and_unpause_server(self, server):\n \"\"\"Pause and then unpause the given server.\n\n A pause will be issued on the given server upon which time\n this method will wait for the server to become 'PAUSED'.\n Once the server is PAUSED an unpause will be issued and\n this method will wait for the server to become 'ACTIVE'\n again.\n\n :param server: The server to pause and then unpause.\n\n \"\"\"\n self._pause_server(server)\n self._unpause_server(server)\n\n @atomic.action_timer(\"nova.suspend_and_resume_server\")\n def _suspend_and_resume_server(self, server):\n \"\"\"Suspend and then resume the given server.\n\n A suspend will be issued on the given server upon which time\n this method will wait for the server to become 'SUSPENDED'.\n Once the server is SUSPENDED an resume will be issued and\n this method will wait for the server to become 'ACTIVE'\n again.\n\n :param server: The server to suspend and then resume.\n\n \"\"\"\n self._suspend_server(server)\n self._resume_server(server)\n\n @atomic.action_timer(\"nova.lock_and_unlock_server\")\n def _lock_and_unlock_server(self, server):\n \"\"\"Lock and then unlock the given server.\n\n A lock will be issued on the given server upon which time\n this method will wait for the server to become locked'.\n Once the server is locked an unlock will be issued.\n\n :param server: The server to lock and then unlock.\n\n \"\"\"\n self._lock_server(server)\n self._unlock_server(server)\n\n @atomic.action_timer(\"nova.shelve_and_unshelve_server\")\n def _shelve_and_unshelve_server(self, server):\n \"\"\"Shelve and then unshelve the given server.\n\n A shelve will be issued on the given server 
upon which time\n this method will wait for the server to become 'SHELVED'.\n Once the server is SHELVED an unshelve will be issued and\n this method will wait for the server to become 'ACTIVE'\n again.\n\n :param server: The server to shelve and then unshelve.\n\n \"\"\"\n self._shelve_server(server)\n self._unshelve_server(server)\n\n @atomic.action_timer(\"nova.update_aggregate\")\n def _update_aggregate(self, aggregate):\n \"\"\"Update the aggregate's name and availability_zone.\n\n :param aggregate: The aggregate to update\n :return: The updated aggregate\n \"\"\"\n aggregate_name = self.generate_random_name()\n availability_zone = self.generate_random_name()\n values = {\"name\": aggregate_name,\n \"availability_zone\": availability_zone}\n return self.admin_clients(\"nova\").aggregates.update(aggregate,\n values)\n\n @atomic.action_timer(\"nova.aggregate_add_host\")\n def _aggregate_add_host(self, aggregate, host):\n \"\"\"Add a host into the Host Aggregate.\n\n :param aggregate: The aggregate add host to\n :param host: The host add to aggregate\n :returns: The aggregate that has been added host to\n \"\"\"\n return self.admin_clients(\"nova\").aggregates.add_host(aggregate,\n host)\n\n @atomic.action_timer(\"nova.aggregate_remove_host\")\n def _aggregate_remove_host(self, aggregate, host):\n \"\"\"Remove a host from an aggregate.\n\n :param aggregate: The aggregate remove host from\n :param host: The host to remove\n :returns: The aggregate that has been removed host from\n \"\"\"\n return self.admin_clients(\"nova\").aggregates.remove_host(aggregate,\n host)\n\n @atomic.action_timer(\"nova.aggregate_set_metadata\")\n def _aggregate_set_metadata(self, aggregate, metadata):\n \"\"\"Set metadata to an aggregate\n\n :param aggregate: The aggregate to set metadata to\n :param metadata: The metadata to be set\n :return: The aggregate that has the set metadata\n \"\"\"\n return self.admin_clients(\"nova\").aggregates.set_metadata(aggregate,\n metadata)\n\n 
@atomic.action_timer(\"nova.attach_interface\")\n def _attach_interface(self, server, port_id=None,\n net_id=None, fixed_ip=None):\n \"\"\"Attach a network_interface to an instance.\n\n :param server: The :class:`Server` (or its ID) to attach to.\n :param port_id: The port to attach.\n :param network_id: the Network to attach\n :param fixed_ip: the Fix_ip to attach\n :returns the server that has attach interface\n \"\"\"\n return self.clients(\"nova\").servers.interface_attach(server,\n port_id, net_id,\n fixed_ip)\n" }, { "alpha_fraction": 0.3487069010734558, "alphanum_fraction": 0.34913793206214905, "avg_line_length": 45.400001525878906, "blob_id": "60ad9ee0eb1c366aaf1acc88fd398612a0a788ad", "content_id": "8382eda610d3499c3997a0918e1af118d15ee98f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2320, "license_type": "permissive", "max_line_length": 79, "num_lines": 50, "path": "/tasks/openstack/README.rst", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "============================\nOpenStack Certification Task\n============================\n\nHow To Validate & Run Task\n--------------------------\n\nTo validate task with your own parameters run:\n\n.. code-block:: console\n\n $ rally task validate task.yaml --task-args-file task_arguments.yaml\n\n\nTo start task with your own parameters run:\n\n.. 
code-block:: console\n\n $ rally task start task.yaml --task-args-file task_arguments.yaml\n\n\nTask Arguments\n--------------\n\nFile task_arguments.yaml contains all task options:\n\n+------------------------+----------------------------------------------------+\n| Name | Description |\n+========================+====================================================+\n| service_list | List of services which should be tested |\n+------------------------+----------------------------------------------------+\n| smoke | Dry run without load from 1 user |\n+------------------------+----------------------------------------------------+\n| use_existing_users | In case of testing cloud with r/o Keystone e.g. AD |\n+------------------------+----------------------------------------------------+\n| image_name | Images name that exist in cloud |\n+------------------------+----------------------------------------------------+\n| flavor_name | Flavor name that exist in cloud |\n+------------------------+----------------------------------------------------+\n| glance_image_location | URL of image that is used to test Glance upload |\n+------------------------+----------------------------------------------------+\n| users_amount | Expected amount of users |\n+------------------------+----------------------------------------------------+\n| tenants_amount | Expected amount of tenants |\n+------------------------+----------------------------------------------------+\n| controllers_amount | Amount of OpenStack API nodes (controllers) |\n+------------------------+----------------------------------------------------+\n\nAll options have default values, hoverer user should change them to reflect\nconfiguration and size of tested OpenStack cloud.\n" }, { "alpha_fraction": 0.5512894988059998, "alphanum_fraction": 0.5654450058937073, "avg_line_length": 31.639240264892578, "blob_id": "d3f991e38e6a9e14d16d9ae43b19c099b1bfae10", "content_id": "02b898d901a1ec55c00cd94a7f92c698a920eaac", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5157, "license_type": "permissive", "max_line_length": 78, "num_lines": 158, "path": "/rally_openstack/task/contexts/network/allow_ssh.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.common import validation\n\nfrom rally_openstack.common.services.network import neutron\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\n\n\nLOG = logging.getLogger(__name__)\n\n\n# This method is simplified version to what neutron has\ndef _rule_to_key(rule):\n def _normalize_rule_value(key, value):\n # This string is used as a placeholder for str(None), but shorter.\n none_char = \"+\"\n\n default = {\n \"port_range_min\": \"1\",\n \"port_range_max\": \"65535\"\n }\n\n if key == \"remote_ip_prefix\":\n all_address = [\"0.0.0.0/0\", \"::/0\", None]\n if value in all_address:\n return none_char\n elif value is None:\n return default.get(key, none_char)\n return str(value)\n\n # NOTE(andreykurilin): there are more actual comparison keys, but this set\n # should be enough for us.\n comparison_keys = [\n \"ethertype\",\n \"direction\",\n \"port_range_max\",\n \"port_range_min\",\n \"protocol\",\n \"remote_ip_prefix\"\n ]\n 
return \"_\".join([_normalize_rule_value(x, rule.get(x))\n for x in comparison_keys])\n\n\n_RULES_TO_ADD = [\n {\n \"ethertype\": \"IPv4\",\n \"protocol\": \"tcp\",\n \"port_range_max\": 65535,\n \"port_range_min\": 1,\n \"remote_ip_prefix\": \"0.0.0.0/0\",\n \"direction\": \"ingress\"\n },\n {\n \"ethertype\": \"IPv6\",\n \"protocol\": \"tcp\",\n \"port_range_max\": 65535,\n \"port_range_min\": 1,\n \"remote_ip_prefix\": \"::/0\",\n \"direction\": \"ingress\"\n },\n {\n \"ethertype\": \"IPv4\",\n \"protocol\": \"udp\",\n \"port_range_max\": 65535,\n \"port_range_min\": 1,\n \"remote_ip_prefix\": \"0.0.0.0/0\",\n \"direction\": \"ingress\"\n },\n {\n \"ethertype\": \"IPv6\",\n \"protocol\": \"udp\",\n \"port_range_max\": 65535,\n \"port_range_min\": 1,\n \"remote_ip_prefix\": \"::/0\",\n \"direction\": \"ingress\"\n },\n {\n \"ethertype\": \"IPv4\",\n \"protocol\": \"icmp\",\n \"remote_ip_prefix\": \"0.0.0.0/0\",\n \"direction\": \"ingress\"\n },\n {\n \"ethertype\": \"IPv6\",\n \"protocol\": \"ipv6-icmp\",\n \"remote_ip_prefix\": \"::/0\",\n \"direction\": \"ingress\"\n }\n]\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"allow_ssh\", platform=\"openstack\", order=320)\nclass AllowSSH(context.OpenStackContext):\n \"\"\"Sets up security groups for all users to access VM via SSH.\"\"\"\n\n def setup(self):\n client = neutron.NeutronService(\n clients=self.context[\"users\"][0][\"credential\"].clients(),\n name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions()\n )\n\n if not client.supports_extension(\"security-group\", silent=True):\n LOG.info(\"Security group context is disabled.\")\n return\n\n secgroup_name = self.generate_random_name()\n secgroups_per_tenant = {}\n for user, tenant_id in self._iterate_per_tenants():\n client = neutron.NeutronService(\n clients=user[\"credential\"].clients(),\n name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions()\n )\n 
secgroup = client.create_security_group(\n name=secgroup_name,\n description=\"Allow ssh access to VMs created by Rally\")\n secgroups_per_tenant[tenant_id] = secgroup\n\n existing_rules = set(\n _rule_to_key(rule)\n for rule in secgroup.get(\"security_group_rules\", []))\n for new_rule in _RULES_TO_ADD:\n if _rule_to_key(new_rule) not in existing_rules:\n secgroup.setdefault(\"security_group_rules\", [])\n secgroup[\"security_group_rules\"].append(\n client.create_security_group_rule(\n security_group_id=secgroup[\"id\"], **new_rule)\n )\n\n for user in self.context[\"users\"]:\n user[\"secgroup\"] = secgroups_per_tenant[user[\"tenant_id\"]]\n\n def cleanup(self):\n resource_manager.cleanup(\n names=[\"neutron.security_group\"],\n admin=self.context.get(\"admin\"),\n users=self.context[\"users\"],\n task_id=self.get_owner_id(),\n superclass=self.__class__\n )\n" }, { "alpha_fraction": 0.4556109309196472, "alphanum_fraction": 0.47568368911743164, "avg_line_length": 36.8724479675293, "blob_id": "537023fcd287757b3949116ee8b1b3af566ff346", "content_id": "113a14c8f39c90e68b339beb28da1b268494f04d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7423, "license_type": "permissive", "max_line_length": 79, "num_lines": 196, "path": "/tests/unit/task/contexts/swift/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Cisco Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task import context\nfrom rally_openstack.task.contexts.swift import utils\nfrom tests.unit import test\n\n\nclass SwiftContext(utils.SwiftObjectMixin, context.OpenStackContext):\n def __init__(self, context):\n self.context = context\n\n def setup(self):\n pass\n\n def cleanup(self):\n pass\n\n\nclass SwiftObjectMixinTestCase(test.TestCase):\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test__create_containers(self, mock_clients):\n tenants = 2\n containers_per_tenant = 2\n context = test.get_test_context()\n c = [mock.MagicMock(), mock.MagicMock()]\n context.update({\n \"tenants\": {\n \"1001\": {\"name\": \"t1_name\"},\n \"1002\": {\"name\": \"t2_name\"}\n },\n \"users\": [\n {\"id\": \"u1\", \"tenant_id\": \"1001\", \"credential\": c[0]},\n {\"id\": \"u2\", \"tenant_id\": \"1002\", \"credential\": c[1]}\n ]\n })\n\n mixin = SwiftContext(context)\n containers = mixin._create_containers(containers_per_tenant, 15)\n\n self.assertEqual(tenants * containers_per_tenant, len(containers))\n for index, container in enumerate(sorted(containers)):\n offset = int(index / containers_per_tenant) + 1\n self.assertEqual(str(1000 + offset), container[0])\n\n for index, tenant_id in enumerate(sorted(context[\"tenants\"]), start=1):\n containers = context[\"tenants\"][tenant_id][\"containers\"]\n self.assertEqual(containers_per_tenant, len(containers))\n for container in containers:\n self.assertEqual(\"u%d\" % index, container[\"user\"][\"id\"])\n self.assertEqual(c[index - 1],\n container[\"user\"][\"credential\"])\n self.assertEqual(0, len(container[\"objects\"]))\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test__create_objects(self, mock_clients):\n tenants = 2\n containers_per_tenant = 1\n objects_per_container = 5\n context = test.get_test_context()\n context.update({\n 
\"tenants\": {\n \"1001\": {\n \"name\": \"t1_name\",\n \"containers\": [\n {\"user\": {\n \"id\": \"u1\", \"tenant_id\": \"1001\",\n \"credential\": mock.MagicMock()},\n \"container\": \"c1\",\n \"objects\": []}\n ]\n },\n \"1002\": {\n \"name\": \"t2_name\",\n \"containers\": [\n {\"user\": {\n \"id\": \"u2\", \"tenant_id\": \"1002\",\n \"credential\": mock.MagicMock()},\n \"container\": \"c2\",\n \"objects\": []}\n ]\n }\n }\n })\n\n mixin = SwiftContext(context)\n objects_list = mixin._create_objects(objects_per_container, 1024, 25)\n\n self.assertEqual(\n tenants * containers_per_tenant * objects_per_container,\n len(objects_list))\n chunk = containers_per_tenant * objects_per_container\n for index, obj in enumerate(sorted(objects_list)):\n offset = int(index / chunk) + 1\n self.assertEqual(str(1000 + offset), obj[0])\n self.assertEqual(\"c%d\" % offset, obj[1])\n\n for tenant_id in context[\"tenants\"]:\n for container in context[\"tenants\"][tenant_id][\"containers\"]:\n self.assertEqual(objects_per_container,\n len(container[\"objects\"]))\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test__delete_containers(self, mock_clients):\n context = test.get_test_context()\n context.update({\n \"tenants\": {\n \"1001\": {\n \"name\": \"t1_name\",\n \"containers\": [\n {\"user\": {\n \"id\": \"u1\", \"tenant_id\": \"1001\",\n \"credential\": mock.MagicMock()},\n \"container\": \"c1\",\n \"objects\": []}\n ]\n },\n \"1002\": {\n \"name\": \"t2_name\",\n \"containers\": [\n {\"user\": {\n \"id\": \"u2\", \"tenant_id\": \"1002\",\n \"credential\": mock.MagicMock()},\n \"container\": \"c2\",\n \"objects\": []}\n ]\n }\n }\n })\n\n SwiftContext(context)._delete_containers(1)\n\n mock_swift = mock_clients.return_value.swift.return_value\n expected_containers = [\"c1\", \"c2\"]\n mock_swift.delete_container.assert_has_calls(\n [mock.call(con) for con in expected_containers], any_order=True)\n\n for tenant_id in context[\"tenants\"]:\n 
self.assertEqual(0,\n len(context[\"tenants\"][tenant_id][\"containers\"]))\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test__delete_objects(self, mock_clients):\n context = test.get_test_context()\n context.update({\n \"tenants\": {\n \"1001\": {\n \"name\": \"t1_name\",\n \"containers\": [\n {\"user\": {\n \"id\": \"u1\", \"tenant_id\": \"1001\",\n \"credential\": mock.MagicMock()},\n \"container\": \"c1\",\n \"objects\": [\"o1\", \"o2\", \"o3\"]}\n ]\n },\n \"1002\": {\n \"name\": \"t2_name\",\n \"containers\": [\n {\"user\": {\n \"id\": \"u2\", \"tenant_id\": \"1002\",\n \"credential\": mock.MagicMock()},\n \"container\": \"c2\",\n \"objects\": [\"o4\", \"o5\", \"o6\"]}\n ]\n }\n }\n })\n\n SwiftContext(context)._delete_objects(1)\n\n mock_swift = mock_clients.return_value.swift.return_value\n expected_objects = [(\"c1\", \"o1\"), (\"c1\", \"o2\"), (\"c1\", \"o3\"),\n (\"c2\", \"o4\"), (\"c2\", \"o5\"), (\"c2\", \"o6\")]\n mock_swift.delete_object.assert_has_calls(\n [mock.call(con, obj) for con, obj in expected_objects],\n any_order=True)\n\n for tenant_id in context[\"tenants\"]:\n for container in context[\"tenants\"][tenant_id][\"containers\"]:\n self.assertEqual(0, len(container[\"objects\"]))\n" }, { "alpha_fraction": 0.5142137408256531, "alphanum_fraction": 0.5246491432189941, "avg_line_length": 24.971961975097656, "blob_id": "033967a644e4bf6606321c0354322c5a42e5ea0e", "content_id": "e15bd0453011a8caa54d4f0187606197e2cfd616", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2779, "license_type": "permissive", "max_line_length": 91, "num_lines": 107, "path": "/rally-jobs/extra/instance_test.sh", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#!/bin/sh\n# Load server and output JSON results ready to be processed\n# by Rally scenario\n\nfor ex in awk top grep free tr df dc dd gzip\ndo\n if ! 
type ${ex} >/dev/null\n then\n echo \"Executable is required by script but not available on a server: ${ex}\" >&2\n return 1\n fi\ndone\n\nget_used_cpu_percent() {\n echo 100 $(top -b -n 1 | grep -i CPU | head -n 1 | awk '{print $8}' | tr -d %) - p | dc\n}\n\nget_used_ram_percent() {\n local total=$(free | grep Mem: | awk '{print $2}')\n local used=$(free | grep -- -/+\\ buffers | awk '{print $3}')\n echo ${used} 100 \\* ${total} / p | dc\n}\n\nget_used_disk_percent() {\n df -P / | grep -v Filesystem | awk '{print $5}' | tr -d %\n}\n\nget_seconds() {\n (time -p ${1}) 2>&1 | awk '/real/{print $2}'\n}\n\ncomplete_load() {\n local script_file=${LOAD_SCRIPT_FILE:-/tmp/load.sh}\n local stop_file=${LOAD_STOP_FILE:-/tmp/load.stop}\n local processes_num=${LOAD_PROCESSES_COUNT:-20}\n local size=${LOAD_SIZE_MB:-5}\n\n cat << EOF > ${script_file}\nuntil test -e ${stop_file}\ndo dd if=/dev/urandom bs=1M count=${size} 2>/dev/null | gzip >/dev/null ; done\nEOF\n\n local sep\n local cpu\n local ram\n local dis\n rm -f ${stop_file}\n for i in $(seq ${processes_num})\n do\n i=$((i-1))\n sh ${script_file} &\n cpu=\"${cpu}${sep}[${i}, $(get_used_cpu_percent)]\"\n ram=\"${ram}${sep}[${i}, $(get_used_ram_percent)]\"\n dis=\"${dis}${sep}[${i}, $(get_used_disk_percent)]\"\n sep=\", \"\n done\n > ${stop_file}\n cat << EOF\n {\n \"title\": \"Generate load by spawning processes\",\n \"description\": \"Each process runs gzip for ${size}M urandom data in a loop\",\n \"chart_plugin\": \"Lines\",\n \"axis_label\": \"Number of processes\",\n \"label\": \"Usage, %\",\n \"data\": [\n [\"CPU\", [${cpu}]],\n [\"Memory\", [${ram}]],\n [\"Disk\", [${dis}]]]\n }\nEOF\n}\n\nadditive_dd() {\n local c=${1:-50} # Megabytes\n local file=/tmp/dd_test.img\n local write=$(get_seconds \"dd if=/dev/urandom of=${file} bs=1M count=${c}\")\n local read=$(get_seconds \"dd if=${file} of=/dev/null bs=1M count=${c}\")\n local gzip=$(get_seconds \"gzip ${file}\")\n rm ${file}.gz\n cat << EOF\n {\n \"title\": \"Write, 
read and gzip file\",\n \"description\": \"Using file '${file}', size ${c}Mb.\",\n \"chart_plugin\": \"StackedArea\",\n \"data\": [\n [\"write_${c}M\", ${write}],\n [\"read_${c}M\", ${read}],\n [\"gzip_${c}M\", ${gzip}]]\n },\n {\n \"title\": \"Statistics for write/read/gzip\",\n \"chart_plugin\": \"StatsTable\",\n \"data\": [\n [\"write_${c}M\", ${write}],\n [\"read_${c}M\", ${read}],\n [\"gzip_${c}M\", ${gzip}]]\n }\n\nEOF\n}\n\ncat << EOF\n{\n \"additive\": [$(additive_dd)],\n \"complete\": [$(complete_load)]\n}\nEOF\n" }, { "alpha_fraction": 0.5925925970077515, "alphanum_fraction": 0.5975308418273926, "avg_line_length": 37.20754623413086, "blob_id": "8529016909391f9988ed6beaf131a377fa20153a", "content_id": "d44877f1c91a9c1b942f927bc6f6ada7b363d0f9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2025, "license_type": "permissive", "max_line_length": 78, "num_lines": 53, "path": "/tests/unit/task/scenarios/cinder/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.common import credential\nfrom rally_openstack.task.scenarios.cinder import utils\nfrom tests.unit import test\n\n\nclass CinderBasicTestCase(test.ScenarioTestCase):\n\n def _get_context(self):\n context = test.get_test_context()\n\n cred = credential.OpenStackCredential(auth_url=\"url\",\n username=\"user\",\n password=\"pass\")\n context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": cred\n },\n \"user\": {\"id\": \"fake_user_id\",\n \"credential\": cred},\n \"tenant\": {\"id\": \"fake\", \"name\": \"fake\",\n \"volumes\": [{\"id\": \"uuid\", \"size\": 1}],\n \"servers\": [1]}})\n return context\n\n def setUp(self):\n super(CinderBasicTestCase, self).setUp()\n\n @mock.patch(\"random.choice\")\n def test_get_random_server(self, mock_choice):\n basic = utils.CinderBasic(self._get_context())\n server_id = mock_choice(basic.context[\"tenant\"][\"servers\"])\n return_server = basic.get_random_server()\n basic.clients(\"nova\").servers.get.assert_called_once_with(server_id)\n self.assertEqual(basic.clients(\"nova\").servers.get.return_value,\n return_server)\n" }, { "alpha_fraction": 0.629523515701294, "alphanum_fraction": 0.6307297945022583, "avg_line_length": 39.68711471557617, "blob_id": "a0d0dffccb138a7bdc0b224cff58d1999f94da30", "content_id": "53c0a02f9bc53371d65831c2e70e01e8cd77f615", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6632, "license_type": "permissive", "max_line_length": 78, "num_lines": 163, "path": "/rally_openstack/task/scenarios/octavia/pools.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018: Red Hat Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance 
with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.octavia import utils\n\n\"\"\"Scenarios for Octavia Loadbalancer pools.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.OCTAVIA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](context={\"cleanup@openstack\": [\"octavia\"]},\n name=\"Octavia.create_and_list_pools\",\n platform=\"openstack\")\nclass CreateAndListPools(utils.OctaviaBase):\n\n def run(self, protocol, lb_algorithm):\n \"\"\"Create a loadbalancer pool per each subnet and then pools.\n\n :param protocol: protocol for which the pool listens\n :param lb_algorithm: loadbalancer algorithm\n \"\"\"\n subnets = []\n loadbalancers = []\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n project_id = self.context[\"tenant\"][\"id\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet_id in subnets:\n lb = self.octavia.load_balancer_create(\n project_id=project_id,\n subnet_id=subnet_id)\n loadbalancers.append(lb)\n\n for loadbalancer in loadbalancers:\n self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)\n self.octavia.pool_create(\n lb_id=loadbalancer[\"id\"],\n protocol=protocol, lb_algorithm=lb_algorithm)\n self.octavia.pool_list()\n\n\[email protected](\"required_services\", services=[consts.Service.OCTAVIA])\[email 
protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](context={\"cleanup@openstack\": [\"octavia\"]},\n name=\"Octavia.create_and_delete_pools\",\n platform=\"openstack\")\nclass CreateAndDeletePools(utils.OctaviaBase):\n\n def run(self, protocol, lb_algorithm):\n \"\"\"Create a pool per each subnet and then delete pool\n\n :param protocol: protocol for which the pool listens\n :param lb_algorithm: loadbalancer algorithm\n \"\"\"\n subnets = []\n loadbalancers = []\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n project_id = self.context[\"tenant\"][\"id\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet_id in subnets:\n lb = self.octavia.load_balancer_create(\n project_id=project_id,\n subnet_id=subnet_id)\n loadbalancers.append(lb)\n\n for loadbalancer in loadbalancers:\n self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)\n pools = self.octavia.pool_create(\n lb_id=loadbalancer[\"id\"],\n protocol=protocol, lb_algorithm=lb_algorithm)\n self.octavia.pool_delete(pools[\"id\"])\n\n\[email protected](\"required_services\", services=[consts.Service.OCTAVIA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](context={\"cleanup@openstack\": [\"octavia\"]},\n name=\"Octavia.create_and_update_pools\",\n platform=\"openstack\")\nclass CreateAndUpdatePools(utils.OctaviaBase):\n\n def run(self, protocol, lb_algorithm):\n \"\"\"Create a pool per each subnet and then update\n\n :param protocol: protocol for which the pool listens\n :param lb_algorithm: loadbalancer algorithm\n \"\"\"\n subnets = []\n loadbalancers = []\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n project_id = self.context[\"tenant\"][\"id\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", 
[]))\n for subnet_id in subnets:\n lb = self.octavia.load_balancer_create(\n project_id=project_id,\n subnet_id=subnet_id)\n loadbalancers.append(lb)\n\n update_pool = {\n \"name\": self.generate_random_name()\n }\n\n for loadbalancer in loadbalancers:\n self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)\n pools = self.octavia.pool_create(\n lb_id=loadbalancer[\"id\"],\n protocol=protocol, lb_algorithm=lb_algorithm)\n self.octavia.pool_set(\n pool_id=pools[\"id\"], pool_update_args=update_pool)\n\n\[email protected](\"required_services\", services=[consts.Service.OCTAVIA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](context={\"cleanup@openstack\": [\"octavia\"]},\n name=\"Octavia.create_and_show_pools\",\n platform=\"openstack\")\nclass CreateAndShowPools(utils.OctaviaBase):\n\n def run(self, protocol, lb_algorithm):\n \"\"\"Create a pool per each subnet and show it\n\n :param protocol: protocol for which the pool listens\n :param lb_algorithm: loadbalancer algorithm\n \"\"\"\n subnets = []\n loadbalancers = []\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n project_id = self.context[\"tenant\"][\"id\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet_id in subnets:\n lb = self.octavia.load_balancer_create(\n project_id=project_id,\n subnet_id=subnet_id)\n loadbalancers.append(lb)\n\n for loadbalancer in loadbalancers:\n self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)\n pools = self.octavia.pool_create(\n lb_id=loadbalancer[\"id\"],\n protocol=protocol, lb_algorithm=lb_algorithm)\n self.octavia.pool_show(pools[\"id\"])\n" }, { "alpha_fraction": 0.6114392280578613, "alphanum_fraction": 0.6136928796768188, "avg_line_length": 37.759037017822266, "blob_id": "a15466b7278239d9e5fc9b2de112bdfafca2cfd2", "content_id": "385e594c29081f84ce43417a79cfee0e5e40009d", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12868, "license_type": "permissive", "max_line_length": 79, "num_lines": 332, "path": "/rally_openstack/task/scenarios/heat/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import utils\nimport requests\n\nfrom rally_openstack.task import scenario\n\n\nLOG = logging.getLogger(__name__)\n\n\nCONF = cfg.CONF\n\n\nclass HeatScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Heat scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"heat.list_stacks\")\n def _list_stacks(self):\n \"\"\"Return user stack list.\"\"\"\n\n return list(self.clients(\"heat\").stacks.list())\n\n @atomic.action_timer(\"heat.create_stack\")\n def _create_stack(self, template, parameters=None,\n files=None, environment=None):\n \"\"\"Create a new stack.\n\n :param template: template with stack description.\n :param parameters: template parameters used during stack creation\n :param files: additional files used in template\n :param environment: stack environment definition\n\n :returns: object of stack\n \"\"\"\n stack_name = self.generate_random_name()\n kw = {\n 
\"stack_name\": stack_name,\n \"disable_rollback\": True,\n \"parameters\": parameters or {},\n \"template\": template,\n \"files\": files or {},\n \"environment\": environment or {}\n }\n\n # heat client returns body instead manager object, so we should\n # get manager object using stack_id\n stack_id = self.clients(\"heat\").stacks.create(**kw)[\"stack\"][\"id\"]\n stack = self.clients(\"heat\").stacks.get(stack_id)\n\n self.sleep_between(CONF.openstack.heat_stack_create_prepoll_delay)\n\n stack = utils.wait_for_status(\n stack,\n ready_statuses=[\"CREATE_COMPLETE\"],\n failure_statuses=[\"CREATE_FAILED\", \"ERROR\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.heat_stack_create_timeout,\n check_interval=CONF.openstack.heat_stack_create_poll_interval)\n\n return stack\n\n @atomic.action_timer(\"heat.update_stack\")\n def _update_stack(self, stack, template, parameters=None,\n files=None, environment=None):\n \"\"\"Update an existing stack\n\n :param stack: stack that need to be updated\n :param template: Updated template\n :param parameters: template parameters for stack update\n :param files: additional files used in template\n :param environment: stack environment definition\n\n :returns: object of updated stack\n \"\"\"\n\n kw = {\n \"stack_name\": stack.stack_name,\n \"disable_rollback\": True,\n \"parameters\": parameters or {},\n \"template\": template,\n \"files\": files or {},\n \"environment\": environment or {}\n }\n self.clients(\"heat\").stacks.update(stack.id, **kw)\n\n self.sleep_between(CONF.openstack.heat_stack_update_prepoll_delay)\n\n stack = utils.wait_for_status(\n stack,\n ready_statuses=[\"UPDATE_COMPLETE\"],\n failure_statuses=[\"UPDATE_FAILED\", \"ERROR\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.heat_stack_update_timeout,\n check_interval=CONF.openstack.heat_stack_update_poll_interval)\n return stack\n\n @atomic.action_timer(\"heat.check_stack\")\n def _check_stack(self, stack):\n 
\"\"\"Check given stack.\n\n Check the stack and stack resources.\n\n :param stack: stack that needs to be checked\n \"\"\"\n self.clients(\"heat\").actions.check(stack.id)\n utils.wait_for_status(\n stack,\n ready_statuses=[\"CHECK_COMPLETE\"],\n failure_statuses=[\"CHECK_FAILED\", \"ERROR\"],\n update_resource=utils.get_from_manager([\"CHECK_FAILED\"]),\n timeout=CONF.openstack.heat_stack_check_timeout,\n check_interval=CONF.openstack.heat_stack_check_poll_interval)\n\n @atomic.action_timer(\"heat.delete_stack\")\n def _delete_stack(self, stack):\n \"\"\"Delete given stack.\n\n Returns when the stack is actually deleted.\n\n :param stack: stack object\n \"\"\"\n stack.delete()\n utils.wait_for_status(\n stack,\n ready_statuses=[\"DELETE_COMPLETE\"],\n failure_statuses=[\"DELETE_FAILED\", \"ERROR\"],\n check_deletion=True,\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.heat_stack_delete_timeout,\n check_interval=CONF.openstack.heat_stack_delete_poll_interval)\n\n @atomic.action_timer(\"heat.suspend_stack\")\n def _suspend_stack(self, stack):\n \"\"\"Suspend given stack.\n\n :param stack: stack that needs to be suspended\n \"\"\"\n\n self.clients(\"heat\").actions.suspend(stack.id)\n utils.wait_for_status(\n stack,\n ready_statuses=[\"SUSPEND_COMPLETE\"],\n failure_statuses=[\"SUSPEND_FAILED\", \"ERROR\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.heat_stack_suspend_timeout,\n check_interval=CONF.openstack.heat_stack_suspend_poll_interval)\n\n @atomic.action_timer(\"heat.resume_stack\")\n def _resume_stack(self, stack):\n \"\"\"Resume given stack.\n\n :param stack: stack that needs to be resumed\n \"\"\"\n\n self.clients(\"heat\").actions.resume(stack.id)\n utils.wait_for_status(\n stack,\n ready_statuses=[\"RESUME_COMPLETE\"],\n failure_statuses=[\"RESUME_FAILED\", \"ERROR\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.heat_stack_resume_timeout,\n 
check_interval=CONF.openstack.heat_stack_resume_poll_interval)\n\n @atomic.action_timer(\"heat.snapshot_stack\")\n def _snapshot_stack(self, stack):\n \"\"\"Creates a snapshot for given stack.\n\n :param stack: stack that will be used as base for snapshot\n :returns: snapshot created for given stack\n \"\"\"\n snapshot = self.clients(\"heat\").stacks.snapshot(\n stack.id)\n utils.wait_for_status(\n stack,\n ready_statuses=[\"SNAPSHOT_COMPLETE\"],\n failure_statuses=[\"SNAPSHOT_FAILED\", \"ERROR\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.heat_stack_snapshot_timeout,\n check_interval=CONF.openstack.heat_stack_snapshot_poll_interval)\n return snapshot\n\n @atomic.action_timer(\"heat.restore_stack\")\n def _restore_stack(self, stack, snapshot_id):\n \"\"\"Restores stack from given snapshot.\n\n :param stack: stack that will be restored from snapshot\n :param snapshot_id: id of given snapshot\n \"\"\"\n self.clients(\"heat\").stacks.restore(stack.id, snapshot_id)\n utils.wait_for_status(\n stack,\n ready_statuses=[\"RESTORE_COMPLETE\"],\n failure_statuses=[\"RESTORE_FAILED\", \"ERROR\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.heat_stack_restore_timeout,\n check_interval=CONF.openstack.heat_stack_restore_poll_interval\n )\n\n @atomic.action_timer(\"heat.show_output\")\n def _stack_show_output(self, stack, output_key):\n \"\"\"Execute output_show for specified \"output_key\".\n\n This method uses new output API call.\n :param stack: stack with output_key output.\n :param output_key: The name of the output.\n \"\"\"\n output = self.clients(\"heat\").stacks.output_show(stack.id, output_key)\n return output\n\n @atomic.action_timer(\"heat.show_output_via_API\")\n def _stack_show_output_via_API(self, stack, output_key):\n \"\"\"Execute output_show for specified \"output_key\".\n\n This method uses old way for getting output value.\n It gets whole stack object and then finds necessary \"output_key\".\n :param stack: 
stack with output_key output.\n :param output_key: The name of the output.\n \"\"\"\n # this code copy-pasted and adopted for rally from old client version\n # https://github.com/openstack/python-heatclient/blob/0.8.0/heatclient/\n # v1/shell.py#L682-L699\n stack = self.clients(\"heat\").stacks.get(stack_id=stack.id)\n for output in stack.to_dict().get(\"outputs\", []):\n if output[\"output_key\"] == output_key:\n return output\n\n @atomic.action_timer(\"heat.list_output\")\n def _stack_list_output(self, stack):\n \"\"\"Execute output_list for specified \"stack\".\n\n This method uses new output API call.\n :param stack: stack to call output-list.\n \"\"\"\n output_list = self.clients(\"heat\").stacks.output_list(stack.id)\n return output_list\n\n @atomic.action_timer(\"heat.list_output_via_API\")\n def _stack_list_output_via_API(self, stack):\n \"\"\"Execute output_list for specified \"stack\".\n\n This method uses old way for getting output value.\n It gets whole stack object and then prints all outputs\n belongs this stack.\n :param stack: stack to call output-list.\n \"\"\"\n # this code copy-pasted and adopted for rally from old client version\n # https://github.com/openstack/python-heatclient/blob/0.8.0/heatclient/\n # v1/shell.py#L649-L663\n stack = self.clients(\"heat\").stacks.get(stack_id=stack.id)\n output_list = stack.to_dict()[\"outputs\"]\n return output_list\n\n def _count_instances(self, stack):\n \"\"\"Count instances in a Heat stack.\n\n :param stack: stack to count instances in.\n \"\"\"\n return len([\n r for r in self.clients(\"heat\").resources.list(stack.id,\n nested_depth=1)\n if r.resource_type == \"OS::Nova::Server\"])\n\n def _scale_stack(self, stack, output_key, delta):\n \"\"\"Scale a stack up or down.\n\n Calls the webhook given in the output value identified by\n 'output_key', and waits for the stack size to change by\n 'delta'.\n\n :param stack: stack to scale up or down\n :param output_key: The name of the output to get the URL 
from\n :param delta: The expected change in number of instances in\n the stack (signed int)\n \"\"\"\n num_instances = self._count_instances(stack)\n expected_instances = num_instances + delta\n LOG.debug(\"Scaling stack %s from %s to %s instances with %s\"\n % (stack.id, num_instances, expected_instances, output_key))\n with atomic.ActionTimer(self, \"heat.scale_with_%s\" % output_key):\n self._stack_webhook(stack, output_key)\n utils.wait_for(\n stack,\n is_ready=lambda s: (\n self._count_instances(s) == expected_instances),\n failure_statuses=[\"UPDATE_FAILED\", \"ERROR\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.heat_stack_scale_timeout,\n check_interval=CONF.openstack.heat_stack_scale_poll_interval)\n\n def _stack_webhook(self, stack, output_key):\n \"\"\"POST to the URL given in the output value identified by output_key.\n\n This can be used to scale stacks up and down, for instance.\n\n :param stack: stack to call a webhook on\n :param output_key: The name of the output to get the URL from\n :raises InvalidConfigException: if the output key is not found\n \"\"\"\n url = None\n for output in stack.outputs:\n if output[\"output_key\"] == output_key:\n url = output[\"output_value\"]\n break\n else:\n raise exceptions.InvalidConfigException(\n \"No output key %(key)s found in stack %(id)s\" %\n {\"key\": output_key, \"id\": stack.id})\n\n platform_params = self.context[\"env\"][\"spec\"][\"existing@openstack\"]\n verify = (platform_params.get(\"https_cacert\")\n if not platform_params.get(\"https_insecure\")\n else False)\n with atomic.ActionTimer(self, \"heat.%s_webhook\" % output_key):\n requests.post(url, verify=verify).raise_for_status()\n" }, { "alpha_fraction": 0.6015084981918335, "alphanum_fraction": 0.6084223985671997, "avg_line_length": 32.8510627746582, "blob_id": "fd8033cd140c910ef5544f7dffe7868dc1ccf7f1", "content_id": "afee8acd145d8c175a6333a45ff6f265fb90b210", "detected_licenses": [ "Apache-2.0" ], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 1591, "license_type": "permissive", "max_line_length": 78, "num_lines": 47, "path": "/tests/unit/task/test_context.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally_openstack.task import context\nfrom tests.unit import test\n\n\nclass TenantIteratorTestCase(test.TestCase):\n\n def test__iterate_per_tenant(self):\n\n class DummyContext(context.OpenStackContext):\n def __init__(self, ctx):\n self.context = ctx\n\n def setup(self):\n pass\n\n def cleanup(self):\n pass\n\n users = []\n tenants_count = 2\n users_per_tenant = 5\n for tenant_id in range(tenants_count):\n for user_id in range(users_per_tenant):\n users.append({\"id\": str(user_id),\n \"tenant_id\": str(tenant_id)})\n\n expected_result = [\n ({\"id\": \"0\", \"tenant_id\": str(i)}, str(i)) for i in range(\n tenants_count)]\n real_result = [\n i for i in DummyContext({\"users\": users})._iterate_per_tenants()]\n\n self.assertEqual(expected_result, real_result)\n" }, { "alpha_fraction": 0.6436682343482971, "alphanum_fraction": 0.6469742655754089, "avg_line_length": 46.32653045654297, "blob_id": "55512b53a19e23940d3cb2e18dd0d958c5f80c3a", "content_id": "11a7f12bed2829214e1f97ccc8c4fd16d48f929d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 6957, "license_type": "permissive", "max_line_length": 79, "num_lines": 147, "path": "/rally_openstack/task/scenarios/grafana/metrics.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.task import types\nfrom rally.task import utils\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common.services.grafana import grafana as grafana_service\nfrom rally_openstack.task import scenario\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\"\"\"Scenarios for Pushgateway and Grafana metrics.\"\"\"\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"GrafanaMetrics.push_metric_from_instance\",\n platform=\"openstack\")\nclass PushMetricsInstance(scenario.OpenStackScenario):\n \"\"\"Test monitoring system by pushing metric from nova server and check it.\n\n Scenario tests monitoring system, which uses Pushgateway as metric exporter\n and Grafana as metrics monitoring.\n\n The goal of the test is to check that monitoring system works correctly\n with nova instance. 
Test case is the following: we deploy some env with\n nodes on Openstack nova instances, add metric exporter (using Pushgateway\n in this test) inside nodes (i.e. nova instances) for some interested\n metrics (e.g. CPU, memory etc.). We want to check that metrics successfully\n sends to metrics storage (e.g. Prometheus) by requesting Grafana. Create\n nova instance, add Pushgateway push random metric to userdata and after\n instance would be available, check Grafana datasource that pushed metric in\n data.\n \"\"\"\n\n def _metric_from_instance(self, seed, image, flavor, monitor_vip,\n pushgateway_port, job_name):\n push_cmd = (\n \"echo %(seed)s 12345 | curl --data-binary \"\n \"@- http://%(monitor_vip)s:%(pgtw_port)s/metrics/job\"\n \"/%(job_name)s\" % {\"seed\": seed,\n \"monitor_vip\": monitor_vip,\n \"pgtw_port\": pushgateway_port,\n \"job_name\": job_name})\n userdata = (\"#!/bin/bash\\n%s\" % push_cmd)\n server = self.clients(\"nova\").servers.create(seed,\n image, flavor,\n userdata=userdata)\n LOG.info(\"Server %s create started\" % seed)\n self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay)\n utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.nova_server_boot_timeout,\n check_interval=CONF.openstack.nova_server_boot_poll_interval\n )\n LOG.info(\"Server %s with pushing metric script (metric exporter) is \"\n \"active\" % seed)\n\n def run(self, image, flavor, monitor_vip, pushgateway_port,\n grafana, datasource_id, job_name, sleep_time=5,\n retries_total=30):\n \"\"\"Create nova instance with pushing metric script as userdata.\n\n Push metric to metrics storage using Pushgateway and check it in\n Grafana.\n\n :param image: image for server with userdata script\n :param flavor: flavor for server with userdata script\n :param monitor_vip: monitoring system IP to push metric\n :param pushgateway_port: Pushgateway port to use for pushing metric\n :param grafana: 
Grafana dict with creds and port to use for checking\n metric. Format: {user: admin, password: pass, port: 9902}\n :param datasource_id: metrics storage datasource ID in Grafana\n :param job_name: job name to push metric in it\n :param sleep_time: sleep time between checking metrics in seconds\n :param retries_total: total number of retries to check metric in\n Grafana\n \"\"\"\n seed = self.generate_random_name()\n\n grafana_svc = grafana_service.GrafanaService(\n dict(monitor_vip=monitor_vip, pushgateway_port=pushgateway_port,\n grafana=grafana, datasource_id=datasource_id,\n job_name=job_name),\n name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n\n self._metric_from_instance(seed, image, flavor, monitor_vip,\n pushgateway_port, job_name)\n checked = grafana_svc.check_metric(seed, sleep_time=sleep_time,\n retries_total=retries_total)\n self.assertTrue(checked)\n\n\[email protected](name=\"GrafanaMetrics.push_metric_locally\")\nclass PushMetricLocal(scenario.OpenStackScenario):\n \"\"\"Test monitoring system availability with local pushing random metric.\"\"\"\n\n def run(self, monitor_vip, pushgateway_port, grafana, datasource_id,\n job_name, sleep_time=5, retries_total=30):\n \"\"\"Push random metric to Pushgateway locally and check it in Grafana.\n\n :param monitor_vip: monitoring system IP to push metric\n :param pushgateway_port: Pushgateway port to use for pushing metric\n :param grafana: Grafana dict with creds and port to use for checking\n metric. 
Format: {user: admin, password: pass, port: 9902}\n :param datasource_id: metrics storage datasource ID in Grafana\n :param job_name: job name to push metric in it\n :param sleep_time: sleep time between checking metrics in seconds\n :param retries_total: total number of retries to check metric in\n Grafana\n \"\"\"\n seed = self.generate_random_name()\n\n grafana_svc = grafana_service.GrafanaService(\n dict(monitor_vip=monitor_vip, pushgateway_port=pushgateway_port,\n grafana=grafana, datasource_id=datasource_id,\n job_name=job_name),\n name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions())\n\n pushed = grafana_svc.push_metric(seed)\n self.assertTrue(pushed)\n checked = grafana_svc.check_metric(seed, sleep_time=sleep_time,\n retries_total=retries_total)\n self.assertTrue(checked)\n" }, { "alpha_fraction": 0.6736048460006714, "alphanum_fraction": 0.6757164597511292, "avg_line_length": 40.9620246887207, "blob_id": "7a14ca8856c3bb958b54f365a80388656b3dc6f3", "content_id": "44bbcb6591ba0723d9d10393ff4870bb459d621e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3315, "license_type": "permissive", "max_line_length": 79, "num_lines": 79, "path": "/rally_openstack/task/scenarios/magnum/clusters.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.magnum import utils\nfrom rally_openstack.task.scenarios.nova import utils as nova_utils\n\n\n\"\"\"Scenarios for Magnum clusters.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.MAGNUM])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"magnum.clusters\"]},\n name=\"MagnumClusters.list_clusters\",\n platform=\"openstack\")\nclass ListClusters(utils.MagnumScenario):\n\n def run(self, **kwargs):\n \"\"\"List all clusters.\n\n Measure the \"magnum clusters-list\" command performance.\n :param limit: (Optional) The maximum number of results to return\n per request, if:\n\n 1) limit > 0, the maximum number of clusters to return.\n 2) limit param is NOT specified (None), the number of items\n returned respect the maximum imposed by the Magnum API\n (see Magnum's api.max_limit option).\n\n :param kwargs: optional additional arguments for clusters listing\n \"\"\"\n self._list_clusters(**kwargs)\n\n\[email protected](\"required_services\", services=[consts.Service.MAGNUM])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"magnum.clusters\", \"nova.keypairs\"]},\n name=\"MagnumClusters.create_and_list_clusters\",\n platform=\"openstack\")\nclass CreateAndListClusters(utils.MagnumScenario, nova_utils.NovaScenario):\n\n def run(self, node_count, **kwargs):\n \"\"\"create cluster and then list all clusters.\n\n :param node_count: the cluster node count.\n :param cluster_template_uuid: optional, if user want to use an existing\n cluster_template\n :param kwargs: optional additional arguments for cluster creation\n \"\"\"\n 
cluster_template_uuid = kwargs.get(\"cluster_template_uuid\", None)\n if cluster_template_uuid is None:\n cluster_template_uuid = self.context[\"tenant\"][\"cluster_template\"]\n else:\n del kwargs[\"cluster_template_uuid\"]\n\n keypair = self._create_keypair()\n\n new_cluster = self._create_cluster(cluster_template_uuid, node_count,\n keypair=keypair, **kwargs)\n self.assertTrue(new_cluster, \"Failed to create new cluster\")\n clusters = self._list_clusters(**kwargs)\n self.assertIn(new_cluster.uuid, [cluster.uuid for cluster in clusters],\n \"New cluster not found in a list of clusters\")\n" }, { "alpha_fraction": 0.5533866286277771, "alphanum_fraction": 0.5671687722206116, "avg_line_length": 40.97419357299805, "blob_id": "302b08b93d21d1061af0a3dcd97107ee8bfefedc", "content_id": "5a7fc1602c13e821573d7b78a591a49f83944b3c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19518, "license_type": "permissive", "max_line_length": 78, "num_lines": 465, "path": "/tests/unit/task/test_types.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\n\nfrom rally_openstack.task import types\nfrom tests.unit import fakes\nfrom tests.unit import test\n\n\nclass OpenStackResourceTypeTestCase(test.TestCase):\n def test__find_resource(self):\n\n @types.configure(name=self.id())\n class FooType(types.OpenStackResourceType):\n def pre_process(self, resource_spec, config):\n pass\n\n ftype = FooType({})\n\n resources = dict(\n (name, fakes.FakeResource(name=name))\n for name in [\"Fake1\", \"Fake2\", \"Fake3\"])\n # case #1: 100% name match\n self.assertEqual(\n resources[\"Fake2\"],\n ftype._find_resource({\"name\": \"Fake2\"}, resources.values()))\n\n # case #2: pick the latest one\n self.assertEqual(\n resources[\"Fake3\"],\n ftype._find_resource({\"name\": \"Fake\"}, resources.values()))\n\n # case #3: regex one match\n self.assertEqual(\n resources[\"Fake2\"],\n ftype._find_resource({\"regex\": \".ake2\"}, resources.values()))\n\n # case #4: regex, pick the latest one\n self.assertEqual(\n resources[\"Fake3\"],\n ftype._find_resource({\"regex\": \"Fake\"}, resources.values()))\n\n def test__find_resource_negative(self):\n\n @types.configure(name=self.id())\n class FooType(types.OpenStackResourceType):\n def pre_process(self, resource_spec, config):\n pass\n\n ftype = FooType({})\n # case #1: the wrong resource spec\n e = self.assertRaises(exceptions.InvalidScenarioArgument,\n ftype._find_resource, {}, [])\n self.assertIn(\"'id', 'name', or 'regex' not found\",\n e.format_message())\n\n # case #2: two matches for one name\n resources = [fakes.FakeResource(name=\"Fake1\"),\n fakes.FakeResource(name=\"Fake2\"),\n fakes.FakeResource(name=\"Fake1\")]\n e = self.assertRaises(\n exceptions.InvalidScenarioArgument,\n ftype._find_resource, {\"name\": \"Fake1\"}, resources)\n self.assertIn(\"with name 'Fake1' is ambiguous, possible matches\",\n 
e.format_message())\n\n # case #3: no matches at all\n resources = [fakes.FakeResource(name=\"Fake1\"),\n fakes.FakeResource(name=\"Fake2\"),\n fakes.FakeResource(name=\"Fake3\")]\n e = self.assertRaises(\n exceptions.InvalidScenarioArgument,\n ftype._find_resource, {\"name\": \"Foo\"}, resources)\n self.assertIn(\"with pattern 'Foo' not found\",\n e.format_message())\n\n # case #4: two matches for one name, but 'accurate' is True\n resources = [fakes.FakeResource(name=\"Fake1\"),\n fakes.FakeResource(name=\"Fake2\"),\n fakes.FakeResource(name=\"Fake3\")]\n e = self.assertRaises(\n exceptions.InvalidScenarioArgument,\n ftype._find_resource, {\"name\": \"Fake\", \"accurate\": True},\n resources)\n self.assertIn(\"with name 'Fake' not found\",\n e.format_message())\n\n # case #5: two matches for one name, but 'accurate' is True\n resources = [fakes.FakeResource(name=\"Fake1\"),\n fakes.FakeResource(name=\"Fake2\"),\n fakes.FakeResource(name=\"Fake3\")]\n e = self.assertRaises(\n exceptions.InvalidScenarioArgument,\n ftype._find_resource, {\"regex\": \"Fake\", \"accurate\": True},\n resources)\n self.assertIn(\"with name 'Fake' is ambiguous, possible matches\",\n e.format_message())\n\n\nclass FlavorTestCase(test.TestCase):\n\n def setUp(self):\n super(FlavorTestCase, self).setUp()\n self.clients = fakes.FakeClients()\n self.clients.nova().flavors._cache(fakes.FakeResource(name=\"m1.tiny\",\n id=\"1\"))\n self.clients.nova().flavors._cache(fakes.FakeResource(name=\"m1.nano\",\n id=\"42\"))\n self.clients.nova().flavors._cache(fakes.FakeResource(name=\"m1.large\",\n id=\"44\"))\n self.clients.nova().flavors._cache(fakes.FakeResource(name=\"m1.large\",\n id=\"45\"))\n self.type_cls = types.Flavor(\n context={\"admin\": {\"credential\": mock.Mock()}})\n self.type_cls._clients = self.clients\n\n def test_preprocess_by_id(self):\n resource_spec = {\"id\": \"42\"}\n flavor_id = self.type_cls.pre_process(\n resource_spec=resource_spec, config={})\n self.assertEqual(\"42\", 
flavor_id)\n\n def test_preprocess_by_name(self):\n resource_spec = {\"name\": \"m1.nano\"}\n flavor_id = self.type_cls.pre_process(\n resource_spec=resource_spec, config={})\n self.assertEqual(\"42\", flavor_id)\n\n def test_preprocess_by_name_no_match(self):\n resource_spec = {\"name\": \"m1.medium\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n def test_preprocess_by_name_multiple_match(self):\n resource_spec = {\"name\": \"m1.large\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n def test_preprocess_by_regex(self):\n resource_spec = {\"regex\": r\"m(1|2)\\.nano\"}\n flavor_id = self.type_cls.pre_process(\n resource_spec=resource_spec, config={})\n self.assertEqual(\"42\", flavor_id)\n\n def test_preprocess_by_regex_multiple_match(self):\n resource_spec = {\"regex\": \"^m1\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n def test_preprocess_by_regex_no_match(self):\n resource_spec = {}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n\nclass GlanceImageTestCase(test.TestCase):\n\n def setUp(self):\n super(GlanceImageTestCase, self).setUp()\n self.clients = fakes.FakeClients()\n image1 = fakes.FakeResource(name=\"cirros-0.5.2-uec\", id=\"100\")\n self.clients.glance().images._cache(image1)\n image2 = fakes.FakeResource(name=\"cirros-0.5.2-uec-ramdisk\", id=\"101\")\n self.clients.glance().images._cache(image2)\n image3 = fakes.FakeResource(name=\"cirros-0.5.2-uec-ramdisk-copy\",\n id=\"102\")\n self.clients.glance().images._cache(image3)\n image4 = fakes.FakeResource(name=\"cirros-0.5.2-uec-ramdisk-copy\",\n id=\"103\")\n self.clients.glance().images._cache(image4)\n self.type_cls = types.GlanceImage(\n context={\"admin\": 
{\"credential\": mock.Mock()}})\n self.type_cls._clients = self.clients\n\n def test_preprocess_by_id(self):\n resource_spec = {\"id\": \"100\"}\n image_id = self.type_cls.pre_process(\n resource_spec=resource_spec, config={})\n self.assertEqual(\"100\", image_id)\n\n def test_preprocess_by_name(self):\n resource_spec = {\"name\": \"^cirros-0.5.2-uec$\"}\n image_id = self.type_cls.pre_process(\n resource_spec=resource_spec, config={})\n self.assertEqual(\"100\", image_id)\n\n def test_preprocess_by_name_no_match(self):\n resource_spec = {\"name\": \"cirros-0.5.2-uec-boot\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n def test_preprocess_by_name_match_multiple(self):\n resource_spec = {\"name\": \"cirros-0.5.2-uec-ramdisk-copy\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n def test_preprocess_by_regex(self):\n resource_spec = {\"regex\": \"-uec$\"}\n image_id = self.type_cls.pre_process(\n resource_spec=resource_spec, config={})\n self.assertEqual(\"100\", image_id)\n\n def test_preprocess_by_regex_match_multiple(self):\n resource_spec = {\"regex\": \"^cirros\"}\n image_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n # matching resources are sorted by the names. 
It is impossible to\n # predict which resource will be luckiest\n self.assertIn(image_id, [\"102\", \"103\"])\n\n def test_preprocess_by_regex_no_match(self):\n resource_spec = {\"regex\": \"-boot$\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n\nclass GlanceImageArgsTestCase(test.TestCase):\n\n def test_preprocess(self):\n self.assertEqual(\n {},\n types.GlanceImageArguments({}).pre_process(\n resource_spec={}, config={}))\n self.assertEqual(\n {\"visibility\": \"public\"},\n types.GlanceImageArguments({}).pre_process(\n config={}, resource_spec={\"visibility\": \"public\"}))\n self.assertEqual(\n {\"visibility\": \"public\"},\n types.GlanceImageArguments({}).pre_process(\n config={}, resource_spec={\"visibility\": \"public\",\n \"is_public\": False}))\n self.assertEqual(\n {\"visibility\": \"private\"},\n types.GlanceImageArguments({}).pre_process(\n config={}, resource_spec={\"is_public\": False}))\n\n\nclass EC2ImageTestCase(test.TestCase):\n\n def setUp(self):\n super(EC2ImageTestCase, self).setUp()\n self.clients = fakes.FakeClients()\n image1 = fakes.FakeResource(name=\"cirros-0.5.2-uec\", id=\"100\")\n self.clients.glance().images._cache(image1)\n image2 = fakes.FakeResource(name=\"cirros-0.5.2-uec-ramdisk\", id=\"102\")\n self.clients.glance().images._cache(image2)\n image3 = fakes.FakeResource(name=\"cirros-0.5.2-uec-ramdisk-copy\",\n id=\"102\")\n self.clients.glance().images._cache(image3)\n image4 = fakes.FakeResource(name=\"cirros-0.5.2-uec-ramdisk-copy\",\n id=\"103\")\n self.clients.glance().images._cache(image4)\n\n ec2_image1 = fakes.FakeResource(name=\"cirros-0.5.2-uec\", id=\"200\")\n ec2_image2 = fakes.FakeResource(name=\"cirros-0.5.2-uec-ramdisk\",\n id=\"201\")\n ec2_image3 = fakes.FakeResource(name=\"cirros-0.5.2-uec-ramdisk-copy\",\n id=\"202\")\n ec2_image4 = fakes.FakeResource(name=\"cirros-0.5.2-uec-ramdisk-copy\",\n id=\"203\")\n\n 
self.clients.ec2().get_all_images = mock.Mock(\n return_value=[ec2_image1, ec2_image2, ec2_image3, ec2_image4])\n\n self.type_cls = types.EC2Image(\n context={\"admin\": {\"credential\": mock.Mock()}})\n self.type_cls._clients = self.clients\n\n def test_preprocess_by_name(self):\n resource_spec = {\"name\": \"^cirros-0.5.2-uec$\"}\n ec2_image_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n self.assertEqual(\"200\", ec2_image_id)\n\n def test_preprocess_by_id(self):\n resource_spec = {\"id\": \"100\"}\n ec2_image_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n self.assertEqual(\"200\", ec2_image_id)\n\n def test_preprocess_by_id_no_match(self):\n resource_spec = {\"id\": \"101\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n def test_preprocess_by_name_no_match(self):\n resource_spec = {\"name\": \"cirros-0.5.2-uec-boot\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n def test_preprocess_by_name_match_multiple(self):\n resource_spec = {\"name\": \"cirros-0.5.2-uec-ramdisk-copy\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n def test_preprocess_by_regex(self):\n resource_spec = {\"regex\": \"-uec$\"}\n ec2_image_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n self.assertEqual(\"200\", ec2_image_id)\n\n def test_preprocess_by_regex_match_multiple(self):\n resource_spec = {\"regex\": \"^cirros\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n def test_preprocess_by_regex_no_match(self):\n resource_spec = {\"regex\": \"-boot$\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, 
config={})\n\n\nclass VolumeTypeTestCase(test.TestCase):\n\n def setUp(self):\n super(VolumeTypeTestCase, self).setUp()\n cinder = mock.patch(\"rally_openstack.task.types.block.BlockStorage\")\n self.service = cinder.start().return_value\n self.addCleanup(cinder.stop)\n\n volume_type1 = fakes.FakeResource(name=\"lvmdriver-1\", id=100)\n\n self.type_cls = types.VolumeType(\n context={\"admin\": {\"credential\": mock.Mock()}})\n self.service.list_types.return_value = [volume_type1]\n\n def test_preprocess_by_id(self):\n resource_spec = {\"id\": 100}\n volumetype_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n self.assertEqual(100, volumetype_id)\n\n def test_preprocess_by_name(self):\n resource_spec = {\"name\": \"lvmdriver-1\"}\n volumetype_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n self.assertEqual(100, volumetype_id)\n\n def test_preprocess_by_name_no_match(self):\n resource_spec = {\"name\": \"nomatch-1\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n def test_preprocess_by_regex(self):\n resource_spec = {\"regex\": \"^lvm.*-1\"}\n volumetype_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n self.assertEqual(100, volumetype_id)\n\n def test_preprocess_by_regex_no_match(self):\n resource_spec = {\"regex\": \"dd\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n\nclass NeutronNetworkTestCase(test.TestCase):\n\n def setUp(self):\n super(NeutronNetworkTestCase, self).setUp()\n self.clients = fakes.FakeClients()\n net1_data = {\"network\": {\n \"name\": \"net1\"\n }}\n network1 = self.clients.neutron().create_network(net1_data)\n self.net1_id = network1[\"network\"][\"id\"]\n self.type_cls = types.NeutronNetwork(\n context={\"admin\": {\"credential\": mock.Mock()}})\n self.type_cls._clients = self.clients\n\n def 
test_preprocess_by_id(self):\n resource_spec = {\"id\": self.net1_id}\n network_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n self.assertEqual(network_id, self.net1_id)\n\n def test_preprocess_by_name(self):\n resource_spec = {\"name\": \"net1\"}\n network_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n self.assertEqual(network_id, self.net1_id)\n\n def test_preprocess_by_name_no_match(self):\n resource_spec = {\"name\": \"nomatch-1\"}\n self.assertRaises(exceptions.InvalidScenarioArgument,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n\nclass WatcherStrategyTestCase(test.TestCase):\n\n def setUp(self):\n super(WatcherStrategyTestCase, self).setUp()\n self.clients = fakes.FakeClients()\n self.strategy = self.clients.watcher().strategy._cache(\n fakes.FakeResource(name=\"dummy\", id=\"1\"))\n\n self.type_cls = types.WatcherStrategy(\n context={\"admin\": {\"credential\": mock.Mock()}})\n self.type_cls._clients = self.clients\n\n def test_preprocess_by_name(self):\n resource_spec = {\"name\": \"dummy\"}\n strategy_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n self.assertEqual(self.strategy.uuid, strategy_id)\n\n def test_preprocess_by_name_no_match(self):\n resource_spec = {\"name\": \"dummy-1\"}\n self.assertRaises(exceptions.RallyException,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n\n\nclass WatcherGoalTestCase(test.TestCase):\n\n def setUp(self):\n super(WatcherGoalTestCase, self).setUp()\n self.clients = fakes.FakeClients()\n self.goal = self.clients.watcher().goal._cache(\n fakes.FakeResource(name=\"dummy\", id=\"1\"))\n self.type_cls = types.WatcherGoal(\n context={\"admin\": {\"credential\": mock.Mock()}})\n self.type_cls._clients = self.clients\n\n def test_preprocess_by_name(self):\n resource_spec = {\"name\": \"dummy\"}\n goal_id = self.type_cls.pre_process(resource_spec=resource_spec,\n config={})\n 
self.assertEqual(self.goal.uuid, goal_id)\n\n def test_preprocess_by_name_no_match(self):\n resource_spec = {\"name\": \"dummy-1\"}\n self.assertRaises(exceptions.RallyException,\n self.type_cls.pre_process,\n resource_spec=resource_spec, config={})\n" }, { "alpha_fraction": 0.6202457547187805, "alphanum_fraction": 0.6249268651008606, "avg_line_length": 35.36170196533203, "blob_id": "3e74f85805fbd3fd1949aaf25f579bda80f8799c", "content_id": "968539b530730dc245066fc3c3199b72da9bc6ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1709, "license_type": "permissive", "max_line_length": 78, "num_lines": 47, "path": "/tests/unit/task/scenarios/barbican/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat Inc\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.barbican import utils\nfrom tests.unit import test\n\n\nclass BarbicanBaseTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(BarbicanBaseTestCase, self).setUp()\n self.context = super(BarbicanBaseTestCase, self).get_test_context()\n self.context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake_tenant_id\",\n \"name\": \"fake_tenant_name\"}\n })\n m = \"rally_openstack.common.services.key_manager.barbican\"\n patch = mock.patch(\"%s.BarbicanService\" % m)\n self.addCleanup(patch.stop)\n self.mock_service = patch.start()\n\n def test_barbican_base(self):\n base = utils.BarbicanBase(self.context)\n self.assertEqual(base.admin_barbican,\n self.mock_service.return_value)\n" }, { "alpha_fraction": 0.5930328369140625, "alphanum_fraction": 0.5971519351005554, "avg_line_length": 35.467811584472656, "blob_id": "6ef0c7515e3316fafd0d201c9935f618683697e2", "content_id": "4958696bbb0784ba82265ea76cfb19b20bfec9f6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8497, "license_type": "permissive", "max_line_length": 79, "num_lines": 233, "path": "/rally_openstack/task/scenarios/vm/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport io\nimport os.path\nimport subprocess\nimport sys\n\nimport netaddr\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.task import atomic\nfrom rally.task import utils\nfrom rally.utils import sshutils\n\nfrom rally_openstack.task.scenarios.nova import utils as nova_utils\n\nLOG = logging.getLogger(__name__)\n\n\nCONF = cfg.CONF\n\n\nclass Host(object):\n\n ICMP_UP_STATUS = \"ICMP UP\"\n ICMP_DOWN_STATUS = \"ICMP DOWN\"\n\n name = \"ip\"\n\n def __init__(self, ip):\n self.ip = netaddr.IPAddress(ip)\n self.status = self.ICMP_DOWN_STATUS\n\n @property\n def id(self):\n return self.ip.format()\n\n @classmethod\n def update_status(cls, server):\n \"\"\"Check ip address is pingable and update status.\"\"\"\n ping = \"ping\" if server.ip.version == 4 else \"ping6\"\n if sys.platform.startswith(\"linux\"):\n cmd = [ping, \"-c1\", \"-w1\", server.ip.format()]\n else:\n cmd = [ping, \"-c1\", server.ip.format()]\n\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n proc.wait()\n LOG.debug(\"Host %s is ICMP %s\"\n % (server.ip.format(), proc.returncode and \"down\" or \"up\"))\n if proc.returncode == 0:\n server.status = cls.ICMP_UP_STATUS\n else:\n server.status = cls.ICMP_DOWN_STATUS\n return server\n\n def __eq__(self, other):\n if not isinstance(other, Host):\n raise TypeError(\"%s should be an instance of %s\" % (\n other, Host.__class__.__name__))\n return self.ip == other.ip and self.status == other.status\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\nclass 
VMScenario(nova_utils.NovaScenario):\n \"\"\"Base class for VM scenarios with basic atomic actions.\n\n VM scenarios are scenarios executed inside some launched VM instance.\n \"\"\"\n\n USER_RWX_OTHERS_RX_ACCESS_MODE = 0o755\n\n RESOURCE_NAME_PREFIX = \"rally_vm_\"\n\n @atomic.action_timer(\"vm.run_command_over_ssh\")\n def _run_command_over_ssh(self, ssh, command):\n \"\"\"Run command inside an instance.\n\n This is a separate function so that only script execution is timed.\n\n :param ssh: A SSHClient instance.\n :param command: Dictionary specifying command to execute.\n See `rally info find VMTasks.boot_runcommand_delete' parameter\n `command' docstring for explanation.\n\n :returns: tuple (exit_status, stdout, stderr)\n \"\"\"\n cmd, stdin = [], None\n\n interpreter = command.get(\"interpreter\") or []\n if interpreter:\n if isinstance(interpreter, str):\n interpreter = [interpreter]\n elif type(interpreter) != list:\n raise ValueError(\"command 'interpreter' value must be str \"\n \"or list type\")\n cmd.extend(interpreter)\n\n remote_path = command.get(\"remote_path\") or []\n if remote_path:\n if isinstance(remote_path, str):\n remote_path = [remote_path]\n elif type(remote_path) != list:\n raise ValueError(\"command 'remote_path' value must be str \"\n \"or list type\")\n cmd.extend(remote_path)\n if command.get(\"local_path\"):\n ssh.put_file(os.path.expanduser(\n command[\"local_path\"]), remote_path[-1],\n mode=self.USER_RWX_OTHERS_RX_ACCESS_MODE)\n\n if command.get(\"script_file\"):\n stdin = open(os.path.expanduser(command[\"script_file\"]), \"rb\")\n\n elif command.get(\"script_inline\"):\n stdin = io.StringIO(command[\"script_inline\"])\n\n cmd.extend(command.get(\"command_args\") or [])\n\n return ssh.execute(cmd, stdin=stdin)\n\n def _boot_server_with_fip(self, image, flavor, use_floating_ip=True,\n floating_network=None, **kwargs):\n \"\"\"Boot server prepared for SSH actions.\"\"\"\n kwargs[\"auto_assign_nic\"] = True\n server = 
self._boot_server(image, flavor, **kwargs)\n\n if not server.networks:\n raise RuntimeError(\n \"Server `%s' is not connected to any network. \"\n \"Use network context for auto-assigning networks \"\n \"or provide `nics' argument with specific net-id.\" %\n server.name)\n\n if use_floating_ip:\n fip = self._attach_floating_ip(server, floating_network)\n else:\n internal_network = list(server.networks)[0]\n fip = {\"ip\": server.addresses[internal_network][0][\"addr\"]}\n\n return server, {\"ip\": fip.get(\"ip\"),\n \"id\": fip.get(\"id\"),\n \"is_floating\": use_floating_ip}\n\n def _attach_floating_ip(self, server, floating_network):\n internal_network = list(server.networks)[0]\n fixed_ip = server.addresses[internal_network][0][\"addr\"]\n\n floatingip = self.neutron.create_floatingip(\n floating_network=floating_network)\n self._associate_floating_ip(server, floatingip, fixed_address=fixed_ip)\n\n return {\"id\": floatingip[\"id\"],\n \"ip\": floatingip[\"floating_ip_address\"]}\n\n def _delete_floating_ip(self, server, fip):\n with logging.ExceptionLogger(\n LOG, \"Unable to delete IP: %s\" % fip[\"ip\"]):\n if self.check_ip_address(fip[\"ip\"])(server):\n self._dissociate_floating_ip(server, fip)\n self.neutron.delete_floatingip(fip[\"id\"])\n\n def _delete_server_with_fip(self, server, fip, force_delete=False):\n if fip[\"is_floating\"]:\n self._delete_floating_ip(server, fip)\n return self._delete_server(server, force=force_delete)\n\n @atomic.action_timer(\"vm.wait_for_ssh\")\n def _wait_for_ssh(self, ssh, timeout=120, interval=1):\n ssh.wait(timeout, interval)\n\n @atomic.action_timer(\"vm.wait_for_ping\")\n def _wait_for_ping(self, server_ip):\n server = Host(server_ip)\n utils.wait_for_status(\n server,\n ready_statuses=[Host.ICMP_UP_STATUS],\n update_resource=Host.update_status,\n timeout=CONF.openstack.vm_ping_timeout,\n check_interval=CONF.openstack.vm_ping_poll_interval\n )\n\n def _run_command(self, server_ip, port, username, password, command,\n 
pkey=None, timeout=120, interval=1):\n \"\"\"Run command via SSH on server.\n\n Create SSH connection for server, wait for server to become available\n (there is a delay between server being set to ACTIVE and sshd being\n available). Then call run_command_over_ssh to actually execute the\n command.\n\n :param server_ip: server ip address\n :param port: ssh port for SSH connection\n :param username: str. ssh username for server\n :param password: Password for SSH authentication\n :param command: Dictionary specifying command to execute.\n See `rally info find VMTasks.boot_runcommand_delete' parameter\n `command' docstring for explanation.\n :param pkey: key for SSH authentication\n :param timeout: wait for ssh timeout. Default is 120 seconds\n :param interval: ssh retry interval. Default is 1 second\n\n :returns: tuple (exit_status, stdout, stderr)\n \"\"\"\n pkey = pkey if pkey else self.context[\"user\"][\"keypair\"][\"private\"]\n ssh = sshutils.SSH(username, server_ip, port=port,\n pkey=pkey, password=password)\n try:\n self._wait_for_ssh(ssh, timeout, interval)\n return self._run_command_over_ssh(ssh, command)\n finally:\n try:\n ssh.close()\n except AttributeError:\n pass\n" }, { "alpha_fraction": 0.5542168617248535, "alphanum_fraction": 0.5591778755187988, "avg_line_length": 34.87288284301758, "blob_id": "beb0b6c1489791d28acdf479be64d91012901daa", "content_id": "b1864877d5ebe0cac35a1c185467bc2c3b7b189f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4233, "license_type": "permissive", "max_line_length": 78, "num_lines": 118, "path": "/tests/unit/task/contexts/murano/test_murano_packages.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.murano import murano_packages\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.murano.murano_packages\"\n\n\nclass MuranoPackageGeneratorTestCase(test.TestCase):\n\n def setUp(self):\n super(MuranoPackageGeneratorTestCase, self).setUp()\n\n @staticmethod\n def _get_context():\n return {\n \"config\": {\n \"users\": {\n \"tenants\": 2,\n \"users_per_tenant\": 1,\n \"concurrent\": 1,\n },\n \"murano_packages\": {\n \"app_package\": (\n \"rally-jobs/extra/murano/\"\n \"applications/HelloReporter/\"\n \"io.murano.apps.HelloReporter.zip\")\n }\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"task\": mock.MagicMock(),\n \"owner_id\": \"foo_uuid\",\n \"users\": [\n {\n \"id\": \"user_0\",\n \"tenant_id\": \"tenant_0\",\n \"credential\": \"credential\"\n },\n {\n \"id\": \"user_1\",\n \"tenant_id\": \"tenant_1\",\n \"credential\": \"credential\"\n }\n ],\n \"tenants\": {\n \"tenant_0\": {\"name\": \"tenant_0_name\"},\n \"tenant_1\": {\"name\": \"tenant_1_name\"}\n }\n }\n\n @mock.patch(\"%s.osclients\" % CTX)\n def test_setup(self, mock_osclients):\n mock_app = mock.MagicMock(id=\"fake_app_id\")\n (mock_osclients.Clients().murano().\n packages.create.return_value) = mock_app\n\n murano_ctx = murano_packages.PackageGenerator(self._get_context())\n murano_ctx.setup()\n\n self.assertEqual(2, len(murano_ctx.context[\"tenants\"]))\n tenant_id = murano_ctx.context[\"users\"][0][\"tenant_id\"]\n self.assertEqual([mock_app],\n 
murano_ctx.context[\"tenants\"][tenant_id][\"packages\"])\n\n @mock.patch(\"%s.osclients\" % CTX)\n @mock.patch(\"%s.resource_manager.cleanup\" % CTX)\n def test_cleanup_with_zip(self, mock_cleanup, mock_osclients):\n mock_app = mock.Mock(id=\"fake_app_id\")\n (mock_osclients.Clients().murano().\n packages.create.return_value) = mock_app\n\n murano_ctx = murano_packages.PackageGenerator(self._get_context())\n murano_ctx.setup()\n murano_ctx.cleanup()\n\n mock_cleanup.assert_called_once_with(\n names=[\"murano.packages\"],\n users=murano_ctx.context[\"users\"],\n superclass=murano_packages.PackageGenerator,\n task_id=\"foo_uuid\")\n\n @mock.patch(\"%s.osclients\" % CTX)\n @mock.patch(\"%s.resource_manager.cleanup\" % CTX)\n def test_cleanup_with_dir(self, mock_cleanup, mock_osclients):\n mock_app = mock.Mock(id=\"fake_app_id\")\n (mock_osclients.Clients().murano().\n packages.create.return_value) = mock_app\n ctx_dict = self._get_context()\n app_dir = (\"rally-jobs/extra/murano/applications/\"\n \"HelloReporter/io.murano.apps.HelloReporter/\")\n ctx_dict[\"config\"][\"murano_packages\"][\"app_package\"] = app_dir\n\n murano_ctx = murano_packages.PackageGenerator(ctx_dict)\n murano_ctx.setup()\n murano_ctx.cleanup()\n\n mock_cleanup.assert_called_once_with(\n names=[\"murano.packages\"],\n users=murano_ctx.context[\"users\"],\n superclass=murano_packages.PackageGenerator,\n task_id=\"foo_uuid\")\n" }, { "alpha_fraction": 0.6269769668579102, "alphanum_fraction": 0.6281342506408691, "avg_line_length": 36.21052551269531, "blob_id": "dde9d30911229b7dc59d4b6fc6214d97523e9037", "content_id": "9424a4ffc0d39aa1d6ed48d41cd798d076b3c871", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7777, "license_type": "permissive", "max_line_length": 78, "num_lines": 209, "path": "/rally_openstack/common/services/gnocchi/metric.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# 
Copyright 2017 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import atomic\nfrom rally.task import service\n\n\nclass GnocchiService(service.Service):\n\n @atomic.action_timer(\"gnocchi.create_archive_policy\")\n def create_archive_policy(self, name, definition=None,\n aggregation_methods=None):\n \"\"\"Create an archive policy.\n\n :param name: Archive policy name\n :param definition: Archive policy definition\n :param aggregation_methods: Aggregation method of the archive policy\n \"\"\"\n archive_policy = {\"name\": name}\n if definition is not None:\n archive_policy[\"definition\"] = definition\n if aggregation_methods is not None:\n archive_policy[\"aggregation_methods\"] = aggregation_methods\n\n return self._clients.gnocchi().archive_policy.create(\n archive_policy)\n\n @atomic.action_timer(\"gnocchi.delete_archive_policy\")\n def delete_archive_policy(self, name):\n \"\"\"Delete an archive policy.\n\n :param name: Archive policy name\n \"\"\"\n return self._clients.gnocchi().archive_policy.delete(name)\n\n @atomic.action_timer(\"gnocchi.list_archive_policy\")\n def list_archive_policy(self):\n \"\"\"List archive policies.\"\"\"\n return self._clients.gnocchi().archive_policy.list()\n\n @atomic.action_timer(\"gnocchi.create_archive_policy_rule\")\n def create_archive_policy_rule(self, name, metric_pattern=None,\n archive_policy_name=None):\n \"\"\"Create an archive policy rule.\n\n :param name: 
Archive policy rule name\n :param metric_pattern: Wildcard of metric name to match\n :param archive_policy_name: Archive policy name\n \"\"\"\n archive_policy_rule = {\"name\": name}\n archive_policy_rule[\"metric_pattern\"] = metric_pattern\n archive_policy_rule[\"archive_policy_name\"] = archive_policy_name\n return self._clients.gnocchi().archive_policy_rule.create(\n archive_policy_rule)\n\n @atomic.action_timer(\"gnocchi.delete_archive_policy_rule\")\n def delete_archive_policy_rule(self, name):\n \"\"\"Delete an archive policy rule.\n\n :param name: Archive policy rule name\n \"\"\"\n return self._clients.gnocchi().archive_policy_rule.delete(name)\n\n @atomic.action_timer(\"gnocchi.list_archive_policy_rule\")\n def list_archive_policy_rule(self):\n \"\"\"List archive policy rules.\"\"\"\n return self._clients.gnocchi().archive_policy_rule.list()\n\n @atomic.action_timer(\"gnocchi.list_capabilities\")\n def list_capabilities(self):\n \"\"\"List capabilities.\"\"\"\n return self._clients.gnocchi().capabilities.list()\n\n @atomic.action_timer(\"gnocchi.get_measures_aggregation\")\n def get_measures_aggregation(self, metrics, aggregation=None,\n refresh=None):\n \"\"\"Get measurements of aggregated metrics.\n\n :param metrics: Metric IDs or name\n :param aggregation: Granularity aggregation function to retrieve\n :param refresh: Force aggregation of all known measures\n \"\"\"\n return self._clients.gnocchi().metric.aggregation(\n metrics=metrics, aggregation=aggregation, refresh=refresh)\n\n @atomic.action_timer(\"gnocchi.get_measures\")\n def get_measures(self, metric, aggregation=None, refresh=None):\n \"\"\"Get measurements of a metric.\n\n :param metric: Metric ID or name\n :param aggregation: Aggregation to retrieve\n :param refresh: Force aggregation of all known measures\n \"\"\"\n return self._clients.gnocchi().metric.get_measures(\n metric=metric, aggregation=aggregation, refresh=refresh)\n\n @atomic.action_timer(\"gnocchi.create_metric\")\n def 
create_metric(self, name, archive_policy_name=None, resource_id=None,\n unit=None):\n \"\"\"Create a metric.\n\n :param name: Metric name\n :param archive_policy_name: Archive policy name\n :param resource_id: The resource ID to attach the metric to\n :param unit: The unit of the metric\n \"\"\"\n return self._clients.gnocchi().metric.create(\n name=name, archive_policy_name=archive_policy_name,\n resource_id=resource_id, unit=unit)\n\n @atomic.action_timer(\"gnocchi.delete_metric\")\n def delete_metric(self, metric_id):\n \"\"\"Delete a metric.\n\n :param metric_id: metric ID\n \"\"\"\n return self._clients.gnocchi().metric.delete(metric_id)\n\n @atomic.action_timer(\"gnocchi.list_metric\")\n def list_metric(self, limit=None):\n \"\"\"List metrics.\"\"\"\n metrics = []\n marker = None\n limit_val = limit\n while True:\n page = self._clients.gnocchi().metric.list(limit=limit_val,\n marker=marker)\n if not page:\n break\n metrics.extend(page)\n marker = page[-1][\"id\"]\n if limit_val is not None:\n cnt = len(metrics)\n if cnt < limit:\n limit_val = limit - cnt\n else:\n break\n\n return metrics\n\n @atomic.action_timer(\"gnocchi.create_resource\")\n def create_resource(self, name, resource_type=\"generic\"):\n \"\"\"Create a resource.\n\n :param name: Name of the resource\n :param resource_type: Type of the resource\n \"\"\"\n resource = {\"id\": name}\n return self._clients.gnocchi().resource.create(\n resource_type, resource)\n\n @atomic.action_timer(\"gnocchi.delete_resource\")\n def delete_resource(self, resource_id):\n \"\"\"Delete a resource.\n\n :param resource_id: ID of the resource\n \"\"\"\n return self._clients.gnocchi().resource.delete(resource_id)\n\n @atomic.action_timer(\"gnocchi.list_resource\")\n def list_resource(self, resource_type=\"generic\"):\n \"\"\"List resources.\"\"\"\n return self._clients.gnocchi().resource.list(\n resource_type=resource_type)\n\n @atomic.action_timer(\"gnocchi.create_resource_type\")\n def create_resource_type(self, 
name, attributes=None):\n \"\"\"Create a resource type.\n\n :param name: Name of the resource type\n \"\"\"\n resource_type = {\"name\": name}\n if attributes is not None:\n resource_type[\"attributes\"] = attributes\n\n return self._clients.gnocchi().resource_type.create(\n resource_type)\n\n @atomic.action_timer(\"gnocchi.delete_resource_type\")\n def delete_resource_type(self, name):\n \"\"\"Delete a resource type.\n\n :param name: Name of the resource type\n \"\"\"\n return self._clients.gnocchi().resource_type.delete(name)\n\n @atomic.action_timer(\"gnocchi.list_resource_type\")\n def list_resource_type(self):\n \"\"\"List resource types.\"\"\"\n return self._clients.gnocchi().resource_type.list()\n\n @atomic.action_timer(\"gnocchi.get_status\")\n def get_status(self, detailed=False):\n \"\"\"Get the status of measurements processing.\n\n :param detailed: Get detailed status.\n \"\"\"\n return self._clients.gnocchi().status.get(detailed)\n" }, { "alpha_fraction": 0.7009153962135315, "alphanum_fraction": 0.7270689606666565, "avg_line_length": 32.24844741821289, "blob_id": "8017c1f93e257da5de173cc7604f04c825184187", "content_id": "c7e4c34ec5c6baff000de558da5d02891efb90e7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 5353, "license_type": "permissive", "max_line_length": 164, "num_lines": 161, "path": "/tox.ini", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "[tox]\nminversion = 3.1.1\nignore_basepython_conflict = true\nenvlist = py36,py37,py38,pep8\n\n[testenv]\nextras = {env:RALLY_EXTRAS:}\nsetenv = VIRTUAL_ENV={envdir}\n LANG=en_US.UTF-8\n LANGUAGE=en_US:en\n LC_ALL=C\n PYTHONHASHSEED=0\n TOX_ENV_NAME={envname}\nallowlist_externals = find\n rm\n make\ndeps =\n -c ./upper-constraints.txt\n -r{toxinidir}/requirements.txt\n -r{toxinidir}/test-requirements.txt\nusedevelop = True\ncommands =\n find . 
-type f -name \"*.pyc\" -delete\n python3 {toxinidir}/tests/ci/pytest_launcher.py tests/unit --posargs={posargs}\ndistribute = false\nbasepython = python3\npassenv =\n http_proxy\n HTTP_PROXY\n https_proxy\n HTTPS_PROXY\n no_proxy\n NO_PROXY\n REQUESTS_CA_BUNDLE\n HOME\n\n[testenv:pep8]\ndeps = -r{toxinidir}/test-requirements.txt\nskip_install = true\ncommands = flake8\ndistribute = false\n\n[testenv:py36]\nbasepython = python3.6\n\n[testenv:py37]\nbasepython = python3.7\n\n[testenv:py38]\nbasepython = python3.8\n\n[testenv:py39]\nbasepython = python3.9\n\n[testenv:venv]\nbasepython = python3\ncommands = {posargs}\n\n[testenv:debug]\ncommands = oslo_debug_helper -t tests {posargs}\n\n[testenv:functional]\nbasepython = python3\nsitepackages = True\ncommands =\n find . -type f -name \"*.pyc\" -delete\n {toxinidir}/tests/ci/rally_functional_job.sh {posargs}\nallowlist_externals = find\n rm\n make\n {toxinidir}/tests/ci/rally_functional_job.sh\n\n[testenv:cover]\ncommands = {toxinidir}/tests/ci/cover.sh {posargs}\nallowlist_externals = {toxinidir}/tests/ci/cover.sh\n\n[testenv:genconfig]\nbasepython = python3\ncommands =\n oslo-config-generator --config-file etc/rally/rally-config-generator.conf\n\n[testenv:requirements]\ndeps =\n # do not use upper-constraints file\n requests[security]\n -r{toxinidir}/requirements.txt\ncommands = python {toxinidir}/tests/ci/sync_requirements.py {posargs}\n\n[flake8]\n# H105 Don't use author tags\n# E731 do not assign a lambda expression, use a def\n# W503 line break before binary operator\nignore = H105,E731,W503\nshow-source = true\nexclude=.venv,.git,.tox,dist,*lib/python*,*egg,tools,build,setup.py\n\n[flake8:local-plugins]\nextension =\n N301 = checks:check_assert_methods_from_mock\n N310 = checks:check_import_of_logging\n N311 = checks:check_import_of_config\n N312 = checks:no_use_conf_debug_check\n N313 = checks:check_log_warn\n N320 = checks:assert_true_instance\n N321 = checks:assert_equal_type\n N322 = 
checks:assert_equal_none\n N323 = checks:assert_true_or_false_with_in\n N324 = checks:assert_equal_in\n N326 = checks:assert_equal_true_or_false\n N340 = checks:check_no_direct_rally_objects_import\n N341 = checks:check_no_oslo_deprecated_import\n N342 = checks:check_opts_import_path\n N350 = checks:check_quotes\n N351 = checks:check_no_constructor_data_struct\n N352 = checks:check_dict_formatting_in_string\n N354 = checks:check_raises\n N355 = checks:check_old_type_class\n N356 = checks:check_datetime_alias\n N360 = checks:check_db_imports_in_cli\n N361 = checks:check_objects_imports_in_cli\npaths = ./tests/hacking\n\n[testenv:bindep]\n# Do not install any requirements. We want this to be fast and work even if\n# system dependencies are missing, since it's used to tell you what system\n# dependencies are missing! This also means that bindep must be installed\n# separately, outside of the requirements files.\ndeps = bindep\ncommands = bindep\n\n[testenv:self]\ncommands = {toxinidir}/tests/ci/rally_self_job.sh {toxinidir}/rally-jobs/self-rally.yaml\n\n[pytest]\nfilterwarnings =\n error\n ignore:invalid escape sequence:DeprecationWarning:.*subunit.*\n ignore:::.*netaddr.strategy.*\n ignore:the imp module is deprecated in favour of importlib.*:DeprecationWarning\n # we do not use anything inner from OptionParser, so we do not care about it's parent\n ignore:The frontend.OptionParser class will be replaced by a subclass of argparse.ArgumentParser in Docutils 0.21 or later.:DeprecationWarning:\n # we do not use Option directly, it is initialized by OptionParser by itself.\n # as soon as docutils team get rid of frontend.Option, they will also fix OptionParser\n ignore: The frontend.Option class will be removed in Docutils 0.21 or later.:DeprecationWarning:\n # raised by designateclient?!\n ignore:dns.hash module will be removed in future versions. 
Please use hashlib instead.:DeprecationWarning\n # should be fixed at rally framework (raised by functional job)\n ignore:.*EngineFacade is deprecated; please use oslo_db.sqlalchemy.enginefacade*:\n ignore:.*unclosed file <_io.TextIOWrapper name='/tmp/rally.log'::\n ignore:.*mysql_enable_ndb.*::\n ignore:.*distutils Version classes are deprecated.*::\n # pytest-cov\n ignore:The --rsyncdir command line argument and rsyncdirs config variable are deprecated.:DeprecationWarning:\n ignore:::.*requests.*\n # python 3.10\n ignore:The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives:DeprecationWarning:\n ignore:pkg_resources is deprecated as an API:DeprecationWarning:\n # python 3.8\n ignore:Deprecated call to `pkg_resources.declare_namespace*:DeprecationWarning:\n # python 3.7\n ignore:invalid escape sequence:DeprecationWarning:.*prettytable.*\n" }, { "alpha_fraction": 0.6240209937095642, "alphanum_fraction": 0.6276453733444214, "avg_line_length": 42.96337127685547, "blob_id": "6d29d816318acf0b8f5da02e13b549efe18ca218", "content_id": "1b4f20623a1e83d0362d781b4ad9917f99864358", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24004, "license_type": "permissive", "max_line_length": 79, "num_lines": 546, "path": "/rally_openstack/task/scenarios/manila/shares.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally import exceptions\nfrom rally.task import types\nfrom rally.task import utils as rally_utils\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.contexts.manila import consts as manila_consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.manila import utils\nfrom rally_openstack.task.scenarios.vm import utils as vm_utils\n\n\n\"\"\"Scenarios for Manila shares.\"\"\"\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"enum\", param_name=\"share_proto\",\n values=[\"NFS\", \"CIFS\", \"GLUSTERFS\", \"HDFS\", \"CEPHFS\"],\n case_insensitive=True, missed=False)\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"manila\"]},\n name=\"ManilaShares.create_and_delete_share\",\n platform=\"openstack\")\nclass CreateAndDeleteShare(utils.ManilaScenario):\n\n def run(self, share_proto, size=1, min_sleep=0, max_sleep=0, **kwargs):\n \"\"\"Create and delete a share.\n\n Optional 'min_sleep' and 'max_sleep' parameters allow the scenario\n to simulate a pause between share creation and deletion\n (of random duration from [min_sleep, max_sleep]).\n\n :param share_proto: share protocol, valid values are NFS, CIFS,\n GlusterFS and HDFS\n :param size: share size in GB, should be greater than 0\n :param min_sleep: minimum sleep time in seconds (non-negative)\n :param max_sleep: 
maximum sleep time in seconds (non-negative)\n :param kwargs: optional args to create a share\n \"\"\"\n share = self._create_share(\n share_proto=share_proto,\n size=size,\n **kwargs)\n self.sleep_between(min_sleep, max_sleep)\n self._delete_share(share)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\", fail_on_404_image=False)\[email protected](\"number\", param_name=\"port\", minval=1, maxval=65535,\n nullable=True, integer_only=True)\[email protected](\"external_network_exists\", param_name=\"floating_network\")\[email protected](\"required_services\", services=[consts.Service.MANILA,\n consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"manila\", \"nova\"],\n \"keypair@openstack\": {},\n \"allow_ssh@openstack\": None},\n name=\"ManilaShares.create_share_and_access_from_vm\",\n platform=\"openstack\")\nclass CreateShareAndAccessFromVM(utils.ManilaScenario, vm_utils.VMScenario):\n def run(self, image, flavor, username, size=1, password=None,\n floating_network=None, port=22,\n use_floating_ip=True, force_delete=False, max_log_length=None,\n **kwargs):\n \"\"\"Create a share and access it from a VM.\n\n - create NFS share\n - launch VM\n - authorize VM's fip to access the share\n - mount share iside the VM\n - write to share\n - delete VM\n - delete share\n\n :param size: share size in GB, should be greater than 0\n\n :param image: glance image name to use for the vm\n :param flavor: VM flavor name\n :param username: ssh username on server\n :param password: Password on SSH authentication\n :param floating_network: external network name, for floating ip\n :param port: ssh port for SSH connection\n :param use_floating_ip: bool, floating or fixed IP for SSH connection\n :param force_delete: whether to use force_delete for 
servers\n :param max_log_length: The number of tail nova console-log lines user\n would like to retrieve\n\n\n :param kwargs: optional args to create a share or a VM\n \"\"\"\n share_proto = \"nfs\"\n share = self._create_share(\n share_proto=share_proto,\n size=size,\n **kwargs)\n location = self._export_location(share)\n\n server, fip = self._boot_server_with_fip(\n image, flavor, use_floating_ip=use_floating_ip,\n floating_network=floating_network,\n key_name=self.context[\"user\"][\"keypair\"][\"name\"],\n userdata=\"#cloud-config\\npackages:\\n - nfs-common\",\n **kwargs)\n self._allow_access_share(share, \"ip\", fip[\"ip\"], \"rw\")\n mount_opt = \"-t nfs -o nfsvers=4.1,proto=tcp\"\n script = f\"sudo cloud-init status -w;\" \\\n f\"sudo mount {mount_opt} {location[0]} /mnt || exit 1;\" \\\n f\"sudo touch /mnt/testfile || exit 2\"\n\n command = {\n \"script_inline\": script,\n \"interpreter\": \"/bin/bash\"\n }\n try:\n rally_utils.wait_for_status(\n server,\n ready_statuses=[\"ACTIVE\"],\n update_resource=rally_utils.get_from_manager(),\n )\n\n code, out, err = self._run_command(\n fip[\"ip\"], port, username, password, command=command)\n if code:\n raise exceptions.ScriptError(\n \"Error running command %(command)s. 
\"\n \"Error %(code)s: %(error)s\" % {\n \"command\": command, \"code\": code, \"error\": err})\n except (exceptions.TimeoutException,\n exceptions.SSHTimeout):\n console_logs = self._get_server_console_output(server,\n max_log_length)\n LOG.debug(\"VM console logs:\\n%s\" % console_logs)\n raise\n\n finally:\n self._delete_server_with_fip(server, fip,\n force_delete=force_delete)\n self._delete_share(share)\n\n self.add_output(complete={\n \"title\": \"Script StdOut\",\n \"chart_plugin\": \"TextArea\",\n \"data\": str(out).split(\"\\n\")\n })\n if err:\n self.add_output(complete={\n \"title\": \"Script StdErr\",\n \"chart_plugin\": \"TextArea\",\n \"data\": err.split(\"\\n\")\n })\n\n\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"ManilaShares.list_shares\", platform=\"openstack\")\nclass ListShares(utils.ManilaScenario):\n\n def run(self, detailed=True, search_opts=None):\n \"\"\"Basic scenario for 'share list' operation.\n\n :param detailed: defines either to return detailed list of\n objects or not.\n :param search_opts: container of search opts such as\n \"name\", \"host\", \"share_type\", etc.\n \"\"\"\n self._list_shares(detailed=detailed, search_opts=search_opts)\n\n\[email protected](\"enum\", param_name=\"share_proto\",\n values=[\"NFS\", \"CIFS\", \"GLUSTERFS\", \"HDFS\", \"CEPHFS\"],\n case_insensitive=True)\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"manila\"]},\n name=\"ManilaShares.create_and_extend_share\",\n platform=\"openstack\")\nclass CreateAndExtendShare(utils.ManilaScenario):\n def run(self, share_proto, size=1, new_size=2, snapshot_id=None,\n description=None, metadata=None, share_network=None,\n share_type=None, is_public=False, availability_zone=None,\n 
share_group_id=None):\n \"\"\"Create and extend a share\n\n :param share_proto: share protocol for new share\n available values are NFS, CIFS, CephFS, GlusterFS and HDFS.\n :param size: size in GiB\n :param new_size: new size of the share in GiB\n :param snapshot_id: ID of the snapshot\n :param description: description of a share\n :param metadata: optional metadata to set on share creation\n :param share_network: either instance of ShareNetwork or text with ID\n :param share_type: either instance of ShareType or text with ID\n :param is_public: whether to set share as public or not.\n :param availability_zone: availability zone of the share\n :param share_group_id: ID of the share group to which the share\n should belong\n \"\"\"\n share = self._create_share(\n share_proto=share_proto,\n size=size,\n snapshot_id=snapshot_id,\n description=description,\n metadata=metadata,\n share_network=share_network,\n share_type=share_type,\n is_public=is_public,\n availability_zone=availability_zone,\n share_group_id=share_group_id\n )\n self._extend_share(share, new_size)\n\n\[email protected](\"enum\", param_name=\"share_proto\",\n values=[\"NFS\", \"CIFS\", \"GLUSTERFS\", \"HDFS\", \"CEPHFS\"],\n case_insensitive=True)\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"manila\"]},\n name=\"ManilaShares.create_and_shrink_share\",\n platform=\"openstack\")\nclass CreateAndShrinkShare(utils.ManilaScenario):\n def run(self, share_proto, size=2, new_size=1, snapshot_id=None,\n description=None, metadata=None, share_network=None,\n share_type=None, is_public=False, availability_zone=None,\n share_group_id=None):\n \"\"\"Create and shrink a share\n\n :param share_proto: share protocol for new share\n available values are NFS, CIFS, CephFS, GlusterFS and HDFS.\n :param size: size in GiB\n :param new_size: new size of the share in 
GiB\n :param snapshot_id: ID of the snapshot\n :param description: description of a share\n :param metadata: optional metadata to set on share creation\n :param share_network: either instance of ShareNetwork or text with ID\n :param share_type: either instance of ShareType or text with ID\n :param is_public: whether to set share as public or not.\n :param availability_zone: availability zone of the share\n :param share_group_id: ID of the share group to which the share\n should belong\n \"\"\"\n share = self._create_share(\n share_proto=share_proto,\n size=size,\n snapshot_id=snapshot_id,\n description=description,\n metadata=metadata,\n share_network=share_network,\n share_type=share_type,\n is_public=is_public,\n availability_zone=availability_zone,\n share_group_id=share_group_id\n )\n self._shrink_share(share, new_size)\n\n\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"manila\"]},\n name=\"ManilaShares.create_share_network_and_delete\",\n platform=\"openstack\")\nclass CreateShareNetworkAndDelete(utils.ManilaScenario):\n\n @logging.log_deprecated_args(\n \"The 'name' argument to create_and_delete_service will be ignored\",\n \"1.1.2\", [\"name\"], once=True)\n def run(self, neutron_net_id=None, neutron_subnet_id=None,\n nova_net_id=None, name=None, description=None):\n \"\"\"Creates share network and then deletes.\n\n :param neutron_net_id: ID of Neutron network\n :param neutron_subnet_id: ID of Neutron subnet\n :param nova_net_id: ID of Nova network\n :param description: share network description\n \"\"\"\n share_network = self._create_share_network(\n neutron_net_id=neutron_net_id,\n neutron_subnet_id=neutron_subnet_id,\n nova_net_id=nova_net_id,\n description=description,\n )\n self._delete_share_network(share_network)\n\n\[email protected](\"required_services\", 
services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"manila\"]},\n name=\"ManilaShares.create_share_network_and_list\",\n platform=\"openstack\")\nclass CreateShareNetworkAndList(utils.ManilaScenario):\n\n @logging.log_deprecated_args(\n \"The 'name' argument to create_and_delete_service will be ignored\",\n \"1.1.2\", [\"name\"], once=True)\n def run(self, neutron_net_id=None, neutron_subnet_id=None,\n nova_net_id=None, name=None, description=None,\n detailed=True, search_opts=None):\n \"\"\"Creates share network and then lists it.\n\n :param neutron_net_id: ID of Neutron network\n :param neutron_subnet_id: ID of Neutron subnet\n :param nova_net_id: ID of Nova network\n :param description: share network description\n :param detailed: defines either to return detailed list of\n objects or not.\n :param search_opts: container of search opts such as\n \"name\", \"nova_net_id\", \"neutron_net_id\", etc.\n \"\"\"\n self._create_share_network(\n neutron_net_id=neutron_net_id,\n neutron_subnet_id=neutron_subnet_id,\n nova_net_id=nova_net_id,\n description=description,\n )\n self._list_share_networks(\n detailed=detailed,\n search_opts=search_opts,\n )\n\n\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"ManilaShares.list_share_servers\",\n platform=\"openstack\")\nclass ListShareServers(utils.ManilaScenario):\n\n def run(self, search_opts=None):\n \"\"\"Lists share servers.\n\n Requires admin creds.\n\n :param search_opts: container of following search opts:\n \"host\", \"status\", \"share_network\" and \"project_id\".\n \"\"\"\n self._list_share_servers(search_opts=search_opts)\n\n\[email protected](\"enum\", param_name=\"share_proto\",\n values=[\"nfs\", \"cephfs\", \"cifs\", \"glusterfs\", \"hdfs\"],\n missed=False, 
case_insensitive=True)\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"manila\"]},\n name=\"ManilaShares.create_share_then_allow_and_deny_access\")\nclass CreateShareThenAllowAndDenyAccess(utils.ManilaScenario):\n def run(self, share_proto, access_type, access, access_level=\"rw\", size=1,\n snapshot_id=None, description=None, metadata=None,\n share_network=None, share_type=None, is_public=False,\n availability_zone=None, share_group_id=None):\n \"\"\"Create a share and allow and deny access to it\n\n :param share_proto: share protocol for new share\n available values are NFS, CIFS, CephFS, GlusterFS and HDFS.\n :param access_type: represents the access type (e.g: 'ip', 'domain'...)\n :param access: represents the object (e.g: '127.0.0.1'...)\n :param access_level: access level to the share (e.g: 'rw', 'ro')\n :param size: size in GiB\n :param new_size: new size of the share in GiB\n :param snapshot_id: ID of the snapshot\n :param description: description of a share\n :param metadata: optional metadata to set on share creation\n :param share_network: either instance of ShareNetwork or text with ID\n :param share_type: either instance of ShareType or text with ID\n :param is_public: whether to set share as public or not.\n :param availability_zone: availability zone of the share\n :param share_group_id: ID of the share group to which the share\n should belong\n \"\"\"\n share = self._create_share(\n share_proto=share_proto,\n size=size,\n snapshot_id=snapshot_id,\n description=description,\n metadata=metadata,\n share_network=share_network,\n share_type=share_type,\n is_public=is_public,\n availability_zone=availability_zone,\n share_group_id=share_group_id\n )\n access_result = self._allow_access_share(share, access_type, access,\n access_level)\n self._deny_access_share(share, 
access_result[\"id\"])\n\n\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"manila\"]},\n name=\"ManilaShares.create_security_service_and_delete\",\n platform=\"openstack\")\nclass CreateSecurityServiceAndDelete(utils.ManilaScenario):\n\n @logging.log_deprecated_args(\n \"The 'name' argument to create_and_delete_service will be ignored\",\n \"1.1.2\", [\"name\"], once=True)\n def run(self, security_service_type, dns_ip=None, server=None,\n domain=None, user=None, password=None,\n name=None, description=None):\n \"\"\"Creates security service and then deletes.\n\n :param security_service_type: security service type, permitted values\n are 'ldap', 'kerberos' or 'active_directory'.\n :param dns_ip: dns ip address used inside tenant's network\n :param server: security service server ip address or hostname\n :param domain: security service domain\n :param user: security identifier used by tenant\n :param password: password used by user\n :param description: security service description\n \"\"\"\n security_service = self._create_security_service(\n security_service_type=security_service_type,\n dns_ip=dns_ip,\n server=server,\n domain=domain,\n user=user,\n password=password,\n description=description,\n )\n self._delete_security_service(security_service)\n\n\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"manila\"]},\n name=\"ManilaShares.attach_security_service_to_share_network\",\n platform=\"openstack\")\nclass AttachSecurityServiceToShareNetwork(utils.ManilaScenario):\n\n def run(self, security_service_type=\"ldap\"):\n \"\"\"Attaches security service to share network.\n\n :param security_service_type: type of security service to use.\n Should be one of 
following: 'ldap', 'kerberos' or\n 'active_directory'.\n \"\"\"\n sn = self._create_share_network()\n ss = self._create_security_service(\n security_service_type=security_service_type)\n self._add_security_service_to_share_network(sn, ss)\n\n\[email protected](\"enum\", param_name=\"share_proto\",\n values=[\"NFS\", \"CIFS\", \"GLUSTERFS\", \"HDFS\", \"CEPHFS\"],\n case_insensitive=True)\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"manila\"]},\n name=\"ManilaShares.create_and_list_share\",\n platform=\"openstack\")\nclass CreateAndListShare(utils.ManilaScenario):\n\n def run(self, share_proto, size=1, min_sleep=0, max_sleep=0, detailed=True,\n **kwargs):\n \"\"\"Create a share and list all shares.\n\n Optional 'min_sleep' and 'max_sleep' parameters allow the scenario\n to simulate a pause between share creation and list\n (of random duration from [min_sleep, max_sleep]).\n\n :param share_proto: share protocol, valid values are NFS, CIFS,\n GlusterFS and HDFS\n :param size: share size in GB, should be greater than 0\n :param min_sleep: minimum sleep time in seconds (non-negative)\n :param max_sleep: maximum sleep time in seconds (non-negative)\n :param detailed: defines whether to get detailed list of shares or not\n :param kwargs: optional args to create a share\n \"\"\"\n self._create_share(share_proto=share_proto, size=size, **kwargs)\n self.sleep_between(min_sleep, max_sleep)\n self._list_shares(detailed=detailed)\n\n\[email protected](\"number\", param_name=\"sets\", minval=1, integer_only=True)\[email protected](\"number\", param_name=\"set_size\", minval=1, integer_only=True)\[email protected](\"number\", param_name=\"key_min_length\", minval=1, maxval=256,\n integer_only=True)\[email protected](\"number\", param_name=\"key_max_length\", minval=1, maxval=256,\n integer_only=True)\[email 
protected](\"number\", param_name=\"value_min_length\", minval=1, maxval=1024,\n integer_only=True)\[email protected](\"number\", param_name=\"value_max_length\", minval=1, maxval=1024,\n integer_only=True)\[email protected](\"required_services\", services=[consts.Service.MANILA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\",\n contexts=manila_consts.SHARES_CONTEXT_NAME)\[email protected](context={\"cleanup@openstack\": [\"manila\"]},\n name=\"ManilaShares.set_and_delete_metadata\",\n platform=\"openstack\")\nclass SetAndDeleteMetadata(utils.ManilaScenario):\n\n def run(self, sets=10, set_size=3, delete_size=3,\n key_min_length=1, key_max_length=256,\n value_min_length=1, value_max_length=1024):\n \"\"\"Sets and deletes share metadata.\n\n This requires a share to be created with the shares\n context. Additionally, ``sets * set_size`` must be greater\n than or equal to ``deletes * delete_size``.\n\n :param sets: how many set_metadata operations to perform\n :param set_size: number of metadata keys to set in each\n set_metadata operation\n :param delete_size: number of metadata keys to delete in each\n delete_metadata operation\n :param key_min_length: minimal size of metadata key to set\n :param key_max_length: maximum size of metadata key to set\n :param value_min_length: minimal size of metadata value to set\n :param value_max_length: maximum size of metadata value to set\n \"\"\"\n shares = self.context.get(\"tenant\", {}).get(\"shares\", [])\n share = shares[self.context[\"iteration\"] % len(shares)]\n\n keys = self._set_metadata(\n share=share,\n sets=sets,\n set_size=set_size,\n key_min_length=key_min_length,\n key_max_length=key_max_length,\n value_min_length=value_min_length,\n value_max_length=value_max_length)\n\n self._delete_metadata(share=share, keys=keys, delete_size=delete_size)\n" }, { "alpha_fraction": 0.5328983068466187, "alphanum_fraction": 0.5399673581123352, 
"avg_line_length": 30.169490814208984, "blob_id": "416706e0f8b446eff0ff09e68adf4053b50c7f1e", "content_id": "0c36c45ea386f7f795ff11fde94d5fb4d9caf74a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1839, "license_type": "permissive", "max_line_length": 78, "num_lines": 59, "path": "/rally_openstack/task/contexts/quotas/manila_quotas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nclass ManilaQuotas(object):\n \"\"\"Management of Manila quotas.\"\"\"\n\n QUOTAS_SCHEMA = {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\n \"shares\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"gigabytes\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"snapshots\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"snapshot_gigabytes\": {\n \"type\": \"integer\",\n \"minimum\": -1\n },\n \"share_networks\": {\n \"type\": \"integer\",\n \"minimum\": -1\n }\n }\n }\n\n def __init__(self, clients):\n self.clients = clients\n\n def update(self, tenant_id, **kwargs):\n self.clients.manila().quotas.update(tenant_id, **kwargs)\n\n def delete(self, tenant_id):\n self.clients.manila().quotas.delete(tenant_id)\n\n def get(self, tenant_id):\n response = self.clients.manila().quotas.get(tenant_id)\n return dict([(k, getattr(response, 
k))\n for k in self.QUOTAS_SCHEMA[\"properties\"]])\n" }, { "alpha_fraction": 0.5554447174072266, "alphanum_fraction": 0.5595586895942688, "avg_line_length": 39.10749816894531, "blob_id": "ad1dcfb8ba96f66b1da3af280618ce11e4589188", "content_id": "f6543b9a98b89ff8a38bfc472ed855287457004f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16043, "license_type": "permissive", "max_line_length": 79, "num_lines": 400, "path": "/tests/unit/task/contexts/manila/test_manila_share_networks.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nfrom unittest import mock\n\nimport ddt\n\nfrom rally import exceptions\nfrom rally_openstack.task.contexts.manila import consts\nfrom rally_openstack.task.contexts.manila import manila_share_networks\nfrom tests.unit import test\n\nMANILA_UTILS_PATH = (\n \"rally_openstack.task.scenarios.manila.utils.ManilaScenario.\")\n\nMOCK_USER_CREDENTIAL = mock.MagicMock()\n\n\nclass Fake(object):\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __getitem__(self, item):\n return getattr(self, item)\n\n def to_dict(self):\n return self.__dict__\n\n\[email protected]\nclass ShareNetworksTestCase(test.TestCase):\n TENANTS_AMOUNT = 3\n USERS_PER_TENANT = 4\n SECURITY_SERVICES = [\n {\"type\": ss_type,\n \"dns_ip\": \"fake_dns_ip_%s\" % ss_type,\n \"server\": \"fake_server_%s\" % ss_type,\n \"domain\": \"fake_domain_%s\" % ss_type,\n \"user\": \"fake_user_%s\" % ss_type,\n \"password\": \"fake_password_%s\" % ss_type,\n \"name\": \"fake_optional_name_%s\" % ss_type}\n for ss_type in (\"ldap\", \"kerberos\", \"active_directory\")\n ]\n\n def _get_context(self, use_security_services=False, networks_per_tenant=2,\n neutron_network_provider=True):\n tenants = {}\n for t_id in range(self.TENANTS_AMOUNT):\n tenants[str(t_id)] = {\"name\": str(t_id)}\n tenants[str(t_id)][\"networks\"] = []\n for i in range(networks_per_tenant):\n network = {\"id\": \"fake_net_id_%s\" % i}\n if neutron_network_provider:\n network[\"subnets\"] = [\"fake_subnet_id_of_net_%s\" % i]\n else:\n network[\"cidr\"] = \"101.0.5.0/24\"\n tenants[str(t_id)][\"networks\"].append(network)\n users = []\n for t_id in tenants.keys():\n for i in range(self.USERS_PER_TENANT):\n users.append({\n \"id\": i, \"tenant_id\": t_id,\n \"credential\": MOCK_USER_CREDENTIAL})\n context = {\n \"config\": {\n \"users\": {\n \"tenants\": self.TENANTS_AMOUNT,\n 
\"users_per_tenant\": self.USERS_PER_TENANT,\n \"random_user_choice\": False,\n },\n consts.SHARE_NETWORKS_CONTEXT_NAME: {\n \"use_share_networks\": True,\n \"share_networks\": [],\n },\n consts.SECURITY_SERVICES_CONTEXT_NAME: {\n \"security_services\": (\n self.SECURITY_SERVICES\n if use_security_services else [])\n },\n \"network\": {\n \"networks_per_tenant\": networks_per_tenant,\n \"start_cidr\": \"101.0.5.0/24\",\n },\n },\n \"admin\": {\n \"credential\": mock.MagicMock(),\n },\n \"task\": mock.MagicMock(),\n \"users\": users,\n \"tenants\": tenants,\n \"user_choice_method\": \"random\",\n }\n return context\n\n def setUp(self):\n super(self.__class__, self).setUp()\n self.ctxt_use_existing = {\n \"task\": mock.MagicMock(),\n \"config\": {\n \"existing_users\": {\"foo\": \"bar\"},\n consts.SHARE_NETWORKS_CONTEXT_NAME: {\n \"use_share_networks\": True,\n \"share_networks\": {\n \"tenant_1_id\": [\"sn_1_id\", \"sn_2_name\"],\n \"tenant_2_name\": [\"sn_3_id\", \"sn_4_name\", \"sn_5_id\"],\n },\n },\n },\n \"tenants\": {\n \"tenant_1_id\": {\"id\": \"tenant_1_id\", \"name\": \"tenant_1_name\"},\n \"tenant_2_id\": {\"id\": \"tenant_2_id\", \"name\": \"tenant_2_name\"},\n },\n \"users\": [\n {\"tenant_id\": \"tenant_1_id\", \"credential\": mock.MagicMock()},\n {\"tenant_id\": \"tenant_2_id\", \"credential\": mock.MagicMock()},\n ],\n }\n self.existing_sns = [\n Fake(id=\"sn_%s_id\" % i, name=\"sn_%s_name\" % i) for i in range(1, 6)\n ]\n\n def test_init(self):\n context = {\n \"task\": mock.MagicMock(),\n \"config\": {\n consts.SHARE_NETWORKS_CONTEXT_NAME: {\"foo\": \"bar\"},\n \"not_manila\": {\"not_manila_key\": \"not_manila_value\"},\n },\n }\n\n inst = manila_share_networks.ShareNetworks(context)\n\n self.assertEqual(\n {\"foo\": \"bar\", \"share_networks\": {}, \"use_share_networks\": False},\n inst.config)\n\n def test_setup_share_networks_disabled(self):\n ctxt = {\n \"task\": mock.MagicMock(),\n \"config\": {\n consts.SHARE_NETWORKS_CONTEXT_NAME: {\n 
\"use_share_networks\": False,\n },\n },\n consts.SHARE_NETWORKS_CONTEXT_NAME: {},\n }\n inst = manila_share_networks.ShareNetworks(ctxt)\n\n expected_ctxt = copy.deepcopy(inst.context)\n\n inst.setup()\n\n self.assertEqual(expected_ctxt, inst.context)\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n @mock.patch(MANILA_UTILS_PATH + \"_list_share_networks\")\n def test_setup_use_existing_share_networks(\n self, mock_manila_scenario__list_share_networks, mock_clients):\n existing_sns = self.existing_sns\n expected_ctxt = copy.deepcopy(self.ctxt_use_existing)\n inst = manila_share_networks.ShareNetworks(self.ctxt_use_existing)\n mock_manila_scenario__list_share_networks.return_value = (\n self.existing_sns)\n expected_ctxt.update({\n \"delete_share_networks\": False,\n \"tenants\": {\n \"tenant_1_id\": {\n \"id\": \"tenant_1_id\",\n \"name\": \"tenant_1_name\",\n consts.SHARE_NETWORKS_CONTEXT_NAME: {\n \"share_networks\": [\n sn.to_dict() for sn in existing_sns[0:2]],\n },\n },\n \"tenant_2_id\": {\n \"id\": \"tenant_2_id\",\n \"name\": \"tenant_2_name\",\n consts.SHARE_NETWORKS_CONTEXT_NAME: {\n \"share_networks\": [\n sn.to_dict() for sn in existing_sns[2:5]],\n },\n },\n }\n })\n\n inst.setup()\n\n self.assertEqual(expected_ctxt[\"task\"], inst.context.get(\"task\"))\n self.assertEqual(expected_ctxt[\"config\"], inst.context.get(\"config\"))\n self.assertEqual(expected_ctxt[\"users\"], inst.context.get(\"users\"))\n self.assertFalse(\n inst.context.get(consts.SHARE_NETWORKS_CONTEXT_NAME, {}).get(\n \"delete_share_networks\"))\n self.assertEqual(expected_ctxt[\"tenants\"], inst.context.get(\"tenants\"))\n\n def test_setup_use_existing_share_networks_tenant_not_found(self):\n ctxt = copy.deepcopy(self.ctxt_use_existing)\n ctxt.update({\"tenants\": {}})\n inst = manila_share_networks.ShareNetworks(ctxt)\n\n self.assertRaises(exceptions.ContextSetupFailure, inst.setup)\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n 
@mock.patch(MANILA_UTILS_PATH + \"_list_share_networks\")\n def test_setup_use_existing_share_networks_sn_not_found(\n self, mock_manila_scenario__list_share_networks, mock_clients):\n ctxt = copy.deepcopy(self.ctxt_use_existing)\n ctxt[\"config\"][consts.SHARE_NETWORKS_CONTEXT_NAME][\n \"share_networks\"] = {\"tenant_1_id\": [\"foo\"]}\n inst = manila_share_networks.ShareNetworks(ctxt)\n mock_manila_scenario__list_share_networks.return_value = (\n self.existing_sns)\n\n self.assertRaises(exceptions.ContextSetupFailure, inst.setup)\n\n def test_setup_use_existing_share_networks_with_empty_list(self):\n ctxt = copy.deepcopy(self.ctxt_use_existing)\n ctxt[\"config\"][consts.SHARE_NETWORKS_CONTEXT_NAME][\n \"share_networks\"] = {}\n inst = manila_share_networks.ShareNetworks(ctxt)\n\n self.assertRaises(exceptions.ContextSetupFailure, inst.setup)\n\n @ddt.data(True, False)\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n @mock.patch(MANILA_UTILS_PATH + \"_create_share_network\")\n @mock.patch(MANILA_UTILS_PATH + \"_add_security_service_to_share_network\")\n def test_setup_autocreate_share_networks_with_security_services(\n self,\n neutron,\n mock_manila_scenario__add_security_service_to_share_network,\n mock_manila_scenario__create_share_network,\n mock_clients):\n networks_per_tenant = 2\n ctxt = self._get_context(\n networks_per_tenant=networks_per_tenant,\n neutron_network_provider=neutron,\n use_security_services=True,\n )\n inst = manila_share_networks.ShareNetworks(ctxt)\n for tenant_id in list(ctxt[\"tenants\"].keys()):\n inst.context[\"tenants\"][tenant_id][\n consts.SECURITY_SERVICES_CONTEXT_NAME] = {\n \"security_services\": [\n Fake(id=\"fake_id\").to_dict() for i in (1, 2, 3)\n ]\n }\n\n inst.setup()\n\n self.assertEqual(ctxt[\"task\"], inst.context.get(\"task\"))\n self.assertEqual(ctxt[\"config\"], inst.context.get(\"config\"))\n self.assertEqual(ctxt[\"users\"], inst.context.get(\"users\"))\n self.assertEqual(ctxt[\"tenants\"], 
inst.context.get(\"tenants\"))\n mock_add_security_service_to_share_network = (\n mock_manila_scenario__add_security_service_to_share_network)\n mock_add_security_service_to_share_network.assert_has_calls([\n mock.call(mock.ANY, mock.ANY)\n for _ in range(\n self.TENANTS_AMOUNT * networks_per_tenant\n * len(self.SECURITY_SERVICES))])\n if neutron:\n sn_args = {\n \"neutron_net_id\": mock.ANY,\n \"neutron_subnet_id\": mock.ANY,\n }\n else:\n sn_args = {\"nova_net_id\": mock.ANY}\n expected_calls = [\n mock.call(**sn_args),\n mock.call().to_dict(),\n mock.ANY,\n mock.ANY,\n mock.ANY,\n ]\n mock_manila_scenario__create_share_network.assert_has_calls(\n expected_calls * (self.TENANTS_AMOUNT * networks_per_tenant))\n mock_clients.assert_has_calls([mock.call(MOCK_USER_CREDENTIAL)\n for i in range(self.TENANTS_AMOUNT)])\n\n @ddt.data(True, False)\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n @mock.patch(MANILA_UTILS_PATH + \"_create_share_network\")\n @mock.patch(MANILA_UTILS_PATH + \"_add_security_service_to_share_network\")\n def test_setup_autocreate_share_networks_wo_security_services(\n self,\n neutron,\n mock_manila_scenario__add_security_service_to_share_network,\n mock_manila_scenario__create_share_network,\n mock_clients):\n networks_per_tenant = 2\n ctxt = self._get_context(\n networks_per_tenant=networks_per_tenant,\n neutron_network_provider=neutron,\n )\n inst = manila_share_networks.ShareNetworks(ctxt)\n\n inst.setup()\n\n self.assertEqual(ctxt[\"task\"], inst.context.get(\"task\"))\n self.assertEqual(ctxt[\"config\"], inst.context.get(\"config\"))\n self.assertEqual(ctxt[\"users\"], inst.context.get(\"users\"))\n self.assertEqual(ctxt[\"tenants\"], inst.context.get(\"tenants\"))\n self.assertFalse(\n mock_manila_scenario__add_security_service_to_share_network.called)\n if neutron:\n sn_args = {\n \"neutron_net_id\": mock.ANY,\n \"neutron_subnet_id\": mock.ANY,\n }\n else:\n sn_args = {\"nova_net_id\": mock.ANY}\n expected_calls = 
[mock.call(**sn_args), mock.call().to_dict()]\n mock_manila_scenario__create_share_network.assert_has_calls(\n expected_calls * (self.TENANTS_AMOUNT * networks_per_tenant))\n mock_clients.assert_has_calls([mock.call(MOCK_USER_CREDENTIAL)\n for i in range(self.TENANTS_AMOUNT)])\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n @mock.patch(MANILA_UTILS_PATH + \"_create_share_network\")\n @mock.patch(MANILA_UTILS_PATH + \"_add_security_service_to_share_network\")\n def test_setup_autocreate_share_networks_wo_networks(\n self,\n mock_manila_scenario__add_security_service_to_share_network,\n mock_manila_scenario__create_share_network,\n mock_clients):\n ctxt = self._get_context(networks_per_tenant=0)\n inst = manila_share_networks.ShareNetworks(ctxt)\n\n inst.setup()\n\n self.assertEqual(ctxt[\"task\"], inst.context.get(\"task\"))\n self.assertEqual(ctxt[\"config\"], inst.context.get(\"config\"))\n self.assertEqual(ctxt[\"users\"], inst.context.get(\"users\"))\n self.assertEqual(ctxt[\"tenants\"], inst.context.get(\"tenants\"))\n self.assertFalse(\n mock_manila_scenario__add_security_service_to_share_network.called)\n expected_calls = [mock.call(), mock.call().to_dict()]\n mock_manila_scenario__create_share_network.assert_has_calls(\n expected_calls * self.TENANTS_AMOUNT)\n mock_clients.assert_has_calls([mock.call(MOCK_USER_CREDENTIAL)\n for i in range(self.TENANTS_AMOUNT)])\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n @mock.patch(MANILA_UTILS_PATH + \"_delete_share_network\")\n @mock.patch(MANILA_UTILS_PATH + \"_list_share_servers\")\n @mock.patch(MANILA_UTILS_PATH + \"_list_share_networks\")\n def test_cleanup_used_existing_share_networks(\n self,\n mock_manila_scenario__list_share_networks,\n mock_manila_scenario__list_share_servers,\n mock_manila_scenario__delete_share_network,\n mock_clients):\n inst = manila_share_networks.ShareNetworks(self.ctxt_use_existing)\n mock_manila_scenario__list_share_networks.return_value = (\n 
self.existing_sns)\n inst.setup()\n\n inst.cleanup()\n\n self.assertFalse(mock_manila_scenario__list_share_servers.called)\n self.assertFalse(mock_manila_scenario__delete_share_network.called)\n self.assertEqual(2, mock_clients.call_count)\n for user in self.ctxt_use_existing[\"users\"]:\n self.assertIn(mock.call(user[\"credential\"]),\n mock_clients.mock_calls)\n\n @mock.patch(\"rally_openstack.task.contexts.manila.manila_share_networks.\"\n \"resource_manager.cleanup\")\n def test_cleanup_autocreated_share_networks(self, mock_cleanup):\n task_id = \"task\"\n ctxt = {\n \"config\": {\"manila_share_networks\": {\n \"use_share_networks\": True}},\n \"users\": [mock.Mock()],\n \"task\": {\"uuid\": task_id}}\n\n inst = manila_share_networks.ShareNetworks(ctxt)\n\n inst.cleanup()\n\n mock_cleanup.assert_called_once_with(\n names=[\"manila.share_networks\"],\n users=ctxt[\"users\"],\n superclass=manila_share_networks.ShareNetworks,\n task_id=task_id)\n" }, { "alpha_fraction": 0.6159299612045288, "alphanum_fraction": 0.6209341287612915, "avg_line_length": 40.344825744628906, "blob_id": "043d3ce0031508fecf3782491f95466be35f9cc0", "content_id": "c89212474ba7e63beb719f7d6fad5bc3328e0563", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2398, "license_type": "permissive", "max_line_length": 79, "num_lines": 58, "path": "/rally_openstack/task/contexts/murano/murano_environments.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.murano import utils as murano_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"murano_environments\", platform=\"openstack\", order=402)\nclass EnvironmentGenerator(context.OpenStackContext):\n \"\"\"Context class for creating murano environments.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"environments_per_tenant\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n },\n \"required\": [\"environments_per_tenant\"],\n \"additionalProperties\": False\n }\n\n def setup(self):\n for user, tenant_id in self._iterate_per_tenants():\n self.context[\"tenants\"][tenant_id][\"environments\"] = []\n for i in range(self.config[\"environments_per_tenant\"]):\n murano_util = murano_utils.MuranoScenario(\n {\"user\": user,\n \"task\": self.context[\"task\"],\n \"owner_id\": self.context[\"owner_id\"],\n \"config\": self.context[\"config\"]})\n env = murano_util._create_environment()\n self.context[\"tenants\"][tenant_id][\"environments\"].append(env)\n\n def cleanup(self):\n resource_manager.cleanup(names=[\"murano.environments\"],\n users=self.context.get(\"users\", []),\n superclass=murano_utils.MuranoScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.6222086548805237, "alphanum_fraction": 
0.6243499517440796, "avg_line_length": 41.18064498901367, "blob_id": "a85b0f27c06a565d543737d083625d47699e99b7", "content_id": "780b7cb8b330903bf70614a629e78f3f7f367b7d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6538, "license_type": "permissive", "max_line_length": 78, "num_lines": 155, "path": "/tests/unit/common/services/barbican/test_secrets.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.common.services.key_manager import barbican\nfrom tests.unit import test\n\n\nclass BarbicanServiceTestCase(test.TestCase):\n def setUp(self):\n super(BarbicanServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.name_generator = mock.MagicMock()\n self.service = barbican.BarbicanService(\n self.clients,\n name_generator=self.name_generator)\n\n def atomic_actions(self):\n return self.service._atomic_actions\n\n def test__list_secrets(self):\n self.assertEqual(\n self.service.list_secrets(),\n self.service._clients.barbican().secrets.list.return_value\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"barbican.list_secrets\")\n\n def test__create_secret(self):\n self.assertEqual(\n self.service.create_secret(),\n self.service._clients.barbican().secrets.create(\n name=\"fake_secret\", payload=\"rally_data\")\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"barbican.create_secret\")\n\n def test__get_secret(self):\n self.service.get_secret(\"fake_secret\")\n self.service._clients.barbican().secrets.get \\\n .assert_called_once_with(\"fake_secret\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"barbican.get_secret\")\n\n def test__delete_secret(self):\n self.service.delete_secret(\"fake_secret\")\n self.service._clients.barbican().secrets.delete \\\n .assert_called_once_with(\"fake_secret\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"barbican.delete_secret\")\n\n def test__list_containers(self):\n self.assertEqual(\n self.service.list_container(),\n self.service._clients.barbican().containers.list.return_value)\n self._test_atomic_action_timer(\n self.atomic_actions(), \"barbican.list_container\")\n\n def test__container_delete(self):\n self.service.container_delete(\"fake_container\")\n 
self.service._clients.barbican().containers.delete \\\n .assert_called_once_with(\"fake_container\")\n self._test_atomic_action_timer(\n self.atomic_actions(), \"barbican.container_delete\")\n\n def test__container_create(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"container\")\n self.service.container_create()\n self.service._clients.barbican().containers.create \\\n .assert_called_once_with(name=\"container\", secrets=None)\n\n def test__create_rsa_container(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"container\")\n self.service.create_rsa_container()\n self.service._clients.barbican().containers.create_rsa \\\n .assert_called_once_with(\n name=\"container\", private_key=None,\n private_key_passphrase=None, public_key=None)\n\n def test__create_generate_container(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"container\")\n self.service.create_certificate_container()\n self.service._clients.barbican().containers \\\n .create_certificate.assert_called_once_with(\n certificate=None, intermediates=None,\n name=\"container\", private_key=None,\n private_key_passphrase=None)\n\n def test__list_orders(self):\n self.assertEqual(\n self.service.orders_list(),\n self.service._clients.barbican().orders.list.return_value)\n self._test_atomic_action_timer(\n self.atomic_actions(), \"barbican.orders_list\")\n\n def test__orders_get(self):\n self.service.orders_get(\"fake_order\")\n self.service._clients.barbican().orders.get \\\n .assert_called_once_with(\"fake_order\")\n\n def test__orders_delete(self):\n self.service.orders_delete(\"fake_order\")\n self.service._clients.barbican().orders.delete \\\n .assert_called_once_with(\"fake_order\")\n self._test_atomic_action_timer(\n self.atomic_actions(), \"barbican.orders_delete\")\n\n def test__create_key(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"key\")\n self.service.create_key()\n 
self.service._clients.barbican().orders.create_key \\\n .assert_called_once_with(\n name=\"key\", algorithm=\"aes\", bit_length=256, mode=None,\n payload_content_type=None, expiration=None)\n self._test_atomic_action_timer(\n self.atomic_actions(), \"barbican.create_key\")\n\n def test__create_asymmetric(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"key\")\n self.service.create_asymmetric()\n self.service._clients.barbican().orders.create_asymmetric \\\n .assert_called_once_with(\n algorithm=\"aes\", bit_length=256, expiration=None, name=\"key\",\n pass_phrase=None, payload_content_type=None)\n self._test_atomic_action_timer(\n self.atomic_actions(), \"barbican.create_asymmetric\")\n\n def test_create_certificate(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"key\")\n self.service.create_certificate()\n self.service._clients.barbican().orders.create_certificate \\\n .assert_called_once_with(\n name=\"key\", request_type=None, subject_dn=None,\n source_container_ref=None, ca_id=None, profile=None,\n request_data=None)\n self._test_atomic_action_timer(\n self.atomic_actions(), \"barbican.create_certificate\")\n" }, { "alpha_fraction": 0.5700796842575073, "alphanum_fraction": 0.5748008489608765, "avg_line_length": 33.581634521484375, "blob_id": "5d5ad5b5aa4036d64461765cabc644f22125779d", "content_id": "3af19c6e818d8fc832b154e30d87e54ec0dd8a04", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3389, "license_type": "permissive", "max_line_length": 78, "num_lines": 98, "path": "/tests/unit/task/contexts/network/test_routers.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017: Orange\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.network import routers as router_context\nfrom rally_openstack.task.scenarios.neutron import utils as neutron_utils\n\nfrom tests.unit import test\n\nSCN = \"rally_openstack.task.scenarios\"\nCTX = \"rally_openstack.task.contexts.network.routers\"\n\n\nclass RouterTestCase(test.ScenarioTestCase):\n\n def _gen_tenants(self, count):\n tenants = {}\n for id_ in range(count):\n tenants[str(id_)] = {\"name\": str(id_)}\n return tenants\n\n def test__init__default(self):\n self.context.update({\n \"config\": {\n \"router\": {\n \"routers_per_tenant\": 1,\n }\n }\n })\n context = router_context.Router(self.context)\n self.assertEqual(context.config[\"routers_per_tenant\"], 1)\n\n @mock.patch(\"%s.neutron.utils.NeutronScenario._create_router\" % SCN,\n return_value={\"id\": \"uuid\"})\n def test_setup(self, mock_neutron_scenario__create_router):\n tenants_count = 2\n users_per_tenant = 3\n routers_per_tenant = 2\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for id_ in tenants.keys():\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": id_,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": 2,\n \"users_per_tenant\": 3,\n \"concurrent\": 2,\n },\n \"router\": {\n \"routers_per_tenant\": routers_per_tenant,\n }\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n new_context = copy.deepcopy(self.context)\n for id_ in 
tenants.keys():\n new_context[\"tenants\"][id_].setdefault(\"routers\", [])\n for i in range(routers_per_tenant):\n new_context[\"tenants\"][id_][\"routers\"].append({\"id\": \"uuid\"})\n\n routers_ctx = router_context.Router(self.context)\n routers_ctx.setup()\n self.assertEqual(new_context, self.context)\n\n @mock.patch(\"%s.resource_manager.cleanup\" % CTX)\n def test_cleanup(self, mock_cleanup):\n self.context.update({\"users\": mock.MagicMock()})\n routers_ctx = router_context.Router(self.context)\n routers_ctx.cleanup()\n mock_cleanup.assert_called_once_with(\n names=[\"neutron.router\"],\n users=self.context[\"users\"],\n superclass=neutron_utils.NeutronScenario,\n task_id=self.context[\"owner_id\"])\n" }, { "alpha_fraction": 0.5496119856834412, "alphanum_fraction": 0.5526607632637024, "avg_line_length": 39.53932571411133, "blob_id": "6410d70185d90566ecefd646a4113453604ec962", "content_id": "55d409e7ba81859d1055e26e09181b339c1947a9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3608, "license_type": "permissive", "max_line_length": 79, "num_lines": 89, "path": "/rally_openstack/task/contexts/manila/manila_security_services.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts as rally_consts\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.contexts.manila import consts\nfrom rally_openstack.task.scenarios.manila import utils as manila_utils\n\n\nCONF = cfg.CONF\nCONTEXT_NAME = consts.SECURITY_SERVICES_CONTEXT_NAME\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=CONTEXT_NAME, platform=\"openstack\", order=445)\nclass SecurityServices(context.OpenStackContext):\n \"\"\"This context creates 'security services' for Manila project.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": rally_consts.JSON_SCHEMA,\n \"properties\": {\n \"security_services\": {\n \"type\": \"array\",\n \"description\":\n \"It is expected to be list of dicts with data for creation\"\n \" of security services.\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\"type\": {\"enum\": [\"active_directory\",\n \"kerberos\", \"ldap\"]}},\n \"required\": [\"type\"],\n \"additionalProperties\": True,\n \"description\":\n \"Data for creation of security services. \\n \"\n \"Example:\\n\\n\"\n \" .. 
code-block:: json\\n\\n\"\n \" {'type': 'LDAP', 'dns_ip': 'foo_ip', \\n\"\n \" 'server': 'bar_ip', 'domain': 'quuz_domain',\\n\"\n \" 'user': 'ololo', 'password': 'fake_password'}\\n\"\n }\n },\n },\n \"additionalProperties\": False\n }\n DEFAULT_CONFIG = {\n \"security_services\": [],\n }\n\n def setup(self):\n for user, tenant_id in (self._iterate_per_tenants(\n self.context.get(\"users\", []))):\n self.context[\"tenants\"][tenant_id][CONTEXT_NAME] = {\n \"security_services\": [],\n }\n if self.config[\"security_services\"]:\n manila_scenario = manila_utils.ManilaScenario({\n \"task\": self.task,\n \"owner_id\": self.context[\"owner_id\"],\n \"user\": user\n })\n for ss in self.config[\"security_services\"]:\n inst = manila_scenario._create_security_service(\n **ss).to_dict()\n self.context[\"tenants\"][tenant_id][CONTEXT_NAME][\n \"security_services\"].append(inst)\n\n def cleanup(self):\n resource_manager.cleanup(\n names=[\"manila.security_services\"],\n users=self.context.get(\"users\", []),\n superclass=manila_utils.ManilaScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.5578015446662903, "alphanum_fraction": 0.5650206804275513, "avg_line_length": 41.929752349853516, "blob_id": "1f2163dd913321eedecd21447e06985621157c82", "content_id": "ae5809d1d2be70cf9529f76551bcf6e1b6bb4916", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10389, "license_type": "permissive", "max_line_length": 78, "num_lines": 242, "path": "/tests/unit/task/scenarios/glance/test_images.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\nfrom rally_openstack.task.scenarios.glance import images\nfrom tests.unit import fakes\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.scenarios.glance.images\"\nGLANCE_V2_PATH = (\"rally_openstack.common.services.image.glance_v2.\"\n \"GlanceV2Service\")\n\n\nclass GlanceBasicTestCase(test.ScenarioTestCase):\n\n def get_test_context(self):\n context = super(GlanceBasicTestCase, self).get_test_context()\n context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake_tenant_id\",\n \"name\": \"fake_tenant_name\"}\n })\n return context\n\n def setUp(self):\n super(GlanceBasicTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.image.image.Image\")\n self.addCleanup(patch.stop)\n self.mock_image = patch.start()\n\n def test_create_and_list_image(self):\n image_service = self.mock_image.return_value\n fake_image = mock.Mock(id=1, name=\"img_2\")\n image_service.create_image.return_value = fake_image\n image_service.list_images.return_value = [\n mock.Mock(id=0, name=\"img_1\"),\n fake_image,\n mock.Mock(id=2, name=\"img_3\")]\n properties = {\"fakeprop\": \"fake\"}\n call_args = {\"container_format\": \"cf\",\n \"image_location\": \"url\",\n \"disk_format\": \"df\",\n \"visibility\": \"vs\",\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"properties\": properties}\n # Positive case\n 
images.CreateAndListImage(self.context).run(\n \"cf\", \"url\", \"df\", \"vs\", 0, 0, properties)\n image_service.create_image.assert_called_once_with(**call_args)\n\n # Negative case: image isn't created\n image_service.create_image.return_value = None\n self.assertRaises(exceptions.RallyAssertionError,\n images.CreateAndListImage(self.context).run,\n \"cf\", \"url\", \"df\", \"vs\", 0, 0, properties)\n image_service.create_image.assert_called_with(**call_args)\n\n # Negative case: created image n ot in the list of available images\n image_service.create_image.return_value = mock.Mock(\n id=12, name=\"img_nameN\")\n self.assertRaises(exceptions.RallyAssertionError,\n images.CreateAndListImage(self.context).run,\n \"cf\", \"url\", \"df\", \"vs\", 0, 0, properties)\n image_service.create_image.assert_called_with(**call_args)\n image_service.list_images.assert_called_with()\n\n def test_list_images(self):\n image_service = self.mock_image.return_value\n\n images.ListImages(self.context).run()\n image_service.list_images.assert_called_once_with()\n\n def test_create_and_delete_image(self):\n image_service = self.mock_image.return_value\n\n fake_image = fakes.FakeImage(id=1, name=\"imagexxx\")\n image_service.create_image.return_value = fake_image\n properties = {\"fakeprop\": \"fake\"}\n call_args = {\"container_format\": \"cf\",\n \"image_location\": \"url\",\n \"disk_format\": \"df\",\n \"visibility\": \"vs\",\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"properties\": properties}\n\n images.CreateAndDeleteImage(self.context).run(\n \"cf\", \"url\", \"df\", \"vs\", 0, 0, properties)\n\n image_service.create_image.assert_called_once_with(**call_args)\n image_service.delete_image.assert_called_once_with(fake_image.id)\n\n def test_create_and_get_image(self):\n image_service = self.mock_image.return_value\n\n fake_image = fakes.FakeImage(id=1, name=\"img_name1\")\n image_service.create_image.return_value = fake_image\n fake_image_info = fakes.FakeImage(id=1, 
name=\"img_name1\",\n status=\"active\")\n image_service.get_image.return_value = fake_image_info\n properties = {\"fakeprop\": \"fake\"}\n call_args = {\"container_format\": \"cf\",\n \"image_location\": \"url\",\n \"disk_format\": \"df\",\n \"visibility\": \"vs\",\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"properties\": properties}\n\n # Positive case\n images.CreateAndGetImage(self.context).run(\n \"cf\", \"url\", \"df\", \"vs\", 0, 0, properties)\n image_service.create_image.assert_called_once_with(**call_args)\n image_service.get_image.assert_called_once_with(fake_image)\n\n # Negative case: image isn't created\n image_service.create_image.reset_mock()\n image_service.create_image.return_value = None\n self.assertRaises(exceptions.RallyAssertionError,\n images.CreateAndGetImage(self.context).run,\n \"cf\", \"url\", \"df\", \"vs\", 0, 0, properties)\n image_service.create_image.assert_called_with(**call_args)\n\n # Negative case: image obtained in _get_image not the created image\n image_service.create_image.reset_mock()\n image_service.get_image.reset_mock()\n image_service.create_image.return_value = fakes.FakeImage(\n id=12, name=\"img_nameN\")\n self.assertRaises(exceptions.RallyAssertionError,\n images.CreateAndGetImage(self.context).run,\n \"cf\", \"url\", \"df\", \"vs\", 0, 0, properties)\n image_service.create_image.assert_called_with(**call_args)\n image_service.get_image.assert_called_with(\n image_service.create_image.return_value)\n\n def test_create_and_download_image(self):\n image_service = self.mock_image.return_value\n\n fake_image = fakes.FakeImage()\n image_service.create_image.return_value = fake_image\n properties = {\"fakeprop\": \"fake\"}\n call_args = {\"container_format\": \"cf\",\n \"image_location\": \"url\",\n \"disk_format\": \"df\",\n \"visibility\": \"vs\",\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"properties\": properties}\n\n images.CreateAndDownloadImage(self.context).run(\n \"cf\", \"url\", \"df\", \"vs\", 0, 0, 
properties=properties)\n\n image_service.create_image.assert_called_once_with(**call_args)\n image_service.download_image.assert_called_once_with(fake_image.id)\n\n @mock.patch(\"%s.CreateImageAndBootInstances._boot_servers\" % BASE)\n def test_create_image_and_boot_instances(self, mock_boot_servers):\n image_service = self.mock_image.return_value\n\n fake_image = fakes.FakeImage()\n fake_servers = [mock.Mock() for i in range(5)]\n image_service.create_image.return_value = fake_image\n mock_boot_servers.return_value = fake_servers\n boot_server_kwargs = {\"fakeserverarg\": \"f\"}\n properties = {\"fakeprop\": \"fake\"}\n call_args = {\"container_format\": \"cf\",\n \"image_location\": \"url\",\n \"disk_format\": \"df\",\n \"visibility\": \"vs\",\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"properties\": properties}\n\n images.CreateImageAndBootInstances(self.context).run(\n \"cf\", \"url\", \"df\", \"fid\", 5, visibility=\"vs\", min_disk=0,\n min_ram=0, properties=properties,\n boot_server_kwargs=boot_server_kwargs)\n image_service.create_image.assert_called_once_with(**call_args)\n mock_boot_servers.assert_called_once_with(\"image-id-0\", \"fid\",\n 5, **boot_server_kwargs)\n\n def test_create_and_update_image(self):\n image_service = self.mock_image.return_value\n\n fake_image = fakes.FakeImage(id=1, name=\"imagexxx\")\n image_service.create_image.return_value = fake_image\n properties = {\"fakeprop\": \"fake\"}\n create_args = {\"container_format\": \"cf\",\n \"image_location\": \"url\",\n \"disk_format\": \"df\",\n \"visibility\": \"vs\",\n \"min_disk\": 0,\n \"min_ram\": 0,\n \"properties\": properties}\n\n images.CreateAndUpdateImage(self.context).run(\n \"cf\", \"url\", \"df\", None, \"vs\", 0, 0, properties, 0, 0)\n\n image_service.create_image.assert_called_once_with(**create_args)\n image_service.update_image.assert_called_once_with(\n fake_image.id, min_disk=0, min_ram=0, remove_props=None)\n\n @mock.patch(\"%s.create_image\" % GLANCE_V2_PATH)\n 
@mock.patch(\"%s.deactivate_image\" % GLANCE_V2_PATH)\n def test_create_and_deactivate_image(self, mock_deactivate_image,\n mock_create_image):\n fake_image = fakes.FakeImage(id=1, name=\"img_name1\")\n mock_create_image.return_value = fake_image\n call_args = {\"container_format\": \"cf\",\n \"image_location\": \"url\",\n \"disk_format\": \"df\",\n \"visibility\": \"vs\",\n \"min_disk\": 0,\n \"min_ram\": 0}\n\n images.CreateAndDeactivateImage(self.context).run(\n \"cf\", \"url\", \"df\", \"vs\", 0, 0)\n mock_create_image.assert_called_once_with(**call_args)\n mock_deactivate_image.assert_called_once_with(fake_image.id)\n" }, { "alpha_fraction": 0.582442045211792, "alphanum_fraction": 0.5984848737716675, "avg_line_length": 37.35897445678711, "blob_id": "d718417901c033b3d493d4ed5db82a604c8809f1", "content_id": "d56271737225fabf3c6156533d68908e328af8cc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4488, "license_type": "permissive", "max_line_length": 78, "num_lines": 117, "path": "/tests/unit/task/scenarios/quotas/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Kylin Cloud\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.quotas import utils\nfrom tests.unit import test\n\n\nclass QuotasScenarioTestCase(test.ScenarioTestCase):\n\n def test__update_quotas(self):\n tenant_id = \"fake_tenant\"\n quotas = {\n \"metadata_items\": 10,\n \"key_pairs\": 10,\n \"injected_file_content_bytes\": 1024,\n \"injected_file_path_bytes\": 1024,\n \"ram\": 5120,\n \"instances\": 10,\n \"injected_files\": 10,\n \"cores\": 10,\n }\n self.admin_clients(\"nova\").quotas.update.return_value = quotas\n scenario = utils.QuotasScenario(self.context)\n scenario._generate_quota_values = mock.MagicMock(return_value=quotas)\n\n result = scenario._update_quotas(\"nova\", tenant_id)\n\n self.assertEqual(quotas, result)\n self.admin_clients(\"nova\").quotas.update.assert_called_once_with(\n tenant_id, **quotas)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"quotas.update_quotas\")\n\n def test__update_quotas_fn(self):\n tenant_id = \"fake_tenant\"\n quotas = {\n \"metadata_items\": 10,\n \"key_pairs\": 10,\n \"injected_file_content_bytes\": 1024,\n \"injected_file_path_bytes\": 1024,\n \"ram\": 5120,\n \"instances\": 10,\n \"injected_files\": 10,\n \"cores\": 10,\n }\n self.admin_clients(\"nova\").quotas.update.return_value = quotas\n scenario = utils.QuotasScenario(self.context)\n scenario._generate_quota_values = mock.MagicMock(return_value=quotas)\n\n mock_quota = mock.Mock(return_value=quotas)\n\n result = scenario._update_quotas(\"nova\", tenant_id,\n quota_update_fn=mock_quota)\n\n self.assertEqual(quotas, result)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"quotas.update_quotas\")\n\n def test__generate_quota_values_nova(self):\n max_quota = 1024\n scenario = utils.QuotasScenario(self.context)\n quotas = scenario._generate_quota_values(max_quota, \"nova\")\n for k, v in quotas.items():\n 
self.assertGreaterEqual(v, -1)\n self.assertLessEqual(v, max_quota)\n\n def test__generate_quota_values_cinder(self):\n max_quota = 1024\n scenario = utils.QuotasScenario(self.context)\n quotas = scenario._generate_quota_values(max_quota, \"cinder\")\n for k, v in quotas.items():\n self.assertGreaterEqual(v, -1)\n self.assertLessEqual(v, max_quota)\n\n def test__generate_quota_values_neutron(self):\n max_quota = 1024\n scenario = utils.QuotasScenario(self.context)\n quotas = scenario._generate_quota_values(max_quota, \"neutron\")\n for v in quotas.values():\n for v1 in v.values():\n for v2 in v1.values():\n self.assertGreaterEqual(v2, -1)\n self.assertLessEqual(v2, max_quota)\n\n def test__delete_quotas(self):\n tenant_id = \"fake_tenant\"\n scenario = utils.QuotasScenario(self.context)\n scenario._delete_quotas(\"nova\", tenant_id)\n\n self.admin_clients(\"nova\").quotas.delete.assert_called_once_with(\n tenant_id)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"quotas.delete_quotas\")\n\n def test__get_quotas(self):\n tenant_id = \"fake_tenant\"\n scenario = utils.QuotasScenario(self.context)\n scenario._get_quotas(\"nova\", tenant_id)\n\n self.admin_clients(\"nova\").quotas.get.assert_called_once_with(\n tenant_id)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"quotas.get_quotas\")\n" }, { "alpha_fraction": 0.63559889793396, "alphanum_fraction": 0.6463660597801208, "avg_line_length": 39.16216278076172, "blob_id": "81ebefa41cd2eee33f67e67de5192eb35e538b56", "content_id": "8dc8d45b68b58e7e23a34df0ed93a401a8b3a6d7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2972, "license_type": "permissive", "max_line_length": 78, "num_lines": 74, "path": "/tests/unit/task/scenarios/gnocchi/test_archive_policy.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. 
<http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.gnocchi import archive_policy\nfrom tests.unit import test\n\n\nclass GnocchiArchivePolicyTestCase(test.ScenarioTestCase):\n\n def get_test_context(self):\n context = super(GnocchiArchivePolicyTestCase,\n self).get_test_context()\n context.update({\n \"admin\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake\"}\n })\n return context\n\n def setUp(self):\n super(GnocchiArchivePolicyTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.gnocchi.metric.GnocchiService\")\n self.addCleanup(patch.stop)\n self.mock_metric = patch.start()\n\n def test_list_archive_policy(self):\n metric_service = self.mock_metric.return_value\n scenario = archive_policy.ListArchivePolicy(self.context)\n scenario.run()\n metric_service.list_archive_policy.assert_called_once_with()\n\n def test_create_archive_policy(self):\n metric_service = self.mock_metric.return_value\n scenario = archive_policy.CreateArchivePolicy(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n attrs = [{\"foo\": \"1:23:24\", \"bar\": \"5:43:21\"}]\n aggreg = [\"foo1\", \"foo2\"]\n\n scenario.run(definition=attrs, aggregation_methods=aggreg)\n 
metric_service.create_archive_policy.assert_called_once_with(\n \"name\", definition=attrs, aggregation_methods=aggreg)\n\n def test_create_delete_archive_policy(self):\n metric_service = self.mock_metric.return_value\n scenario = archive_policy.CreateDeleteArchivePolicy(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n attrs = [{\"foo\": \"2:34:55\", \"bar\": \"4:32:10\"}]\n aggreg = [\"foo3\", \"foo4\"]\n\n scenario.run(definition=attrs, aggregation_methods=aggreg)\n metric_service.create_archive_policy.assert_called_once_with(\n \"name\", definition=attrs, aggregation_methods=aggreg)\n metric_service.delete_archive_policy.assert_called_once_with(\n \"name\")\n" }, { "alpha_fraction": 0.5970661640167236, "alphanum_fraction": 0.6008114814758301, "avg_line_length": 37.14285659790039, "blob_id": "a627c91f45e2f6418f1848eca61d4b4781b62e38", "content_id": "c362cb4b8b1d35dc47fd9c1ad1b40ccd78121196", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3204, "license_type": "permissive", "max_line_length": 75, "num_lines": 84, "path": "/tests/unit/task/contexts/senlin/test_profiles.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.senlin import profiles\nfrom tests.unit import test\n\n\nBASE_CTX = \"rally.task.context\"\nCTX = \"rally_openstack.context\"\nBASE_SCN = \"rally.task.scenarios\"\nSCN = \"rally_openstack.task.scenarios\"\n\n\nclass ProfilesGeneratorTestCase(test.ScenarioTestCase):\n \"\"\"Generate tenants.\"\"\"\n def _gen_tenants(self, count):\n tenants = {}\n for _id in range(count):\n tenants[str(_id)] = {\"id\": str(_id)}\n return tenants\n\n def setUp(self):\n super(ProfilesGeneratorTestCase, self).setUp()\n self.tenants_count = 2\n self.users_per_tenant = 3\n tenants = self._gen_tenants(self.tenants_count)\n users = []\n for tenant in tenants:\n for i in range(self.users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": tenant,\n \"credential\": mock.MagicMock()})\n\n self.context = {\n \"config\": {\n \"users\": {\n \"tenants\": self.tenants_count,\n \"users_per_tenant\": self.users_per_tenant\n },\n \"profiles\": {\n \"type\": \"profile_type_name\",\n \"version\": \"1.0\",\n \"properties\": {\"k1\": \"v1\", \"k2\": \"v2\"}\n },\n },\n \"users\": users,\n \"tenants\": tenants,\n \"task\": mock.MagicMock()\n }\n\n @mock.patch(\"%s.senlin.utils.SenlinScenario._create_profile\" % SCN,\n return_value=mock.MagicMock(id=\"TEST_PROFILE_ID\"))\n def test_setup(self, mock_senlin_scenario__create_profile):\n profile_ctx = profiles.ProfilesGenerator(self.context)\n profile_ctx.setup()\n spec = self.context[\"config\"][\"profiles\"]\n\n mock_calls = [mock.call(spec) for i in range(self.tenants_count)]\n mock_senlin_scenario__create_profile.assert_has_calls(mock_calls)\n\n for tenant in self.context[\"tenants\"]:\n self.assertEqual(\"TEST_PROFILE_ID\",\n self.context[\"tenants\"][tenant][\"profile\"])\n\n @mock.patch(\"%s.senlin.utils.SenlinScenario._delete_profile\" % SCN)\n def test_cleanup(self, 
mock_senlin_scenario__delete_profile):\n for tenant in self.context[\"tenants\"]:\n self.context[\"tenants\"][tenant].update(\n {\"profile\": \"TEST_PROFILE_ID\"})\n profile_ctx = profiles.ProfilesGenerator(self.context)\n profile_ctx.cleanup()\n mock_calls = [mock.call(\"TEST_PROFILE_ID\") for i in range(\n self.tenants_count)]\n mock_senlin_scenario__delete_profile.assert_has_calls(mock_calls)\n" }, { "alpha_fraction": 0.6558663249015808, "alphanum_fraction": 0.6585947871208191, "avg_line_length": 41.49275207519531, "blob_id": "5c972067435bd3dd8e2b96a124f0ffdb228cd715", "content_id": "d8112c253d5d2d0f2a8c1c4879d0d1d51d6de700", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2932, "license_type": "permissive", "max_line_length": 78, "num_lines": 69, "path": "/tests/unit/task/scenarios/gnocchi/test_archive_policy_rule.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.gnocchi import archive_policy_rule\nfrom tests.unit import test\n\n\nclass GnocchiArchivePolicyRuleTestCase(test.ScenarioTestCase):\n\n def get_test_context(self):\n context = super(GnocchiArchivePolicyRuleTestCase,\n self).get_test_context()\n context.update({\n \"admin\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake\"}\n })\n return context\n\n def setUp(self):\n super(GnocchiArchivePolicyRuleTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.gnocchi.metric.GnocchiService\")\n self.addCleanup(patch.stop)\n self.mock_metric = patch.start()\n\n def test_list_archive_policy_rule(self):\n metric_service = self.mock_metric.return_value\n scenario = archive_policy_rule.ListArchivePolicyRule(self.context)\n scenario.run()\n metric_service.list_archive_policy_rule.assert_called_once_with()\n\n def test_create_archive_policy_rule(self):\n metric_service = self.mock_metric.return_value\n scenario = archive_policy_rule.CreateArchivePolicyRule(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario.run(metric_pattern=\"foo_pat*\", archive_policy_name=\"foo_pol\")\n metric_service.create_archive_policy_rule.assert_called_once_with(\n \"name\", metric_pattern=\"foo_pat*\", archive_policy_name=\"foo_pol\")\n\n def test_create_delete_archive_policy_rule(self):\n metric_service = self.mock_metric.return_value\n scenario = archive_policy_rule.CreateDeleteArchivePolicyRule(\n self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario.run(metric_pattern=\"foo_pat*\", archive_policy_name=\"foo_pol\")\n metric_service.create_archive_policy_rule.assert_called_once_with(\n 
\"name\", metric_pattern=\"foo_pat*\", archive_policy_name=\"foo_pol\")\n metric_service.delete_archive_policy_rule.assert_called_once_with(\n \"name\")\n" }, { "alpha_fraction": 0.5912504196166992, "alphanum_fraction": 0.5979431867599487, "avg_line_length": 46.859375, "blob_id": "d0088f0412cb5e253c01e19fb2c1a74bd3704db1", "content_id": "b6900f054d44b4c7859811434f420a11a70661d3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6126, "license_type": "permissive", "max_line_length": 78, "num_lines": 128, "path": "/tests/unit/task/scenarios/neutron/test_trunk.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Intel Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.neutron import trunk\nfrom tests.unit import test\n\n\nclass NeutronTrunkTestCase(test.ScenarioTestCase):\n\n def test_create_and_list_trunks(self):\n subport_count = 10\n network_create_args = {}\n net = mock.MagicMock()\n scenario = trunk.CreateAndListTrunks(self.context)\n scenario._create_network = mock.Mock(return_value=net)\n scenario._create_port = mock.MagicMock()\n scenario._create_trunk = mock.MagicMock()\n scenario._list_subports_by_trunk = mock.MagicMock()\n scenario._update_port = mock.Mock()\n scenario.run(network_create_args=network_create_args,\n subport_count=subport_count)\n scenario._create_network.assert_called_once_with(\n network_create_args)\n scenario._create_port.assert_has_calls(\n [mock.call(net, {})\n for _ in range(subport_count + 1)])\n self.assertEqual(1, scenario._create_trunk.call_count)\n self.assertEqual(1, scenario._list_subports_by_trunk.call_count)\n\n def test_boot_server_with_subports(self):\n img_name = \"img\"\n flavor_uuid = 0\n subport_count = 10\n network_create_args = {}\n net = mock.MagicMock()\n port = {\"port\": {\"id\": \"port-id\"}}\n kwargs = {\"nics\": [{\"port-id\": \"port-id\"}]}\n subnet = {\"subnet\": {\"id\": \"subnet-id\"}}\n scenario = trunk.BootServerWithSubports(self.context)\n scenario._boot_server = mock.MagicMock()\n scenario._create_port = mock.MagicMock(return_value=port)\n scenario._create_trunk = mock.MagicMock()\n scenario._create_network_and_subnets = mock.MagicMock()\n scenario._create_network_and_subnets.return_value = net, [subnet]\n scenario.run(img_name, flavor_uuid,\n network_create_args=network_create_args,\n subport_count=subport_count)\n scenario._create_port.assert_has_calls(\n [mock.call(net, {\"fixed_ips\": [{\"subnet_id\":\n subnet[\"subnet\"][\"id\"]}]})\n for _ in range(subport_count + 1)])\n 
self.assertEqual(1, scenario._create_trunk.call_count)\n self.assertEqual(11, scenario._create_network_and_subnets.call_count)\n scenario._boot_server.assert_called_once_with(img_name, flavor_uuid,\n **kwargs)\n\n def test_boot_server_and_add_subports(self):\n img_name = \"img\"\n flavor_uuid = 0\n subport_count = 10\n network_create_args = {}\n net = mock.MagicMock()\n port = {\"port\": {\"id\": \"port-id\"}}\n kwargs = {\"nics\": [{\"port-id\": \"port-id\"}]}\n subnet = {\"subnet\": {\"id\": \"subnet-id\"}}\n scenario = trunk.BootServerAndAddSubports(self.context)\n scenario._boot_server = mock.MagicMock()\n scenario._create_port = mock.MagicMock(return_value=port)\n scenario._create_trunk = mock.MagicMock()\n scenario._add_subports_to_trunk = mock.MagicMock()\n scenario._create_network_and_subnets = mock.MagicMock()\n scenario._create_network_and_subnets.return_value = net, [subnet]\n scenario.run(img_name, flavor_uuid,\n network_create_args=network_create_args,\n subport_count=subport_count)\n scenario._create_port.assert_has_calls(\n [mock.call(net, {\"fixed_ips\": [{\"subnet_id\":\n subnet[\"subnet\"][\"id\"]}]})\n for _ in range(subport_count + 1)])\n self.assertEqual(1, scenario._create_trunk.call_count)\n scenario._boot_server.assert_called_once_with(img_name, flavor_uuid,\n **kwargs)\n self.assertEqual(10, scenario._add_subports_to_trunk.call_count)\n self.assertEqual(11, scenario._create_network_and_subnets.call_count)\n\n def test_boot_server_and_batch_add_subports(self):\n img_name = \"img\"\n flavor_uuid = 0\n subports_per_batch = 10\n batches = 5\n network_create_args = {}\n net = mock.MagicMock()\n port = {\"port\": {\"id\": \"port-id\"}}\n kwargs = {\"nics\": [{\"port-id\": \"port-id\"}]}\n subnet = {\"subnet\": {\"id\": \"subnet-id\"}}\n scenario = trunk.BootServerAndBatchAddSubports(self.context)\n scenario._boot_server = mock.MagicMock()\n scenario._create_port = mock.MagicMock(return_value=port)\n scenario._create_trunk = mock.MagicMock()\n 
scenario._add_subports_to_trunk = mock.MagicMock()\n scenario._create_network_and_subnets = mock.MagicMock()\n scenario._create_network_and_subnets.return_value = net, [subnet]\n scenario.run(img_name, flavor_uuid,\n network_create_args=network_create_args,\n subports_per_batch=10, batches=5)\n scenario._create_port.assert_has_calls(\n [mock.call(net, {\"fixed_ips\": [{\"subnet_id\":\n subnet[\"subnet\"][\"id\"]}]})\n for _ in range(subports_per_batch * batches + 1)])\n self.assertEqual(1, scenario._create_trunk.call_count)\n scenario._boot_server.assert_called_once_with(img_name, flavor_uuid,\n **kwargs)\n self.assertEqual(5, scenario._add_subports_to_trunk.call_count)\n self.assertEqual(51, scenario._create_network_and_subnets.call_count)\n" }, { "alpha_fraction": 0.6138402223587036, "alphanum_fraction": 0.6177915334701538, "avg_line_length": 37.53498077392578, "blob_id": "8b96fc3fdab82e3c2906ec6506229230f436687c", "content_id": "3a1162a06a8b208597a6deb080b58282c5bb8470", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9364, "license_type": "permissive", "max_line_length": 78, "num_lines": 243, "path": "/rally_openstack/common/services/image/glance_v2.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport time\n\nfrom rally.common import cfg\nfrom rally.common import utils as rutils\nfrom rally.task import atomic\nfrom rally.task import utils\nimport requests\n\nfrom rally_openstack.common import service\nfrom rally_openstack.common.services.image import glance_common\nfrom rally_openstack.common.services.image import image\n\n\nCONF = cfg.CONF\n\n\[email protected](\"glance\", service_type=\"image\", version=\"2\")\nclass GlanceV2Service(service.Service, glance_common.GlanceMixin):\n\n @atomic.action_timer(\"glance_v2.upload_data\")\n def upload_data(self, image_id, image_location):\n \"\"\"Upload the data for an image.\n\n :param image_id: Image ID to upload data to.\n :param image_location: Location of the data to upload to.\n \"\"\"\n image_location = os.path.expanduser(image_location)\n image_data = None\n response = None\n try:\n if os.path.isfile(image_location):\n image_data = open(image_location, \"rb\")\n else:\n response = requests.get(image_location, stream=True,\n verify=False)\n image_data = response.raw\n self._clients.glance(\"2\").images.upload(image_id, image_data)\n finally:\n if image_data is not None:\n image_data.close()\n if response is not None:\n response.close()\n\n @atomic.action_timer(\"glance_v2.create_image\")\n def create_image(self, image_name=None, container_format=None,\n image_location=None, disk_format=None,\n visibility=None, min_disk=0,\n min_ram=0, properties=None):\n \"\"\"Creates new image.\n\n :param image_name: Image name for which need to be created\n :param container_format: Container format\n :param image_location: The new image's location\n :param disk_format: Disk format\n :param visibility: The created image's visible status.\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n :param properties: Dict of image properties\n \"\"\"\n image_name = 
image_name or self.generate_random_name()\n\n properties = properties or {}\n image_obj = self._clients.glance(\"2\").images.create(\n name=image_name,\n container_format=container_format,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram,\n **properties)\n\n rutils.interruptable_sleep(CONF.openstack.\n glance_image_create_prepoll_delay)\n\n start = time.time()\n image_obj = utils.wait_for_status(\n image_obj.id, [\"queued\"],\n update_resource=self.get_image,\n timeout=CONF.openstack.glance_image_create_timeout,\n check_interval=CONF.openstack.glance_image_create_poll_interval)\n timeout = time.time() - start\n\n self.upload_data(image_obj.id, image_location=image_location)\n\n image_obj = utils.wait_for_status(\n image_obj, [\"active\"],\n update_resource=self.get_image,\n timeout=timeout,\n check_interval=CONF.openstack.glance_image_create_poll_interval)\n return image_obj\n\n @atomic.action_timer(\"glance_v2.update_image\")\n def update_image(self, image_id, image_name=None, min_disk=0,\n min_ram=0, remove_props=None):\n \"\"\"Update image.\n\n :param image_id: ID of image to update\n :param image_name: Image name to be updated to\n :param min_disk: The min disk of updated image\n :param min_ram: The min ram of updated image\n :param remove_props: List of property names to remove\n \"\"\"\n image_name = image_name or self.generate_random_name()\n\n return self._clients.glance(\"2\").images.update(\n image_id=image_id,\n name=image_name,\n min_disk=min_disk,\n min_ram=min_ram,\n remove_props=remove_props)\n\n @atomic.action_timer(\"glance_v2.list_images\")\n def list_images(self, status=\"active\", visibility=None, owner=None):\n \"\"\"List images.\n\n :param status: Filter in images for the specified status\n :param visibility: Filter in images for the specified visibility\n :param owner: Filter in images for tenant ID\n \"\"\"\n filters = {}\n filters[\"status\"] = status\n if visibility:\n filters[\"visibility\"] = 
visibility\n if owner:\n filters[\"owner\"] = owner\n # NOTE(boris-42): image.list() is lazy method which doesn't query API\n # until it's used, do not remove list().\n return list(self._clients.glance(\"2\").images.list(filters=filters))\n\n @atomic.action_timer(\"glance_v2.set_visibility\")\n def set_visibility(self, image_id, visibility=\"shared\"):\n \"\"\"Update visibility.\n\n :param image_id: ID of image to update\n :param visibility: The visibility of specified image\n \"\"\"\n self._clients.glance(\"2\").images.update(image_id,\n visibility=visibility)\n\n @atomic.action_timer(\"glance_v2.deactivate_image\")\n def deactivate_image(self, image_id):\n \"\"\"deactivate image.\"\"\"\n self._clients.glance(\"2\").images.deactivate(image_id)\n\n @atomic.action_timer(\"glance_v2.reactivate_image\")\n def reactivate_image(self, image_id):\n \"\"\"reactivate image.\"\"\"\n self._clients.glance(\"2\").images.reactivate(image_id)\n\n\[email protected]_layer(GlanceV2Service)\nclass UnifiedGlanceV2Service(glance_common.UnifiedGlanceMixin, image.Image):\n \"\"\"Compatibility layer for Glance V2.\"\"\"\n\n @staticmethod\n def _check_v2_visibility(visibility):\n visibility_values = [\"public\", \"private\", \"shared\", \"community\"]\n if visibility and visibility not in visibility_values:\n raise image.VisibilityException(\n message=\"Improper visibility value: %s in glance_v2\"\n % visibility)\n\n def create_image(self, image_name=None, container_format=None,\n image_location=None, disk_format=None,\n visibility=None, min_disk=0,\n min_ram=0, properties=None):\n \"\"\"Creates new image.\n\n :param image_name: Image name for which need to be created\n :param container_format: Container format\n :param image_location: The new image's location\n :param disk_format: Disk format\n :param visibility: The access permission for the created image.\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n :param properties: Dict of image 
properties\n \"\"\"\n image_obj = self._impl.create_image(\n image_name=image_name,\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram,\n properties=properties)\n return self._unify_image(image_obj)\n\n def update_image(self, image_id, image_name=None, min_disk=0,\n min_ram=0, remove_props=None):\n \"\"\"Update image.\n\n :param image_id: ID of image to update\n :param image_name: Image name to be updated to\n :param min_disk: The min disk of updated image\n :param min_ram: The min ram of updated image\n :param remove_props: List of property names to remove\n \"\"\"\n image_obj = self._impl.update_image(\n image_id=image_id,\n image_name=image_name,\n min_disk=min_disk,\n min_ram=min_ram,\n remove_props=remove_props)\n return self._unify_image(image_obj)\n\n def list_images(self, status=\"active\", visibility=None, owner=None):\n \"\"\"List images.\n\n :param status: Filter in images for the specified status\n :param visibility: Filter in images for the specified visibility\n :param owner: Filter in images for tenant ID\n \"\"\"\n self._check_v2_visibility(visibility)\n\n images = self._impl.list_images(\n status=status, visibility=visibility, owner=owner)\n return [self._unify_image(i) for i in images]\n\n def set_visibility(self, image_id, visibility=\"shared\"):\n \"\"\"Update visibility.\n\n :param image_id: ID of image to update\n :param visibility: The visibility of specified image\n \"\"\"\n self._check_v2_visibility(visibility)\n\n self._impl.set_visibility(image_id=image_id, visibility=visibility)\n" }, { "alpha_fraction": 0.6370921730995178, "alphanum_fraction": 0.6424346566200256, "avg_line_length": 35.65034866333008, "blob_id": "274621b0cded940e272267c03089aa1cd08cf5f7", "content_id": "3454ae59c9c3c6ad16acd7553c72695732484bc5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
5241, "license_type": "permissive", "max_line_length": 79, "num_lines": 143, "path": "/rally_openstack/task/scenarios/quotas/quotas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Kylin Cloud\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.quotas import utils\n\n\n\"\"\"Scenarios for quotas.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova.quotas\"]},\n name=\"Quotas.nova_update\",\n platform=\"openstack\")\nclass NovaUpdate(utils.QuotasScenario):\n\n def run(self, max_quota=1024):\n \"\"\"Update quotas for Nova.\n\n :param max_quota: Max value to be updated for quota.\n \"\"\"\n\n self._update_quotas(\"nova\", self.context[\"tenant\"][\"id\"],\n max_quota)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova.quotas\"]},\n name=\"Quotas.nova_update_and_delete\", platform=\"openstack\")\nclass NovaUpdateAndDelete(utils.QuotasScenario):\n\n def run(self, max_quota=1024):\n 
\"\"\"Update and delete quotas for Nova.\n\n :param max_quota: Max value to be updated for quota.\n \"\"\"\n\n self._update_quotas(\"nova\", self.context[\"tenant\"][\"id\"],\n max_quota)\n self._delete_quotas(\"nova\", self.context[\"tenant\"][\"id\"])\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder.quotas\"]},\n name=\"Quotas.cinder_update\", platform=\"openstack\")\nclass CinderUpdate(utils.QuotasScenario):\n\n def run(self, max_quota=1024):\n \"\"\"Update quotas for Cinder.\n\n :param max_quota: Max value to be updated for quota.\n \"\"\"\n\n self._update_quotas(\"cinder\", self.context[\"tenant\"][\"id\"],\n max_quota)\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder.quotas\"]},\n name=\"Quotas.cinder_get\", platform=\"openstack\")\nclass CinderGet(utils.QuotasScenario):\n\n def run(self):\n \"\"\"Get quotas for Cinder.\n\n Measure the \"cinder quota-show\" command performance\n\n \"\"\"\n self._get_quotas(\"cinder\", self.context[\"tenant\"][\"id\"])\n\n\[email protected](\"required_services\", services=[consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"admin_cleanup@openstack\": [\"cinder.quotas\"]},\n name=\"Quotas.cinder_update_and_delete\",\n platform=\"openstack\")\nclass CinderUpdateAndDelete(utils.QuotasScenario):\n\n def run(self, max_quota=1024):\n \"\"\"Update and Delete quotas for Cinder.\n\n :param max_quota: Max value to be updated for quota.\n \"\"\"\n\n self._update_quotas(\"cinder\", self.context[\"tenant\"][\"id\"],\n max_quota)\n self._delete_quotas(\"cinder\", 
self.context[\"tenant\"][\"id\"])\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"admin_cleanup@openstack\": [\"neutron.quota\"]},\n name=\"Quotas.neutron_update\", platform=\"openstack\")\nclass NeutronUpdate(utils.QuotasScenario):\n\n def run(self, max_quota=1024):\n \"\"\"Update quotas for neutron.\n\n :param max_quota: Max value to be updated for quota.\n \"\"\"\n\n quota_update_fn = self.admin_clients(\"neutron\").update_quota\n self._update_quotas(\"neutron\", self.context[\"tenant\"][\"id\"],\n max_quota, quota_update_fn)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"admin_cleanup@openstack\": [\"nova.quotas\"]},\n name=\"Quotas.nova_get\", platform=\"openstack\")\nclass NovaGet(utils.QuotasScenario):\n\n def run(self):\n \"\"\"Get quotas for nova.\"\"\"\n\n self._get_quotas(\"nova\", self.context[\"tenant\"][\"id\"])\n" }, { "alpha_fraction": 0.6458878517150879, "alphanum_fraction": 0.6470502614974976, "avg_line_length": 42.28302001953125, "blob_id": "37d16d7d7ae70626a675d42ab2f17224d5eaf899", "content_id": "b082a0c88e8f682164da76dde9ac0b3da9fbc75f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6882, "license_type": "permissive", "max_line_length": 78, "num_lines": 159, "path": "/rally_openstack/task/scenarios/murano/packages.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport os\n\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.murano import utils\n\n\n\"\"\"Scenarios for Murano packages.\"\"\"\n\n\[email protected](package={\"type\": \"expand_user_path\"})\[email protected](\"file_exists\", param_name=\"package\", mode=os.F_OK)\[email protected](\"required_services\", services=[consts.Service.MURANO])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"murano.packages\"]},\n name=\"MuranoPackages.import_and_list_packages\",\n platform=\"openstack\")\nclass ImportAndListPackages(utils.MuranoScenario):\n\n def run(self, package, include_disabled=False):\n \"\"\"Import Murano package and get list of packages.\n\n Measure the \"murano import-package\" and \"murano package-list\" commands\n performance.\n It imports Murano package from \"package\" (if it is not a zip archive\n then zip archive will be prepared) and gets list of imported packages.\n\n :param package: path to zip archive that represents Murano\n application package or absolute path to folder with\n package components\n :param include_disabled: specifies whether the disabled packages will\n be included in a the result or not.\n Default value is False.\n \"\"\"\n package_path = self._zip_package(package)\n try:\n self._import_package(package_path)\n self._list_packages(include_disabled=include_disabled)\n finally:\n 
os.remove(package_path)\n\n\[email protected](package={\"type\": \"expand_user_path\"})\[email protected](\"file_exists\", param_name=\"package\", mode=os.F_OK)\[email protected](\"required_services\", services=[consts.Service.MURANO])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"murano.packages\"]},\n name=\"MuranoPackages.import_and_delete_package\",\n platform=\"openstack\")\nclass ImportAndDeletePackage(utils.MuranoScenario):\n\n def run(self, package):\n \"\"\"Import Murano package and then delete it.\n\n Measure the \"murano import-package\" and \"murano package-delete\"\n commands performance.\n It imports Murano package from \"package\" (if it is not a zip archive\n then zip archive will be prepared) and deletes it.\n\n :param package: path to zip archive that represents Murano\n application package or absolute path to folder with\n package components\n \"\"\"\n package_path = self._zip_package(package)\n try:\n package = self._import_package(package_path)\n self._delete_package(package)\n finally:\n os.remove(package_path)\n\n\[email protected](package={\"type\": \"expand_user_path\"})\[email protected](\"file_exists\", param_name=\"package\", mode=os.F_OK)\[email protected](\"required_services\", services=[consts.Service.MURANO])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"murano.packages\"]},\n name=\"MuranoPackages.package_lifecycle\",\n platform=\"openstack\")\nclass PackageLifecycle(utils.MuranoScenario):\n\n def run(self, package, body, operation=\"replace\"):\n \"\"\"Import Murano package, modify it and then delete it.\n\n Measure the Murano import, update and delete package\n commands performance.\n It imports Murano package from \"package\" (if it is not a zip archive\n then zip archive will be prepared), modifies it (using data from\n \"body\") and deletes.\n\n :param package: 
path to zip archive that represents Murano\n application package or absolute path to folder with\n package components\n :param body: dict object that defines what package property will be\n updated, e.g {\"tags\": [\"tag\"]} or {\"enabled\": \"true\"}\n :param operation: string object that defines the way of how package\n property will be updated, allowed operations are\n \"add\", \"replace\" or \"delete\".\n Default value is \"replace\".\n\n \"\"\"\n package_path = self._zip_package(package)\n try:\n package = self._import_package(package_path)\n self._update_package(package, body, operation)\n self._delete_package(package)\n finally:\n os.remove(package_path)\n\n\[email protected](package={\"type\": \"expand_user_path\"})\[email protected](\"file_exists\", param_name=\"package\", mode=os.F_OK)\[email protected](\"required_services\", services=[consts.Service.MURANO])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"murano.packages\"]},\n name=\"MuranoPackages.import_and_filter_applications\",\n platform=\"openstack\")\nclass ImportAndFilterApplications(utils.MuranoScenario):\n\n def run(self, package, filter_query):\n \"\"\"Import Murano package and then filter packages by some criteria.\n\n Measure the performance of package import and package\n filtering commands.\n It imports Murano package from \"package\" (if it is not a zip archive\n then zip archive will be prepared) and filters packages by some\n criteria.\n\n :param package: path to zip archive that represents Murano\n application package or absolute path to folder with\n package components\n :param filter_query: dict that contains filter criteria, lately it\n will be passed as **kwargs to filter method\n e.g. 
{\"category\": \"Web\"}\n \"\"\"\n package_path = self._zip_package(package)\n try:\n self._import_package(package_path)\n self._filter_applications(filter_query)\n finally:\n os.remove(package_path)\n" }, { "alpha_fraction": 0.5524475574493408, "alphanum_fraction": 0.5560735464096069, "avg_line_length": 38, "blob_id": "d65f7b9727a9aeca91eba659ff1064b2ee09f05a", "content_id": "46597acc983ae89d393a553f460d2a1bb1ff426d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3861, "license_type": "permissive", "max_line_length": 78, "num_lines": 99, "path": "/tests/unit/task/contexts/monasca/test_metrics.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.monasca import metrics\nfrom rally_openstack.task.scenarios.monasca import utils as monasca_utils\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.monasca\"\n\n\nclass MonascaMetricGeneratorTestCase(test.TestCase):\n\n def _gen_tenants(self, count):\n tenants = {}\n for id in range(count):\n tenants[str(id)] = {\"name\": str(id)}\n return tenants\n\n def _gen_context(self, tenants_count, users_per_tenant,\n metrics_per_tenant):\n tenants = self._gen_tenants(tenants_count)\n users = []\n for id in tenants.keys():\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": id,\n \"endpoint\": mock.MagicMock()})\n context = test.get_test_context()\n context.update({\n \"config\": {\n \"users\": {\n \"tenants\": tenants_count,\n \"users_per_tenant\": users_per_tenant,\n \"concurrent\": 10,\n },\n \"monasca_metrics\": {\n \"name\": \"fake-metric-name\",\n \"dimensions\": {\n \"region\": \"fake-region\",\n \"service\": \"fake-identity\",\n \"hostname\": \"fake-hostname\",\n \"url\": \"fake-url\"\n },\n \"metrics_per_tenant\": metrics_per_tenant,\n },\n \"roles\": [\n \"monasca-user\"\n ]\n },\n \"admin\": {\n \"endpoint\": mock.MagicMock()\n },\n \"users\": users,\n \"tenants\": tenants\n })\n return tenants, context\n\n @mock.patch(\"%s.metrics.rutils.interruptable_sleep\" % CTX)\n @mock.patch(\"%s.metrics.monasca_utils.MonascaScenario\" % CTX)\n def test_setup(self, mock_monasca_scenario, mock_interruptable_sleep):\n tenants_count = 2\n users_per_tenant = 4\n metrics_per_tenant = 5\n\n tenants, real_context = self._gen_context(\n tenants_count, users_per_tenant, metrics_per_tenant)\n\n monasca_ctx = metrics.MonascaMetricGenerator(real_context)\n monasca_ctx.setup()\n\n self.assertEqual(tenants_count, mock_monasca_scenario.call_count,\n \"Scenario should be 
constructed same times as \"\n \"number of tenants\")\n self.assertEqual(metrics_per_tenant * tenants_count,\n mock_monasca_scenario.return_value._create_metrics.\n call_count,\n \"Total number of metrics created should be tenant\"\n \"counts times metrics per tenant\")\n first_call = mock.call(0.001)\n second_call = mock.call(monasca_utils.CONF.openstack.\n monasca_metric_create_prepoll_delay,\n atomic_delay=1)\n self.assertEqual(\n [first_call] * metrics_per_tenant * tenants_count + [second_call],\n mock_interruptable_sleep.call_args_list,\n \"Method interruptable_sleep should be called tenant counts times \"\n \"metrics plus one\")\n" }, { "alpha_fraction": 0.6247391104698181, "alphanum_fraction": 0.6285334825515747, "avg_line_length": 39.23664093017578, "blob_id": "d2a77c5d0d19e09d4e73b78866639d611c520027", "content_id": "ca122ad15974c3c8846065ba5622e84a28e0998f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5271, "license_type": "permissive", "max_line_length": 78, "num_lines": 131, "path": "/rally_openstack/task/scenarios/nova/server_groups.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017: Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.nova import utils\n\n\nLOG = logging.getLogger(__name__)\n\n\n\"\"\"Scenarios for Nova Group servers.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServerGroups.create_and_list_server_groups\",\n platform=\"openstack\")\nclass CreateAndListServerGroups(utils.NovaScenario):\n\n def run(self, policies=None, all_projects=False, kwargs=None):\n \"\"\"Create a server group, then list all server groups.\n\n Measure the \"nova server-group-create\" and \"nova server-group-list\"\n command performance.\n\n :param policies: Server group policy\n :param all_projects: If True, display server groups from all\n projects(Admin only)\n :param kwargs: The server group specifications to add.\n DEPRECATED, specify arguments explicitly.\n \"\"\"\n if kwargs is None:\n kwargs = {\n \"policies\": policies\n }\n else:\n LOG.warning(\"The argument `kwargs` is deprecated since\"\n \" Rally 0.10.0. 
Specify all arguments from it\"\n \" explicitly.\")\n server_group = self._create_server_group(**kwargs)\n msg = (\"Server Groups isn't created\")\n self.assertTrue(server_group, err_msg=msg)\n\n server_groups_list = self._list_server_groups(all_projects)\n msg = (\"Server Group not included into list of server groups\\n\"\n \"Created server group: {}\\n\"\n \"list of server groups: {}\").format(server_group,\n server_groups_list)\n self.assertIn(server_group, server_groups_list, err_msg=msg)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServerGroups.create_and_get_server_group\",\n platform=\"openstack\")\nclass CreateAndGetServerGroup(utils.NovaScenario):\n\n def run(self, policies=None, kwargs=None):\n \"\"\"Create a server group, then get its detailed information.\n\n Measure the \"nova server-group-create\" and \"nova server-group-get\"\n command performance.\n\n :param policies: Server group policy\n :param kwargs: The server group specifications to add.\n DEPRECATED, specify arguments explicitly.\n \"\"\"\n if kwargs is None:\n kwargs = {\n \"policies\": policies\n }\n else:\n LOG.warning(\"The argument `kwargs` is deprecated since\"\n \" Rally 0.10.0. 
Specify all arguments from it\"\n \" explicitly.\")\n server_group = self._create_server_group(**kwargs)\n msg = (\"Server Groups isn't created\")\n self.assertTrue(server_group, err_msg=msg)\n\n server_group_info = self._get_server_group(server_group.id)\n self.assertEqual(server_group.id, server_group_info.id)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServerGroups.create_and_delete_server_group\",\n platform=\"openstack\")\nclass CreateAndDeleteServerGroup(utils.NovaScenario):\n\n def run(self, policies=None, kwargs=None):\n \"\"\"Create a server group, then delete it.\n\n Measure the \"nova server-group-create\" and \"nova server-group-delete\"\n command performance.\n\n :param policies: Server group policy\n :param kwargs: The server group specifications to add.\n DEPRECATED, specify arguments explicitly.\n \"\"\"\n if kwargs is None:\n kwargs = {\n \"policies\": policies\n }\n else:\n LOG.warning(\"The argument `kwargs` is deprecated since\"\n \" Rally 0.10.0. 
Specify all arguments from it\"\n \" explicitly.\")\n server_group = self._create_server_group(**kwargs)\n msg = (\"Server Group isn't created\")\n self.assertTrue(server_group, err_msg=msg)\n\n self._delete_server_group(server_group.id)\n" }, { "alpha_fraction": 0.588866114616394, "alphanum_fraction": 0.5928425192832947, "avg_line_length": 38.33175277709961, "blob_id": "f02c01790c1c874e5022430a0b9d5178b946cec5", "content_id": "d475f87147b40d84fce6ce14727d75d71452184e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8299, "license_type": "permissive", "max_line_length": 79, "num_lines": 211, "path": "/rally_openstack/common/services/image/glance_v1.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\n\nfrom rally.common import cfg\nfrom rally.common import utils as rutils\nfrom rally.task import atomic\nfrom rally.task import utils\n\nfrom rally_openstack.common import service\nfrom rally_openstack.common.services.image import glance_common\nfrom rally_openstack.common.services.image import image\n\n\nCONF = cfg.CONF\n\n\[email protected](\"glance\", service_type=\"image\", version=\"1\")\nclass GlanceV1Service(service.Service, glance_common.GlanceMixin):\n\n @atomic.action_timer(\"glance_v1.create_image\")\n def create_image(self, image_name=None, container_format=None,\n image_location=None, disk_format=None,\n is_public=True, min_disk=0, min_ram=0,\n properties=None):\n \"\"\"Creates new image.\n\n :param image_name: Image name for which need to be created\n :param container_format: Container format\n :param image_location: The new image's location\n :param disk_format: Disk format\n :param is_public: The created image's public status\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n :param properties: Dict of image properties\n \"\"\"\n image_location = os.path.expanduser(image_location)\n image_name = image_name or self.generate_random_name()\n kwargs = {}\n\n try:\n if os.path.isfile(image_location):\n kwargs[\"data\"] = open(image_location, \"rb\")\n else:\n kwargs[\"copy_from\"] = image_location\n\n image_obj = self._clients.glance(\"1\").images.create(\n name=image_name,\n container_format=container_format,\n disk_format=disk_format,\n is_public=is_public,\n min_disk=min_disk,\n min_ram=min_ram,\n properties=properties,\n **kwargs)\n\n rutils.interruptable_sleep(CONF.openstack.\n glance_image_create_prepoll_delay)\n\n image_obj = utils.wait_for_status(\n image_obj, [\"active\"],\n update_resource=self.get_image,\n timeout=CONF.openstack.glance_image_create_timeout,\n 
check_interval=CONF.openstack.glance_image_create_poll_interval\n )\n\n finally:\n if \"data\" in kwargs:\n kwargs[\"data\"].close()\n\n return image_obj\n\n @atomic.action_timer(\"glance_v1.update_image\")\n def update_image(self, image_id, image_name=None, min_disk=0,\n min_ram=0):\n \"\"\"Update image.\n\n :param image_id: ID of image to update\n :param image_name: Image name to be updated to\n :param min_disk: The min disk of updated image\n :param min_ram: The min ram of updated image\n \"\"\"\n image_name = image_name or self.generate_random_name()\n\n return self._clients.glance(\"1\").images.update(image_id,\n name=image_name,\n min_disk=min_disk,\n min_ram=min_ram)\n\n @atomic.action_timer(\"glance_v1.list_images\")\n def list_images(self, status=\"active\", is_public=None, owner=None):\n \"\"\"List images.\n\n :param status: Filter in images for the specified status\n :param is_public: Filter in images for the specified public status\n :param owner: Filter in images for tenant ID\n \"\"\"\n # NOTE(boris-42): image.list() is lazy method which doesn't query API\n # until it's used, do not remove list().\n return list(self._clients.glance(\"1\").images.list(status=status,\n owner=owner,\n is_public=is_public))\n\n @atomic.action_timer(\"glance_v1.set_visibility\")\n def set_visibility(self, image_id, is_public=True):\n \"\"\"Update visibility.\n\n :param image_id: ID of image to update\n :param is_public: Image is public or not\n \"\"\"\n self._clients.glance(\"1\").images.update(image_id, is_public=is_public)\n\n\[email protected]_layer(GlanceV1Service)\nclass UnifiedGlanceV1Service(glance_common.UnifiedGlanceMixin, image.Image):\n \"\"\"Compatibility layer for Glance V1.\"\"\"\n\n @staticmethod\n def _check_v1_visibility(visibility):\n visibility_values = [\"public\", \"private\"]\n if visibility and visibility not in visibility_values:\n raise image.VisibilityException(\n message=\"Improper visibility value: %s in glance_v1\"\n % visibility)\n\n def 
create_image(self, image_name=None, container_format=None,\n image_location=None, disk_format=None,\n visibility=\"public\", min_disk=0,\n min_ram=0, properties=None):\n \"\"\"Creates new image.\n\n :param image_name: Image name for which need to be created\n :param container_format: Container format\n :param image_location: The new image's location\n :param disk_format: Disk format\n :param visibility: The created image's visible status\n :param min_disk: The min disk of created images\n :param min_ram: The min ram of created images\n :param properties: Dict of image properties\n \"\"\"\n self._check_v1_visibility(visibility)\n\n is_public = visibility != \"private\"\n image_obj = self._impl.create_image(\n image_name=image_name,\n container_format=container_format,\n image_location=image_location,\n disk_format=disk_format,\n is_public=is_public,\n min_disk=min_disk,\n min_ram=min_ram,\n properties=properties)\n return self._unify_image(image_obj)\n\n def update_image(self, image_id, image_name=None, min_disk=0,\n min_ram=0, remove_props=None):\n \"\"\"Update image.\n\n :param image_id: ID of image to update\n :param image_name: Image name to be updated to\n :param min_disk: The min disk of updated image\n :param min_ram: The min ram of updated image\n :param remove_props: List of property names to remove\n \"\"\"\n if remove_props is not None:\n raise image.RemovePropsException(\"Remove prop: %s is not \"\n \"supported in \"\n \"glance_v1\" % remove_props)\n image_obj = self._impl.update_image(\n image_id=image_id,\n image_name=image_name,\n min_disk=min_disk,\n min_ram=min_ram)\n return self._unify_image(image_obj)\n\n def list_images(self, status=\"active\", visibility=None, owner=None):\n \"\"\"List images.\n\n :param status: Filter in images for the specified status\n :param visibility: Filter in images for the specified visibility\n :param owner: Filter in images for tenant ID\n \"\"\"\n self._check_v1_visibility(visibility)\n\n is_public = visibility != 
\"private\"\n\n images = self._impl.list_images(status=status, is_public=is_public)\n return [self._unify_image(i) for i in images]\n\n def set_visibility(self, image_id, visibility=\"public\"):\n \"\"\"Update visibility.\n\n :param image_id: ID of image to update\n :param visibility: The visibility of specified image\n \"\"\"\n self._check_v1_visibility(visibility)\n\n is_public = visibility != \"private\"\n self._impl.set_visibility(image_id=image_id, is_public=is_public)\n" }, { "alpha_fraction": 0.5891443490982056, "alphanum_fraction": 0.5909602642059326, "avg_line_length": 28.638320922851562, "blob_id": "5f0d1fc23a759f8e9fb7a62583dfad73ba233f5f", "content_id": "6191e0f9489c1eba23f96f6cdc13c6fa43c5e354", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55068, "license_type": "permissive", "max_line_length": 79, "num_lines": 1858, "path": "/tests/unit/fakes.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport itertools\nimport multiprocessing\nimport random\nimport re\nimport string\nfrom unittest import mock\nimport uuid\n\nfrom glanceclient import exc\nfrom neutronclient.common import exceptions as neutron_exceptions\nfrom novaclient import exceptions as nova_exceptions\nfrom swiftclient import exceptions as swift_exceptions\n\nfrom rally import api\nfrom rally.common import utils as rally_utils\nfrom rally.task import context\nfrom rally.task import scenario\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import credential\n\n\ndef generate_uuid():\n return str(uuid.uuid4())\n\n\ndef generate_name(prefix=\"\", length=12, choices=string.ascii_lowercase):\n \"\"\"Generate pseudo-random name.\n\n :param prefix: str, custom prefix for genertated name\n :param length: int, length of autogenerated part of result name\n :param choices: str, chars that accurs in generated name\n :returns: str, pseudo-random name\n \"\"\"\n return prefix + \"\".join(random.choice(choices) for i in range(length))\n\n\ndef generate_mac():\n \"\"\"Generate pseudo-random MAC address.\n\n :returns: str, MAC address\n \"\"\"\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))\n\n\ndef setup_dict(data, required=None, defaults=None):\n \"\"\"Setup and validate dict scenario_base on mandatory keys and default data\n\n This function reduces code that constructs dict objects\n with specific schema (e.g. 
for API data).\n\n :param data: dict, input data\n :param required: list, mandatory keys to check\n :param defaults: dict, default data\n :returns: dict, with all keys set\n :raises IndexError, ValueError: If input data is incorrect\n \"\"\"\n required = required or []\n for i in set(required) - set(data):\n raise IndexError(\"Missed: %s\" % i)\n\n defaults = defaults or {}\n for i in set(data) - set(required) - set(defaults):\n raise ValueError(\"Unexpected: %s\" % i)\n\n defaults.update(data)\n return defaults\n\n\nclass FakeCredential(credential.OpenStackCredential):\n def __init__(self, **creds):\n creds.setdefault(\"auth_url\", \"https://example.com\")\n creds.setdefault(\"username\", \"admin\")\n creds.setdefault(\"password\", \"pass\")\n super(FakeCredential, self).__init__(**creds)\n self.clients = mock.Mock()\n\n\nclass FakeResource(object):\n\n def __init__(self, manager=None, name=None, status=\"ACTIVE\", items=None,\n deployment_uuid=None, id=None):\n self.name = name or generate_uuid()\n self.status = status\n self.manager = manager\n self.uuid = generate_uuid()\n self.id = id or self.uuid\n self.items = items or {}\n self.deployment_uuid = deployment_uuid or generate_uuid()\n\n def __getattr__(self, name):\n # NOTE(msdubov): e.g. 
server.delete() -> manager.delete(server)\n def manager_func(*args, **kwargs):\n return getattr(self.manager, name)(self, *args, **kwargs)\n return manager_func\n\n def __getitem__(self, key):\n return self.items[key]\n\n\nclass FakeServer(FakeResource):\n def suspend(self):\n self.status = \"SUSPENDED\"\n\n def lock(self):\n setattr(self, \"OS-EXT-STS:locked\", True)\n\n def unlock(self):\n setattr(self, \"OS-EXT-STS:locked\", False)\n\n\nclass FakeImage(FakeResource):\n\n def __init__(self, manager=None, id=\"image-id-0\", min_ram=0,\n size=0, min_disk=0, status=\"active\", name=None):\n super(FakeImage, self).__init__(manager, id=id, name=name)\n self.min_ram = min_ram\n self.size = size\n self.min_disk = min_disk\n self.status = status\n self.update = mock.MagicMock()\n\n\nclass FakeStrategy(FakeResource):\n pass\n\n\nclass FakeGoal(FakeResource):\n pass\n\n\nclass FakeMurano(FakeResource):\n pass\n\n\nclass FakeFloatingIP(FakeResource):\n pass\n\n\nclass FakeFloatingIPPool(FakeResource):\n pass\n\n\nclass FakeTenant(FakeResource):\n\n def __init__(self, manager, name):\n super(FakeTenant, self).__init__(manager, name=name)\n\n\nclass FakeUser(FakeResource):\n pass\n\n\nclass FakeService(FakeResource):\n pass\n\n\nclass FakeNetwork(FakeResource):\n pass\n\n\nclass FakeFlavor(FakeResource):\n\n def __init__(self, id=\"flavor-id-0\", manager=None, ram=0, disk=0, vcpus=1,\n name=\"flavor-name-0\"):\n super(FakeFlavor, self).__init__(manager, id=id)\n self.ram = ram\n self.disk = disk\n self.vcpus = vcpus\n self.name = name\n\n\nclass FakeSecret(FakeResource):\n\n def __init__(self, id=\"secret-id-0\", manager=None, secret_ref=\"secret_ref\",\n name=\"secret-name-0\"):\n super(FakeSecret, self).__init__(manager, id=id)\n self.secret_ref = secret_ref\n\n\nclass FakeLoadBalancer(FakeResource):\n pass\n\n\nclass FakeKeypair(FakeResource):\n pass\n\n\nclass FakeStack(FakeResource):\n pass\n\n\nclass FakeDomain(FakeResource):\n pass\n\n\nclass 
FakeQuotas(FakeResource):\n pass\n\n\nclass FakeSecurityGroup(FakeResource):\n\n def __init__(self, manager=None, rule_manager=None, id=None, name=None):\n super(FakeSecurityGroup, self).__init__(manager, id=id, name=name)\n self.rule_manager = rule_manager\n\n @property\n def rules(self):\n return [rule for rule in self.rule_manager.list()\n if rule.parent_group_id == self.id]\n\n\nclass FakeSecurityGroupRule(FakeResource):\n def __init__(self, name, **kwargs):\n super(FakeSecurityGroupRule, self).__init__(name)\n if \"cidr\" in kwargs:\n kwargs[\"ip_range\"] = {\"cidr\": kwargs[\"cidr\"]}\n del kwargs[\"cidr\"]\n for key, value in kwargs.items():\n self.items[key] = value\n setattr(self, key, value)\n\n\nclass FakeMetric(FakeResource):\n def __init_(self, manager=None, **kwargs):\n super(FakeMetric, self).__init__(manager)\n self.metric = kwargs.get(\"metric_name\")\n self.optional_args = kwargs.get(\"optional_args\", {})\n\n\nclass FakeAlarm(FakeResource):\n def __init__(self, manager=None, **kwargs):\n super(FakeAlarm, self).__init__(manager)\n self.meter_name = kwargs.get(\"meter_name\")\n self.threshold = kwargs.get(\"threshold\")\n self.state = kwargs.get(\"state\", \"fake-alarm-state\")\n self.alarm_id = kwargs.get(\"alarm_id\", \"fake-alarm-id\")\n self.state = kwargs.get(\"state\", \"ok\")\n self.optional_args = kwargs.get(\"optional_args\", {})\n\n\nclass FakeSample(FakeResource):\n def __init__(self, manager=None, **kwargs):\n super(FakeSample, self).__init__(manager)\n self.counter_name = kwargs.get(\"counter_name\", \"fake-counter-name\")\n self.counter_type = kwargs.get(\"counter_type\", \"fake-counter-type\")\n self.counter_unit = kwargs.get(\"counter_unit\", \"fake-counter-unit\")\n self.counter_volume = kwargs.get(\"counter_volume\", 100)\n\n @property\n def resource_id(self):\n return \"fake-resource-id\"\n\n def to_dict(self):\n return {\"counter_name\": self.counter_name,\n \"counter_type\": self.counter_type,\n \"counter_unit\": 
self.counter_unit,\n \"counter_volume\": self.counter_volume,\n \"resource_id\": self.resource_id}\n\n\nclass FakeVolume(FakeResource):\n @property\n def _info(self):\n return {\"id\": \"uuid\"}\n\n\nclass FakeVolumeType(FakeResource):\n pass\n\n\nclass FakeVolumeTransfer(FakeResource):\n pass\n\n\nclass FakeVolumeSnapshot(FakeResource):\n pass\n\n\nclass FakeVolumeBackup(FakeResource):\n pass\n\n\nclass FakeRole(FakeResource):\n pass\n\n\nclass FakeQueue(FakeResource):\n def __init__(self, manager=None, name=\"myqueue\"):\n super(FakeQueue, self).__init__(manager, name)\n self.queue_name = name\n self.messages = FakeMessagesManager(name)\n\n def post(self, messages):\n for msg in messages:\n self.messages.create(**msg)\n\n def messages(self):\n return self.messages.list()\n\n\nclass FakeDbInstance(FakeResource):\n pass\n\n\nclass FakeMessage(FakeResource):\n def __init__(self, manager=None, **kwargs):\n super(FakeMessage, self).__init__(manager)\n self.body = kwargs.get(\"body\", \"fake-body\")\n self.ttl = kwargs.get(\"ttl\", 100)\n\n\nclass FakeAvailabilityZone(FakeResource):\n def __init__(self, manager=None):\n super(FakeAvailabilityZone, self).__init__(manager)\n self.zoneName = mock.MagicMock()\n self.zoneState = mock.MagicMock()\n self.hosts = mock.MagicMock()\n\n\nclass FakeWorkbook(FakeResource):\n def __init__(self, manager=None):\n super(FakeWorkbook, self).__init__(manager)\n self.workbook = mock.MagicMock()\n\n\nclass FakeWorkflow(FakeResource):\n def __init__(self, manager=None):\n super(FakeWorkflow, self).__init__(manager)\n self.workflow = mock.MagicMock()\n\n\nclass FakeExecution(FakeResource):\n def __init__(self, manager=None):\n super(FakeExecution, self).__init__(manager)\n self.execution = mock.MagicMock()\n\n\nclass FakeObject(FakeResource):\n pass\n\n\nclass FakeClusterTemplate(FakeResource):\n pass\n\n\nclass FakeManager(object):\n\n def __init__(self):\n super(FakeManager, self).__init__()\n self.cache = {}\n self.resources_order = 
[]\n\n def get(self, resource_uuid):\n return self.cache.get(resource_uuid)\n\n def delete(self, resource_uuid):\n cached = self.get(resource_uuid)\n if cached is not None:\n cached.status = \"DELETED\"\n del self.cache[resource_uuid]\n self.resources_order.remove(resource_uuid)\n\n def _cache(self, resource):\n self.resources_order.append(resource.uuid)\n self.cache[resource.uuid] = resource\n return resource\n\n def list(self, **kwargs):\n return [self.cache[key] for key in self.resources_order]\n\n def find(self, **kwargs):\n for resource in self.cache.values():\n match = True\n for key, value in kwargs.items():\n if getattr(resource, key, None) != value:\n match = False\n break\n if match:\n return resource\n\n\nclass FakeServerManager(FakeManager):\n\n def __init__(self, image_mgr=None):\n super(FakeServerManager, self).__init__()\n self.images = image_mgr or FakeImageManager()\n\n def get(self, resource_uuid):\n server = self.cache.get(resource_uuid)\n if server is not None:\n return server\n raise nova_exceptions.NotFound(\"Server %s not found\" % (resource_uuid))\n\n def _create(self, server_class=FakeServer, name=None):\n server = self._cache(server_class(self))\n if name is not None:\n server.name = name\n return server\n\n def create(self, name, image_id, flavor_id, **kwargs):\n return self._create(name=name)\n\n def create_image(self, server, name):\n image = self.images._create()\n return image.uuid\n\n def add_floating_ip(self, server, fip):\n pass\n\n def remove_floating_ip(self, server, fip):\n pass\n\n def delete(self, resource):\n if not isinstance(resource, str):\n resource = resource.id\n\n cached = self.get(resource)\n if cached is not None:\n cached.status = \"DELETED\"\n del self.cache[resource]\n self.resources_order.remove(resource)\n\n\nclass FakeImageManager(FakeManager):\n\n def __init__(self):\n super(FakeImageManager, self).__init__()\n\n def get(self, resource_uuid):\n image = self.cache.get(resource_uuid)\n if image is not None:\n 
return image\n raise exc.HTTPNotFound(\"Image %s not found\" % (resource_uuid))\n\n def _create(self, image_class=FakeImage, name=None, id=None):\n image = self._cache(image_class(self))\n image.owner = \"dummy\"\n image.id = image.uuid\n if name is not None:\n image.name = name\n return image\n\n def create(self, name, copy_from, container_format, disk_format):\n return self._create(name=name)\n\n def delete(self, resource):\n if not isinstance(resource, str):\n resource = resource.id\n\n cached = self.get(resource)\n if cached is not None:\n cached.status = \"DELETED\"\n del self.cache[resource]\n self.resources_order.remove(resource)\n\n\nclass FakeStrategyManager(FakeManager):\n def get(self, resource_name):\n for key in self.resources_order:\n if self.cache[key].name == resource_name:\n return self.cache[key]\n\n\nclass FakeGoalManager(FakeManager):\n def get(self, resource_name):\n for key in self.resources_order:\n if self.cache[key].name == resource_name:\n return self.cache[key]\n\n\nclass FakePackageManager(FakeManager):\n\n def create(self, package_descr, package_arch, package_class=FakeMurano):\n package = self._cache(package_class(self))\n package.name = list(package_arch.keys())[0]\n return package\n\n\nclass FakeFloatingIPsManager(FakeManager):\n\n def create(self):\n return FakeFloatingIP(self)\n\n\nclass FakeFloatingIPPoolsManager(FakeManager):\n\n def create(self):\n return FakeFloatingIPPool(self)\n\n\nclass FakeTenantsManager(FakeManager):\n\n def create(self, name):\n return self._cache(FakeTenant(self, name))\n\n def update(self, tenant_id, name=None, description=None):\n tenant = self.get(tenant_id)\n name = name or (tenant.name + \"_updated\")\n desc = description or (tenant.name + \"_description_updated\")\n tenant.name = name\n tenant.description = desc\n return self._cache(tenant)\n\n\nclass FakeNetworkManager(FakeManager):\n\n def create(self, net_id):\n net = FakeNetwork(self)\n net.id = net_id\n return self._cache(net)\n\n\nclass 
FakeFlavorManager(FakeManager):\n\n def create(self):\n flv = FakeFlavor(self)\n return self._cache(flv)\n\n\nclass FakeKeypairManager(FakeManager):\n\n def create(self, name, public_key=None):\n kp = FakeKeypair(self)\n kp.name = name or kp.name\n return self._cache(kp)\n\n def delete(self, resource):\n if not isinstance(resource, str):\n resource = resource.id\n\n cached = self.get(resource)\n if cached is not None:\n cached.status = \"DELETED\"\n del self.cache[resource]\n self.resources_order.remove(resource)\n\n\nclass FakeClusterTemplateManager(FakeManager):\n\n def create(self, name):\n cluster_template = FakeClusterTemplate(self)\n cluster_template.name = name or cluster_template.name\n return self._cache(cluster_template)\n\n def delete(self, resource):\n if not isinstance(resource, str):\n resource = resource.id\n\n cached = self.get(resource)\n if cached is not None:\n del self.cache[resource]\n self.resources_order.remove(resource)\n\n\nclass FakeStackManager(FakeManager):\n\n def create(self, name):\n stack = FakeStack(self)\n stack.name = name or stack.name\n return self._cache(stack)\n\n def delete(self, resource):\n if not isinstance(resource, str):\n resource = resource.id\n\n cached = self.get(resource)\n if cached is not None:\n cached.status = \"DELETE_COMPLETE\"\n del self.cache[resource]\n self.resources_order.remove(resource)\n\n\nclass FakeDomainManager(FakeManager):\n\n def create(self, name):\n domain = FakeDomain(self)\n domain.name = name or domain.name\n return self._cache(domain)\n\n def delete(self, resource):\n if not isinstance(resource, str):\n resource = resource.id\n\n cached = self.get(resource)\n if cached is not None:\n cached.status = \"DELETE_COMPLETE\"\n del self.cache[resource]\n self.resources_order.remove(resource)\n\n\nclass FakeNovaQuotasManager(FakeManager):\n\n def update(self, tenant_id, **kwargs):\n fq = FakeQuotas(self)\n return self._cache(fq)\n\n def delete(self, tenant_id):\n pass\n\n\nclass 
FakeCinderQuotasManager(FakeManager):\n\n def update(self, tenant_id, **kwargs):\n fq = FakeQuotas(self)\n return self._cache(fq)\n\n def delete(self, tenant_id):\n pass\n\n\nclass FakeSecurityGroupManager(FakeManager):\n def __init__(self, rule_manager=None):\n super(FakeSecurityGroupManager, self).__init__()\n self.rule_manager = rule_manager\n self.create(\"default\")\n\n def create(self, name, description=\"\"):\n sg = FakeSecurityGroup(\n manager=self,\n rule_manager=self.rule_manager)\n sg.name = name or sg.name\n sg.description = description\n return self._cache(sg)\n\n def to_dict(self, obj):\n return {\"id\": obj.id, \"name\": obj.name}\n\n def find(self, name, **kwargs):\n kwargs[\"name\"] = name\n for resource in self.cache.values():\n match = True\n for key, value in kwargs.items():\n if getattr(resource, key, None) != value:\n match = False\n break\n if match:\n return resource\n raise nova_exceptions.NotFound(\"Security Group not found\")\n\n def delete(self, resource):\n if not isinstance(resource, str):\n resource = resource.id\n\n cached = self.get(resource)\n if cached is not None:\n cached.status = \"DELETED\"\n del self.cache[resource]\n self.resources_order.remove(resource)\n\n\nclass FakeSecurityGroupRuleManager(FakeManager):\n def __init__(self):\n super(FakeSecurityGroupRuleManager, self).__init__()\n\n def create(self, parent_group_id, **kwargs):\n kwargs[\"parent_group_id\"] = parent_group_id\n sgr = FakeSecurityGroupRule(self, **kwargs)\n return self._cache(sgr)\n\n\nclass FakeUsersManager(FakeManager):\n\n def create(self, username, password, email, tenant_id):\n user = FakeUser(manager=self, name=username)\n user.name = username or user.name\n return self._cache(user)\n\n\nclass FakeServicesManager(FakeManager):\n\n def list(self):\n return []\n\n\nclass FakeVolumeManager(FakeManager):\n def __init__(self):\n super(FakeVolumeManager, self).__init__()\n self.__volumes = {}\n self.__tenant_id = generate_uuid()\n\n def create(self, 
size=None, **kwargs):\n volume = FakeVolume(self)\n volume.size = size or 1\n volume.name = kwargs.get(\"display_name\", volume.name)\n volume.status = \"available\"\n volume.tenant_id = self.__tenant_id\n self.__volumes[volume.id] = volume\n return self._cache(volume)\n\n def list(self):\n return self.__volumes.values()\n\n def delete(self, resource):\n super(FakeVolumeManager, self).delete(resource.id)\n del self.__volumes[resource.id]\n\n\nclass FakeVolumeTypeManager(FakeManager):\n\n def create(self, name):\n vol_type = FakeVolumeType(self)\n vol_type.name = name or vol_type.name\n return self._cache(vol_type)\n\n\nclass FakeVolumeTransferManager(FakeManager):\n def __init__(self):\n super(FakeVolumeTransferManager, self).__init__()\n self.__volume_transfers = {}\n\n def list(self):\n return self.__volume_transfers.values()\n\n def create(self, name):\n transfer = FakeVolumeTransfer(self)\n transfer.name = name or transfer.name\n self.__volume_transfers[transfer.id] = transfer\n return self._cache(transfer)\n\n def delete(self, resource):\n super(FakeVolumeTransferManager, self).delete(resource.id)\n del self.__volume_transfers[resource.id]\n\n\nclass FakeVolumeSnapshotManager(FakeManager):\n def __init__(self):\n super(FakeVolumeSnapshotManager, self).__init__()\n self.__snapshots = {}\n self.__tenant_id = generate_uuid()\n\n def create(self, name, force=False, display_name=None):\n snapshot = FakeVolumeSnapshot(self)\n snapshot.name = name or snapshot.name\n snapshot.status = \"available\"\n snapshot.tenant_id = self.__tenant_id\n self.__snapshots[snapshot.id] = snapshot\n return self._cache(snapshot)\n\n def list(self):\n return self.__snapshots.values()\n\n def delete(self, resource):\n super(FakeVolumeSnapshotManager, self).delete(resource.id)\n del self.__snapshots[resource.id]\n\n\nclass FakeVolumeBackupManager(FakeManager):\n def __init__(self):\n super(FakeVolumeBackupManager, self).__init__()\n self.__backups = {}\n self.__tenant_id = 
generate_uuid()\n\n def create(self, name):\n backup = FakeVolumeBackup(self)\n backup.name = name or backup.name\n self.__backups[backup.id] = backup\n return self._cache(backup)\n\n def list(self):\n return self.__backups.values()\n\n def delete(self, resource):\n super(FakeVolumeBackupManager, self).delete(resource.id)\n del self.__backups[resource.id]\n\n\nclass FakeRolesManager(FakeManager):\n\n def create(self, role_id, name):\n role = FakeRole(self)\n role.name = name\n role.id = role_id\n return self._cache(role)\n\n def roles_for_user(self, user, tenant):\n role = FakeRole(self)\n role.name = \"admin\"\n return [role, ]\n\n def add_user_role(self, user, role, tenant):\n pass\n\n\nclass FakeMetricManager(FakeManager):\n\n def create(self, **kwargs):\n metric = FakeMetric(self, **kwargs)\n return self._cache(metric)\n\n def get(self, metric_id):\n metric = self.find(metric_id=metric_id)\n return [metric]\n\n\nclass FakeMetricsManager(FakeManager):\n\n def list(self):\n return [\"fake-metric\"]\n\n\nclass FakeQueuesManager(FakeManager):\n def __init__(self):\n super(FakeQueuesManager, self).__init__()\n self.__queues = {}\n\n def create(self, name):\n queue = FakeQueue(self, name)\n self.__queues[queue.name] = queue\n return self._cache(queue)\n\n def list(self):\n return self.__queues.values()\n\n def delete(self, queue):\n super(FakeQueuesManager, self).delete(queue.name)\n del self.__queues[queue.name]\n\n\nclass FakeDbInstanceManager(FakeManager):\n def __init__(self):\n super(FakeDbInstanceManager, self).__init__()\n self.__db_instances = {}\n\n def create(self, name, flavor_id, size):\n instance = FakeDbInstance(self)\n instance.name = name or instance.name\n instance.flavor_id = flavor_id\n instance.size = size\n return self._cache(instance)\n\n def list(self):\n return self.__db_instances.values()\n\n def delete(self, resource):\n if not isinstance(resource, str):\n resource = resource.id\n\n cached = self.get(resource)\n if cached is not None:\n 
cached.status = \"DELETE_COMPLETE\"\n del self.cache[resource]\n self.resources_order.remove(resource)\n\n\nclass FakeMessagesManager(FakeManager):\n def __init__(self, queue=\"myqueue\"):\n super(FakeMessagesManager, self).__init__()\n self.__queue = queue\n self.__messages = {}\n\n def create(self, **kwargs):\n message = FakeMessage(self, **kwargs)\n self.__messages[message.id] = message\n return self._cache(message)\n\n def list(self):\n return self.__messages.values()\n\n def delete(self, message):\n super(FakeMessagesManager, self).delete(message.id)\n del self.__messages[message.id]\n\n\nclass FakeAvailabilityZonesManager(FakeManager):\n def __init__(self):\n super(FakeAvailabilityZonesManager, self).__init__()\n self.zones = FakeAvailabilityZone()\n\n def list(self):\n return [self.zones]\n\n\nclass FakeWorkbookManager(FakeManager):\n def __init__(self):\n super(FakeWorkbookManager, self).__init__()\n self.workbook = FakeWorkbook()\n\n def list(self):\n return [self.workbook]\n\n\nclass FakeWorkflowManager(FakeManager):\n def __init__(self):\n super(FakeWorkflowManager, self).__init__()\n self.workflow = FakeWorkflow()\n\n def list(self):\n return [self.workflow]\n\n\nclass FakeExecutionManager(FakeManager):\n def __init__(self):\n super(FakeExecutionManager, self).__init__()\n self.execution = FakeExecution()\n\n def list(self):\n return [self.execution]\n\n def create(self):\n return self.execution\n\n\nclass FakeObjectManager(FakeManager):\n\n def get_account(self, **kwargs):\n containers = self.list()\n return (mock.MagicMock(), [{\"name\": con.name} for con in containers])\n\n def get_container(self, name, **kwargs):\n container = self.find(name=name)\n if container is None:\n raise swift_exceptions.ClientException(\"Container GET failed\")\n return (mock.MagicMock(), [{\"name\": obj} for obj in container.items])\n\n def put_container(self, name, **kwargs):\n if self.find(name=name):\n raise swift_exceptions.ClientException(\"Container PUT failed\")\n 
self._cache(FakeObject(name=name))\n\n def delete_container(self, name, **kwargs):\n container = self.find(name=name)\n if container is None or len(container.items.keys()) > 0:\n raise swift_exceptions.ClientException(\"Container DELETE failed\")\n self.delete(container.uuid)\n\n def get_object(self, container_name, object_name, **kwargs):\n container = self.find(name=container_name)\n if container is None or object_name not in container.items:\n raise swift_exceptions.ClientException(\"Object GET failed\")\n return (mock.MagicMock(), container.items[object_name])\n\n def put_object(self, container_name, object_name, content, **kwargs):\n container = self.find(name=container_name)\n if container is None:\n raise swift_exceptions.ClientException(\"Object PUT failed\")\n container.items[object_name] = content\n return mock.MagicMock()\n\n def delete_object(self, container_name, object_name, **kwargs):\n container = self.find(name=container_name)\n if container is None or object_name not in container.items:\n raise swift_exceptions.ClientException(\"Object DELETE failed\")\n del container.items[object_name]\n\n\nclass FakeServiceCatalog(object):\n def get_credentials(self):\n return {\"image\": [{\"publicURL\": \"http://fake.to\"}],\n \"metering\": [{\"publicURL\": \"http://fake.to\"}],\n \"monitoring\": [{\"publicURL\": \"http://fake.to\"}]}\n\n def url_for(self, **kwargs):\n return \"http://fake.to\"\n\n\nclass FakeGlanceClient(object):\n\n def __init__(self, version=\"1\"):\n self.images = FakeImageManager()\n self.version = version\n\n\nclass FakeMuranoClient(object):\n\n def __init__(self):\n self.packages = FakePackageManager()\n\n\nclass FakeCinderClient(object):\n\n def __init__(self):\n self.volumes = FakeVolumeManager()\n self.volume_types = FakeVolumeTypeManager()\n self.transfers = FakeVolumeTransferManager()\n self.volume_snapshots = FakeVolumeSnapshotManager()\n self.backups = FakeVolumeBackupManager()\n self.quotas = FakeCinderQuotasManager()\n\n\nclass 
FakeNovaClient(object):\n\n def __init__(self, failed_server_manager=False):\n self.images = FakeImageManager()\n self.servers = FakeServerManager(self.images)\n self.floating_ips = FakeFloatingIPsManager()\n self.floating_ip_pools = FakeFloatingIPPoolsManager()\n self.networks = FakeNetworkManager()\n self.flavors = FakeFlavorManager()\n self.keypairs = FakeKeypairManager()\n self.security_group_rules = FakeSecurityGroupRuleManager()\n self.security_groups = FakeSecurityGroupManager(\n rule_manager=self.security_group_rules)\n self.quotas = FakeNovaQuotasManager()\n self.set_management_url = mock.MagicMock()\n self.availability_zones = FakeAvailabilityZonesManager()\n\n\nclass FakeHeatClient(object):\n\n def __init__(self):\n self.stacks = FakeStackManager()\n\n\nclass FakeDesignateClient(object):\n\n def __init__(self):\n self.domains = FakeDomainManager()\n\n\nclass FakeKeystoneClient(object):\n\n def __init__(self):\n self.tenants = FakeTenantsManager()\n self.users = FakeUsersManager()\n self.roles = FakeRolesManager()\n self.project_id = \"abc123\"\n self.auth_url = \"http://example.com:5000/v2.0/\"\n self.auth_token = \"fake\"\n self.auth_user_id = generate_uuid()\n self.auth_tenant_id = generate_uuid()\n self.service_catalog = FakeServiceCatalog()\n self.services = FakeServicesManager()\n self.region_name = \"RegionOne\"\n self.auth_ref = mock.Mock()\n self.auth_ref.role_names = [\"admin\"]\n self.version = \"v2.0\"\n self.session = mock.MagicMock()\n self.authenticate = mock.MagicMock()\n\n def authenticate(self):\n return True\n\n def list_users(self):\n return self.users.list()\n\n def list_projects(self):\n return self.tenants.list()\n\n def list_services(self):\n return self.services.list()\n\n def list_roles(self):\n return self.roles.list()\n\n def delete_user(self, uuid):\n return self.users.delete(uuid)\n\n\nclass FakeGnocchiClient(object):\n def __init__(self):\n self.metric = FakeMetricManager()\n\n\nclass FakeMonascaClient(object):\n\n def 
__init__(self):\n self.metrics = FakeMetricsManager()\n\n\nclass FakeNeutronClient(object):\n\n def __init__(self, **kwargs):\n self.__networks = {}\n self.__subnets = {}\n self.__routers = {}\n self.__ports = {}\n self.__pools = {}\n self.__vips = {}\n self.__fips = {}\n self.__healthmonitors = {}\n self.__tenant_id = kwargs.get(\"tenant_id\", generate_uuid())\n\n self.format = \"json\"\n self.version = \"2.0\"\n\n @staticmethod\n def _filter(resource_list, search_opts):\n return [res for res in resource_list\n if all(res[field] == value\n for field, value in search_opts.items())]\n\n def add_interface_router(self, router_id, data):\n subnet_id = data[\"subnet_id\"]\n\n if (router_id not in self.__routers\n or subnet_id not in self.__subnets):\n raise neutron_exceptions.NeutronClientException\n\n subnet = self.__subnets[subnet_id]\n\n port = self.create_port(\n {\"port\": {\"network_id\": subnet[\"network_id\"]}})[\"port\"]\n port[\"device_id\"] = router_id\n port[\"fixed_ips\"].append({\"subnet_id\": subnet_id,\n \"ip_address\": subnet[\"gateway_ip\"]})\n\n return {\"subnet_id\": subnet_id,\n \"tenant_id\": port[\"tenant_id\"],\n \"port_id\": port[\"id\"],\n \"id\": router_id}\n\n def create_network(self, data):\n network = setup_dict(data[\"network\"],\n defaults={\"name\": generate_name(\"net_\"),\n \"admin_state_up\": True})\n network_id = generate_uuid()\n network.update({\"id\": network_id,\n \"status\": \"ACTIVE\",\n \"subnets\": [],\n \"provider:physical_network\": None,\n \"tenant_id\": self.__tenant_id,\n \"provider:network_type\": \"local\",\n \"router:external\": True,\n \"shared\": False,\n \"provider:segmentation_id\": None})\n self.__networks[network_id] = network\n return {\"network\": network}\n\n def create_pool(self, data):\n pool = setup_dict(data[\"pool\"],\n required=[\"lb_method\", \"protocol\", \"subnet_id\"],\n defaults={\"name\": generate_name(\"pool_\"),\n \"admin_state_up\": True})\n if pool[\"subnet_id\"] not in self.__subnets:\n raise 
neutron_exceptions.NeutronClientException\n pool_id = generate_uuid()\n\n pool.update({\"id\": pool_id,\n \"status\": \"PENDING_CREATE\",\n \"tenant_id\": self.__tenant_id})\n self.__pools[pool_id] = pool\n return {\"pool\": pool}\n\n def create_vip(self, data):\n vip = setup_dict(data[\"vip\"],\n required=[\"protocol_port\", \"protocol\", \"subnet_id\",\n \"pool_id\"],\n defaults={\"name\": generate_name(\"vip_\"),\n \"admin_state_up\": True})\n if (vip[\"subnet_id\"] not in self.__subnets) or (vip[\"pool_id\"] not in\n self.__pools):\n raise neutron_exceptions.NeutronClientException\n vip_id = generate_uuid()\n\n vip.update({\"id\": vip_id,\n \"status\": \"PENDING_CREATE\",\n \"tenant_id\": self.__tenant_id})\n self.__vips[vip_id] = vip\n return {\"vip\": vip}\n\n def create_floatingip(self, data):\n fip = setup_dict(data[\"floatingip\"],\n required=[\"floating_network\"],\n defaults={\"admin_state_up\": True})\n if (fip[\"floating_network\"] not in self.__nets):\n raise neutron_exceptions.NeutronClientException\n fip_id = generate_uuid()\n\n fip.update({\"id\": fip_id,\n \"tenant_id\": self.__tenant_id})\n self.__fips[fip_id] = fip\n return {\"fip\": fip}\n\n def create_health_monitor(self, data):\n healthmonitor = setup_dict(data[\"healthmonitor\"],\n required=[\"type\", \"timeout\", \"delay\",\n \"max_retries\"],\n defaults={\"admin_state_up\": True})\n healthmonitor_id = generate_uuid()\n\n healthmonitor.update({\"id\": healthmonitor_id,\n \"status\": \"PENDING_CREATE\",\n \"tenant_id\": self.__tenant_id})\n self.__healthmonitors[healthmonitor_id] = healthmonitor\n return {\"healthmonitor\": healthmonitor}\n\n def create_port(self, data):\n port = setup_dict(data[\"port\"],\n required=[\"network_id\"],\n defaults={\"name\": generate_name(\"port_\"),\n \"admin_state_up\": True})\n if port[\"network_id\"] not in self.__networks:\n raise neutron_exceptions.NeutronClientException\n\n port_id = generate_uuid()\n port.update({\"id\": port_id,\n \"status\": 
\"ACTIVE\",\n \"binding:host_id\": \"fakehost\",\n \"extra_dhcp_opts\": [],\n \"binding:vnic_type\": \"normal\",\n \"binding:vif_type\": \"ovs\",\n \"device_owner\": \"\",\n \"mac_address\": generate_mac(),\n \"binding:profile\": {},\n \"binding:vif_details\": {u\"port_filter\": True},\n \"security_groups\": [],\n \"fixed_ips\": [],\n \"device_id\": \"\",\n \"tenant_id\": self.__tenant_id,\n \"allowed_address_pairs\": []})\n self.__ports[port_id] = port\n return {\"port\": port}\n\n def create_router(self, data):\n router = setup_dict(data[\"router\"],\n defaults={\"name\": generate_name(\"router_\"),\n \"external_gateway_info\": None,\n \"admin_state_up\": True})\n router_id = generate_uuid()\n router.update({\"id\": router_id,\n \"status\": \"ACTIVE\",\n \"external_gateway_info\": None,\n \"tenant_id\": self.__tenant_id})\n self.__routers[router_id] = router\n return {\"router\": router}\n\n def create_subnet(self, data):\n subnet = setup_dict(\n data[\"subnet\"],\n required=[\"network_id\", \"cidr\", \"ip_version\"],\n defaults={\"name\": generate_name(\"subnet_\"),\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"]})\n if subnet[\"network_id\"] not in self.__networks:\n raise neutron_exceptions.NeutronClientException\n\n subnet_id = generate_uuid()\n subnet.update({\"id\": subnet_id,\n \"enable_dhcp\": True,\n \"tenant_id\": self.__tenant_id,\n \"ipv6_ra_mode\": None,\n \"allocation_pools\": [],\n \"gateway_ip\": re.sub(\"./.*$\", \"1\", subnet[\"cidr\"]),\n \"ipv6_address_mode\": None,\n \"ip_version\": 4,\n \"host_routes\": []})\n self.__subnets[subnet_id] = subnet\n return {\"subnet\": subnet}\n\n def update_resource(self, resource_id, resource_dict, data):\n if resource_id not in resource_dict:\n raise neutron_exceptions.NeutronClientException\n self.resource_list[resource_id].update(data)\n\n def update_network(self, network_id, data):\n self.update_resource(network_id, self.__networks, data)\n\n def update_pool(self, pool_id, data):\n 
self.update_resource(pool_id, self.__pools, data)\n\n def update_vip(self, vip_id, data):\n self.update_resource(vip_id, self.__vips, data)\n\n def update_health_monitor(self, healthmonitor_id, data):\n self.update_resource(healthmonitor_id, self.__healthmonitors, data)\n\n def update_subnet(self, subnet_id, data):\n self.update_resource(subnet_id, self.__subnets, data)\n\n def update_port(self, port_id, data):\n self.update_resource(port_id, self.__ports, data)\n\n def update_router(self, router_id, data):\n self.update_resource(router_id, self.__routers, data)\n\n def delete_network(self, network_id):\n if network_id not in self.__networks:\n raise neutron_exceptions.NeutronClientException\n for port in self.__ports.values():\n if port[\"network_id\"] == network_id:\n # Network is in use by port\n raise neutron_exceptions.NeutronClientException\n del self.__networks[network_id]\n return \"\"\n\n def delete_pool(self, pool_id):\n if pool_id not in self.__pools:\n raise neutron_exceptions.NeutronClientException\n del self.__pools[pool_id]\n return \"\"\n\n def delete_vip(self, vip_id):\n if vip_id not in self.__vips:\n raise neutron_exceptions.NeutronClientException\n del self.__vips[vip_id]\n\n def delete_health_monitor(self, healthmonitor_id):\n if healthmonitor_id not in self.__healthmonitors:\n raise neutron_exceptions.NeutronClientException\n del self.__healthmonitors[healthmonitor_id]\n return \"\"\n\n def delete_floatingip(self, fip_id):\n if fip_id not in self.__fips:\n raise neutron_exceptions.NeutronClientException\n del self.__fips[fip_id]\n return \"\"\n\n def delete_port(self, port_id):\n if port_id not in self.__ports:\n raise neutron_exceptions.PortNotFoundClient\n if self.__ports[port_id][\"device_owner\"]:\n # Port is owned by some device\n raise neutron_exceptions.NeutronClientException\n del self.__ports[port_id]\n return \"\"\n\n def delete_router(self, router_id):\n if router_id not in self.__routers:\n raise 
neutron_exceptions.NeutronClientException\n for port in self.__ports.values():\n if port[\"device_id\"] == router_id:\n # Router has active port\n raise neutron_exceptions.NeutronClientException\n del self.__routers[router_id]\n return \"\"\n\n def delete_subnet(self, subnet_id):\n if subnet_id not in self.__subnets:\n raise neutron_exceptions.NeutronClientException\n for port in self.__ports.values():\n for fip in port[\"fixed_ips\"]:\n if fip[\"subnet_id\"] == subnet_id:\n # Subnet has IP allocation from some port\n raise neutron_exceptions.NeutronClientException\n del self.__subnets[subnet_id]\n return \"\"\n\n def list_networks(self, **search_opts):\n nets = self._filter(self.__networks.values(), search_opts)\n return {\"networks\": nets}\n\n def list_pools(self, **search_opts):\n pools = self._filter(self.__pools.values(), search_opts)\n return {\"pools\": pools}\n\n def list_vips(self, **search_opts):\n vips = self._filter(self.__vips.values(), search_opts)\n return {\"vips\": vips}\n\n def list_health_monitors(self, **search_opts):\n healthmonitors = self._filter(\n self.__healthmonitors.values(), search_opts)\n return {\"healthmonitors\": healthmonitors}\n\n def list_ports(self, **search_opts):\n ports = self._filter(self.__ports.values(), search_opts)\n return {\"ports\": ports}\n\n def list_routers(self, **search_opts):\n routers = self._filter(self.__routers.values(), search_opts)\n return {\"routers\": routers}\n\n def list_subnets(self, **search_opts):\n subnets = self._filter(self.__subnets.values(), search_opts)\n return {\"subnets\": subnets}\n\n def list_floatingips(self, **search_opts):\n fips = self._filter(self.__fips.values(), search_opts)\n return {\"floatingips\": fips}\n\n def remove_interface_router(self, router_id, data):\n subnet_id = data[\"subnet_id\"]\n\n if (router_id not in self.__routers\n or subnet_id not in self.__subnets):\n raise neutron_exceptions.NeutronClientException\n\n subnet = self.__subnets[subnet_id]\n\n for port_id, 
port in self.__ports.items():\n if port[\"device_id\"] == router_id:\n for fip in port[\"fixed_ips\"]:\n if fip[\"subnet_id\"] == subnet_id:\n del self.__ports[port_id]\n return {\"subnet_id\": subnet_id,\n \"tenant_id\": subnet[\"tenant_id\"],\n \"port_id\": port_id,\n \"id\": router_id}\n\n raise neutron_exceptions.NeutronClientException\n\n def associate_health_monitor(self, pool_id, healthmonitor_id):\n if pool_id not in self.__pools:\n raise neutron_exceptions.NeutronClientException\n if healthmonitor_id not in self.__healthmonitors:\n raise neutron_exceptions.NeutronClientException\n self.__pools[pool_id][\"pool\"][\"healthmonitors\"] = healthmonitor_id\n return {\"pool\": self.__pools[pool_id]}\n\n def disassociate_health_monitor(self, pool_id, healthmonitor_id):\n if pool_id not in self.__pools:\n raise neutron_exceptions.NeutronClientException\n if healthmonitor_id not in self.__healthmonitors:\n raise neutron_exceptions.NeutronClientException\n del self.__pools[pool_id][\"pool\"][\"healthmonitors\"][healthmonitor_id]\n return \"\"\n\n\nclass FakeOctaviaClient(object):\n\n def __init__(self):\n pass\n\n\nclass FakeIronicClient(object):\n\n def __init__(self):\n # TODO(romcheg):Fake Manager subclasses to manage BM nodes.\n pass\n\n\nclass FakeSaharaClient(object):\n\n def __init__(self):\n self.job_executions = mock.MagicMock()\n self.jobs = mock.MagicMock()\n self.job_binary_internals = mock.MagicMock()\n self.job_binaries = mock.MagicMock()\n self.data_sources = mock.MagicMock()\n\n self.clusters = mock.MagicMock()\n self.cluster_templates = mock.MagicMock()\n self.node_group_templates = mock.MagicMock()\n\n self.setup_list_methods()\n\n def setup_list_methods(self):\n mock_with_id = mock.MagicMock()\n mock_with_id.id = 42\n\n # First call of list returns a list with one object, the next should\n # empty after delete.\n self.job_executions.list.side_effect = [[mock_with_id], []]\n self.jobs.list.side_effect = [[mock_with_id], []]\n 
self.job_binary_internals.list.side_effect = [[mock_with_id], []]\n self.job_binaries.list.side_effect = [[mock_with_id], []]\n self.data_sources.list.side_effect = [[mock_with_id], []]\n\n self.clusters.list.side_effect = [[mock_with_id], []]\n self.cluster_templates.list.side_effect = [[mock_with_id], []]\n self.node_group_templates.list.side_effect = [[mock_with_id], []]\n\n\nclass FakeZaqarClient(object):\n\n def __init__(self):\n self.queues = FakeQueuesManager()\n\n def queue(self, name, **kwargs):\n return self.queues.create(name, **kwargs)\n\n\nclass FakeTroveClient(object):\n\n def __init__(self):\n self.instances = FakeDbInstanceManager()\n\n\nclass FakeMistralClient(object):\n\n def __init__(self):\n self.workbook = FakeWorkbookManager()\n self.workflow = FakeWorkflowManager()\n self.execution = FakeExecutionManager()\n\n\nclass FakeSwiftClient(FakeObjectManager):\n pass\n\n\nclass FakeEC2Client(object):\n\n def __init__(self):\n pass\n\n\nclass FakeSenlinClient(object):\n\n def __init__(self):\n # TODO(Yanyan Hu):Fake interfaces of senlinclient.\n pass\n\n\nclass FakeMagnumClient(object):\n\n def __init__(self):\n self.cluster_templates = FakeClusterTemplateManager()\n\n\nclass FakeWatcherClient(object):\n\n def __init__(self):\n self.strategy = FakeStrategyManager()\n self.goal = FakeGoalManager()\n\n\nclass FakeBarbicanClient(object):\n\n def __init__(self):\n pass\n\n\nclass FakeClients(object):\n\n def __init__(self, credential_=None):\n self._nova = None\n self._glance = None\n self._keystone = None\n self._cinder = None\n self._neutron = None\n self._octavia = None\n self._sahara = None\n self._heat = None\n self._designate = None\n self._zaqar = None\n self._trove = None\n self._mistral = None\n self._swift = None\n self._murano = None\n self._monasca = None\n self._ec2 = None\n self._senlin = None\n self._watcher = None\n self._barbican = None\n self._credential = credential_ or FakeCredential(\n 
auth_url=\"http://fake.example.org:5000/v2.0/\",\n username=\"fake_username\",\n password=\"fake_password\",\n tenant_name=\"fake_tenant_name\")\n\n def keystone(self, version=None):\n if not self._keystone:\n self._keystone = FakeKeystoneClient()\n return self._keystone\n\n def verified_keystone(self):\n return self.keystone()\n\n def nova(self):\n if not self._nova:\n self._nova = FakeNovaClient()\n return self._nova\n\n def glance(self, version=\"1\"):\n if not self._glance:\n self._glance = FakeGlanceClient(version)\n return self._glance\n\n def cinder(self):\n if not self._cinder:\n self._cinder = FakeCinderClient()\n return self._cinder\n\n def neutron(self):\n if not self._neutron:\n self._neutron = FakeNeutronClient()\n return self._neutron\n\n def octavia(self):\n if not self._octavia:\n self._octavia = FakeOctaviaClient()\n return self._octavia\n\n def sahara(self):\n if not self._sahara:\n self._sahara = FakeSaharaClient()\n return self._sahara\n\n def heat(self):\n if not self._heat:\n self._heat = FakeHeatClient()\n return self._heat\n\n def designate(self):\n if not self._designate:\n self._designate = FakeDesignateClient()\n return self._designate\n\n def monasca(self):\n if not self._monasca:\n self._monasca = FakeMonascaClient()\n return self._monasca\n\n def zaqar(self):\n if not self._zaqar:\n self._zaqar = FakeZaqarClient()\n return self._zaqar\n\n def trove(self):\n if not self._trove:\n self._trove = FakeTroveClient()\n return self._trove\n\n def mistral(self):\n if not self._mistral:\n self._mistral = FakeMistralClient()\n return self._mistral\n\n def swift(self):\n if not self._swift:\n self._swift = FakeSwiftClient()\n return self._swift\n\n def murano(self):\n if not self._murano:\n self._murano = FakeMuranoClient()\n return self._murano\n\n def ec2(self):\n if not self._ec2:\n self._ec2 = FakeEC2Client()\n return self._ec2\n\n def senlin(self):\n if not self._senlin:\n self._senlin = FakeSenlinClient()\n return self._senlin\n\n def 
watcher(self):\n if not self._watcher:\n self._watcher = FakeWatcherClient()\n return self._watcher\n\n def barbican(self):\n if not self._barbican:\n self._barbican = FakeBarbicanClient()\n return self._barbican\n\n\nclass FakeRunner(object):\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"type\": {\n \"type\": \"string\",\n \"enum\": [\"fake\"]\n },\n\n \"a\": {\n \"type\": \"string\"\n },\n\n \"b\": {\n \"type\": \"number\"\n }\n },\n \"required\": [\"type\", \"a\"]\n }\n\n\nclass FakeScenario(scenario.Scenario):\n\n def idle_time(self):\n return 0\n\n def do_it(self, **kwargs):\n pass\n\n def with_output(self, **kwargs):\n return {\"data\": {\"a\": 1}, \"error\": None}\n\n def with_add_output(self):\n self.add_output(additive={\"title\": \"Additive\",\n \"description\": \"Additive description\",\n \"data\": [[\"a\", 1]],\n \"chart_plugin\": \"FooPlugin\"},\n complete={\"title\": \"Complete\",\n \"description\": \"Complete description\",\n \"data\": [[\"a\", [[1, 2], [2, 3]]]],\n \"chart_plugin\": \"BarPlugin\"})\n\n def too_long(self, **kwargs):\n pass\n\n def something_went_wrong(self, **kwargs):\n raise Exception(\"Something went wrong\")\n\n def raise_timeout(self, **kwargs):\n raise multiprocessing.TimeoutError()\n\n\[email protected](name=\"classbased.fooscenario\")\nclass FakeClassBasedScenario(FakeScenario):\n \"\"\"Fake class-based scenario.\"\"\"\n\n def run(self, *args, **kwargs):\n pass\n\n\nclass FakeTimer(rally_utils.Timer):\n\n def duration(self):\n return 10\n\n def timestamp(self):\n return 0\n\n def finish_timestamp(self):\n return 3\n\n\[email protected](name=\"fake\", order=1)\nclass FakeContext(context.Context):\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"test\": {\n \"type\": \"integer\"\n },\n },\n \"additionalProperties\": False\n }\n\n def __init__(self, context_obj=None):\n context_obj = context_obj or {}\n 
context_obj.setdefault(\"config\", {})\n context_obj[\"config\"].setdefault(\"fake\", None)\n context_obj.setdefault(\"task\", mock.MagicMock())\n super(FakeContext, self).__init__(context_obj)\n\n def setup(self):\n pass\n\n def cleanup(self):\n pass\n\n\[email protected](name=\"fake_hidden_context\", order=1, hidden=True)\nclass FakeHiddenContext(FakeContext):\n pass\n\n\[email protected](name=\"fake_user_context\", order=1)\nclass FakeUserContext(FakeContext):\n\n admin = {\n \"id\": \"adminuuid\",\n \"credential\": FakeCredential(\n auth_url=\"aurl\",\n username=\"aname\",\n password=\"apwd\",\n tenant_name=\"atenant\")\n }\n user = {\n \"id\": \"uuid\",\n \"credential\": FakeCredential(\n auth_url=\"url\",\n username=\"name\",\n password=\"pwd\",\n tenant_name=\"tenant\"),\n \"tenant_id\": \"uuid\"\n }\n tenants = {\"uuid\": {\"name\": \"tenant\"}}\n\n def __init__(self, ctx):\n super(FakeUserContext, self).__init__(ctx)\n self.context.setdefault(\"admin\", FakeUserContext.admin)\n self.context.setdefault(\"users\", [FakeUserContext.user])\n self.context.setdefault(\"tenants\", FakeUserContext.tenants)\n self.context.setdefault(\n \"scenario_name\", \"NovaServers.boot_server_from_volume_and_delete\")\n\n\nclass FakeDeployment(dict):\n\n def __init__(self, **kwargs):\n platform = kwargs.pop(\"platform\", \"openstack\")\n kwargs[\"credentials\"] = {\n platform: [{\"admin\": kwargs.pop(\"admin\", None),\n \"users\": kwargs.pop(\"users\", [])}],\n \"default\": [{\"admin\": None, \"users\": []}]}\n dict.__init__(self, **kwargs)\n self.update_status = mock.Mock()\n self.env_obj = mock.Mock()\n\n def get_platforms(self):\n return [platform for platform in self[\"credentials\"]]\n\n def get_credentials_for(self, platform):\n return self[\"credentials\"][platform][0]\n\n def verify_connections(self):\n pass\n\n def get_validation_context(self):\n return {}\n\n\nclass FakeEnvironment(object):\n def __init__(self, env_uuid, data):\n self.uuid = env_uuid\n self.data = 
data\n\n @property\n def cached_data(self):\n return self.data\n\n\nclass FakeTask(dict, object):\n\n def __init__(self, task=None, temporary=False, **kwargs):\n self.is_temporary = temporary\n self.update_status = mock.Mock()\n self.set_failed = mock.Mock()\n self.set_validation_failed = mock.Mock()\n task = task or {}\n for k, v in itertools.chain(task.items(), kwargs.items()):\n self[k] = v\n self.task = self\n\n def to_dict(self):\n return self\n\n\nclass FakeAPI(object):\n\n def __init__(self):\n self._deployment = mock.create_autospec(api._Deployment)\n self._task = mock.create_autospec(api._Task)\n self._verifier = mock.create_autospec(api._Verifier)\n self._verification = mock.create_autospec(api._Verification)\n\n @property\n def deployment(self):\n return self._deployment\n\n @property\n def task(self):\n return self._task\n\n @property\n def verifier(self):\n return self._verifier\n\n @property\n def verification(self):\n return self._verification\n" }, { "alpha_fraction": 0.6527777910232544, "alphanum_fraction": 0.6564815044403076, "avg_line_length": 33.28571319580078, "blob_id": "9d5a3cae128ffaa29f53a7293a178e4e2111eeef", "content_id": "d0a49a87c04a859faa50fa53e63fbecdc16476af", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2160, "license_type": "permissive", "max_line_length": 79, "num_lines": 63, "path": "/rally_openstack/task/scenarios/zaqar/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright (c) 2014 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import atomic\n\nfrom rally_openstack.task import scenario\n\n\nclass ZaqarScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Zaqar scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"zaqar.create_queue\")\n def _queue_create(self, **kwargs):\n \"\"\"Create a Zaqar queue with random name.\n\n :param kwargs: other optional parameters to create queues like\n \"metadata\"\n :returns: Zaqar queue instance\n \"\"\"\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)\n\n @atomic.action_timer(\"zaqar.delete_queue\")\n def _queue_delete(self, queue):\n \"\"\"Removes a Zaqar queue.\n\n :param queue: queue to remove\n \"\"\"\n\n queue.delete()\n\n def _messages_post(self, queue, messages, min_msg_count, max_msg_count):\n \"\"\"Post a list of messages to a given Zaqar queue.\n\n :param queue: post the messages to queue\n :param messages: messages to post\n :param min_msg_count: minimum number of messages\n :param max_msg_count: maximum number of messages\n \"\"\"\n with atomic.ActionTimer(self, \"zaqar.post_between_%s_and_%s_messages\" %\n (min_msg_count, max_msg_count)):\n queue.post(messages)\n\n @atomic.action_timer(\"zaqar.list_messages\")\n def _messages_list(self, queue):\n \"\"\"Gets messages from a given Zaqar queue.\n\n :param queue: get messages from queue\n :returns: messages iterator\n \"\"\"\n\n return queue.messages()\n" }, { "alpha_fraction": 0.6845637559890747, "alphanum_fraction": 0.6872483491897583, "avg_line_length": 39.82191848754883, "blob_id": 
"846d86e8e81b00ce20258a959b187e32e21323d7", "content_id": "8888433fbec9a422a1e65424f6ad7733e43ae268", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2980, "license_type": "permissive", "max_line_length": 78, "num_lines": 73, "path": "/rally_openstack/task/scenarios/gnocchi/metric.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils\n\n\"\"\"Scenarios for Gnocchi metric.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"GnocchiMetric.list_metric\")\nclass ListMetric(gnocchiutils.GnocchiBase):\n\n def run(self, limit=None):\n \"\"\"List metrics.\n\n :param limit: Maximum number of metrics to list\n \"\"\"\n self.gnocchi.list_metric(limit=limit)\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"gnocchi.metric\"]},\n name=\"GnocchiMetric.create_metric\")\nclass 
CreateMetric(gnocchiutils.GnocchiBase):\n\n def run(self, archive_policy_name=\"low\", resource_id=None, unit=None):\n \"\"\"Create metric.\n\n :param archive_policy_name: Archive policy name\n :param resource_id: The resource ID to attach the metric to\n :param unit: The unit of the metric\n \"\"\"\n name = self.generate_random_name()\n self.gnocchi.create_metric(name,\n archive_policy_name=archive_policy_name,\n resource_id=resource_id, unit=unit)\n\n\[email protected](\"required_services\", services=[consts.Service.GNOCCHI])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"gnocchi.metric\"]},\n name=\"GnocchiMetric.create_delete_metric\")\nclass CreateDeleteMetric(gnocchiutils.GnocchiBase):\n\n def run(self, archive_policy_name=\"low\", resource_id=None, unit=None):\n \"\"\"Create metric and then delete it.\n\n :param archive_policy_name: Archive policy name\n :param resource_id: The resource ID to attach the metric to\n :param unit: The unit of the metric\n \"\"\"\n name = self.generate_random_name()\n metric = self.gnocchi.create_metric(\n name, archive_policy_name=archive_policy_name,\n resource_id=resource_id, unit=unit)\n self.gnocchi.delete_metric(metric[\"id\"])\n" }, { "alpha_fraction": 0.6217644810676575, "alphanum_fraction": 0.6254023909568787, "avg_line_length": 42.81766128540039, "blob_id": "11bc6a24a98670b9c5212661ce1ceca675d4cf8c", "content_id": "0484079d409ca7425739a6685463b048c8b15763", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38209, "license_type": "permissive", "max_line_length": 79, "num_lines": 872, "path": "/rally_openstack/task/scenarios/neutron/network.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Intel Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file 
except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.neutron import utils\n\n\nLOG = logging.getLogger(__name__)\n\n\n\"\"\"Scenarios for Neutron.\"\"\"\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_list_networks\",\n platform=\"openstack\")\nclass CreateAndListNetworks(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None):\n \"\"\"Create a network and then list all networks.\n\n Measure the \"neutron net-list\" command performance.\n\n If you have only 1 user in your context, you will\n add 1 network on every iteration. 
So you will have more\n and more networks and will be able to measure the\n performance of the \"neutron net-list\" command depending on\n the number of networks owned by users.\n\n :param network_create_args: dict, POST /v2.0/networks request options\n \"\"\"\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_show_network\",\n platform=\"openstack\")\nclass CreateAndShowNetwork(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None):\n \"\"\"Create a network and show network details.\n\n Measure the \"neutron net-show\" command performance.\n\n :param network_create_args: dict, POST /v2.0/networks request options\n \"\"\"\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_update_args\")\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_update_networks\",\n platform=\"openstack\")\nclass CreateAndUpdateNetworks(utils.NeutronBaseScenario):\n\n def run(self, network_update_args, network_create_args=None):\n \"\"\"Create and update a network.\n\n Measure the \"neutron net-create and net-update\" command performance.\n\n :param network_update_args: dict, PUT /v2.0/networks update request\n :param 
network_create_args: dict, POST /v2.0/networks request options\n \"\"\"\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_delete_networks\",\n platform=\"openstack\")\nclass CreateAndDeleteNetworks(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None):\n \"\"\"Create and delete a network.\n\n Measure the \"neutron net-create\" and \"net-delete\" command performance.\n\n :param network_create_args: dict, POST /v2.0/networks request options\n \"\"\"\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.delete_network(network[\"id\"])\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"subnet_create_args\")\[email protected](\"number\", param_name=\"subnets_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_list_subnets\",\n platform=\"openstack\")\nclass CreateAndListSubnets(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n \"\"\"Create and a given number of subnets and list all subnets.\n\n The scenario creates a network, a given number of subnets and then\n lists subnets.\n\n :param network_create_args: dict, POST /v2.0/networks request\n options. 
Deprecated\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param subnets_per_network: int, number of subnets for one network\n \"\"\"\n network = self.neutron.create_network(**(network_create_args or {}))\n for _ in range(subnets_per_network):\n self.neutron.create_subnet(network[\"id\"],\n start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n self.neutron.list_subnets()\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"subnet_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"subnet_update_args\")\[email protected](\"number\", param_name=\"subnets_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_update_subnets\",\n platform=\"openstack\")\nclass CreateAndUpdateSubnets(utils.NeutronBaseScenario):\n\n def run(self, subnet_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n \"\"\"Create and update a subnet.\n\n The scenario creates a network, a given number of subnets\n and then updates the subnet. This scenario measures the\n \"neutron subnet-update\" command performance.\n\n :param subnet_update_args: dict, PUT /v2.0/subnets update options\n :param network_create_args: dict, POST /v2.0/networks request\n options. 
Deprecated.\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param subnets_per_network: int, number of subnets for one network\n \"\"\"\n network = self.neutron.create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.update_subnet(subnet[\"id\"], **subnet_update_args)\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"subnet_create_args\")\[email protected](\"number\", param_name=\"subnets_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_show_subnets\",\n platform=\"openstack\")\nclass CreateAndShowSubnets(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1):\n \"\"\"Create and show a subnet details.\n\n The scenario creates a network, a given number of subnets\n and show the subnet details. 
This scenario measures the\n \"neutron subnet-show\" command performance.\n\n :param network_create_args: dict, POST /v2.0/networks request\n options.\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param subnets_per_network: int, number of subnets for one network\n \"\"\"\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.get_subnet(subnet[\"id\"])\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"subnet_create_args\")\[email protected](\"number\", param_name=\"subnets_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_delete_subnets\",\n platform=\"openstack\")\nclass CreateAndDeleteSubnets(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1):\n \"\"\"Create and delete a given number of subnets.\n\n The scenario creates a network, a given number of subnets and then\n deletes subnets.\n\n :param network_create_args: dict, POST /v2.0/networks request\n options. 
Deprecated.\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param subnets_per_network: int, number of subnets for one network\n \"\"\"\n network = self._get_or_create_network(**(network_create_args or {}))\n subnets = []\n for _ in range(subnets_per_network):\n subnets.append(\n self.neutron.create_subnet(\n network[\"id\"], start_cidr=subnet_cidr_start,\n **(subnet_create_args or {}))\n )\n for subnet in subnets:\n self.neutron.delete_subnet(subnet[\"id\"])\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"subnet_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"router_create_args\")\[email protected](\"number\", param_name=\"subnets_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_list_routers\",\n platform=\"openstack\")\nclass CreateAndListRouters(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n \"\"\"Create and a given number of routers and list all routers.\n\n Create a network, a given number of subnets and routers\n and then list all routers.\n\n :param network_create_args: dict, POST /v2.0/networks request\n options. 
Deprecated.\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param subnets_per_network: int, number of subnets for one network\n :param router_create_args: dict, POST /v2.0/routers request options\n \"\"\"\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n self.neutron.list_routers()\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"subnet_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"router_create_args\")\[email protected](\"number\", param_name=\"subnets_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\", services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_show_routers\",\n platform=\"openstack\")\nclass CreateAndShowRouters(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n \"\"\"Create and show a given number of routers.\n\n Create a network, a given number of subnets and routers\n and then show all routers.\n\n :param network_create_args: dict, POST /v2.0/networks request\n options\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param subnets_per_network: int, number of subnets for 
each network\n :param router_create_args: dict, POST /v2.0/routers request options\n \"\"\"\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for router in net_topo[\"routers\"]:\n self.neutron.get_router(router[\"id\"])\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"subnet_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"router_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"router_update_args\")\[email protected](\"number\", param_name=\"subnets_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_update_routers\",\n platform=\"openstack\")\nclass CreateAndUpdateRouters(utils.NeutronBaseScenario):\n\n def run(self, router_update_args, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n subnets_per_network=1, router_create_args=None):\n \"\"\"Create and update a given number of routers.\n\n Create a network, a given number of subnets and routers\n and then updating all routers.\n\n :param router_update_args: dict, PUT /v2.0/routers update options\n :param network_create_args: dict, POST /v2.0/networks request\n options. 
Deprecated.\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param subnets_per_network: int, number of subnets for one network\n :param router_create_args: dict, POST /v2.0/routers request options\n \"\"\"\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for router in net_topo[\"routers\"]:\n self.neutron.update_router(router[\"id\"], **router_update_args)\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"subnet_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"router_create_args\")\[email protected](\"number\", param_name=\"subnets_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_delete_routers\",\n platform=\"openstack\")\nclass CreateAndDeleteRouters(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None, subnet_create_args=None,\n subnet_cidr_start=None, subnets_per_network=1,\n router_create_args=None):\n \"\"\"Create and delete a given number of routers.\n\n Create a network, a given number of subnets and routers\n and then delete all routers.\n\n :param network_create_args: dict, POST /v2.0/networks request\n options. 
Deprecated.\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param subnets_per_network: int, number of subnets for one network\n :param router_create_args: dict, POST /v2.0/routers request options\n \"\"\"\n subnet_create_args = dict(subnet_create_args or {})\n subnet_create_args[\"start_cidr\"] = subnet_cidr_start\n\n net_topo = self.neutron.create_network_topology(\n network_create_args=(network_create_args or {}),\n router_create_args=(router_create_args or {}),\n router_per_subnet=True,\n subnet_create_args=subnet_create_args,\n subnets_count=subnets_per_network\n )\n\n for e in range(subnets_per_network):\n router = net_topo[\"routers\"][e]\n subnet = net_topo[\"subnets\"][e]\n self.neutron.remove_interface_from_router(subnet_id=subnet[\"id\"],\n router_id=router[\"id\"])\n self.neutron.delete_router(router[\"id\"])\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"router_create_args\")\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.set_and_clear_router_gateway\",\n platform=\"openstack\")\nclass SetAndClearRouterGateway(utils.NeutronBaseScenario):\n\n def run(self, enable_snat=True, network_create_args=None,\n router_create_args=None):\n \"\"\"Set and Remove the external network gateway from a router.\n\n create an external network and a router, set external network\n gateway for the router, remove the external network gateway from\n the router.\n\n :param enable_snat: True if enable snat\n :param network_create_args: dict, POST /v2.0/networks request\n options\n :param router_create_args: dict, POST /v2.0/routers request options\n \"\"\"\n 
network_create_args = network_create_args or {}\n router_create_args = router_create_args or {}\n\n ext_net = self.neutron.create_network(**network_create_args)\n router = self.neutron.create_router(**router_create_args)\n self.neutron.add_gateway_to_router(router_id=router[\"id\"],\n network_id=ext_net[\"id\"],\n enable_snat=enable_snat)\n self.neutron.remove_gateway_from_router(router[\"id\"])\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"port_create_args\")\[email protected](\"number\", param_name=\"ports_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_list_ports\",\n platform=\"openstack\")\nclass CreateAndListPorts(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n \"\"\"Create and a given number of ports and list all ports.\n\n :param network_create_args: dict, POST /v2.0/networks request\n options. 
Deprecated.\n :param port_create_args: dict, POST /v2.0/ports request options\n :param ports_per_network: int, number of ports for one network\n \"\"\"\n network = self._get_or_create_network(**(network_create_args or {}))\n for i in range(ports_per_network):\n self.neutron.create_port(network[\"id\"], **(port_create_args or {}))\n\n self.neutron.list_ports()\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"port_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"port_update_args\")\[email protected](\"number\", param_name=\"ports_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_update_ports\",\n platform=\"openstack\")\nclass CreateAndUpdatePorts(utils.NeutronBaseScenario):\n\n def run(self, port_update_args, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n \"\"\"Create and update a given number of ports.\n\n Measure the \"neutron port-create\" and \"neutron port-update\" commands\n performance.\n\n :param port_update_args: dict, PUT /v2.0/ports update request options\n :param network_create_args: dict, POST /v2.0/networks request\n options. 
Deprecated.\n :param port_create_args: dict, POST /v2.0/ports request options\n :param ports_per_network: int, number of ports for one network\n \"\"\"\n network = self._get_or_create_network(**(network_create_args or {}))\n for i in range(ports_per_network):\n port = self.neutron.create_port(\n network[\"id\"], **(port_create_args or {}))\n self.neutron.update_port(port[\"id\"], **port_update_args)\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"port_create_args\")\[email protected](\"number\", param_name=\"ports_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_show_ports\",\n platform=\"openstack\")\nclass CreateAndShowPorts(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n \"\"\"Create a given number of ports and show created ports in trun.\n\n Measure the \"neutron port-create\" and \"neutron port-show\" commands\n performance.\n\n :param network_create_args: dict, POST /v2.0/networks request\n options.\n :param port_create_args: dict, POST /v2.0/ports request options\n :param ports_per_network: int, number of ports for one network\n \"\"\"\n network = self._get_or_create_network(**(network_create_args or {}))\n for i in range(ports_per_network):\n port = self.neutron.create_port(\n network[\"id\"], **(port_create_args or {}))\n\n self.neutron.get_port(port[\"id\"])\n\n\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"network_create_args\")\[email protected](\"restricted_parameters\",\n param_names=\"name\",\n subdict=\"port_create_args\")\[email protected](\"number\", 
param_name=\"ports_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_delete_ports\",\n platform=\"openstack\")\nclass CreateAndDeletePorts(utils.NeutronBaseScenario):\n\n def run(self, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n \"\"\"Create and delete a port.\n\n Measure the \"neutron port-create\" and \"neutron port-delete\"\n commands performance.\n\n :param network_create_args: dict, POST /v2.0/networks request\n options. Deprecated.\n :param port_create_args: dict, POST /v2.0/ports request options\n :param ports_per_network: int, number of ports for one network\n \"\"\"\n network = self._get_or_create_network(**(network_create_args or {}))\n for i in range(ports_per_network):\n port = self.neutron.create_port(\n network[\"id\"], **(port_create_args or {}))\n\n self.neutron.delete_port(port[\"id\"])\n\n\[email protected](\"number\", param_name=\"ports_per_network\", minval=1,\n integer_only=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_contexts\", contexts=[\"network\", \"networking_agents\"])\[email protected](\"required_platform\", platform=\"openstack\",\n users=True, admin=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"],\n \"networking_agents@openstack\": {},\n \"network@openstack\": {}},\n name=\"NeutronNetworks.create_and_bind_ports\",\n platform=\"openstack\")\nclass CreateAndBindPorts(utils.NeutronBaseScenario):\n\n def run(self, ports_per_network=1):\n \"\"\"Bind a given number of ports.\n\n Measure the performance of port binding and all of its pre-requisites:\n * openstack network create\n * openstack subnet create --ip-version 4\n * openstack subnet create --ip-version 6\n * openstack port create\n * openstack port update (binding)\n\n :param 
ports_per_network: int, number of ports for one network\n \"\"\"\n\n # NOTE(bence romsics): Find a host where we can expect to bind\n # successfully. Look at agent types used in the gate.\n host_to_bind = None\n for agent in self.context[\"networking_agents\"]:\n if (agent[\"admin_state_up\"]\n and agent[\"alive\"]\n and agent[\"agent_type\"] in\n cfg.CONF.openstack.neutron_bind_l2_agent_types):\n host_to_bind = agent[\"host\"]\n if host_to_bind is None:\n raise Exception(\n \"No live agent of type(s) to bind was found: %s\" %\n \", \".join(cfg.CONF.openstack.neutron_bind_l2_agent_types))\n\n tenant_id = self.context[\"tenant\"][\"id\"]\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n self.neutron.create_subnet(network_id=network[\"id\"], ip_version=4)\n self.neutron.create_subnet(network_id=network[\"id\"], ip_version=6)\n\n for i in range(ports_per_network):\n port = self.neutron.create_port(network_id=network[\"id\"])\n # port bind needs admin role\n self.admin_neutron.update_port(\n port_id=port[\"id\"],\n device_owner=\"compute:nova\",\n device_id=\"ba805478-85ff-11e9-a2e4-2b8dea218fc8\",\n **{\"binding:host_id\": host_to_bind},\n )\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"external_network_exists\", param_name=\"floating_network\")\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_list_floating_ips\",\n platform=\"openstack\")\nclass CreateAndListFloatingIps(utils.NeutronBaseScenario):\n\n def run(self, floating_network=None, floating_ip_args=None):\n \"\"\"Create and list floating IPs.\n\n Measure the \"neutron floating-ip-create\" and \"neutron floating-ip-list\"\n commands performance.\n\n :param floating_network: str, external network for floating IP creation\n :param floating_ip_args: dict, POST /floatingips request options\n \"\"\"\n 
floating_ip_args = floating_ip_args or {}\n self.neutron.create_floatingip(floating_network=floating_network,\n **floating_ip_args)\n self.neutron.list_floatingips()\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"external_network_exists\", param_name=\"floating_network\")\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.create_and_delete_floating_ips\",\n platform=\"openstack\")\nclass CreateAndDeleteFloatingIps(utils.NeutronBaseScenario):\n\n def run(self, floating_network=None, floating_ip_args=None):\n \"\"\"Create and delete floating IPs.\n\n Measure the \"neutron floating-ip-create\" and \"neutron\n floating-ip-delete\" commands performance.\n\n :param floating_network: str, external network for floating IP creation\n :param floating_ip_args: dict, POST /floatingips request options\n \"\"\"\n floating_ip_args = floating_ip_args or {}\n floatingip = self.neutron.create_floatingip(\n floating_network=floating_network, **floating_ip_args)\n self.neutron.delete_floatingip(floatingip[\"id\"])\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"external_network_exists\", param_name=\"floating_network\")\[email protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronNetworks.associate_and_dissociate_floating_ips\",\n platform=\"openstack\")\nclass AssociateAndDissociateFloatingIps(utils.NeutronBaseScenario):\n\n def run(self, floating_network=None):\n \"\"\"Associate and dissociate floating IPs.\n\n Measure the \"openstack floating ip set\" and\n \"openstack floating ip unset\" commands performance.\n Because of the prerequisites for \"floating ip set/unset\" we also\n measure the performance of the following commands:\n\n * \"openstack network 
create\"\n * \"openstack subnet create\"\n * \"openstack port create\"\n * \"openstack router create\"\n * \"openstack router set --external-gateway\"\n * \"openstack router add subnet\"\n\n :param floating_network: str, external network for floating IP creation\n \"\"\"\n floating_network = self.neutron.find_network(floating_network,\n external=True)\n floating_ip = self.neutron.create_floatingip(\n floating_network=floating_network)\n\n private_network = self.neutron.create_network()\n subnet = self.neutron.create_subnet(network_id=private_network[\"id\"])\n port = self.neutron.create_port(network_id=private_network[\"id\"])\n\n router = self.neutron.create_router()\n self.neutron.add_gateway_to_router(\n router[\"id\"], network_id=floating_network[\"id\"])\n self.neutron.add_interface_to_router(\n subnet_id=subnet[\"id\"], router_id=router[\"id\"])\n\n self.neutron.associate_floatingip(\n floatingip_id=floating_ip[\"id\"], port_id=port[\"id\"])\n self.neutron.dissociate_floatingip(floatingip_id=floating_ip[\"id\"])\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"NeutronNetworks.list_agents\", platform=\"openstack\")\nclass ListAgents(utils.NeutronBaseScenario):\n\n def run(self, agent_args=None):\n \"\"\"List all neutron agents.\n\n This simple scenario tests the \"neutron agent-list\" command by\n listing all the neutron agents.\n\n :param agent_args: dict, POST /v2.0/agents request options\n \"\"\"\n agent_args = agent_args or {}\n self.neutron.list_agents(**agent_args)\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronSubnets.delete_subnets\",\n 
platform=\"openstack\")\nclass DeleteSubnets(utils.NeutronBaseScenario):\n\n def run(self):\n \"\"\"Delete a subnet that belongs to each precreated network.\n\n Each runner instance picks a specific subnet from the list based on its\n positional location in the list of users. By doing so, we can start\n multiple threads with sufficient number of users created and spread\n delete requests across all of them, so that they hit different subnets\n concurrently.\n\n Concurrent execution of this scenario should help reveal any race\n conditions and other concurrency issues in Neutron IP allocation layer,\n among other things.\n \"\"\"\n tenant_id = self.context[\"tenant\"][\"id\"]\n users = self.context[\"tenants\"][tenant_id][\"users\"]\n number = users.index(self.context[\"user\"])\n for network in self.context[\"tenants\"][tenant_id][\"networks\"]:\n # delete one of subnets based on the user sequential number\n subnet_id = network[\"subnets\"][number]\n self.neutron.delete_subnet(subnet_id)\n" }, { "alpha_fraction": 0.5850905179977417, "alphanum_fraction": 0.5894568562507629, "avg_line_length": 47.153846740722656, "blob_id": "8e362c8bc1affd64b5915877b4400b4a2230398b", "content_id": "0f8f5ac15db2892924ae211cc24f900355e09ae4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9390, "license_type": "permissive", "max_line_length": 78, "num_lines": 195, "path": "/rally_openstack/task/scenarios/neutron/trunk.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Intel Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.neutron import utils as neutron_utils\nfrom rally_openstack.task.scenarios.nova import utils as nova_utils\n\n\nCONF = cfg.CONF\n\n\"\"\"Scenarios for Neutron Trunk.\"\"\"\n\n\[email protected](\"number\", param_name=\"subport_count\", minval=1,\n integer_only=True)\[email protected](\"required_services\", services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronTrunks.create_and_list_trunks\")\nclass CreateAndListTrunks(neutron_utils.NeutronScenario):\n\n def run(self, network_create_args=None, subport_count=10):\n \"\"\"Create a given number of trunks with subports and list all trunks.\n\n :param network_create_args: dict, POST /v2.0/networks request\n options. 
Deprecated.\n :param trunk_count: int, number of trunk ports\n :param subport_count: int, number of subports per trunk\n \"\"\"\n net = self._create_network(network_create_args or {})\n ports = [self._create_port(net, {}) for _ in range(subport_count + 1)]\n parent, subports = ports[0], ports[1:]\n subport_payload = [{\"port_id\": p[\"port\"][\"id\"],\n \"segmentation_type\": \"vlan\",\n \"segmentation_id\": seg_id}\n for seg_id, p in enumerate(subports, start=1)]\n trunk_payload = {\"port_id\": parent[\"port\"][\"id\"],\n \"sub_ports\": subport_payload}\n trunk = self._create_trunk(trunk_payload)\n self._list_trunks()\n self._list_subports_by_trunk(trunk[\"trunk\"][\"id\"])\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=(consts.Service.NOVA,\n consts.Service.NEUTRON))\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\", \"nova\"]},\n name=\"NeutronTrunks.boot_server_with_subports\",\n platform=\"openstack\")\nclass BootServerWithSubports(nova_utils.NovaScenario,\n neutron_utils.NeutronScenario):\n\n def run(self, image, flavor, network_create_args=None, subport_count=10):\n \"\"\"Boot a server with subports.\n\n Returns when the server is actually booted and in \"ACTIVE\" state.\n :param image: image ID or instance for server creation\n :param flavor: int, flavor ID or instance for server creation\n :param network_create_args: arguments for creating network\n :param subport_count: number of subports for the trunk port\n \"\"\"\n kwargs = {}\n ports = []\n network_create_args = network_create_args or {}\n for _ in range(subport_count + 1):\n net, subnet = self._create_network_and_subnets(\n network_create_args=network_create_args)\n ports.append(self._create_port(\n net, {\"fixed_ips\": 
[{\n \"subnet_id\": subnet[0][\"subnet\"][\"id\"]}]}))\n parent, subports = ports[0], ports[1:]\n subport_payload = [{\"port_id\": p[\"port\"][\"id\"],\n \"segmentation_type\": \"vlan\",\n \"segmentation_id\": seg_id}\n for seg_id, p in enumerate(subports, start=1)]\n trunk_payload = {\"port_id\": parent[\"port\"][\"id\"],\n \"sub_ports\": subport_payload}\n self._create_trunk(trunk_payload)\n kwargs[\"nics\"] = [{\"port-id\": parent[\"port\"][\"id\"]}]\n self._boot_server(image, flavor, **kwargs)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=(consts.Service.NOVA,\n consts.Service.NEUTRON))\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\", \"nova\"]},\n name=\"NeutronTrunks.boot_server_and_add_subports\",\n platform=\"openstack\")\nclass BootServerAndAddSubports(nova_utils.NovaScenario,\n neutron_utils.NeutronScenario):\n\n def run(self, image, flavor, network_create_args=None, subport_count=10):\n \"\"\"Boot a server and add subports.\n\n Returns when the server is actually booted and in \"ACTIVE\" state.\n :param image: image ID or instance for server creation\n :param flavor: int, flavor ID or instance for server creation\n :param network_create_args: arguments for creating network\n :param subport_count: number of subports for the trunk port\n \"\"\"\n kwargs = {}\n ports = []\n network_create_args = network_create_args or {}\n for _ in range(subport_count + 1):\n net, subnet = self._create_network_and_subnets(\n network_create_args=network_create_args)\n ports.append(self._create_port(\n net, {\"fixed_ips\": [{\n \"subnet_id\": subnet[0][\"subnet\"][\"id\"]}]}))\n parent, subports = ports[0], ports[1:]\n trunk_payload = {\"port_id\": parent[\"port\"][\"id\"]}\n trunk = 
self._create_trunk(trunk_payload)\n kwargs[\"nics\"] = [{\"port-id\": parent[\"port\"][\"id\"]}]\n self._boot_server(image, flavor, **kwargs)\n for seg_id, p in enumerate(subports, start=1):\n subport_payload = [{\"port_id\": p[\"port\"][\"id\"],\n \"segmentation_type\": \"vlan\",\n \"segmentation_id\": seg_id}]\n self._add_subports_to_trunk(trunk[\"trunk\"][\"id\"], subport_payload)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=(consts.Service.NOVA,\n consts.Service.NEUTRON))\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"neutron\", \"nova\"]},\n name=\"NeutronTrunks.boot_server_and_batch_add_subports\",\n platform=\"openstack\")\nclass BootServerAndBatchAddSubports(nova_utils.NovaScenario,\n neutron_utils.NeutronScenario):\n\n def run(self, image, flavor, network_create_args=None,\n subports_per_batch=10, batches=5):\n \"\"\"Boot a server and add subports in batches.\n\n Returns when the server is actually booted and in \"ACTIVE\" state.\n :param image: image ID or instance for server creation\n :param flavor: int, flavor ID or instance for server creation\n :param network_create_args: arguments for creating network\n :param subports_per_batch: number of subports per batches\n :param batches: number of batches to create subports in\n \"\"\"\n kwargs = {}\n ports = []\n network_create_args = network_create_args or {}\n for _ in range(subports_per_batch * batches + 1):\n net, subnet = self._create_network_and_subnets(\n network_create_args=network_create_args)\n ports.append(self._create_port(\n net, {\"fixed_ips\": [{\n \"subnet_id\": subnet[0][\"subnet\"][\"id\"]}]}))\n parent, subports = ports[0], ports[1:]\n trunk_payload = {\"port_id\": parent[\"port\"][\"id\"]}\n trunk = 
self._create_trunk(trunk_payload)\n kwargs[\"nics\"] = [{\"port-id\": parent[\"port\"][\"id\"]}]\n self._boot_server(image, flavor, **kwargs)\n begin = 0\n for _ in range(0, batches):\n end = begin + subports_per_batch\n subport_payload = [{\"port_id\": p[\"port\"][\"id\"],\n \"segmentation_type\": \"vlan\",\n \"segmentation_id\": seg_id}\n for seg_id, p in enumerate(\n subports[slice(begin, end)],\n start=begin + 1)]\n begin = begin + subports_per_batch\n self._add_subports_to_trunk(trunk[\"trunk\"][\"id\"], subport_payload)\n" }, { "alpha_fraction": 0.5871538519859314, "alphanum_fraction": 0.5913905501365662, "avg_line_length": 40.69716262817383, "blob_id": "a4c088060c7a846b4f563e68cd7aca5bd5d79454", "content_id": "d8d4be5b4cee6c3b219d95dcb7e3f133f270e788", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13218, "license_type": "permissive", "max_line_length": 81, "num_lines": 317, "path": "/rally_openstack/common/services/identity/keystone_v2.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport uuid\n\nfrom rally.task import atomic\n\nfrom rally_openstack.common import service\nfrom rally_openstack.common.services.identity import identity\nfrom rally_openstack.common.services.identity import keystone_common\n\n\[email protected](\"keystone\", service_type=\"identity\", version=\"2\")\nclass KeystoneV2Service(service.Service, keystone_common.KeystoneMixin):\n\n @atomic.action_timer(\"keystone_v2.create_tenant\")\n def create_tenant(self, tenant_name=None):\n tenant_name = tenant_name or self.generate_random_name()\n return self._clients.keystone(\"2\").tenants.create(tenant_name)\n\n @atomic.action_timer(\"keystone_v2.update_tenant\")\n def update_tenant(self, tenant_id, name=None, enabled=None,\n description=None):\n \"\"\"Update tenant name and description.\n\n :param tenant_id: Id of tenant to update\n :param name: tenant name to be set (if boolean True, random name will\n be set)\n :param enabled: enabled status of project\n :param description: tenant description to be set (if boolean True,\n random description will be set)\n \"\"\"\n if name is True:\n name = self.generate_random_name()\n if description is True:\n description = self.generate_random_name()\n self._clients.keystone(\"2\").tenants.update(\n tenant_id, name=name, description=description, enabled=enabled)\n\n @atomic.action_timer(\"keystone_v2.delete_tenant\")\n def delete_tenant(self, tenant_id):\n return self._clients.keystone(\"2\").tenants.delete(tenant_id)\n\n @atomic.action_timer(\"keystone_v2.list_tenants\")\n def list_tenants(self):\n return self._clients.keystone(\"2\").tenants.list()\n\n @atomic.action_timer(\"keystone_v2.get_tenant\")\n def get_tenant(self, tenant_id):\n \"\"\"Get tenant.\"\"\"\n return self._clients.keystone(\"2\").tenants.get(tenant_id)\n\n @atomic.action_timer(\"keystone_v2.create_user\")\n def create_user(self, username=None, password=None, 
email=None,\n tenant_id=None, enabled=True):\n username = username or self.generate_random_name()\n password = password or str(uuid.uuid4())\n email = email or (username + \"@rally.me\")\n return self._clients.keystone(\"2\").users.create(name=username,\n password=password,\n email=email,\n tenant_id=tenant_id,\n enabled=enabled)\n\n @atomic.action_timer(\"keystone_v2.create_users\")\n def create_users(self, tenant_id, number_of_users, user_create_args=None):\n \"\"\"Create specified amount of users.\n\n :param tenant_id: Id of tenant\n :param number_of_users: number of users to create\n :param user_create_args: additional user creation arguments\n \"\"\"\n users = []\n for _i in range(number_of_users):\n users.append(self.create_user(tenant_id=tenant_id,\n **(user_create_args or {})))\n return users\n\n @atomic.action_timer(\"keystone_v2.update_user\")\n def update_user(self, user_id, **kwargs):\n allowed_args = (\"name\", \"email\", \"enabled\")\n restricted = set(kwargs) - set(allowed_args)\n if restricted:\n raise NotImplementedError(\n \"Failed to update '%s', since Keystone V2 allows to update \"\n \"only '%s'.\" % (\"', '\".join(restricted),\n \"', '\".join(allowed_args)))\n self._clients.keystone(\"2\").users.update(user_id, **kwargs)\n\n @atomic.action_timer(\"keystone_v2.update_user_password\")\n def update_user_password(self, user_id, password):\n self._clients.keystone(\"2\").users.update_password(user_id,\n password=password)\n\n @atomic.action_timer(\"keystone_v2.create_service\")\n def create_service(self, name=None, service_type=None, description=None):\n \"\"\"Creates keystone service.\n\n :param name: name of service to create\n :param service_type: type of the service\n :param description: description of the service\n :returns: keystone service instance\n \"\"\"\n name = name or self.generate_random_name()\n service_type = service_type or \"rally_test_type\"\n description = description or self.generate_random_name()\n return 
self._clients.keystone(\"2\").services.create(\n name,\n service_type=service_type,\n description=description)\n\n @atomic.action_timer(\"keystone_v2.create_role\")\n def create_role(self, name=None):\n name = name or self.generate_random_name()\n return self._clients.keystone(\"2\").roles.create(name)\n\n @atomic.action_timer(\"keystone_v2.add_role\")\n def add_role(self, role_id, user_id, tenant_id):\n self._clients.keystone(\"2\").roles.add_user_role(\n user=user_id, role=role_id, tenant=tenant_id)\n\n @atomic.action_timer(\"keystone_v2.list_roles\")\n def list_roles(self):\n \"\"\"List all roles.\"\"\"\n return self._clients.keystone(\"2\").roles.list()\n\n @atomic.action_timer(\"keystone_v2.list_roles_for_user\")\n def list_roles_for_user(self, user_id, tenant_id=None):\n return self._clients.keystone(\"2\").roles.roles_for_user(\n user_id, tenant_id)\n\n @atomic.action_timer(\"keystone_v2.revoke_role\")\n def revoke_role(self, role_id, user_id, tenant_id):\n self._clients.keystone(\"2\").roles.remove_user_role(user=user_id,\n role=role_id,\n tenant=tenant_id)\n\n @atomic.action_timer(\"keystone_v2.create_ec2creds\")\n def create_ec2credentials(self, user_id, tenant_id):\n \"\"\"Create ec2credentials.\n\n :param user_id: User ID for which to create credentials\n :param tenant_id: Tenant ID for which to create credentials\n\n :returns: Created ec2-credentials object\n \"\"\"\n return self._clients.keystone(\"2\").ec2.create(user_id,\n tenant_id=tenant_id)\n\n\[email protected]_layer(KeystoneV2Service)\nclass UnifiedKeystoneV2Service(keystone_common.UnifiedKeystoneMixin,\n identity.Identity):\n \"\"\"Compatibility layer for Keystone V2.\"\"\"\n\n @staticmethod\n def _check_domain(domain_name):\n if domain_name.lower() != \"default\":\n raise NotImplementedError(\"Domain functionality not implemented \"\n \"in Keystone v2\")\n\n @staticmethod\n def _unify_tenant(tenant):\n return identity.Project(id=tenant.id, name=tenant.name,\n domain_id=\"default\")\n\n 
@staticmethod\n def _unify_user(user):\n return identity.User(id=user.id, name=user.name,\n project_id=getattr(user, \"tenantId\", None),\n domain_id=\"default\")\n\n def create_project(self, project_name=None, domain_name=\"Default\"):\n \"\"\"Creates new project/tenant and return project object.\n\n :param project_name: Name of project to be created.\n :param domain_name: Restricted for Keystone V2. Should not be set or\n \"Default\" is expected.\n \"\"\"\n self._check_domain(domain_name)\n tenant = self._impl.create_tenant(project_name)\n return self._unify_tenant(tenant)\n\n def update_project(self, project_id, name=None, enabled=None,\n description=None):\n \"\"\"Update project name, enabled and description\n\n :param project_id: Id of project to update\n :param name: project name to be set\n :param enabled: enabled status of project\n :param description: project description to be set\n \"\"\"\n self._impl.update_tenant(tenant_id=project_id, name=name,\n enabled=enabled, description=description)\n\n def delete_project(self, project_id):\n \"\"\"Deletes project.\"\"\"\n return self._impl.delete_tenant(project_id)\n\n def list_projects(self):\n \"\"\"List all projects.\"\"\"\n return [self._unify_tenant(t) for t in self._impl.list_tenants()]\n\n def get_project(self, project_id):\n \"\"\"Get project.\"\"\"\n return self._unify_tenant(self._impl.get_tenant(project_id))\n\n def create_user(self, username=None, password=None, project_id=None,\n domain_name=\"Default\", enabled=True,\n default_role=\"member\"):\n \"\"\"Create user.\n\n :param username: name of user\n :param password: user password\n :param project_id: user's default project\n :param domain_name: Restricted for Keystone V2. Should not be set or\n \"Default\" is expected.\n :param enabled: whether the user is enabled.\n :param default_role: Restricted for Keystone V2. 
Should not be set or\n \"member\" is expected.\n \"\"\"\n self._check_domain(domain_name)\n user = self._impl.create_user(username=username,\n password=password,\n tenant_id=project_id,\n enabled=enabled)\n return self._unify_user(user)\n\n def create_users(self, tenant_id, number_of_users, user_create_args=None):\n \"\"\"Create specified amount of users.\n\n :param tenant_id: Id of tenant\n :param number_of_users: number of users to create\n :param user_create_args: additional user creation arguments\n \"\"\"\n if user_create_args and \"domain_name\" in user_create_args:\n self._check_domain(user_create_args[\"domain_name\"])\n return [self._unify_user(u)\n for u in self._impl.create_users(\n tenant_id=tenant_id, number_of_users=number_of_users,\n user_create_args=user_create_args)]\n\n def list_users(self):\n \"\"\"List all users.\"\"\"\n return [self._unify_user(u) for u in self._impl.list_users()]\n\n def update_user(self, user_id, enabled=None, name=None, email=None,\n password=None):\n if password is not None:\n self._impl.update_user_password(user_id=user_id, password=password)\n\n update_args = {}\n if enabled is not None:\n update_args[\"enabled\"] = enabled\n if name is not None:\n update_args[\"name\"] = name\n if email is not None:\n update_args[\"email\"] = email\n\n if update_args:\n self._impl.update_user(user_id, **update_args)\n\n def list_services(self):\n \"\"\"List all services.\"\"\"\n return [self._unify_service(s) for s in self._impl.list_services()]\n\n def create_role(self, name=None, domain_name=None):\n \"\"\"Add role to user.\"\"\"\n if domain_name is not None:\n raise NotImplementedError(\"Domain functionality not implemented \"\n \"in Keystone v2\")\n\n return self._unify_role(self._impl.create_role(name))\n\n def add_role(self, role_id, user_id, project_id):\n \"\"\"Add role to user.\"\"\"\n self._impl.add_role(role_id=role_id, user_id=user_id,\n tenant_id=project_id)\n\n def revoke_role(self, role_id, user_id, project_id):\n 
\"\"\"Revokes a role from a user.\"\"\"\n return self._impl.revoke_role(role_id=role_id, user_id=user_id,\n tenant_id=project_id)\n\n def list_roles(self, user_id=None, project_id=None, domain_name=None):\n \"\"\"List all roles.\"\"\"\n if domain_name:\n raise NotImplementedError(\"Domain functionality not implemented \"\n \"in Keystone v2\")\n if user_id:\n roles = self._impl.list_roles_for_user(user_id,\n tenant_id=project_id)\n else:\n roles = self._impl.list_roles()\n return [self._unify_role(role) for role in roles]\n\n def create_ec2credentials(self, user_id, project_id):\n \"\"\"Create ec2credentials.\n\n :param user_id: User ID for which to create credentials\n :param project_id: Project ID for which to create credentials\n\n :returns: Created ec2-credentials object\n \"\"\"\n return self._impl.create_ec2credentials(user_id=user_id,\n tenant_id=project_id)\n" }, { "alpha_fraction": 0.639103889465332, "alphanum_fraction": 0.6401052474975586, "avg_line_length": 45.87350845336914, "blob_id": "d39f546aa1eeed27a9f00346f8e4823a0137bb05", "content_id": "8bc70b19b5b1358068ddc406b384c79659bbfdc0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58920, "license_type": "permissive", "max_line_length": 79, "num_lines": 1257, "path": "/rally_openstack/task/scenarios/nova/servers.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport jsonschema\nfrom rally.common import logging\nfrom rally import exceptions as rally_exceptions\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.cinder import utils as cinder_utils\nfrom rally_openstack.task.scenarios.neutron import utils as neutron_utils\nfrom rally_openstack.task.scenarios.nova import utils\n\n\n\"\"\"Scenarios for Nova servers.\"\"\"\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_list_server\",\n platform=\"openstack\")\nclass BootAndListServer(utils.NovaScenario):\n\n def run(self, image, flavor, detailed=True, **kwargs):\n \"\"\"Boot a server from an image and then list all servers.\n\n Measure the \"nova list\" command performance.\n\n If you have only 1 user in your context, you will\n add 1 server on every iteration. 
So you will have more\n and more servers and will be able to measure the\n performance of the \"nova list\" command depending on\n the number of servers owned by users.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param detailed: True if the server listing should contain\n detailed information about all of them\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n msg = (\"Servers isn't created\")\n self.assertTrue(server, err_msg=msg)\n\n pool_list = self._list_servers(detailed)\n msg = (\"Server not included into list of available servers\\n\"\n \"Booted server: {}\\n\"\n \"Pool of servers: {}\").format(server, pool_list)\n self.assertIn(server, pool_list, err_msg=msg)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"NovaServers.list_servers\", platform=\"openstack\")\nclass ListServers(utils.NovaScenario):\n\n def run(self, detailed=True):\n \"\"\"List all servers.\n\n This simple scenario test the nova list command by listing\n all the servers.\n\n :param detailed: True if detailed information about servers\n should be listed\n \"\"\"\n self._list_servers(detailed)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_delete_server\",\n platform=\"openstack\")\nclass BootAndDeleteServer(utils.NovaScenario):\n\n def run(self, image, flavor, min_sleep=0, max_sleep=0,\n force_delete=False, **kwargs):\n \"\"\"Boot and 
delete a server.\n\n Optional 'min_sleep' and 'max_sleep' parameters allow the scenario\n to simulate a pause between volume creation and deletion\n (of random duration from [min_sleep, max_sleep]).\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param min_sleep: Minimum sleep time in seconds (non-negative)\n :param max_sleep: Maximum sleep time in seconds (non-negative)\n :param force_delete: True if force_delete should be used\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self.sleep_between(min_sleep, max_sleep)\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_delete_multiple_servers\",\n platform=\"openstack\")\nclass BootAndDeleteMultipleServers(utils.NovaScenario):\n\n def run(self, image, flavor, count=2, min_sleep=0,\n max_sleep=0, force_delete=False, **kwargs):\n \"\"\"Boot multiple servers in a single request and delete them.\n\n Deletion is done in parallel with one request per server, not\n with a single request for all servers.\n\n :param image: The image to boot from\n :param flavor: Flavor used to boot instance\n :param count: Number of instances to boot\n :param min_sleep: Minimum sleep time in seconds (non-negative)\n :param max_sleep: Maximum sleep time in seconds (non-negative)\n :param force_delete: True if force_delete should be used\n :param kwargs: Optional additional arguments for instance creation\n \"\"\"\n servers = self._boot_servers(image, flavor, 
1, instances_amount=count,\n **kwargs)\n self.sleep_between(min_sleep, max_sleep)\n self._delete_servers(servers, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\", validate_disk=False)\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\", \"cinder\"]},\n name=\"NovaServers.boot_server_from_volume_and_delete\",\n platform=\"openstack\")\nclass BootServerFromVolumeAndDelete(utils.NovaScenario,\n cinder_utils.CinderBasic):\n\n def run(self, image, flavor, volume_size, volume_type=None,\n min_sleep=0, max_sleep=0, force_delete=False, **kwargs):\n \"\"\"Boot a server from volume and then delete it.\n\n The scenario first creates a volume and then a server.\n Optional 'min_sleep' and 'max_sleep' parameters allow the scenario\n to simulate a pause between volume creation and deletion\n (of random duration from [min_sleep, max_sleep]).\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param volume_size: volume size (in GB)\n :param volume_type: specifies volume type when there are\n multiple backends\n :param min_sleep: Minimum sleep time in seconds (non-negative)\n :param max_sleep: Maximum sleep time in seconds (non-negative)\n :param force_delete: True if force_delete should be used\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n volume = self.cinder.create_volume(volume_size, imageRef=image,\n volume_type=volume_type)\n block_device_mapping = {\"vda\": \"%s:::0\" % volume.id}\n server = self._boot_server(None, flavor,\n block_device_mapping=block_device_mapping,\n **kwargs)\n self.sleep_between(min_sleep, max_sleep)\n 
self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_bounce_server\",\n platform=\"openstack\")\nclass BootAndBounceServer(utils.NovaScenario):\n\n def run(self, image, flavor, force_delete=False, actions=None, **kwargs):\n \"\"\"Boot a server and run specified actions against it.\n\n Actions should be passed into the actions parameter. Available actions\n are 'hard_reboot', 'soft_reboot', 'stop_start', 'rescue_unrescue',\n 'pause_unpause', 'suspend_resume', 'lock_unlock' and 'shelve_unshelve'.\n Delete server after all actions were completed.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param force_delete: True if force_delete should be used\n :param actions: list of action dictionaries, where each action\n dictionary speicifes an action to be performed\n in the following format:\n {\"action_name\": <no_of_iterations>}\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n action_builder = self._bind_actions()\n actions = actions or []\n try:\n action_builder.validate(actions)\n except jsonschema.exceptions.ValidationError as error:\n raise rally_exceptions.InvalidConfigException(\n \"Invalid server actions configuration \\'%(actions)s\\' due to: \"\n \"%(error)s\" % {\"actions\": str(actions), \"error\": str(error)})\n server = self._boot_server(image, flavor, **kwargs)\n for action in action_builder.build_actions(actions, server):\n action()\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n 
flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_lock_unlock_and_delete\",\n platform=\"openstack\")\nclass BootLockUnlockAndDelete(utils.NovaScenario):\n\n def run(self, image, flavor, min_sleep=0,\n max_sleep=0, force_delete=False, **kwargs):\n \"\"\"Boot a server, lock it, then unlock and delete it.\n\n Optional 'min_sleep' and 'max_sleep' parameters allow the\n scenario to simulate a pause between locking and unlocking the\n server (of random duration from min_sleep to max_sleep).\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param min_sleep: Minimum sleep time between locking and unlocking\n in seconds\n :param max_sleep: Maximum sleep time between locking and unlocking\n in seconds\n :param force_delete: True if force_delete should be used\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._lock_server(server)\n self.sleep_between(min_sleep, max_sleep)\n self._unlock_server(server)\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.GLANCE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\", \"glance\"]},\n name=\"NovaServers.snapshot_server\",\n platform=\"openstack\")\nclass SnapshotServer(utils.NovaScenario):\n\n def run(self, image, 
flavor, force_delete=False, **kwargs):\n \"\"\"Boot a server, make its snapshot and delete both.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param force_delete: True if force_delete should be used\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n\n server = self._boot_server(image, flavor, **kwargs)\n image = self._create_image(server)\n self._delete_server(server, force=force_delete)\n\n server = self._boot_server(image.id, flavor, **kwargs)\n self._delete_server(server, force=force_delete)\n self._delete_image(image)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_server\",\n platform=\"openstack\")\nclass BootServer(utils.NovaScenario):\n\n def run(self, image, flavor, auto_assign_nic=False, **kwargs):\n \"\"\"Boot a server.\n\n Assumes that cleanup is done elsewhere.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param auto_assign_nic: True if NICs should be assigned\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n self._boot_server(image, flavor,\n auto_assign_nic=auto_assign_nic, **kwargs)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\", validate_disk=False)\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email 
protected](context={\"cleanup@openstack\": [\"nova\", \"cinder\"]},\n name=\"NovaServers.boot_server_from_volume\",\n platform=\"openstack\")\nclass BootServerFromVolume(utils.NovaScenario, cinder_utils.CinderBasic):\n\n def run(self, image, flavor, volume_size,\n volume_type=None, auto_assign_nic=False, **kwargs):\n \"\"\"Boot a server from volume.\n\n The scenario first creates a volume and then a server.\n Assumes that cleanup is done elsewhere.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param volume_size: volume size (in GB)\n :param volume_type: specifies volume type when there are\n multiple backends\n :param auto_assign_nic: True if NICs should be assigned\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n volume = self.cinder.create_volume(volume_size, imageRef=image,\n volume_type=volume_type)\n block_device_mapping = {\"vda\": \"%s:::0\" % volume.id}\n self._boot_server(None, flavor, auto_assign_nic=auto_assign_nic,\n block_device_mapping=block_device_mapping,\n **kwargs)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"},\n to_flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=(consts.Service.NOVA))\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.resize_server\", platform=\"openstack\")\nclass ResizeServer(utils.NovaScenario):\n\n def run(self, image, flavor, to_flavor, force_delete=False, **kwargs):\n \"\"\"Boot a server, then resize and delete it.\n\n This test will confirm the resize by default,\n or revert the resize if confirm is set to false.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param to_flavor: 
flavor to be used to resize the booted instance\n :param force_delete: True if force_delete should be used\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._resize(server, to_flavor)\n # by default we confirm\n confirm = kwargs.get(\"confirm\", True)\n if confirm:\n self._resize_confirm(server)\n else:\n self._resize_revert(server)\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"},\n to_flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.resize_shutoff_server\",\n platform=\"openstack\")\nclass ResizeShutoffServer(utils.NovaScenario):\n\n def run(self, image, flavor, to_flavor, confirm=True,\n force_delete=False, **kwargs):\n \"\"\"Boot a server and stop it, then resize and delete it.\n\n This test will confirm the resize by default,\n or revert the resize if confirm is set to false.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param to_flavor: flavor to be used to resize the booted instance\n :param confirm: True if need to confirm resize else revert resize\n :param force_delete: True if force_delete should be used\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._stop_server(server)\n self._resize(server, to_flavor)\n\n if confirm:\n self._resize_confirm(server, \"SHUTOFF\")\n else:\n self._resize_revert(server, \"SHUTOFF\")\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": 
\"glance_image\"},\n flavor={\"type\": \"nova_flavor\"},\n to_flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"cinder\", \"nova\"]},\n name=\"NovaServers.boot_server_attach_created_volume_and_resize\",\n platform=\"openstack\")\nclass BootServerAttachCreatedVolumeAndResize(utils.NovaScenario,\n cinder_utils.CinderBasic):\n\n def run(self, image, flavor, to_flavor, volume_size, min_sleep=0,\n max_sleep=0, force_delete=False, confirm=True, do_delete=True,\n boot_server_kwargs=None, create_volume_kwargs=None):\n \"\"\"Create a VM from image, attach a volume to it and resize.\n\n Simple test to create a VM and attach a volume, then resize the VM,\n detach the volume then delete volume and VM.\n Optional 'min_sleep' and 'max_sleep' parameters allow the scenario\n to simulate a pause between attaching a volume and running resize\n (of random duration from range [min_sleep, max_sleep]).\n :param image: Glance image name to use for the VM\n :param flavor: VM flavor name\n :param to_flavor: flavor to be used to resize the booted instance\n :param volume_size: volume size (in GB)\n :param min_sleep: Minimum sleep time in seconds (non-negative)\n :param max_sleep: Maximum sleep time in seconds (non-negative)\n :param force_delete: True if force_delete should be used\n :param confirm: True if need to confirm resize else revert resize\n :param do_delete: True if resources needs to be deleted explicitly\n else use rally cleanup to remove resources\n :param boot_server_kwargs: optional arguments for VM creation\n :param create_volume_kwargs: optional arguments for volume creation\n \"\"\"\n boot_server_kwargs = boot_server_kwargs or {}\n create_volume_kwargs = 
create_volume_kwargs or {}\n\n server = self._boot_server(image, flavor, **boot_server_kwargs)\n volume = self.cinder.create_volume(volume_size, **create_volume_kwargs)\n\n self._attach_volume(server, volume)\n self.sleep_between(min_sleep, max_sleep)\n self._resize(server, to_flavor)\n\n if confirm:\n self._resize_confirm(server)\n else:\n self._resize_revert(server)\n\n if do_delete:\n self._detach_volume(server, volume)\n self.cinder.delete_volume(volume)\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"cinder\", \"nova\"]},\n name=\"NovaServers.boot_server_attach_created_volume_and_extend\",\n platform=\"openstack\")\nclass BootServerAttachCreatedVolumeAndExtend(utils.NovaScenario,\n cinder_utils.CinderBasic):\n\n def run(self, image, flavor, volume_size, new_volume_size, min_sleep=0,\n max_sleep=0, force_delete=False, do_delete=True,\n boot_server_kwargs=None, create_volume_kwargs=None):\n \"\"\"Create a VM from image, attach a volume then extend volume\n\n Simple test to create a VM and attach a volume, then extend the\n volume while its running, detach the volume then delete volume\n and VM.\n\n Optional 'min_sleep' and 'max_sleep' parameters allow the scenario\n to simulate a pause between attaching a volume and running resize\n (of random duration from range [min_sleep, max_sleep]).\n\n :param image: Glance image name to use for the VM\n :param flavor: VM flavor name\n :param volume_size: volume size (in GB)\n :param new_volume_size: new volume size (in GB)\n :param min_sleep: Minimum sleep time in seconds (non-negative)\n :param max_sleep: 
Maximum sleep time in seconds (non-negative)\n :param force_delete: True if force_delete should be used\n :param do_delete: True if resources needs to be deleted explicitly\n else use rally cleanup to remove resources\n :param boot_server_kwargs: optional arguments for VM creation\n :param create_volume_kwargs: optional arguments for volume creation\n \"\"\"\n boot_server_kwargs = boot_server_kwargs or {}\n create_volume_kwargs = create_volume_kwargs or {}\n\n server = self._boot_server(image, flavor, **boot_server_kwargs)\n volume = self.cinder.create_volume(volume_size, **create_volume_kwargs)\n\n self._attach_volume(server, volume)\n self.sleep_between(min_sleep, max_sleep)\n self.cinder.extend_volume(volume, new_size=new_volume_size)\n\n if do_delete:\n self._detach_volume(server, volume)\n self.cinder.delete_volume(volume)\n self._delete_server(server, force=force_delete)\n\n\[email protected](\"number\", param_name=\"volume_num\", minval=1,\n integer_only=True)\[email protected](\"number\", param_name=\"volume_size\", minval=1,\n integer_only=True)\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\", validate_disk=False)\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"cinder\", \"nova\"]},\n name=\"NovaServers.boot_server_attach_volume_and_list_attachments\",\n platform=\"openstack\")\nclass BootServerAttachVolumeAndListAttachments(utils.NovaScenario,\n cinder_utils.CinderBasic):\n\n def run(self, image, flavor, volume_size=1, volume_num=2,\n boot_server_kwargs=None, create_volume_kwargs=None):\n \"\"\"Create a VM, attach N volume to it and list server's attachemnt.\n\n Measure the \"nova volume-attachments\" command performance.\n\n :param image: 
Glance image name to use for the VM\n :param flavor: VM flavor name\n :param volume_size: volume size (in GB), default 1G\n :param volume_num: the num of attached volume\n :param boot_server_kwargs: optional arguments for VM creation\n :param create_volume_kwargs: optional arguments for volume creation\n \"\"\"\n boot_server_kwargs = boot_server_kwargs or {}\n create_volume_kwargs = create_volume_kwargs or {}\n\n server = self._boot_server(image, flavor, **boot_server_kwargs)\n attachments = []\n for i in range(volume_num):\n volume = self.cinder.create_volume(volume_size,\n **create_volume_kwargs)\n attachments.append(self._attach_volume(server, volume))\n\n list_attachments = self._list_attachments(server.id)\n\n for attachment in attachments:\n msg = (\"attachment not included into list of available \"\n \"attachments\\n attachment: {}\\n\"\n \"list attachments: {}\").format(attachment, list_attachments)\n self.assertIn(attachment, list_attachments, err_msg=msg)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"},\n to_flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\", validate_disk=False)\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\", \"cinder\"]},\n name=\"NovaServers.boot_server_from_volume_and_resize\",\n platform=\"openstack\")\nclass BootServerFromVolumeAndResize(utils.NovaScenario,\n cinder_utils.CinderBasic):\n\n def run(self, image, flavor, to_flavor, volume_size, min_sleep=0,\n max_sleep=0, force_delete=False, confirm=True, do_delete=True,\n boot_server_kwargs=None, create_volume_kwargs=None):\n \"\"\"Boot a server from volume, then resize and delete it.\n\n The scenario first creates a volume and then a server.\n Optional 'min_sleep' and 
'max_sleep' parameters allow the scenario\n to simulate a pause between volume creation and deletion\n (of random duration from [min_sleep, max_sleep]).\n\n This test will confirm the resize by default,\n or revert the resize if confirm is set to false.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param to_flavor: flavor to be used to resize the booted instance\n :param volume_size: volume size (in GB)\n :param min_sleep: Minimum sleep time in seconds (non-negative)\n :param max_sleep: Maximum sleep time in seconds (non-negative)\n :param force_delete: True if force_delete should be used\n :param confirm: True if need to confirm resize else revert resize\n :param do_delete: True if resources needs to be deleted explicitly\n else use rally cleanup to remove resources\n :param boot_server_kwargs: optional arguments for VM creation\n :param create_volume_kwargs: optional arguments for volume creation\n \"\"\"\n boot_server_kwargs = boot_server_kwargs or {}\n create_volume_kwargs = create_volume_kwargs or {}\n\n if boot_server_kwargs.get(\"block_device_mapping\"):\n LOG.warning(\"Using already existing volume is not permitted.\")\n\n volume = self.cinder.create_volume(volume_size, imageRef=image,\n **create_volume_kwargs)\n boot_server_kwargs[\"block_device_mapping\"] = {\n \"vda\": \"%s:::0\" % volume.id}\n\n server = self._boot_server(None, flavor, **boot_server_kwargs)\n self.sleep_between(min_sleep, max_sleep)\n self._resize(server, to_flavor)\n\n if confirm:\n self._resize_confirm(server)\n else:\n self._resize_revert(server)\n\n if do_delete:\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", 
platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.suspend_and_resume_server\",\n platform=\"openstack\")\nclass SuspendAndResumeServer(utils.NovaScenario):\n\n def run(self, image, flavor, force_delete=False, **kwargs):\n \"\"\"Create a server, suspend, resume and then delete it\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param force_delete: True if force_delete should be used\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._suspend_server(server)\n self._resume_server(server)\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.pause_and_unpause_server\",\n platform=\"openstack\")\nclass PauseAndUnpauseServer(utils.NovaScenario):\n\n def run(self, image, flavor, force_delete=False, **kwargs):\n \"\"\"Create a server, pause, unpause and then delete it\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param force_delete: True if force_delete should be used\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._pause_server(server)\n self._unpause_server(server)\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n 
image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.shelve_and_unshelve_server\",\n platform=\"openstack\")\nclass ShelveAndUnshelveServer(utils.NovaScenario):\n\n def run(self, image, flavor, force_delete=False, **kwargs):\n \"\"\"Create a server, shelve, unshelve and then delete it\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param force_delete: True if force_delete should be used\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._shelve_server(server)\n self._unshelve_server(server)\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_live_migrate_server\",\n platform=\"openstack\")\nclass BootAndLiveMigrateServer(utils.NovaScenario):\n\n def run(self, image, flavor, block_migration=False, disk_over_commit=False,\n min_sleep=0, max_sleep=0, **kwargs):\n \"\"\"Live Migrate a server.\n\n This scenario launches a VM on a compute node available in\n the availability zone and then migrates the VM to another\n compute node on the same availability zone.\n\n Optional 'min_sleep' and 'max_sleep' parameters allow the scenario\n to simulate a pause between VM booting and running live migration\n (of random duration from range [min_sleep, max_sleep]).\n\n :param image: image to be 
used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param block_migration: Specifies the migration type\n :param disk_over_commit: Specifies whether to allow overcommit\n on migrated instance or not\n :param min_sleep: Minimum sleep time in seconds (non-negative)\n :param max_sleep: Maximum sleep time in seconds (non-negative)\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self.sleep_between(min_sleep, max_sleep)\n\n self._live_migrate(server, block_migration, disk_over_commit)\n\n self._delete_server(server)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\", validate_disk=False)\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](\n context={\"cleanup@openstack\": [\"nova\", \"cinder\"]},\n name=\"NovaServers.boot_server_from_volume_and_live_migrate\",\n platform=\"openstack\")\nclass BootServerFromVolumeAndLiveMigrate(utils.NovaScenario,\n cinder_utils.CinderBasic):\n\n def run(self, image, flavor, volume_size, volume_type=None,\n block_migration=False, disk_over_commit=False, force_delete=False,\n min_sleep=0, max_sleep=0, **kwargs):\n \"\"\"Boot a server from volume and then migrate it.\n\n The scenario first creates a volume and a server booted from\n the volume on a compute node available in the availability zone and\n then migrates the VM to another compute node on the same availability\n zone.\n\n Optional 'min_sleep' and 'max_sleep' parameters allow the scenario\n to simulate a pause between VM booting and running live migration\n (of random duration from range [min_sleep, max_sleep]).\n\n :param image: image to be used to boot an instance\n :param 
flavor: flavor to be used to boot an instance\n :param volume_size: volume size (in GB)\n :param volume_type: specifies volume type when there are\n multiple backends\n :param block_migration: Specifies the migration type\n :param disk_over_commit: Specifies whether to allow overcommit\n on migrated instance or not\n :param force_delete: True if force_delete should be used\n :param min_sleep: Minimum sleep time in seconds (non-negative)\n :param max_sleep: Maximum sleep time in seconds (non-negative)\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n volume = self.cinder.create_volume(volume_size, imageRef=image,\n volume_type=volume_type)\n block_device_mapping = {\"vda\": \"%s:::0\" % volume.id}\n server = self._boot_server(None, flavor,\n block_device_mapping=block_device_mapping,\n **kwargs)\n self.sleep_between(min_sleep, max_sleep)\n\n self._live_migrate(server, block_migration, disk_over_commit)\n\n self._delete_server(server, force=force_delete)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](\n context={\"cleanup@openstack\": [\"cinder\", \"nova\"]},\n name=\"NovaServers.boot_server_attach_created_volume_and_live_migrate\",\n platform=\"openstack\")\nclass BootServerAttachCreatedVolumeAndLiveMigrate(utils.NovaScenario,\n cinder_utils.CinderBasic):\n\n def run(self, image, flavor, size, block_migration=False,\n disk_over_commit=False, boot_server_kwargs=None,\n create_volume_kwargs=None, min_sleep=0, max_sleep=0):\n \"\"\"Create a VM, attach a volume to it and live migrate.\n\n Simple test to create a VM and attach a volume, then migrate the VM,\n detach the volume and delete 
volume/VM.\n\n Optional 'min_sleep' and 'max_sleep' parameters allow the scenario\n to simulate a pause between attaching a volume and running live\n migration (of random duration from range [min_sleep, max_sleep]).\n\n :param image: Glance image name to use for the VM\n :param flavor: VM flavor name\n :param size: volume size (in GB)\n :param block_migration: Specifies the migration type\n :param disk_over_commit: Specifies whether to allow overcommit\n on migrated instance or not\n :param boot_server_kwargs: optional arguments for VM creation\n :param create_volume_kwargs: optional arguments for volume creation\n :param min_sleep: Minimum sleep time in seconds (non-negative)\n :param max_sleep: Maximum sleep time in seconds (non-negative)\n \"\"\"\n\n if boot_server_kwargs is None:\n boot_server_kwargs = {}\n if create_volume_kwargs is None:\n create_volume_kwargs = {}\n\n server = self._boot_server(image, flavor, **boot_server_kwargs)\n volume = self.cinder.create_volume(size, **create_volume_kwargs)\n\n self._attach_volume(server, volume)\n\n self.sleep_between(min_sleep, max_sleep)\n\n self._live_migrate(server, block_migration, disk_over_commit)\n\n self._detach_volume(server, volume)\n\n self.cinder.delete_volume(volume)\n self._delete_server(server)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_migrate_server\",\n platform=\"openstack\")\nclass BootAndMigrateServer(utils.NovaScenario):\n\n def run(self, image, flavor, **kwargs):\n \"\"\"Migrate a server.\n\n This scenario launches a VM on a compute node available in\n the availability zone, and then migrates the 
VM\n to another compute node on the same availability zone.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._migrate(server)\n # NOTE(wtakase): This is required because cold migration and resize\n # share same code path.\n confirm = kwargs.get(\"confirm\", True)\n if confirm:\n self._resize_confirm(server, status=\"ACTIVE\")\n else:\n self._resize_revert(server, status=\"ACTIVE\")\n self._delete_server(server)\n\n\[email protected](from_image={\"type\": \"glance_image\"},\n to_image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"from_image\")\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"to_image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_rebuild_server\",\n platform=\"openstack\")\nclass BootAndRebuildServer(utils.NovaScenario):\n\n def run(self, from_image, to_image, flavor, **kwargs):\n \"\"\"Rebuild a server.\n\n This scenario launches a VM, then rebuilds that VM with a\n different image.\n\n :param from_image: image to be used to boot an instance\n :param to_image: image to be used to rebuild the instance\n :param flavor: flavor to be used to boot an instance\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(from_image, flavor, **kwargs)\n self._rebuild_server(server, to_image)\n self._delete_server(server)\n\n\[email protected]_deprecated_args(\n \"Use 'floating_network' for additional instance parameters.\",\n \"2.1.0\", [\"create_floating_ip_args\"], 
once=True)\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](\n context={\"cleanup@openstack\": [\"nova\", \"neutron.floatingip\"]},\n name=\"NovaServers.boot_and_associate_floating_ip\",\n platform=\"openstack\")\nclass BootAndAssociateFloatingIp(utils.NovaScenario):\n\n def run(self, image, flavor, floating_network=None,\n create_floating_ip_args=None, **kwargs):\n \"\"\"Boot a server and associate a floating IP to it.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param floating_network: external network associated with floating IP.\n :param create_floating_ip_args: Optional additional dict for specifying\n external network associated with floating IP ('ext_network' key).\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n if floating_network is None and create_floating_ip_args:\n if \"ext_network\" in create_floating_ip_args:\n # the old way (network wrapper)\n floating_network = create_floating_ip_args[\"ext_network\"]\n elif \"floating_network\" in create_floating_ip_args:\n # the semi-old way - the time when network wrapper was replaced\n # by network service, but this compatibility layer was not\n # provided\n floating_network = create_floating_ip_args[\"floating_network\"]\n server = self._boot_server(image, flavor, **kwargs)\n floatingip = self.neutron.create_floatingip(\n floating_network=floating_network\n )\n self._associate_floating_ip(server, floatingip)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", 
flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\", \"neutron\"]},\n name=\"NovaServers.boot_server_and_attach_interface\",\n platform=\"openstack\")\nclass BootServerAndAttachInterface(utils.NovaScenario,\n neutron_utils.NeutronScenario):\n def run(self, image, flavor, network_create_args=None,\n subnet_create_args=None, subnet_cidr_start=None,\n boot_server_args=None):\n \"\"\"Create server and subnet, then attach the interface to it.\n\n This scenario measures the \"nova interface-attach\" command performance.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param network_create_args: dict, POST /v2.0/networks request\n options.\n :param subnet_create_args: dict, POST /v2.0/subnets request options\n :param subnet_cidr_start: str, start value for subnets CIDR\n :param boot_server_args: Optional additional arguments for\n server creation\n \"\"\"\n network = self._get_or_create_network(network_create_args)\n self._create_subnet(network, subnet_create_args, subnet_cidr_start)\n\n server = self._boot_server(image, flavor, **boot_server_args)\n self._attach_interface(server, net_id=network[\"network\"][\"id\"])\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_show_server\",\n platform=\"openstack\")\nclass BootAndShowServer(utils.NovaScenario):\n\n def run(self, image, flavor, **kwargs):\n 
\"\"\"Show server details.\n\n This simple scenario tests the nova show command by retrieving\n the server details.\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param kwargs: Optional additional arguments for server creation\n\n :returns: Server details\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._show_server(server)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_get_console_output\",\n platform=\"openstack\")\nclass BootAndGetConsoleOutput(utils.NovaScenario):\n\n def run(self, image, flavor, length=None, **kwargs):\n \"\"\"Get text console output from server.\n\n This simple scenario tests the nova console-log command by retrieving\n the text console log output.\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param length: The number of tail log lines you would like to retrieve.\n None (default value) or -1 means unlimited length.\n :param kwargs: Optional additional arguments for server creation\n\n :returns: Text console log output for server\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._get_server_console_output(server, length)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email 
protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_update_server\",\n platform=\"openstack\")\nclass BootAndUpdateServer(utils.NovaScenario):\n\n def run(self, image, flavor, description=None, **kwargs):\n \"\"\"Boot a server, then update its name and description.\n\n The scenario first creates a server, then update it.\n Assumes that cleanup is done elsewhere.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param description: update the server description\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._update_server(server, description)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA,\n consts.Service.CINDER])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\", \"cinder\"]},\n name=\"NovaServers.boot_server_from_volume_snapshot\",\n platform=\"openstack\")\nclass BootServerFromVolumeSnapshot(utils.NovaScenario,\n cinder_utils.CinderBasic):\n\n def run(self, image, flavor, volume_size, volume_type=None,\n auto_assign_nic=False, **kwargs):\n \"\"\"Boot a server from a snapshot.\n\n The scenario first creates a volume and creates a\n snapshot from this volume, then boots a server from\n the created snapshot.\n Assumes that cleanup is done elsewhere.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param volume_size: volume size (in GB)\n :param volume_type: specifies volume type when there are\n multiple backends\n :param auto_assign_nic: True if NICs should be assigned\n :param kwargs: Optional additional arguments for 
server creation\n \"\"\"\n volume = self.cinder.create_volume(volume_size, imageRef=image,\n volume_type=volume_type)\n snapshot = self.cinder.create_snapshot(volume.id, force=False)\n block_device_mapping = {\"vda\": \"%s:snap::1\" % snapshot.id}\n self._boot_server(None, flavor, auto_assign_nic=auto_assign_nic,\n block_device_mapping=block_device_mapping,\n **kwargs)\n\n\[email protected]_deprecated_args(\n \"Use 'floating_network' for additional instance parameters.\",\n \"2.1.0\", [\"create_floating_ip_args\"], once=True)\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](\n context={\"cleanup@openstack\": [\"nova\", \"neutron.floatingip\"]},\n name=\"NovaServers.boot_server_associate_and_dissociate_floating_ip\",\n platform=\"openstack\")\nclass BootServerAssociateAndDissociateFloatingIP(utils.NovaScenario):\n\n def run(self, image, flavor, floating_network=None,\n create_floating_ip_args=None, **kwargs):\n \"\"\"Boot a server associate and dissociate a floating IP from it.\n\n The scenario first boot a server and create a floating IP. 
then\n associate the floating IP to the server.Finally dissociate the floating\n IP.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param floating_network: external network associated with floating IP.\n :param create_floating_ip_args: Optional additional dict for specifying\n external network associated with floating IP ('ext_network' key).\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n if floating_network is None and create_floating_ip_args:\n if \"ext_network\" in create_floating_ip_args:\n # the old way (network wrapper)\n floating_network = create_floating_ip_args[\"ext_network\"]\n elif \"floating_network\" in create_floating_ip_args:\n # the semi-old way - the time when network wrapper was replaced\n # by network service, but this compatibility layer was not\n # provided\n floating_network = create_floating_ip_args[\"floating_network\"]\n server = self._boot_server(image, flavor, **kwargs)\n floatingip = self.neutron.create_floatingip(\n floating_network=floating_network\n )\n self._associate_floating_ip(server, floatingip)\n self._dissociate_floating_ip(server, floatingip)\n\n\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_server_and_list_interfaces\",\n platform=\"openstack\")\nclass BootServerAndListInterfaces(utils.NovaScenario):\n\n def run(self, image, flavor, **kwargs):\n \"\"\"Boot a server and list interfaces attached to it.\n\n Measure the \"nova boot\" and \"nova interface-list\" command performance.\n\n :param 
image: ID of the image to be used for server creation\n :param flavor: ID of the flavor to be used for server creation\n :param **kwargs: Optional arguments for booting the instance\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._list_interfaces(server)\n\n\[email protected](\n \"enum\", param_name=\"console_type\",\n values=[\"novnc\", \"xvpvnc\", \"spice-html5\", \"rdp-html5\", \"serial\", \"webmks\"])\[email protected](image={\"type\": \"glance_image\"},\n flavor={\"type\": \"nova_flavor\"})\[email protected](\"image_valid_on_flavor\", flavor_param=\"flavor\",\n image_param=\"image\")\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"nova\"]},\n name=\"NovaServers.boot_and_get_console_url\",\n platform=\"openstack\")\nclass BootAndGetConsoleUrl(utils.NovaScenario):\n\n def run(self, image, flavor, console_type, **kwargs):\n \"\"\"Retrieve a console url of a server.\n\n This simple scenario tests retrieving the console url of a server.\n\n :param image: image to be used to boot an instance\n :param flavor: flavor to be used to boot an instance\n :param console_type: type can be novnc/xvpvnc for protocol vnc;\n spice-html5 for protocol spice; rdp-html5 for\n protocol rdp; serial for protocol serial.\n webmks for protocol mks (since version 2.8).\n :param kwargs: Optional additional arguments for server creation\n \"\"\"\n server = self._boot_server(image, flavor, **kwargs)\n self._get_console_url_server(server, console_type)\n" }, { "alpha_fraction": 0.6056089997291565, "alphanum_fraction": 0.6074718236923218, "avg_line_length": 38.121456146240234, "blob_id": "071bd5c85d752aa90e039a9021b834255e9fe25c", "content_id": "b7661738c8d4351fd6657e5c7395f843b0616c33", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9663, 
"license_type": "permissive", "max_line_length": 78, "num_lines": 247, "path": "/rally_openstack/common/services/identity/identity.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import service\n\n\nProject = service.make_resource_cls(\"Project\", [\"id\", \"name\", \"domain_id\"])\nUser = service.make_resource_cls(\n \"User\", properties=[\"id\", \"name\", \"project_id\", \"domain_id\"])\nService = service.make_resource_cls(\"Service\", properties=[\"id\", \"name\"])\nRole = service.make_resource_cls(\"Role\", properties=[\"id\", \"name\"])\n\n\nclass Identity(service.UnifiedService):\n @classmethod\n def is_applicable(cls, clients):\n cloud_version = clients.keystone().version.split(\".\")[0][1:]\n return cloud_version == cls._meta_get(\"impl\")._meta_get(\"version\")\n\n @service.should_be_overridden\n def create_project(self, project_name=None, domain_name=\"Default\"):\n \"\"\"Creates new project/tenant and return project object.\n\n :param project_name: Name of project to be created.\n :param domain_name: Name or id of domain where to create project, for\n those service implementations that don't support\n domains you should use None or 'Default' value.\n \"\"\"\n return self._impl.create_project(project_name,\n domain_name=domain_name)\n\n @service.should_be_overridden\n def update_project(self, project_id, name=None, enabled=None,\n 
description=None):\n \"\"\"Update project name, enabled and description\n\n :param project_id: Id of project to update\n :param name: project name to be set\n :param enabled: enabled status of project\n :param description: project description to be set\n \"\"\"\n self._impl.update_project(project_id, name=name, enabled=enabled,\n description=description)\n\n @service.should_be_overridden\n def delete_project(self, project_id):\n \"\"\"Deletes project.\"\"\"\n return self._impl.delete_project(project_id)\n\n @service.should_be_overridden\n def list_projects(self):\n \"\"\"List all projects.\"\"\"\n return self._impl.list_projects()\n\n @service.should_be_overridden\n def get_project(self, project_id):\n \"\"\"Get project.\"\"\"\n return self._impl.get_project(project_id)\n\n @service.should_be_overridden\n def create_user(self, username=None, password=None, project_id=None,\n domain_name=\"Default\", enabled=True,\n default_role=\"member\"):\n \"\"\"Create user.\n\n :param username: name of user\n :param password: user password\n :param project_id: user's default project\n :param domain_name: Name or id of domain where to create user, for\n those service implementations that don't support\n domains you should use None or 'Default' value.\n :param enabled: whether the user is enabled.\n :param default_role: Name of role, for implementations that don't\n support domains this argument must be None or\n 'member'.\n \"\"\"\n return self._impl.create_user(username=username,\n password=password,\n project_id=project_id,\n domain_name=domain_name,\n default_role=default_role)\n\n @service.should_be_overridden\n def create_users(self, owner_id, number_of_users, user_create_args=None):\n \"\"\"Create specified amount of users.\n\n :param owner_id: Id of tenant/project\n :param number_of_users: number of users to create\n :param user_create_args: additional user creation arguments\n \"\"\"\n return self._impl.create_users(owner_id,\n number_of_users=number_of_users,\n 
user_create_args=user_create_args)\n\n @service.should_be_overridden\n def delete_user(self, user_id):\n \"\"\"Deletes user by its id.\"\"\"\n self._impl.delete_user(user_id)\n\n @service.should_be_overridden\n def list_users(self):\n \"\"\"List all users.\"\"\"\n return self._impl.list_users()\n\n @service.should_be_overridden\n def update_user(self, user_id, enabled=None, name=None, email=None,\n password=None):\n return self._impl.update_user(user_id, enabled=enabled, name=name,\n email=email, password=password)\n\n @service.should_be_overridden\n def get_user(self, user_id):\n \"\"\"Get user.\"\"\"\n return self._impl.get_user(user_id)\n\n @service.should_be_overridden\n def create_service(self, name=None, service_type=None, description=None):\n \"\"\"Creates keystone service with random name.\n\n :param name: name of service to create\n :param service_type: type of the service\n :param description: description of the service\n \"\"\"\n return self._impl.create_service(name=name, service_type=service_type,\n description=description)\n\n @service.should_be_overridden\n def delete_service(self, service_id):\n \"\"\"Deletes service.\"\"\"\n self._impl.delete_service(service_id)\n\n @service.should_be_overridden\n def list_services(self):\n \"\"\"List all services.\"\"\"\n return self._impl.list_services()\n\n @service.should_be_overridden\n def get_service(self, service_id):\n \"\"\"Get service.\"\"\"\n return self._impl.get_service(service_id)\n\n @service.should_be_overridden\n def create_role(self, name=None, domain_name=None):\n \"\"\"Create role with specific name\n\n :param name: role name\n :param domain_name: Name or id of domain where to create role, for\n those service implementations that don't support\n domains you should use None or 'Default' value.\n \"\"\"\n return self._impl.create_role(name=name, domain_name=domain_name)\n\n @service.should_be_overridden\n def add_role(self, role_id, user_id, project_id):\n \"\"\"Add role to user.\"\"\"\n return 
self._impl.add_role(role_id=role_id, user_id=user_id,\n project_id=project_id)\n\n @service.should_be_overridden\n def delete_role(self, role_id):\n \"\"\"Deletes role.\"\"\"\n self._impl.delete_role(role_id)\n\n @service.should_be_overridden\n def revoke_role(self, role_id, user_id, project_id):\n \"\"\"Revokes a role from a user.\"\"\"\n return self._impl.revoke_role(role_id=role_id, user_id=user_id,\n project_id=project_id)\n\n @service.should_be_overridden\n def list_roles(self, user_id=None, project_id=None, domain_name=None):\n \"\"\"List all roles.\n\n :param user_id: filter in role grants for the specified user on a\n resource. Domain or project must be specified.\n :param project_id: filter in role grants on the specified project.\n user_id should be specified\n :param domain_name: filter in role grants on the specified domain.\n user_id should be specified\n \"\"\"\n return self._impl.list_roles(user_id=user_id, project_id=project_id,\n domain_name=domain_name)\n\n @service.should_be_overridden\n def get_role(self, role_id):\n \"\"\"Get role.\"\"\"\n return self._impl.get_role(role_id)\n\n @service.should_be_overridden\n def get_service_by_name(self, name):\n \"\"\"List all services to find proper one.\"\"\"\n return self._impl.get_service_by_name(name)\n\n @service.should_be_overridden\n def create_ec2credentials(self, user_id, project_id):\n \"\"\"Create ec2credentials.\n\n :param user_id: User ID for which to create credentials\n :param project_id: Project ID for which to create credentials\n\n :returns: Created ec2-credentials object\n \"\"\"\n return self._impl.create_ec2credentials(user_id=user_id,\n project_id=project_id)\n\n @service.should_be_overridden\n def list_ec2credentials(self, user_id):\n \"\"\"List of access/secret pairs for a user_id.\n\n :param user_id: List all ec2-credentials for User ID\n\n :returns: Return ec2-credentials list\n \"\"\"\n return self._impl.list_ec2credentials(user_id)\n\n @service.should_be_overridden\n def 
delete_ec2credential(self, user_id, access):\n \"\"\"Delete ec2credential.\n\n :param user_id: User ID for which to delete credential\n :param access: access key for ec2credential to delete\n \"\"\"\n return self._impl.delete_ec2credential(user_id=user_id, access=access)\n\n @service.should_be_overridden\n def fetch_token(self):\n \"\"\"Authenticate user token.\"\"\"\n return self._impl.fetch_token()\n\n @service.should_be_overridden\n def validate_token(self, token):\n \"\"\"Validate user token.\n\n :param token: Auth token to validate\n \"\"\"\n return self._impl.validate_token(token)\n" }, { "alpha_fraction": 0.5100671052932739, "alphanum_fraction": 0.5137423872947693, "avg_line_length": 37.15853500366211, "blob_id": "71a4cfad9a7d49ffb9f1a76317ffedadadfa4e4b", "content_id": "baddbea54530bc9a7a04bb3b91cd848c97aee4bf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6258, "license_type": "permissive", "max_line_length": 79, "num_lines": 164, "path": "/rally_openstack/task/contexts/network/networks.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common.services.network import neutron\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"network\", platform=\"openstack\", order=350)\nclass Network(context.OpenStackContext):\n \"\"\"Create networking resources.\n\n This creates networks for all tenants, and optionally creates\n another resources like subnets and routers.\n \"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"start_cidr\": {\n \"type\": \"string\"\n },\n \"networks_per_tenant\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"subnets_per_network\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"network_create_args\": {\n \"type\": \"object\",\n \"additionalProperties\": True\n },\n \"dns_nameservers\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"uniqueItems\": True\n },\n \"dualstack\": {\n \"type\": \"boolean\",\n },\n \"router\": {\n \"type\": \"object\",\n \"properties\": {\n \"external\": {\n \"type\": \"boolean\",\n \"description\": \"Create a new external router.\"\n },\n \"enable_snat\": {\n \"type\": \"boolean\",\n \"description\": \"Whether to enable SNAT for a router \"\n \"if there is following extension or not\"\n },\n \"external_gateway_info\": {\n \"description\": \"The external gateway information .\",\n \"type\": \"object\",\n \"properties\": {\n \"network_id\": {\"type\": \"string\"},\n \"enable_snat\": {\"type\": \"boolean\"}\n },\n \"additionalProperties\": False\n }\n },\n \"additionalProperties\": False\n }\n },\n \"additionalProperties\": 
False\n }\n\n DEFAULT_CONFIG = {\n \"start_cidr\": \"10.2.0.0/24\",\n \"networks_per_tenant\": 1,\n \"subnets_per_network\": 1,\n \"network_create_args\": {},\n \"router\": {\"external\": True},\n \"dualstack\": False\n }\n\n def setup(self):\n # NOTE(rkiran): Some clients are not thread-safe. Thus during\n # multithreading/multiprocessing, it is likely the\n # sockets are left open. This problem is eliminated by\n # creating a connection in setup and cleanup separately.\n\n for user, tenant_id in self._iterate_per_tenants():\n self.context[\"tenants\"][tenant_id][\"networks\"] = []\n self.context[\"tenants\"][tenant_id][\"subnets\"] = []\n\n client = neutron.NeutronService(\n user[\"credential\"].clients(),\n name_generator=self.generate_random_name,\n atomic_inst=self.atomic_actions()\n )\n network_create_args = self.config[\"network_create_args\"].copy()\n subnet_create_args = {\n \"start_cidr\": (self.config[\"start_cidr\"]\n if not self.config[\"dualstack\"] else None)}\n if \"dns_nameservers\" in self.config:\n dns_nameservers = self.config[\"dns_nameservers\"]\n subnet_create_args[\"dns_nameservers\"] = dns_nameservers\n\n router_create_args = dict(self.config[\"router\"] or {})\n if not router_create_args:\n # old behaviour - empty dict means no router create\n router_create_args = None\n elif \"external\" in router_create_args:\n external = router_create_args.pop(\"external\")\n router_create_args[\"discover_external_gw\"] = external\n\n for i in range(self.config[\"networks_per_tenant\"]):\n\n net_infra = client.create_network_topology(\n network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnets_dualstack=self.config[\"dualstack\"],\n subnets_count=self.config[\"subnets_per_network\"],\n router_create_args=router_create_args)\n\n if net_infra[\"routers\"]:\n router_id = net_infra[\"routers\"][0][\"id\"]\n else:\n router_id = None\n net_infra[\"network\"][\"router_id\"] = router_id\n\n 
self.context[\"tenants\"][tenant_id][\"networks\"].append(\n net_infra[\"network\"]\n )\n self.context[\"tenants\"][tenant_id][\"subnets\"].extend(\n net_infra[\"subnets\"]\n )\n\n def cleanup(self):\n resource_manager.cleanup(\n names=[\n \"neutron.subnet\", \"neutron.network\", \"neutron.router\",\n \"neutron.port\"\n ],\n admin=self.context.get(\"admin\"),\n users=self.context.get(\"users\", []),\n task_id=self.get_owner_id(),\n superclass=self.__class__\n )\n" }, { "alpha_fraction": 0.5960583090782166, "alphanum_fraction": 0.6183304190635681, "avg_line_length": 42.34027862548828, "blob_id": "6b47fa072b674a240257d47c1858adf04e2280b6", "content_id": "62ac53d2baa1650a4c320033ec78c9cb6d53d3f3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6241, "license_type": "permissive", "max_line_length": 79, "num_lines": 144, "path": "/tests/unit/task/scenarios/nova/test_flavors.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright: 2015.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally import exceptions\nfrom rally_openstack.task.scenarios.nova import flavors\nfrom tests.unit import test\n\n\[email protected]\nclass NovaFlavorsTestCase(test.TestCase):\n\n def test_list_flavors(self):\n scenario = flavors.ListFlavors()\n scenario._list_flavors = mock.Mock()\n scenario.run(detailed=True, is_public=True, limit=None, marker=None,\n min_disk=None, min_ram=None, sort_dir=None, sort_key=None)\n scenario._list_flavors.assert_called_once_with(\n detailed=True, is_public=True, limit=None, marker=None,\n min_disk=None, min_ram=None, sort_dir=None, sort_key=None)\n\n def test_create_and_list_flavor_access(self):\n # Common parameters\n ram = 100\n vcpus = 1\n disk = 1\n\n scenario = flavors.CreateAndListFlavorAccess()\n scenario._create_flavor = mock.Mock()\n scenario._list_flavor_access = mock.Mock()\n\n # Positive case:\n scenario.run(\n ram, vcpus, disk, ephemeral=0, flavorid=\"auto\",\n is_public=False, rxtx_factor=1.0, swap=0)\n scenario._create_flavor.assert_called_once_with(\n ram, vcpus, disk, ephemeral=0, flavorid=\"auto\",\n is_public=False, rxtx_factor=1.0, swap=0)\n scenario._list_flavor_access.assert_called_once_with(\n scenario._create_flavor.return_value.id)\n\n # Negative case1: flavor wasn't created\n scenario._create_flavor.return_value = None\n self.assertRaises(exceptions.RallyAssertionError, scenario.run,\n ram, vcpus, disk, ephemeral=0, flavorid=\"auto\",\n is_public=False, rxtx_factor=1.0, swap=0)\n scenario._create_flavor.assert_called_with(\n ram, vcpus, disk, ephemeral=0, flavorid=\"auto\",\n is_public=False, rxtx_factor=1.0, swap=0)\n\n def test_create_flavor_add_tenant_access(self):\n flavor = mock.MagicMock()\n context = {\"user\": {\"tenant_id\": \"fake\"},\n \"tenant\": {\"id\": \"fake\"}}\n scenario = flavors.CreateFlavorAndAddTenantAccess()\n scenario.context = 
context\n scenario.generate_random_name = mock.MagicMock()\n scenario._create_flavor = mock.MagicMock(return_value=flavor)\n scenario._add_tenant_access = mock.MagicMock()\n\n # Positive case:\n scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0,\n flavorid=\"auto\", is_public=True, rxtx_factor=1.0, swap=0)\n\n scenario._create_flavor.assert_called_once_with(\n 100, 1, 1, ephemeral=0, flavorid=\"auto\", is_public=True,\n rxtx_factor=1.0, swap=0)\n scenario._add_tenant_access.assert_called_once_with(flavor.id,\n \"fake\")\n\n # Negative case1: flavor wasn't created\n scenario._create_flavor.return_value = None\n self.assertRaises(exceptions.RallyAssertionError, scenario.run,\n 100, 1, 1, ephemeral=0, flavorid=\"auto\",\n is_public=True, rxtx_factor=1.0, swap=0)\n scenario._create_flavor.assert_called_with(\n 100, 1, 1, ephemeral=0, flavorid=\"auto\", is_public=True,\n rxtx_factor=1.0, swap=0)\n\n def test_create_flavor(self):\n scenario = flavors.CreateFlavor()\n scenario._create_flavor = mock.MagicMock()\n scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0, flavorid=\"auto\",\n is_public=True, rxtx_factor=1.0, swap=0)\n scenario._create_flavor.assert_called_once_with(\n 100, 1, 1, ephemeral=0,\n flavorid=\"auto\", is_public=True, rxtx_factor=1.0, swap=0)\n\n def test_create_and_get_flavor(self, **kwargs):\n scenario = flavors.CreateAndGetFlavor()\n scenario._create_flavor = mock.Mock()\n scenario._get_flavor = mock.Mock()\n scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0, flavorid=\"auto\",\n is_public=True, rxtx_factor=1.0, swap=0)\n\n scenario._create_flavor.assert_called_once_with(\n 100, 1, 1, ephemeral=0, flavorid=\"auto\", is_public=True,\n rxtx_factor=1.0, swap=0)\n scenario._get_flavor.assert_called_once_with(\n scenario._create_flavor.return_value.id)\n\n def test_create_and_delete_flavor(self):\n scenario = flavors.CreateAndDeleteFlavor()\n scenario._create_flavor = mock.Mock()\n scenario._delete_flavor = mock.Mock()\n scenario.run(ram=100, vcpus=1, 
disk=1, ephemeral=0, flavorid=\"auto\",\n is_public=True, rxtx_factor=1.0, swap=0)\n\n scenario._create_flavor.assert_called_once_with(\n 100, 1, 1, ephemeral=0, flavorid=\"auto\", is_public=True,\n rxtx_factor=1.0, swap=0)\n scenario._delete_flavor.assert_called_once_with(\n scenario._create_flavor.return_value.id)\n\n def test_create_flavor_and_set_keys(self):\n scenario = flavors.CreateFlavorAndSetKeys()\n scenario._create_flavor = mock.MagicMock()\n scenario._set_flavor_keys = mock.MagicMock()\n specs_args = {\"fakeargs\": \"foo\"}\n scenario.run(\n ram=100, vcpus=1, disk=1, extra_specs=specs_args,\n ephemeral=0, flavorid=\"auto\", is_public=True,\n rxtx_factor=1.0, swap=0)\n\n scenario._create_flavor.assert_called_once_with(\n 100, 1, 1, ephemeral=0, flavorid=\"auto\",\n is_public=True, rxtx_factor=1.0, swap=0)\n scenario._set_flavor_keys.assert_called_once_with(\n scenario._create_flavor.return_value, specs_args)\n" }, { "alpha_fraction": 0.6246808171272278, "alphanum_fraction": 0.6282439827919006, "avg_line_length": 47.52737808227539, "blob_id": "5f68e4653de78a73dd540b06fa60470feda862f9", "content_id": "9af2d63e6f23b5661ecb1c6eda0d70f7b5555588", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16839, "license_type": "permissive", "max_line_length": 79, "num_lines": 347, "path": "/rally_openstack/task/scenarios/neutron/bgpvpn.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.neutron import utils\n\n\n\"\"\"Scenarios for Neutron Networking-Bgpvpn.\"\"\"\n\n\ndef _create_random_route_target():\n return \"{}:{}\".format(random.randint(0, 65535),\n random.randint(0, 4294967295))\n\n\[email protected](\"enum\", param_name=\"bgpvpn_type\", values=[\"l2\", \"l3\"],\n missed=True)\[email protected](\"required_neutron_extensions\", extensions=[\"bgpvpn\"])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](context={\"admin_cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronBGPVPN.create_and_delete_bgpvpns\",\n platform=\"openstack\")\nclass CreateAndDeleteBgpvpns(utils.NeutronScenario):\n\n def run(self, route_targets=None, import_targets=None,\n export_targets=None, route_distinguishers=None, bgpvpn_type=\"l3\"):\n \"\"\"Create bgpvpn and delete the bgpvpn.\n\n Measure the \"neutron bgpvpn-create\" and neutron bgpvpn-delete\n command performance.\n\n :param route_targets: Route Targets that will be both imported and\n used for export\n :param import_targets: Additional Route Targets that will be imported\n :param export_targets: Additional Route Targets that will be used\n for export.\n :param route_distinguishers: List of route distinguisher strings\n :param bgpvpn_type: type of VPN and the technology behind it.\n Acceptable formats: l2 and l3\n \"\"\"\n bgpvpn = self._create_bgpvpn(route_targets=route_targets,\n import_targets=import_targets,\n export_targets=export_targets,\n route_distinguishers=route_distinguishers,\n type=bgpvpn_type)\n self._delete_bgpvpn(bgpvpn)\n\n\[email protected](\"enum\", param_name=\"bgpvpn_type\", 
values=[\"l2\", \"l3\"],\n missed=True)\[email protected](\"required_neutron_extensions\", extensions=[\"bgpvpn\"])\[email protected](\"required_services\", services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronBGPVPN.create_and_list_bgpvpns\",\n platform=\"openstack\")\nclass CreateAndListBgpvpns(utils.NeutronScenario):\n\n def run(self, route_targets=None, import_targets=None,\n export_targets=None, route_distinguishers=None, bgpvpn_type=\"l3\"):\n \"\"\"Create a bgpvpn and then list all bgpvpns\n\n Measure the \"neutron bgpvpn-list\" command performance.\n\n :param route_targets: Route Targets that will be both imported and\n used for export\n :param import_targets: Additional Route Targets that will be imported\n :param export_targets: Additional Route Targets that will be used\n for export.\n :param route_distinguishers: List of route distinguisher strings\n :param bgpvpn_type: type of VPN and the technology behind it.\n Acceptable formats: l2 and l3\n \"\"\"\n bgpvpn = self._create_bgpvpn(route_targets=route_targets,\n import_targets=import_targets,\n export_targets=export_targets,\n route_distinguishers=route_distinguishers,\n type=bgpvpn_type)\n bgpvpns = self._list_bgpvpns()\n self.assertIn(bgpvpn[\"bgpvpn\"][\"id\"], [b[\"id\"] for b in bgpvpns])\n\n\[email protected](\"enum\", param_name=\"bgpvpn_type\", values=[\"l2\", \"l3\"],\n missed=True)\[email protected](\"required_neutron_extensions\", extensions=[\"bgpvpn\"])\[email protected](\"required_services\", services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](context={\"admin_cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronBGPVPN.create_and_update_bgpvpns\",\n platform=\"openstack\")\nclass CreateAndUpdateBgpvpns(utils.NeutronScenario):\n\n def run(self, update_name=False, 
route_targets=None,\n import_targets=None, export_targets=None,\n route_distinguishers=None, updated_route_targets=None,\n updated_import_targets=None, updated_export_targets=None,\n updated_route_distinguishers=None, bgpvpn_type=\"l3\"):\n \"\"\"Create and Update bgpvpns\n\n Measure the \"neutron bgpvpn-update\" command performance.\n\n :param update_name: bool, whether or not to modify BGP VPN name\n :param route_targets: Route Targets that will be both imported\n and used for export\n :param updated_route_targets: Updated Route Targets that will be both\n imported and used for export\n :param import_targets: Additional Route Targets that will be imported\n :param updated_import_targets: Updated additional Route Targets that\n will be imported\n :param export_targets: additional Route Targets that will be used\n for export.\n :param updated_export_targets: Updated additional Route Targets that\n will be used for export.\n :param route_distinguishers: list of route distinguisher strings\n :param updated_route_distinguishers: Updated list of route\n distinguisher strings\n :param bgpvpn_type: type of VPN and the technology behind it.\n Acceptable formats: l2 and l3\n \"\"\"\n create_bgpvpn_args = {\n \"route_targets\": route_targets,\n \"import_targets\": import_targets,\n \"export_targets\": export_targets,\n \"route_distinguishers\": route_distinguishers,\n \"type\": bgpvpn_type\n }\n bgpvpn = self._create_bgpvpn(**create_bgpvpn_args)\n update_bgpvpn_args = {\n \"update_name\": update_name,\n \"route_targets\": updated_route_targets,\n \"import_targets\": updated_import_targets,\n \"export_targets\": updated_export_targets,\n \"route_distinguishers\": updated_route_distinguishers,\n }\n self._update_bgpvpn(bgpvpn, **update_bgpvpn_args)\n\n\[email protected](\"enum\", param_name=\"bgpvpn_type\", values=[\"l2\", \"l3\"],\n missed=True)\[email protected](\"required_neutron_extensions\", extensions=[\"bgpvpn\"])\[email protected](\"required_services\", 
services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](\"required_contexts\", contexts=[\"network\", \"servers\"])\[email protected](context={\"admin_cleanup@openstack\": [\"neutron\"],\n \"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronBGPVPN.create_bgpvpn_assoc_disassoc_networks\",\n platform=\"openstack\")\nclass CreateAndAssociateDissassociateNetworks(utils.NeutronScenario):\n\n def run(self, route_targets=None, import_targets=None,\n export_targets=None, route_distinguishers=None, bgpvpn_type=\"l3\"):\n \"\"\"Associate a network and disassociate it from a BGP VPN.\n\n Measure the \"neutron bgpvpn-create\", \"neutron bgpvpn-net-assoc-create\"\n and \"neutron bgpvpn-net-assoc-delete\" command performance.\n\n :param route_targets: Route Targets that will be both imported and\n used for export\n :param import_targets: Additional Route Targets that will be imported\n :param export_targets: Additional Route Targets that will be used\n for export.\n :param route_distinguishers: List of route distinguisher strings\n :param bgpvpn_type: type of VPN and the technology behind it.\n Acceptable formats: l2 and l3\n \"\"\"\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n network = networks[0]\n if not route_targets:\n route_targets = _create_random_route_target()\n bgpvpn = self._create_bgpvpn(route_targets=route_targets,\n import_targets=import_targets,\n export_targets=export_targets,\n route_distinguishers=route_distinguishers,\n type=bgpvpn_type,\n tenant_id=network[\"tenant_id\"])\n net_asso = self._create_bgpvpn_network_assoc(bgpvpn, network)\n self._delete_bgpvpn_network_assoc(bgpvpn, net_asso)\n\n\[email protected](\"enum\", param_name=\"bgpvpn_type\", values=[\"l2\", \"l3\"],\n missed=True)\[email protected](\"required_neutron_extensions\", extensions=[\"bgpvpn\"])\[email protected](\"required_services\", services=[consts.Service.NEUTRON])\[email 
protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](\"required_contexts\", contexts=[\"network\", \"servers\"])\[email protected](context={\"admin_cleanup@openstack\": [\"neutron\"],\n \"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronBGPVPN.create_bgpvpn_assoc_disassoc_routers\",\n platform=\"openstack\")\nclass CreateAndAssociateDissassociateRouters(utils.NeutronScenario):\n\n def run(self, route_targets=None, import_targets=None,\n export_targets=None, route_distinguishers=None, bgpvpn_type=\"l3\"):\n \"\"\"Associate a router and disassociate it from a BGP VPN.\n\n Measure the \"neutron bgpvpn-create\",\n \"neutron bgpvpn-router-assoc-create\" and\n \"neutron bgpvpn-router-assoc-delete\" command performance.\n\n :param route_targets: Route Targets that will be both imported and\n used for export\n :param import_targets: Additional Route Targets that will be imported\n :param export_targets: Additional Route Targets that will be used\n for export.\n :param route_distinguishers: List of route distinguisher strings\n :param bgpvpn_type: type of VPN and the technology behind it.\n Acceptable formats: l2 and l3\n \"\"\"\n\n router = {\n \"id\": self.context[\"tenant\"][\"networks\"][0][\"router_id\"]}\n tenant_id = self.context[\"tenant\"][\"id\"]\n if not route_targets:\n route_targets = _create_random_route_target()\n bgpvpn = self._create_bgpvpn(route_targets=route_targets,\n import_targets=import_targets,\n export_targets=export_targets,\n route_distinguishers=route_distinguishers,\n type=bgpvpn_type,\n tenant_id=tenant_id)\n router_asso = self._create_bgpvpn_router_assoc(bgpvpn, router)\n self._delete_bgpvpn_router_assoc(bgpvpn, router_asso)\n\n\[email protected](\"enum\", param_name=\"bgpvpn_type\", values=[\"l2\", \"l3\"],\n missed=True)\[email protected](\"required_neutron_extensions\", extensions=[\"bgpvpn\"])\[email protected](\"required_services\", services=[consts.Service.NEUTRON])\[email 
protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](\"required_contexts\", contexts=[\"network\", \"servers\"])\[email protected](context={\"admin_cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronBGPVPN.create_and_list_networks_associations\",\n platform=\"openstack\")\nclass CreateAndListNetworksAssocs(utils.NeutronScenario):\n\n def run(self, route_targets=None, import_targets=None,\n export_targets=None, route_distinguishers=None, bgpvpn_type=\"l3\"):\n \"\"\"Associate a network and list networks associations.\n\n Measure the \"neutron bgpvpn-create\",\n \"neutron bgpvpn-net-assoc-create\" and\n \"neutron bgpvpn-net-assoc-list\" command performance.\n\n :param route_targets: Route Targets that will be both imported and\n used for export\n :param import_targets: Additional Route Targets that will be imported\n :param export_targets: Additional Route Targets that will be used\n for export.\n :param route_distinguishers: List of route distinguisher strings\n :param bgpvpn_type: type of VPN and the technology behind it.\n Acceptable formats: l2 and l3\n \"\"\"\n\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n network = networks[0]\n if not route_targets:\n route_targets = _create_random_route_target()\n bgpvpn = self._create_bgpvpn(route_targets=route_targets,\n import_targets=import_targets,\n export_targets=export_targets,\n route_distinguishers=route_distinguishers,\n type=bgpvpn_type,\n tenant_id=network[\"tenant_id\"])\n self._create_bgpvpn_network_assoc(bgpvpn, network)\n net_assocs = self._list_bgpvpn_network_assocs(\n bgpvpn)[\"network_associations\"]\n\n network_id = network[\"id\"]\n msg = (\"Network not included into list of associated networks\\n\"\n \"Network created: {}\\n\"\n \"List of associations: {}\").format(network, net_assocs)\n list_networks = [net_assoc[\"network_id\"] for net_assoc in net_assocs]\n self.assertIn(network_id, list_networks, err_msg=msg)\n\n\[email 
protected](\"enum\", param_name=\"bgpvpn_type\", values=[\"l2\", \"l3\"],\n missed=True)\[email protected](\"required_neutron_extensions\", extensions=[\"bgpvpn\"])\[email protected](\"required_services\", services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\",\n admin=True, users=True)\[email protected](\"required_contexts\", contexts=[\"network\", \"servers\"])\[email protected](context={\"admin_cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronBGPVPN.create_and_list_routers_associations\",\n platform=\"openstack\")\nclass CreateAndListRoutersAssocs(utils.NeutronScenario):\n\n def run(self, route_targets=None, import_targets=None,\n export_targets=None, route_distinguishers=None, bgpvpn_type=\"l3\"):\n \"\"\"Associate a router and list routers associations.\n\n Measure the \"neutron bgpvpn-create\",\n \"neutron bgpvpn-router-assoc-create\" and\n \"neutron bgpvpn-router-assoc-list\" command performance.\n\n :param route_targets: Route Targets that will be both imported and\n used for export\n :param import_targets: Additional Route Targets that will be imported\n :param export_targets: Additional Route Targets that will be used\n for export.\n :param route_distinguishers: List of route distinguisher strings\n :param bgpvpn_type: type of VPN and the technology behind it.\n Acceptable formats: l2 and l3\n \"\"\"\n\n router = {\n \"id\": self.context[\"tenant\"][\"networks\"][0][\"router_id\"]}\n tenant_id = self.context[\"tenant\"][\"id\"]\n if not route_targets:\n route_targets = _create_random_route_target()\n\n bgpvpn = self._create_bgpvpn(route_targets=route_targets,\n import_targets=import_targets,\n export_targets=export_targets,\n route_distinguishers=route_distinguishers,\n type=bgpvpn_type,\n tenant_id=tenant_id)\n self._create_bgpvpn_router_assoc(bgpvpn, router)\n router_assocs = self._list_bgpvpn_router_assocs(\n bgpvpn)[\"router_associations\"]\n\n router_id = router[\"id\"]\n msg = (\"Router not included 
into list of associated routers\\n\"\n \"Router created: {}\\n\"\n \"List of associations: {}\").format(router, router_assocs)\n\n list_routers = [r_assoc[\"router_id\"] for r_assoc in router_assocs]\n self.assertIn(router_id, list_routers, err_msg=msg)\n" }, { "alpha_fraction": 0.5965918898582458, "alphanum_fraction": 0.5998576283454895, "avg_line_length": 40.1793098449707, "blob_id": "52ab854eb0b325580256b0a0cd10d95b354437ee", "content_id": "a4d002da18d4a3beac128952aab42fe3350c5ffe", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23884, "license_type": "permissive", "max_line_length": 79, "num_lines": 580, "path": "/tests/unit/common/services/identity/test_keystone_v3.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\nimport uuid\n\nimport ddt\n\nfrom rally import exceptions\n\nfrom rally_openstack.common.services.identity import identity\nfrom rally_openstack.common.services.identity import keystone_v3\nfrom tests.unit import test\n\n\nPATH = \"rally_openstack.common.services.identity.keystone_v3\"\n\n\[email protected]\nclass KeystoneV3ServiceTestCase(test.TestCase):\n def setUp(self):\n super(KeystoneV3ServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.kc = self.clients.keystone.return_value\n self.name_generator = mock.MagicMock()\n self.service = keystone_v3.KeystoneV3Service(\n self.clients, name_generator=self.name_generator)\n\n def test__get_domain_id_not_found(self):\n from keystoneclient import exceptions as kc_exceptions\n\n self.kc.domains.get.side_effect = kc_exceptions.NotFound\n self.kc.domains.list.return_value = []\n domain_name_or_id = \"some\"\n\n self.assertRaises(exceptions.GetResourceNotFound,\n self.service._get_domain_id, domain_name_or_id)\n self.kc.domains.get.assert_called_once_with(domain_name_or_id)\n self.kc.domains.list.assert_called_once_with(name=domain_name_or_id)\n\n def test__get_domain_id_find_by_name(self):\n from keystoneclient import exceptions as kc_exceptions\n\n self.kc.domains.get.side_effect = kc_exceptions.NotFound\n domain = mock.MagicMock()\n self.kc.domains.list.return_value = [domain]\n domain_name_or_id = \"some\"\n\n self.assertEqual(domain.id,\n self.service._get_domain_id(domain_name_or_id))\n self.kc.domains.get.assert_called_once_with(domain_name_or_id)\n self.kc.domains.list.assert_called_once_with(name=domain_name_or_id)\n\n def test__get_domain_id_find_by_id(self):\n domain = mock.MagicMock()\n\n self.kc.domains.get.return_value = domain\n\n domain_name_or_id = \"some\"\n\n self.assertEqual(domain.id,\n self.service._get_domain_id(domain_name_or_id))\n 
self.kc.domains.get.assert_called_once_with(domain_name_or_id)\n self.assertFalse(self.kc.domains.list.called)\n\n @mock.patch(\"%s.KeystoneV3Service._get_domain_id\" % PATH)\n def test_create_project(self, mock__get_domain_id):\n name = \"name\"\n domain_name = \"domain\"\n domain_id = \"id\"\n\n mock__get_domain_id.return_value = domain_id\n\n project = self.service.create_project(name, domain_name=domain_name)\n\n mock__get_domain_id.assert_called_once_with(domain_name)\n self.assertEqual(project, self.kc.projects.create.return_value)\n self.kc.projects.create.assert_called_once_with(name=name,\n domain=domain_id)\n\n @ddt.data({\"project_id\": \"fake_id\", \"name\": True, \"enabled\": True,\n \"description\": True},\n {\"project_id\": \"fake_id\", \"name\": \"some\", \"enabled\": False,\n \"description\": \"descr\"})\n @ddt.unpack\n def test_update_project(self, project_id, name, enabled, description):\n\n self.service.update_project(project_id,\n name=name,\n description=description,\n enabled=enabled)\n\n if name is True:\n name = self.name_generator.return_value\n if description is True:\n description = self.name_generator.return_value\n\n self.kc.projects.update.assert_called_once_with(\n project_id, name=name, description=description, enabled=enabled)\n\n def test_delete_project(self):\n project_id = \"fake_id\"\n self.service.delete_project(project_id)\n self.kc.projects.delete.assert_called_once_with(project_id)\n\n def test_list_projects(self):\n self.assertEqual(self.kc.projects.list.return_value,\n self.service.list_projects())\n self.kc.projects.list.assert_called_once_with()\n\n def test_get_project(self):\n project_id = \"fake_id\"\n self.service.get_project(project_id)\n self.kc.projects.get.assert_called_once_with(project_id)\n\n @mock.patch(\"%s.LOG\" % PATH)\n @mock.patch(\"%s.KeystoneV3Service._get_domain_id\" % PATH)\n def test_create_user(self, mock__get_domain_id, mock_log):\n\n name = \"name\"\n password = \"passwd\"\n project_id = 
\"project\"\n domain_name = \"domain\"\n\n self.service.list_roles = mock.MagicMock(return_value=[])\n\n user = self.service.create_user(name, password=password,\n project_id=project_id,\n domain_name=domain_name)\n\n self.assertEqual(user, self.kc.users.create.return_value)\n self.kc.users.create.assert_called_once_with(\n name=name, password=password, default_project=project_id,\n domain=mock__get_domain_id.return_value,\n enabled=True)\n\n self.assertTrue(mock_log.warning.called)\n\n @mock.patch(\"%s.LOG\" % PATH)\n @mock.patch(\"%s.KeystoneV3Service._get_domain_id\" % PATH)\n def test_create_user_without_project_id(self, mock__get_domain_id,\n mock_log):\n\n name = \"name\"\n password = \"passwd\"\n domain_name = \"domain\"\n\n self.service.list_roles = mock.MagicMock(return_value=[])\n\n user = self.service.create_user(name, password=password,\n domain_name=domain_name)\n\n self.assertEqual(user, self.kc.users.create.return_value)\n self.kc.users.create.assert_called_once_with(\n name=name, password=password, default_project=None,\n domain=mock__get_domain_id.return_value,\n enabled=True)\n\n self.assertFalse(self.service.list_roles.called)\n self.assertFalse(mock_log.warning.called)\n\n @mock.patch(\"%s.LOG\" % PATH)\n @mock.patch(\"%s.KeystoneV3Service._get_domain_id\" % PATH)\n def test_create_user_and_add_role(\n self, mock_keystone_v3_service__get_domain_id, mock_log):\n mock__get_domain_id = mock_keystone_v3_service__get_domain_id\n\n name = \"name\"\n password = \"passwd\"\n project_id = \"project\"\n domain_name = \"domain\"\n\n class Role(object):\n def __init__(self, name):\n self.name = name\n self.id = str(uuid.uuid4())\n\n self.service.list_roles = mock.MagicMock(\n return_value=[Role(\"admin\"), Role(\"member\")])\n self.service.add_role = mock.MagicMock()\n\n user = self.service.create_user(name, password=password,\n project_id=project_id,\n domain_name=domain_name)\n\n self.assertEqual(user, self.kc.users.create.return_value)\n 
self.kc.users.create.assert_called_once_with(\n name=name, password=password, default_project=project_id,\n domain=mock__get_domain_id.return_value,\n enabled=True)\n\n self.assertFalse(mock_log.warning.called)\n self.service.add_role.assert_called_once_with(\n role_id=self.service.list_roles.return_value[1].id,\n user_id=user.id,\n project_id=project_id)\n\n def test_create_users(self):\n self.service.create_user = mock.MagicMock()\n\n n = 2\n project_id = \"some\"\n self.assertEqual([self.service.create_user.return_value] * n,\n self.service.create_users(number_of_users=n,\n project_id=project_id))\n self.assertEqual([mock.call(project_id=project_id)] * n,\n self.service.create_user.call_args_list)\n\n @ddt.data(None, \"some\")\n def test_update_user(self, domain_name):\n user_id = \"fake_id\"\n name = \"new name\"\n project_id = \"new project\"\n password = \"pass\"\n email = \"mail\"\n description = \"n/a\"\n enabled = False\n default_project = \"some\"\n\n self.service._get_domain_id = mock.MagicMock()\n\n self.service.update_user(user_id, name=name, domain_name=domain_name,\n project_id=project_id, password=password,\n email=email, description=description,\n enabled=enabled,\n default_project=default_project)\n\n domain = None\n if domain_name:\n self.service._get_domain_id.assert_called_once_with(domain_name)\n domain = self.service._get_domain_id.return_value\n else:\n self.assertFalse(self.service._get_domain_id.called)\n\n self.kc.users.update.assert_called_once_with(\n user_id, name=name, domain=domain, project=project_id,\n password=password, email=email, description=description,\n enabled=enabled, default_project=default_project)\n\n @ddt.data({\"name\": None, \"service_type\": None, \"description\": None,\n \"enabled\": True},\n {\"name\": \"some\", \"service_type\": \"st\", \"description\": \"d\",\n \"enabled\": False})\n @ddt.unpack\n def test_create_service(self, name, service_type, description, enabled):\n 
self.assertEqual(self.kc.services.create.return_value,\n self.service.create_service(name=name,\n service_type=service_type,\n description=description,\n enabled=enabled))\n name = name or self.name_generator.return_value\n service_type = service_type or \"rally_test_type\"\n description = description or self.name_generator.return_value\n self.kc.services.create.assert_called_once_with(\n name, type=service_type, description=description,\n enabled=enabled)\n\n @mock.patch(\"%s.KeystoneV3Service._get_domain_id\" % PATH)\n def test_create_role(self, mock__get_domain_id):\n\n domain_name = \"domain\"\n name = \"some\"\n\n user = self.service.create_role(name, domain_name=domain_name)\n\n self.assertEqual(user, self.kc.roles.create.return_value)\n self.kc.roles.create.assert_called_once_with(\n name, domain=mock__get_domain_id.return_value)\n\n @ddt.data({\"domain_name\": \"domain\", \"user_id\": \"user\", \"project_id\": \"pr\"},\n {\"domain_name\": None, \"user_id\": None, \"project_id\": None})\n @ddt.unpack\n def test_list_roles(self, domain_name, user_id, project_id):\n self.service._get_domain_id = mock.MagicMock()\n self.assertEqual(self.kc.roles.list.return_value,\n self.service.list_roles(user_id=user_id,\n domain_name=domain_name,\n project_id=project_id))\n domain = None\n if domain_name:\n self.service._get_domain_id.assert_called_once_with(domain_name)\n domain = self.service._get_domain_id.return_value\n else:\n self.assertFalse(self.service._get_domain_id.called)\n\n self.kc.roles.list.assert_called_once_with(user=user_id,\n domain=domain,\n project=project_id)\n\n def test_add_role(self):\n role_id = \"fake_id\"\n user_id = \"user_id\"\n project_id = \"project_id\"\n\n self.service.add_role(role_id, user_id=user_id, project_id=project_id)\n self.kc.roles.grant.assert_called_once_with(\n user=user_id, role=role_id, project=project_id)\n\n def test_revoke_role(self):\n role_id = \"fake_id\"\n user_id = \"user_id\"\n project_id = \"tenant_id\"\n\n 
self.service.revoke_role(role_id, user_id=user_id,\n project_id=project_id)\n\n self.kc.roles.revoke.assert_called_once_with(\n user=user_id, role=role_id, project=project_id)\n\n def test_get_role(self):\n role_id = \"fake_id\"\n self.service.get_role(role_id)\n self.kc.roles.get.assert_called_once_with(role_id)\n\n def test_create_domain(self):\n name = \"some_domain\"\n descr = \"descr\"\n enabled = False\n\n self.service.create_domain(name, description=descr, enabled=enabled)\n self.kc.domains.create.assert_called_once_with(\n name, description=descr, enabled=enabled)\n\n def test_create_ec2credentials(self):\n user_id = \"fake_id\"\n project_id = \"fake_id\"\n\n self.assertEqual(self.kc.ec2.create.return_value,\n self.service.create_ec2credentials(\n user_id, project_id=project_id))\n self.kc.ec2.create.assert_called_once_with(user_id,\n project_id=project_id)\n\n\[email protected]\nclass UnifiedKeystoneV3ServiceTestCase(test.TestCase):\n def setUp(self):\n super(UnifiedKeystoneV3ServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.service = keystone_v3.UnifiedKeystoneV3Service(self.clients)\n self.service._impl = mock.MagicMock()\n\n def test_init_identity_service(self):\n self.clients.keystone.return_value.version = \"v3\"\n self.assertIsInstance(identity.Identity(self.clients)._impl,\n keystone_v3.UnifiedKeystoneV3Service)\n\n def test__unify_project(self):\n class KeystoneV3Project(object):\n def __init__(self):\n self.id = str(uuid.uuid4())\n self.name = str(uuid.uuid4())\n self.domain_id = str(uuid.uuid4())\n\n project = KeystoneV3Project()\n unified_project = self.service._unify_project(project)\n self.assertIsInstance(unified_project, identity.Project)\n self.assertEqual(project.id, unified_project.id)\n self.assertEqual(project.name, unified_project.name)\n self.assertEqual(project.domain_id, unified_project.domain_id)\n self.assertEqual(project.domain_id, unified_project.domain_id)\n\n def test__unify_user(self):\n class 
KeystoneV3User(object):\n def __init__(self, project_id=None):\n self.id = str(uuid.uuid4())\n self.name = str(uuid.uuid4())\n self.domain_id = str(uuid.uuid4())\n if project_id is not None:\n self.default_project_id = project_id\n\n user = KeystoneV3User()\n\n unified_user = self.service._unify_user(user)\n self.assertIsInstance(unified_user, identity.User)\n self.assertEqual(user.id, unified_user.id)\n self.assertEqual(user.name, unified_user.name)\n self.assertEqual(user.domain_id, unified_user.domain_id)\n self.assertIsNone(unified_user.project_id)\n\n project_id = \"tenant_id\"\n user = KeystoneV3User(project_id=project_id)\n unified_user = self.service._unify_user(user)\n self.assertIsInstance(unified_user, identity.User)\n self.assertEqual(user.id, unified_user.id)\n self.assertEqual(user.name, unified_user.name)\n self.assertEqual(user.domain_id, unified_user.domain_id)\n self.assertEqual(project_id, unified_user.project_id)\n\n @mock.patch(\"%s.UnifiedKeystoneV3Service._unify_project\" % PATH)\n def test_create_project(self,\n mock_unified_keystone_v3_service__unify_project):\n mock_unify_project = mock_unified_keystone_v3_service__unify_project\n name = \"name\"\n domain = \"domain\"\n\n self.assertEqual(mock_unify_project.return_value,\n self.service.create_project(name, domain_name=domain))\n mock_unify_project.assert_called_once_with(\n self.service._impl.create_project.return_value)\n self.service._impl.create_project.assert_called_once_with(\n name, domain_name=domain)\n\n def test_update_project(self):\n project_id = \"fake_id\"\n name = \"name\"\n description = \"descr\"\n enabled = False\n\n self.service.update_project(project_id=project_id, name=name,\n description=description, enabled=enabled)\n self.service._impl.update_project.assert_called_once_with(\n project_id=project_id, name=name, description=description,\n enabled=enabled)\n\n def test_delete_project(self):\n project_id = \"fake_id\"\n self.service.delete_project(project_id)\n 
self.service._impl.delete_project.assert_called_once_with(project_id)\n\n @mock.patch(\"%s.UnifiedKeystoneV3Service._unify_project\" % PATH)\n def test_get_project(self,\n mock_unified_keystone_v3_service__unify_project):\n mock_unify_project = mock_unified_keystone_v3_service__unify_project\n project_id = \"id\"\n\n self.assertEqual(mock_unify_project.return_value,\n self.service.get_project(project_id))\n mock_unify_project.assert_called_once_with(\n self.service._impl.get_project.return_value)\n self.service._impl.get_project.assert_called_once_with(project_id)\n\n @mock.patch(\"%s.UnifiedKeystoneV3Service._unify_project\" % PATH)\n def test_list_projects(self,\n mock_unified_keystone_v3_service__unify_project):\n mock_unify_project = mock_unified_keystone_v3_service__unify_project\n\n projects = [mock.MagicMock()]\n self.service._impl.list_projects.return_value = projects\n\n self.assertEqual([mock_unify_project.return_value],\n self.service.list_projects())\n mock_unify_project.assert_called_once_with(projects[0])\n\n @mock.patch(\"%s.UnifiedKeystoneV3Service._unify_user\" % PATH)\n def test_create_user(self, mock_unified_keystone_v3_service__unify_user):\n mock_unify_user = mock_unified_keystone_v3_service__unify_user\n\n name = \"name\"\n password = \"passwd\"\n project_id = \"project\"\n domain_name = \"domain\"\n default_role = \"role\"\n\n self.assertEqual(mock_unify_user.return_value,\n self.service.create_user(name, password=password,\n project_id=project_id,\n domain_name=domain_name,\n default_role=default_role))\n mock_unify_user.assert_called_once_with(\n self.service._impl.create_user.return_value)\n self.service._impl.create_user.assert_called_once_with(\n username=name, password=password, project_id=project_id,\n domain_name=domain_name, default_role=default_role, enabled=True)\n\n @mock.patch(\"%s.UnifiedKeystoneV3Service._unify_user\" % PATH)\n def test_create_users(self, mock_unified_keystone_v3_service__unify_user):\n project_id = 
\"project\"\n n = 3\n domain_name = \"Default\"\n\n self.service.create_users(\n project_id, number_of_users=3,\n user_create_args={\"domain_name\": domain_name})\n self.service._impl.create_users.assert_called_once_with(\n project_id=project_id, number_of_users=n,\n user_create_args={\"domain_name\": domain_name})\n\n @mock.patch(\"%s.UnifiedKeystoneV3Service._unify_user\" % PATH)\n def test_list_users(self, mock_unified_keystone_v3_service__unify_user):\n mock_unify_user = mock_unified_keystone_v3_service__unify_user\n\n users = [mock.MagicMock()]\n self.service._impl.list_users.return_value = users\n\n self.assertEqual([mock_unify_user.return_value],\n self.service.list_users())\n mock_unify_user.assert_called_once_with(users[0])\n\n @ddt.data({\"user_id\": \"id\", \"enabled\": False, \"name\": \"Fake\",\n \"email\": \"[email protected]\", \"password\": \"pass\"},\n {\"user_id\": \"id\", \"enabled\": None, \"name\": None,\n \"email\": None, \"password\": None})\n @ddt.unpack\n def test_update_user(self, user_id, enabled, name, email, password):\n self.service.update_user(user_id, enabled=enabled, name=name,\n email=email, password=password)\n self.service._impl.update_user.assert_called_once_with(\n user_id, enabled=enabled, name=name, email=email,\n password=password)\n\n @mock.patch(\"%s.UnifiedKeystoneV3Service._unify_service\" % PATH)\n def test_list_services(self,\n mock_unified_keystone_v3_service__unify_service):\n mock_unify_service = mock_unified_keystone_v3_service__unify_service\n\n services = [mock.MagicMock()]\n self.service._impl.list_services.return_value = services\n\n self.assertEqual([mock_unify_service.return_value],\n self.service.list_services())\n mock_unify_service.assert_called_once_with(services[0])\n\n @mock.patch(\"%s.UnifiedKeystoneV3Service._unify_role\" % PATH)\n def test_create_role(self, mock_unified_keystone_v3_service__unify_role):\n mock_unify_role = mock_unified_keystone_v3_service__unify_role\n name = \"some\"\n domain = 
\"some\"\n\n self.assertEqual(mock_unify_role.return_value,\n self.service.create_role(name, domain_name=domain))\n\n self.service._impl.create_role.assert_called_once_with(\n name, domain_name=domain)\n mock_unify_role.assert_called_once_with(\n self.service._impl.create_role.return_value)\n\n def test_add_role(self):\n role_id = \"fake_id\"\n user_id = \"user_id\"\n project_id = \"user_id\"\n\n self.service.add_role(role_id, user_id=user_id, project_id=project_id)\n\n self.service._impl.add_role.assert_called_once_with(\n user_id=user_id, role_id=role_id, project_id=project_id)\n\n def test_revoke_role(self):\n role_id = \"fake_id\"\n user_id = \"user_id\"\n project_id = \"user_id\"\n\n self.service.revoke_role(role_id, user_id=user_id,\n project_id=project_id)\n\n self.service._impl.revoke_role.assert_called_once_with(\n user_id=user_id, role_id=role_id, project_id=project_id)\n\n @mock.patch(\"%s.UnifiedKeystoneV3Service._unify_role\" % PATH)\n def test_list_roles(self, mock_unified_keystone_v3_service__unify_role):\n mock_unify_role = mock_unified_keystone_v3_service__unify_role\n\n roles = [mock.MagicMock()]\n self.service._impl.list_roles.return_value = roles\n\n self.assertEqual([mock_unify_role.return_value],\n self.service.list_roles())\n mock_unify_role.assert_called_once_with(roles[0])\n\n def test_create_ec2credentials(self):\n user_id = \"id\"\n project_id = \"project-id\"\n\n self.assertEqual(self.service._impl.create_ec2credentials.return_value,\n self.service.create_ec2credentials(\n user_id=user_id, project_id=project_id))\n\n self.service._impl.create_ec2credentials.assert_called_once_with(\n user_id=user_id, project_id=project_id)\n" }, { "alpha_fraction": 0.5702084898948669, "alphanum_fraction": 0.5736468434333801, "avg_line_length": 40.74074172973633, "blob_id": "0fc912cb1f7908aabe6be983286ac688deb66283", "content_id": "4087f0c776d45d89822ca46b0f8f66dd1fc38d06", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 9016, "license_type": "permissive", "max_line_length": 79, "num_lines": 216, "path": "/tests/unit/common/services/gnocchi/test_metric.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.common.services.gnocchi import metric\nfrom tests.unit import test\n\n\nclass GnocchiServiceTestCase(test.TestCase):\n def setUp(self):\n super(GnocchiServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.name_generator = mock.MagicMock()\n self.service = metric.GnocchiService(\n self.clients,\n name_generator=self.name_generator)\n\n def atomic_actions(self):\n return self.service._atomic_actions\n\n def test__create_archive_policy(self):\n definition = [{\"granularity\": \"0:00:01\", \"timespan\": \"1:00:00\"}]\n aggregation_methods = [\n \"std\", \"count\", \"95pct\", \"min\", \"max\", \"sum\", \"median\", \"mean\"]\n archive_policy = {\"name\": \"fake_name\"}\n archive_policy[\"definition\"] = definition\n archive_policy[\"aggregation_methods\"] = aggregation_methods\n\n self.assertEqual(\n self.service.create_archive_policy(\n name=\"fake_name\",\n definition=definition,\n aggregation_methods=aggregation_methods),\n self.service._clients.gnocchi().archive_policy.create(\n archive_policy)\n )\n 
self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.create_archive_policy\")\n\n def test__delete_archive_policy(self):\n self.service.delete_archive_policy(\"fake_name\")\n self.service._clients.gnocchi().archive_policy.delete \\\n .assert_called_once_with(\"fake_name\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.delete_archive_policy\")\n\n def test__list_archive_policy(self):\n self.assertEqual(\n self.service.list_archive_policy(),\n self.service._clients.gnocchi().archive_policy.list.return_value\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.list_archive_policy\")\n\n def test__create_archive_policy_rule(self):\n archive_policy_rule = {\"name\": \"fake_name\"}\n archive_policy_rule[\"metric_pattern\"] = \"cpu_*\"\n archive_policy_rule[\"archive_policy_name\"] = \"low\"\n\n self.assertEqual(\n self.service.create_archive_policy_rule(\n name=\"fake_name\",\n metric_pattern=\"cpu_*\",\n archive_policy_name=\"low\"),\n self.service._clients.gnocchi().archive_policy_rule.create(\n archive_policy_rule)\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.create_archive_policy_rule\")\n\n def test__delete_archive_policy_rule(self):\n self.service.delete_archive_policy_rule(\"fake_name\")\n self.service._clients.gnocchi().archive_policy_rule \\\n .delete.assert_called_once_with(\"fake_name\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.delete_archive_policy_rule\")\n\n def test__list_archive_policy_rule(self):\n self.assertEqual(\n self.service.list_archive_policy_rule(),\n self.service._clients.gnocchi().archive_policy_rule.list\n .return_value\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.list_archive_policy_rule\")\n\n def test__list_capabilities(self):\n self.assertEqual(\n self.service.list_capabilities(),\n self.service._clients.gnocchi().capabilities.list.return_value\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n 
\"gnocchi.list_capabilities\")\n\n def test__get_measures_aggregation(self):\n self.assertEqual(\n self.service.get_measures_aggregation(\n metrics=[1],\n aggregation=\"mean\",\n refresh=False),\n self.service._clients.gnocchi().metric.aggregation(\n [1], \"mean\", False)\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.get_measures_aggregation\")\n\n def test__get_measures(self):\n self.assertEqual(\n self.service.get_measures(\n metric=1,\n aggregation=\"mean\",\n refresh=False),\n self.service._clients.gnocchi().metric.get_measures(\n 1, \"mean\", False)\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.get_measures\")\n\n def test__create_metric(self):\n param = {\"name\": \"fake_name\"}\n param[\"archive_policy_name\"] = \"fake_archive_policy\"\n param[\"unit\"] = \"fake_unit\"\n param[\"resource_id\"] = \"fake_resource_id\"\n self.assertEqual(\n self.service.create_metric(\n name=\"fake_name\",\n archive_policy_name=\"fake_archive_policy\",\n unit=\"fake_unit\",\n resource_id=\"fake_resource_id\"),\n self.service._clients.gnocchi().metric.create(param)\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.create_metric\")\n\n def test__delete_metric(self):\n self.service.delete_metric(\"fake_metric_id\")\n self.service._clients.gnocchi().metric.delete.assert_called_once_with(\n \"fake_metric_id\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.delete_metric\")\n\n def test__list_metric(self):\n self.service.list_metric(limit=0)\n self.assertEqual(\n 1, self.service._clients.gnocchi().metric.list.call_count)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.list_metric\")\n\n def test__create_resource(self):\n resource = {\"id\": \"11111\"}\n self.assertEqual(\n self.service.create_resource(\"fake_type\"),\n self.service._clients.gnocchi().resource.create(\n \"fake_type\", resource)\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n 
\"gnocchi.create_resource\")\n\n def test__delete_resource(self):\n self.service.delete_resource(\"fake_resource_id\")\n self.service._clients.gnocchi().resource.delete \\\n .assert_called_once_with(\"fake_resource_id\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.delete_resource\")\n\n def test__list_resource(self):\n self.assertEqual(\n self.service.list_resource(),\n self.service._clients.gnocchi().resource.list.return_value\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.list_resource\")\n\n def test__create_resource_type(self):\n resource_type = {\"name\": \"fake_name\"}\n self.assertEqual(\n self.service.create_resource_type(\"fake_name\"),\n self.service._clients.gnocchi().resource_type.create(resource_type)\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.create_resource_type\")\n\n def test__delete_resource_type(self):\n self.service.delete_resource_type(\"fake_resource_name\")\n self.service._clients.gnocchi().resource_type.delete \\\n .assert_called_once_with(\"fake_resource_name\")\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.delete_resource_type\")\n\n def test__list_resource_type(self):\n self.assertEqual(\n self.service.list_resource_type(),\n self.service._clients.gnocchi().resource_type.list.return_value\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.list_resource_type\")\n\n def test__get_status(self,):\n self.assertEqual(\n self.service.get_status(),\n self.service._clients.gnocchi().status.get.return_value\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"gnocchi.get_status\")\n" }, { "alpha_fraction": 0.5168496966362, "alphanum_fraction": 0.5189322233200073, "avg_line_length": 35.68055725097656, "blob_id": "5234e165b8c3e52465cbe749b1beeb69cd544a91", "content_id": "c06546487dfd0771847340c667b9cedc1d6a5a34", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 5282, "license_type": "permissive", "max_line_length": 79, "num_lines": 144, "path": "/rally_openstack/task/contexts/sahara/sahara_job_binaries.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport requests\n\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.sahara import utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"sahara_job_binaries\", platform=\"openstack\", order=442)\nclass SaharaJobBinaries(context.OpenStackContext):\n \"\"\"Context class for setting up Job Binaries for an EDP job.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"mains\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"download_url\": {\n \"type\": \"string\"\n }\n },\n \"additionalProperties\": False,\n \"required\": [\"name\", \"download_url\"]\n }\n },\n \"libs\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": 
\"string\"\n },\n \"download_url\": {\n \"type\": \"string\"\n }\n },\n \"additionalProperties\": False,\n \"required\": [\"name\", \"download_url\"]\n }\n }\n },\n \"additionalProperties\": False\n }\n\n # This cache will hold the downloaded libs content to prevent repeated\n # downloads for each tenant\n lib_cache = {}\n\n def setup(self):\n utils.init_sahara_context(self)\n for user, tenant_id in self._iterate_per_tenants():\n\n clients = osclients.Clients(user[\"credential\"])\n sahara = clients.sahara()\n\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"mains\"] = []\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"libs\"] = []\n\n for main in self.config.get(\"mains\", []):\n self.download_and_save_lib(\n sahara=sahara,\n lib_type=\"mains\",\n name=main[\"name\"],\n download_url=main[\"download_url\"],\n tenant_id=tenant_id)\n\n for lib in self.config.get(\"libs\", []):\n self.download_and_save_lib(\n sahara=sahara,\n lib_type=\"libs\",\n name=lib[\"name\"],\n download_url=lib[\"download_url\"],\n tenant_id=tenant_id)\n\n def setup_inputs(self, sahara, tenant_id, input_type, input_url):\n if input_type == \"swift\":\n raise exceptions.RallyException(\n \"Swift Data Sources are not implemented yet\")\n # Todo(nkonovalov): Add swift credentials parameters and data upload\n input_ds = sahara.data_sources.create(\n name=self.generate_random_name(),\n description=\"\",\n data_source_type=input_type,\n url=input_url)\n\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"input\"] = input_ds.id\n\n def download_and_save_lib(self, sahara, lib_type, name, download_url,\n tenant_id):\n if download_url not in self.lib_cache:\n lib_data = requests.get(download_url).content\n self.lib_cache[download_url] = lib_data\n else:\n lib_data = self.lib_cache[download_url]\n\n job_binary_internal = sahara.job_binary_internals.create(\n name=name,\n data=lib_data)\n\n url = \"internal-db://%s\" % job_binary_internal.id\n job_binary = sahara.job_binaries.create(name=name,\n 
url=url,\n description=\"\",\n extra={})\n\n self.context[\"tenants\"][tenant_id][\"sahara\"][lib_type].append(\n job_binary.id)\n\n def cleanup(self):\n resources = [\"job_binary_internals\", \"job_binaries\"]\n\n resource_manager.cleanup(\n names=[\"sahara.%s\" % res for res in resources],\n users=self.context.get(\"users\", []),\n superclass=utils.SaharaScenario,\n task_id=self.context[\"task\"][\"uuid\"])\n" }, { "alpha_fraction": 0.68345707654953, "alphanum_fraction": 0.6871685981750488, "avg_line_length": 39.12765884399414, "blob_id": "0f32259cbbf509cbb5b01a4e735541d190cecb0e", "content_id": "0adbb1c29633deed58e7bb5e0a71ab4ed04627e6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1886, "license_type": "permissive", "max_line_length": 78, "num_lines": 47, "path": "/rally_openstack/task/scenarios/magnum/cluster_templates.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.magnum import utils\n\n\n\"\"\"Scenarios for Magnum cluster_templates.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.MAGNUM])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"magnum\"]},\n name=\"MagnumClusterTemplates.list_cluster_templates\",\n platform=\"openstack\")\nclass ListClusterTemplates(utils.MagnumScenario):\n\n def run(self, **kwargs):\n \"\"\"List all cluster_templates.\n\n Measure the \"magnum cluster_template-list\" command performance.\n\n :param limit: (Optional) The maximum number of results to return\n per request, if:\n\n 1) limit > 0, the maximum number of cluster_templates to return.\n 2) limit param is NOT specified (None), the number of items\n returned respect the maximum imposed by the Magnum API\n (see Magnum's api.max_limit option).\n :param kwargs: optional additional arguments for cluster_templates\n listing\n \"\"\"\n self._list_cluster_templates(**kwargs)\n" }, { "alpha_fraction": 0.5288949608802795, "alphanum_fraction": 0.5357010364532471, "avg_line_length": 36.28258514404297, "blob_id": "c4c37b0ef1583d60a1aeb435b3bbc6fe5dd7bd5c", "content_id": "22892319e976eef7f7f3895ce345f3b462178d47", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32324, "license_type": "permissive", "max_line_length": 78, "num_lines": 867, "path": "/tests/unit/task/scenarios/neutron/test_network.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Intel Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use 
this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.scenarios.neutron import network\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.scenarios.neutron.network\"\n\n\[email protected]\nclass NeutronNetworksTestCase(test.TestCase):\n def setUp(self):\n super(NeutronNetworksTestCase, self).setUp()\n patch = mock.patch(\"rally_openstack.common.osclients.Clients\")\n self.clients = patch.start().return_value\n self.clients.credential.api_info = {}\n self.addCleanup(patch.stop)\n\n self.nc = self.clients.neutron.return_value\n self.context = self.get_test_context()\n\n @staticmethod\n def get_test_context():\n ctx = test.get_test_context()\n ctx.update(\n user_choice_method=\"random\",\n tenants={\"tenant-1\": {}},\n users=[\n {\n \"tenant_id\": \"tenant-1\",\n \"credential\": {}\n }\n ]\n )\n return ctx\n\n @ddt.data(\n {\"network_create_args\": {}},\n {\"network_create_args\": {\"admin_state_up\": False}},\n {\"network_create_args\": {\"provider:network_type\": \"vxlan\"}}\n )\n @ddt.unpack\n def test_create_and_list_networks(self, network_create_args):\n net = {\n \"id\": \"network-id\",\n \"name\": \"network-name\",\n \"admin_state_up\": False\n }\n self.nc.create_network.return_value = {\"network\": net}\n\n scenario = network.CreateAndListNetworks(self.context)\n scenario.run(network_create_args=network_create_args)\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY, **network_create_args}}\n )\n 
self.nc.list_networks.assert_called_once_with()\n\n @ddt.data(\n {\"network_create_args\": {}},\n {\"network_create_args\": {\"admin_state_up\": False}},\n )\n @ddt.unpack\n def test_create_and_show_network(self, network_create_args):\n net = {\n \"id\": \"network-id\",\n \"name\": \"network-name\",\n \"admin_state_up\": False\n }\n self.nc.create_network.return_value = {\"network\": net}\n\n scenario = network.CreateAndShowNetwork(self.context)\n\n scenario.run(network_create_args=network_create_args)\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY, **network_create_args}}\n )\n self.nc.show_network.assert_called_once_with(net[\"id\"])\n\n def test_create_and_update_networks(self):\n net = {\n \"id\": \"network-id\",\n \"name\": \"network-name\",\n \"admin_state_up\": False\n }\n self.nc.create_network.return_value = {\"network\": net}\n\n scenario = network.CreateAndUpdateNetworks(self.context)\n\n network_update_args = {\"admin_state_up\": True}\n\n # Default options\n scenario.run(network_update_args=network_update_args)\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY}}\n )\n self.nc.update_network.assert_called_once_with(\n net[\"id\"], {\"network\": network_update_args}\n )\n\n self.nc.create_network.reset_mock()\n self.nc.update_network.reset_mock()\n\n # admin_state_up is specified\n network_create_args = {\n \"admin_state_up\": False\n }\n\n scenario.run(network_create_args=network_create_args,\n network_update_args=network_update_args)\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY, **network_create_args}}\n )\n self.nc.update_network.assert_called_once_with(\n net[\"id\"], {\"network\": network_update_args}\n )\n\n def test_create_and_delete_networks(self):\n net = {\n \"id\": \"network-id\",\n \"name\": \"network-name\",\n \"admin_state_up\": False\n }\n self.nc.create_network.return_value = {\"network\": net}\n\n scenario = 
network.CreateAndDeleteNetworks(self.context)\n\n # Default options\n network_create_args = {}\n scenario.run(network_create_args=network_create_args)\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY}}\n )\n self.assertTrue(self.nc.delete_network.called)\n\n self.nc.create_network.reset_mock()\n self.nc.delete_network.reset_mock()\n\n # Explicit network name is specified\n network_create_args = {\"admin_state_up\": True}\n scenario.run(network_create_args=network_create_args)\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY, **network_create_args}}\n )\n self.assertTrue(self.nc.delete_network.called)\n\n def test_create_and_list_subnets(self):\n network_create_args = {\"router:external\": True}\n subnet_create_args = {\"allocation_pools\": []}\n subnet_cidr_start = \"10.2.0.0/24\"\n subnets_per_network = 5\n net = mock.MagicMock()\n\n self.nc.create_network.return_value = {\"network\": net}\n self.nc.create_subnet.side_effect = [\n {\"subnet\": {\"id\": i}} for i in range(subnets_per_network)\n ]\n\n scenario = network.CreateAndListSubnets(self.context)\n\n scenario.run(network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnet_cidr_start=subnet_cidr_start,\n subnets_per_network=subnets_per_network)\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY, **network_create_args}}\n )\n self.assertEqual(\n [mock.call({\"subnet\": {\n \"name\": mock.ANY,\n \"network_id\": net[\"id\"],\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"],\n \"ip_version\": 4,\n \"cidr\": mock.ANY,\n **subnet_create_args}}\n )] * subnets_per_network,\n self.nc.create_subnet.call_args_list\n )\n\n self.nc.list_subnets.assert_called_once_with()\n\n def test_create_and_show_subnets(self):\n network_create_args = {\"router:external\": True}\n subnet_create_args = {\"allocation_pools\": []}\n subnet_cidr_start = \"1.1.0.0/30\"\n subnets_per_network = 5\n 
net = mock.MagicMock()\n\n self.nc.create_subnet.side_effect = [\n {\"subnet\": {\"id\": i}} for i in range(subnets_per_network)\n ]\n\n scenario = network.CreateAndShowSubnets(self.context)\n scenario._get_or_create_network = mock.Mock(return_value=net)\n\n scenario.run(network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnet_cidr_start=subnet_cidr_start,\n subnets_per_network=subnets_per_network)\n\n scenario._get_or_create_network.assert_called_once_with(\n **network_create_args)\n self.assertEqual(\n [mock.call({\"subnet\": {\n \"name\": mock.ANY,\n \"network_id\": net[\"id\"],\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"],\n \"ip_version\": 4,\n \"cidr\": mock.ANY,\n **subnet_create_args}}\n )] * subnets_per_network,\n self.nc.create_subnet.call_args_list\n )\n self.assertEqual(\n [mock.call(i) for i in range(subnets_per_network)],\n self.nc.show_subnet.call_args_list\n )\n\n def test_set_and_clear_router_gateway(self):\n network_create_args = {\"router:external\": True}\n router_create_args = {\"admin_state_up\": True}\n enable_snat = True\n ext_net = {\"id\": \"ext-net-1\"}\n router = {\"id\": \"router-id\"}\n\n self.nc.create_network.return_value = {\"network\": ext_net}\n self.nc.create_router.return_value = {\"router\": router}\n self.nc.list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"ext-gw-mode\"}]\n }\n\n network.SetAndClearRouterGateway(self.context).run(\n enable_snat, network_create_args, router_create_args\n )\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY, **network_create_args}}\n )\n\n self.nc.create_router.assert_called_once_with(\n {\"router\": {\"name\": mock.ANY, **router_create_args}}\n )\n\n self.nc.add_gateway_router.assert_called_once_with(\n router[\"id\"], {\"network_id\": ext_net[\"id\"],\n \"enable_snat\": enable_snat}\n )\n self.nc.remove_gateway_router.assert_called_once_with(router[\"id\"])\n\n def 
test_create_and_update_subnets(self):\n network_create_args = {\"router:external\": True}\n subnet_create_args = {\"allocation_pools\": []}\n subnet_update_args = {\"enable_dhcp\": True}\n subnet_cidr_start = \"1.1.0.0/30\"\n subnets_per_network = 5\n net = mock.MagicMock()\n\n self.nc.create_network.return_value = {\"network\": net}\n self.nc.create_subnet.side_effect = [\n {\"subnet\": {\"id\": i}} for i in range(subnets_per_network)\n ]\n\n scenario = network.CreateAndUpdateSubnets(self.context)\n\n scenario.run(subnet_update_args,\n network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnet_cidr_start=subnet_cidr_start,\n subnets_per_network=subnets_per_network)\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY, **network_create_args}}\n )\n self.assertEqual(\n [mock.call({\"subnet\": {\n \"name\": mock.ANY,\n \"network_id\": net[\"id\"],\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"],\n \"ip_version\": 4,\n \"cidr\": mock.ANY,\n **subnet_create_args}}\n )] * subnets_per_network,\n self.nc.create_subnet.call_args_list\n )\n self.assertEqual(\n [mock.call(s, {\"subnet\": subnet_update_args})\n for s in range(subnets_per_network)],\n self.nc.update_subnet.call_args_list\n )\n\n def test_create_and_delete_subnets(self):\n network_create_args = {\"router:external\": True}\n subnet_create_args = {\"allocation_pools\": []}\n subnet_cidr_start = \"1.1.0.0/30\"\n subnets_per_network = 5\n net = mock.MagicMock()\n\n self.nc.create_subnet.side_effect = [\n {\"subnet\": {\"id\": i}} for i in range(subnets_per_network)\n ]\n\n scenario = network.CreateAndDeleteSubnets(self.context)\n scenario._get_or_create_network = mock.Mock(return_value=net)\n\n scenario.run(network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnet_cidr_start=subnet_cidr_start,\n subnets_per_network=subnets_per_network)\n\n scenario._get_or_create_network.assert_called_once_with(\n 
**network_create_args)\n self.assertEqual(\n [mock.call({\"subnet\": {\n \"name\": mock.ANY,\n \"network_id\": net[\"id\"],\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"],\n \"ip_version\": 4,\n \"cidr\": mock.ANY,\n **subnet_create_args}}\n )] * subnets_per_network,\n self.nc.create_subnet.call_args_list\n )\n self.assertEqual(\n [mock.call(s) for s in range(subnets_per_network)],\n self.nc.delete_subnet.call_args_list\n )\n\n def test_create_and_list_routers(self):\n network_create_args = {\"router:external\": True}\n subnet_create_args = {\"allocation_pools\": []}\n subnet_cidr_start = \"1.1.0.0/30\"\n subnets_per_network = 5\n router_create_args = {\"admin_state_up\": True}\n net = {\"id\": \"foo\"}\n self.nc.create_network.return_value = {\"network\": net}\n\n scenario = network.CreateAndListRouters(self.context)\n\n scenario.run(network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnet_cidr_start=subnet_cidr_start,\n subnets_per_network=subnets_per_network,\n router_create_args=router_create_args)\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY, **network_create_args}}\n )\n self.assertEqual(\n [mock.call({\"subnet\": {\n \"name\": mock.ANY,\n \"network_id\": net[\"id\"],\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"],\n \"ip_version\": 4,\n \"cidr\": mock.ANY,\n **subnet_create_args}}\n )] * subnets_per_network,\n self.nc.create_subnet.call_args_list\n )\n self.assertEqual(\n [mock.call({\"router\": {\n \"name\": mock.ANY,\n **router_create_args}}\n )] * subnets_per_network,\n self.nc.create_router.call_args_list\n )\n self.nc.list_routers.assert_called_once_with()\n\n def test_create_and_update_routers(self):\n router_update_args = {\"admin_state_up\": False}\n network_create_args = {\"router:external\": True}\n subnet_create_args = {\"allocation_pools\": []}\n subnet_cidr_start = \"1.1.0.0/30\"\n subnets_per_network = 5\n router_create_args = {\"admin_state_up\": True}\n net = 
{\"id\": \"foo\"}\n self.nc.create_network.return_value = {\"network\": net}\n self.nc.create_subnet.side_effect = [\n {\"subnet\": {\"id\": i}} for i in range(subnets_per_network)\n ]\n self.nc.create_router.side_effect = [\n {\"router\": {\"id\": i}} for i in range(subnets_per_network)\n ]\n\n scenario = network.CreateAndUpdateRouters(self.context)\n\n scenario.run(router_update_args,\n network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnet_cidr_start=subnet_cidr_start,\n subnets_per_network=subnets_per_network,\n router_create_args=router_create_args)\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY, **network_create_args}}\n )\n self.assertEqual(\n [mock.call({\"subnet\": {\n \"name\": mock.ANY,\n \"network_id\": net[\"id\"],\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"],\n \"ip_version\": 4,\n \"cidr\": mock.ANY,\n **subnet_create_args}}\n )] * subnets_per_network,\n self.nc.create_subnet.call_args_list\n )\n self.assertEqual(\n [mock.call({\"router\": {\n \"name\": mock.ANY,\n **router_create_args}}\n )] * subnets_per_network,\n self.nc.create_router.call_args_list\n )\n self.assertEqual(\n [mock.call(i, {\"router\": router_update_args})\n for i in range(subnets_per_network)],\n self.nc.update_router.call_args_list\n )\n\n def test_create_and_delete_routers(self):\n network_create_args = {\"router:external\": True}\n subnet_create_args = {\"allocation_pools\": []}\n subnet_cidr_start = \"1.1.0.0/30\"\n subnets_per_network = 5\n router_create_args = {\"admin_state_up\": True}\n net = {\"id\": \"foo\"}\n self.nc.create_network.return_value = {\"network\": net}\n self.nc.create_subnet.side_effect = [\n {\"subnet\": {\"id\": f\"s-{i}\"}} for i in range(subnets_per_network)\n ]\n self.nc.create_router.side_effect = [\n {\"router\": {\"id\": f\"r-{i}\"}} for i in range(subnets_per_network)\n ]\n\n scenario = network.CreateAndDeleteRouters(self.context)\n\n 
scenario.run(network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnet_cidr_start=subnet_cidr_start,\n subnets_per_network=subnets_per_network,\n router_create_args=router_create_args)\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY, **network_create_args}}\n )\n self.assertEqual(\n [mock.call({\"subnet\": {\n \"name\": mock.ANY,\n \"network_id\": net[\"id\"],\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"],\n \"ip_version\": 4,\n \"cidr\": mock.ANY,\n **subnet_create_args}}\n )] * subnets_per_network,\n self.nc.create_subnet.call_args_list\n )\n self.assertEqual(\n [mock.call({\"router\": {\n \"name\": mock.ANY,\n **router_create_args}}\n )] * subnets_per_network,\n self.nc.create_router.call_args_list\n )\n self.assertEqual(\n [mock.call(f\"r-{i}\", {\"subnet_id\": f\"s-{i}\"})\n for i in range(subnets_per_network)],\n self.nc.remove_interface_router.call_args_list\n )\n self.assertEqual(\n [mock.call(f\"r-{i}\") for i in range(subnets_per_network)],\n self.nc.delete_router.call_args_list\n )\n\n def test_create_and_show_routers(self):\n network_create_args = {\"router:external\": True}\n subnet_create_args = {\"allocation_pools\": []}\n subnet_cidr_start = \"1.1.0.0/30\"\n subnets_per_network = 5\n router_create_args = {\"admin_state_up\": True}\n net = {\"id\": \"foo\"}\n self.nc.create_network.return_value = {\"network\": net}\n self.nc.create_subnet.side_effect = [\n {\"subnet\": {\"id\": i}} for i in range(subnets_per_network)\n ]\n self.nc.create_router.side_effect = [\n {\"router\": {\"id\": i}} for i in range(subnets_per_network)\n ]\n\n scenario = network.CreateAndShowRouters(self.context)\n\n scenario.run(network_create_args=network_create_args,\n subnet_create_args=subnet_create_args,\n subnet_cidr_start=subnet_cidr_start,\n subnets_per_network=subnets_per_network,\n router_create_args=router_create_args)\n\n self.nc.create_network.assert_called_once_with(\n {\"network\": 
{\"name\": mock.ANY, **network_create_args}}\n )\n self.assertEqual(\n [mock.call({\"subnet\": {\n \"name\": mock.ANY,\n \"network_id\": net[\"id\"],\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"],\n \"ip_version\": 4,\n \"cidr\": mock.ANY,\n **subnet_create_args}}\n )] * subnets_per_network,\n self.nc.create_subnet.call_args_list\n )\n self.assertEqual(\n [mock.call({\"router\": {\n \"name\": mock.ANY,\n **router_create_args}}\n )] * subnets_per_network,\n self.nc.create_router.call_args_list\n )\n self.assertEqual(\n [mock.call(i) for i in range(subnets_per_network)],\n self.nc.show_router.call_args_list\n )\n\n def test_list_agents(self):\n agent_args = {\n \"F\": \"id\",\n \"sort-dir\": \"asc\"\n }\n scenario = network.ListAgents(self.context)\n\n scenario.run(agent_args=agent_args)\n self.nc.list_agents.assert_called_once_with(**agent_args)\n\n def test_create_and_list_ports(self):\n port_create_args = {\"allocation_pools\": []}\n ports_per_network = 10\n network_create_args = {\"router:external\": True}\n net = mock.MagicMock()\n\n scenario = network.CreateAndListPorts(self.context)\n scenario._get_or_create_network = mock.Mock(return_value=net)\n\n scenario.run(network_create_args=network_create_args,\n port_create_args=port_create_args,\n ports_per_network=ports_per_network)\n scenario._get_or_create_network.assert_called_once_with(\n **network_create_args)\n self.assertEqual(\n [\n mock.call({\n \"port\": {\n \"network_id\": net[\"id\"],\n \"name\": mock.ANY,\n **port_create_args\n }\n }) for _ in range(ports_per_network)\n ],\n self.nc.create_port.call_args_list\n )\n\n self.nc.list_ports.assert_called_once_with()\n\n def test_create_and_update_ports(self):\n port_update_args = {\"admin_state_up\": False}\n port_create_args = {\"allocation_pools\": []}\n ports_per_network = 10\n network_create_args = {\"router:external\": True}\n net = mock.MagicMock()\n self.nc.create_port.side_effect = [\n {\"port\": {\"id\": f\"p-{i}\"}}\n for i in 
range(ports_per_network)\n ]\n\n scenario = network.CreateAndUpdatePorts(self.context)\n scenario._get_or_create_network = mock.Mock(return_value=net)\n\n scenario.run(port_update_args,\n network_create_args=network_create_args,\n port_create_args=port_create_args,\n ports_per_network=ports_per_network)\n\n scenario._get_or_create_network.assert_called_once_with(\n **network_create_args)\n self.assertEqual(\n [mock.call({\"port\": {\n \"network_id\": net[\"id\"],\n \"name\": mock.ANY,\n **port_create_args}}\n )] * ports_per_network,\n self.nc.create_port.call_args_list\n )\n self.assertEqual(\n [mock.call(f\"p-{i}\", {\"port\": port_update_args})\n for i in range(ports_per_network)],\n self.nc.update_port.call_args_list\n )\n\n def test_create_and_bind_ports(self):\n ports_per_network = 2\n port_update_args = {\n \"device_owner\": \"compute:nova\",\n \"device_id\": \"ba805478-85ff-11e9-a2e4-2b8dea218fc8\",\n \"binding:host_id\": \"fake-host\",\n }\n net = {\"id\": \"net-id\"}\n self.context.update({\n \"tenants\": {\n \"tenant-1\": {\n \"id\": \"tenant-1\",\n \"networks\": [\n net\n ],\n },\n },\n \"networking_agents\": [{\n \"host\": \"fake-host\",\n \"alive\": True,\n \"admin_state_up\": True,\n \"agent_type\": \"Open vSwitch agent\",\n }],\n })\n scenario = network.CreateAndBindPorts(self.context)\n scenario.admin_neutron = mock.MagicMock()\n\n self.nc.create_port.side_effect = [\n {\"port\": {\"id\": f\"p-{i}\"}}\n for i in range(ports_per_network)\n ]\n\n scenario.run(ports_per_network=ports_per_network)\n\n self.assertEqual(\n [mock.call({\"port\": {\n \"network_id\": net[\"id\"],\n \"name\": mock.ANY}}\n )] * ports_per_network,\n self.nc.create_port.call_args_list\n )\n self.assertEqual(\n [mock.call(port_id=f\"p-{i}\", **port_update_args)\n for i in range(ports_per_network)],\n scenario.admin_neutron.update_port.call_args_list\n )\n\n def test_create_and_show_ports(self):\n port_create_args = {\"allocation_pools\": []}\n ports_per_network = 1\n 
network_create_args = {\"router:external\": True}\n net = mock.MagicMock()\n\n scenario = network.CreateAndShowPorts(self.context)\n scenario._get_or_create_network = mock.MagicMock(return_value=net)\n port = {\"id\": 1, \"name\": \"f\"}\n self.nc.create_port.return_value = {\"port\": port}\n\n scenario.run(network_create_args=network_create_args,\n port_create_args=port_create_args,\n ports_per_network=ports_per_network)\n scenario._get_or_create_network.assert_called_once_with(\n **network_create_args)\n self.nc.create_port.assert_called_with({\"port\": {\n \"network_id\": net[\"id\"], \"name\": mock.ANY, **port_create_args\n }})\n\n self.nc.show_port.assert_called_with(port[\"id\"])\n\n def test_create_and_delete_ports(self):\n port_create_args = {\"allocation_pools\": []}\n ports_per_network = 10\n network_create_args = {\"router:external\": True}\n net = mock.MagicMock()\n self.nc.create_port.side_effect = [\n {\"port\": {\"id\": f\"p-{i}\"}}\n for i in range(ports_per_network)\n ]\n\n scenario = network.CreateAndDeletePorts(self.context)\n scenario._get_or_create_network = mock.Mock(return_value=net)\n\n scenario.run(network_create_args=network_create_args,\n port_create_args=port_create_args,\n ports_per_network=ports_per_network)\n\n scenario._get_or_create_network.assert_called_once_with(\n **network_create_args)\n\n self.assertEqual(\n [mock.call({\"port\": {\n \"network_id\": net[\"id\"],\n \"name\": mock.ANY,\n **port_create_args}}\n )] * ports_per_network,\n self.nc.create_port.call_args_list\n )\n self.assertEqual(\n [mock.call(f\"p-{i}\") for i in range(ports_per_network)],\n self.nc.delete_port.call_args_list\n )\n\n @ddt.data(\n {},\n {\"floating_ip_args\": {\"floating_ip_address\": \"1.1.1.1\"}},\n )\n @ddt.unpack\n def test_create_and_list_floating_ips(self, floating_ip_args=None):\n floating_ip_args = floating_ip_args or {}\n floating_network = {\"id\": \"ext-net\"}\n\n scenario = network.CreateAndListFloatingIps(self.context)\n\n 
self.nc.create_floatingip.return_value = {\"floatingip\": mock.Mock()}\n self.nc.list_floatingips.return_value = {\"floatingips\": mock.Mock()}\n scenario.run(floating_network=floating_network,\n floating_ip_args=floating_ip_args)\n self.nc.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": floating_network[\"id\"],\n **floating_ip_args}})\n self.nc.list_floatingips.assert_called_once_with()\n\n @ddt.data(\n {},\n {\"floating_ip_args\": {\"floating_ip_address\": \"1.1.1.1\"}},\n )\n @ddt.unpack\n def test_create_and_delete_floating_ips(self, floating_ip_args=None):\n floating_network = {\"id\": \"ext-net\"}\n floating_ip_args = floating_ip_args or {}\n floatingip = {\"id\": \"floating-ip-id\"}\n\n self.nc.create_floatingip.return_value = {\"floatingip\": floatingip}\n\n scenario = network.CreateAndDeleteFloatingIps(self.context)\n\n scenario.run(floating_network=floating_network,\n floating_ip_args=floating_ip_args)\n self.nc.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": floating_network[\"id\"],\n **floating_ip_args}})\n self.nc.delete_floatingip.assert_called_once_with(floatingip[\"id\"])\n\n def test_associate_and_dissociate_floating_ips(self):\n floating_network = {\n \"id\": \"floating-net-id\",\n \"name\": \"public\",\n \"router:external\": True\n }\n floatingip = {\"id\": \"floating-ip-id\"}\n net = {\"id\": \"net-id\"}\n subnet = {\"id\": \"subnet-id\"}\n port = {\"id\": \"port-id\"}\n router = {\"id\": \"router-id\"}\n\n self.nc.create_floatingip.return_value = {\"floatingip\": floatingip}\n self.nc.create_network.return_value = {\"network\": net}\n self.nc.create_subnet.return_value = {\"subnet\": subnet}\n self.nc.create_port.return_value = {\"port\": port}\n self.nc.create_router.return_value = {\"router\": router}\n self.nc.list_networks.return_value = {\"networks\": [floating_network]}\n\n 
network.AssociateAndDissociateFloatingIps(self.context).run(\n floating_network=floating_network[\"name\"])\n\n self.nc.create_floatingip.assert_called_once_with(\n {\"floatingip\": {\"description\": mock.ANY,\n \"floating_network_id\": floating_network[\"id\"]}})\n self.nc.create_network.assert_called_once_with(\n {\"network\": {\"name\": mock.ANY}}\n )\n self.nc.create_subnet.assert_called_once_with(\n {\"subnet\": {\n \"name\": mock.ANY,\n \"network_id\": net[\"id\"],\n \"dns_nameservers\": [\"8.8.8.8\", \"8.8.4.4\"],\n \"ip_version\": 4,\n \"cidr\": mock.ANY\n }}\n )\n self.nc.create_port.assert_called_once_with(\n {\"port\": {\"name\": mock.ANY,\n \"network_id\": net[\"id\"]}}\n )\n self.nc.add_gateway_router.assert_called_once_with(\n router[\"id\"], {\"network_id\": floating_network[\"id\"]}\n )\n self.nc.add_interface_router.assert_called_once_with(\n router[\"id\"], {\"subnet_id\": subnet[\"id\"]}\n )\n\n self.assertEqual(\n [\n mock.call(\n floatingip[\"id\"],\n {\"floatingip\": {\"port_id\": port[\"id\"]}}\n ),\n mock.call(\n floatingip[\"id\"],\n {\"floatingip\": {\"port_id\": None}}\n )\n ],\n self.nc.update_floatingip.call_args_list\n )\n\n def test_delete_subnets(self):\n # do not guess what user will be used\n self.context[\"user_choice_method\"] = \"round_robin\"\n # if it is the 4th iteration, the second user from the second tenant\n # should be taken, which means that the second subnets from each\n # tenant network should be removed.\n self.context[\"iteration\"] = 4\n # in case of `round_robin` the user will be selected from the list of\n # available users of particular tenant, not from the list of all\n # tenants (i.e random choice). 
BUT to trigger selecting user and\n # tenant `users` key should present in context dict\n self.context[\"users\"] = []\n\n self.context[\"tenants\"] = {\n # this should not be used\n \"uuid-1\": {\n \"id\": \"uuid-1\",\n \"networks\": [{\"subnets\": [\"subnet-1\"]}],\n \"users\": [{\"id\": \"user-1\", \"credential\": mock.MagicMock()},\n {\"id\": \"user-2\", \"credential\": mock.MagicMock()}]\n },\n # this is expected user\n \"uuid-2\": {\n \"id\": \"uuid-2\",\n \"networks\": [\n {\"subnets\": [\"subnet-2\", \"subnet-3\"]},\n {\"subnets\": [\"subnet-4\", \"subnet-5\"]}],\n \"users\": [{\"id\": \"user-3\", \"credential\": mock.MagicMock()},\n {\"id\": \"user-4\", \"credential\": mock.MagicMock()}]\n }\n }\n\n scenario = network.DeleteSubnets(self.context)\n self.assertEqual(\"user-4\", scenario.context[\"user\"][\"id\"],\n \"Unexpected user is taken. The wrong subnets can be \"\n \"affected(removed).\")\n\n scenario.run()\n\n self.assertEqual(\n [\n mock.call(\"subnet-3\"),\n mock.call(\"subnet-5\")\n ],\n self.nc.delete_subnet.call_args_list)\n" }, { "alpha_fraction": 0.6197556853294373, "alphanum_fraction": 0.6225880980491638, "avg_line_length": 37.69178009033203, "blob_id": "5f08018ccf21c0c91c72c515ec5663be6e728fcb", "content_id": "0cf486443e083745d927f53c62ae084f612f4030", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5649, "license_type": "permissive", "max_line_length": 79, "num_lines": 146, "path": "/rally_openstack/task/scenarios/senlin/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally import exceptions\nfrom rally.task import atomic\nfrom rally.task import utils\n\nfrom rally_openstack.task import scenario\n\n\nCONF = cfg.CONF\n\n\nclass SenlinScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Senlin scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"senlin.list_clusters\")\n def _list_clusters(self, **queries):\n \"\"\"Return user cluster list.\n\n :param kwargs **queries: Optional query parameters to be sent to\n restrict the clusters to be returned. Available parameters include:\n\n * name: The name of a cluster.\n * status: The current status of a cluster.\n * sort: A list of sorting keys separated by commas. Each sorting\n key can optionally be attached with a sorting direction\n modifier which can be ``asc`` or ``desc``.\n * limit: Requests a specified size of returned items from the\n query. Returns a number of items up to the specified limit\n value.\n * marker: Specifies the ID of the last-seen item. 
Use the limit\n parameter to make an initial limited request and use the ID of\n the last-seen item from the response as the marker parameter\n value in a subsequent limited request.\n * global_project: A boolean value indicating whether clusters\n from all projects will be returned.\n\n :returns: list of clusters according to query.\n \"\"\"\n return list(self.admin_clients(\"senlin\").clusters(**queries))\n\n @atomic.action_timer(\"senlin.create_cluster\")\n def _create_cluster(self, profile_id, desired_capacity=0, min_size=0,\n max_size=-1, timeout=60, metadata=None):\n \"\"\"Create a new cluster from attributes.\n\n :param profile_id: ID of profile used to create cluster\n :param desired_capacity: The capacity or initial number of nodes\n owned by the cluster\n :param min_size: The minimum number of nodes owned by the cluster\n :param max_size: The maximum number of nodes owned by the cluster.\n -1 means no limit\n :param timeout: The timeout value in minutes for cluster creation\n :param metadata: A set of key value pairs to associate with the cluster\n\n :returns: object of cluster created.\n \"\"\"\n attrs = {\n \"profile_id\": profile_id,\n \"name\": self.generate_random_name(),\n \"desired_capacity\": desired_capacity,\n \"min_size\": min_size,\n \"max_size\": max_size,\n \"metadata\": metadata,\n \"timeout\": timeout\n }\n\n cluster = self.admin_clients(\"senlin\").create_cluster(**attrs)\n cluster = utils.wait_for_status(\n cluster,\n ready_statuses=[\"ACTIVE\"],\n failure_statuses=[\"ERROR\"],\n update_resource=self._get_cluster,\n timeout=CONF.openstack.senlin_action_timeout)\n\n return cluster\n\n def _get_cluster(self, cluster):\n \"\"\"Get cluster details.\n\n :param cluster: cluster to get\n\n :returns: object of cluster\n \"\"\"\n try:\n return self.admin_clients(\"senlin\").get_cluster(cluster.id)\n except Exception as e:\n if getattr(e, \"code\", getattr(e, \"http_status\", 400)) == 404:\n raise 
exceptions.GetResourceNotFound(resource=cluster.id)\n raise exceptions.GetResourceFailure(resource=cluster.id, err=e)\n\n @atomic.action_timer(\"senlin.delete_cluster\")\n def _delete_cluster(self, cluster):\n \"\"\"Delete given cluster.\n\n Returns after the cluster is successfully deleted.\n\n :param cluster: cluster object to delete\n \"\"\"\n self.admin_clients(\"senlin\").delete_cluster(cluster)\n utils.wait_for_status(\n cluster,\n ready_statuses=[\"DELETED\"],\n failure_statuses=[\"ERROR\"],\n check_deletion=True,\n update_resource=self._get_cluster,\n timeout=CONF.openstack.senlin_action_timeout)\n\n @atomic.action_timer(\"senlin.create_profile\")\n def _create_profile(self, spec, metadata=None):\n \"\"\"Create a new profile from attributes.\n\n :param spec: spec dictionary used to create profile\n :param metadata: A set of key value pairs to associate with the\n profile\n\n :returns: object of profile created\n \"\"\"\n attrs = {}\n attrs[\"spec\"] = spec\n attrs[\"name\"] = self.generate_random_name()\n if metadata:\n attrs[\"metadata\"] = metadata\n\n return self.clients(\"senlin\").create_profile(**attrs)\n\n @atomic.action_timer(\"senlin.delete_profile\")\n def _delete_profile(self, profile):\n \"\"\"Delete given profile.\n\n Returns after the profile is successfully deleted.\n\n :param profile: profile object to be deleted\n \"\"\"\n self.clients(\"senlin\").delete_profile(profile)\n" }, { "alpha_fraction": 0.6979695558547974, "alphanum_fraction": 0.7013536095619202, "avg_line_length": 27.829267501831055, "blob_id": "890c0b01371ad9d61f24111371d8e94bbce21ce2", "content_id": "b08948b1ee3fbdd8257d87d44615a851be629054", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1182, "license_type": "permissive", "max_line_length": 172, "num_lines": 41, "path": "/samples/deployments/README.rst", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "Rally 
Deployments\n=================\n\nRally needs to have information about OpenStack Cloud before you actually\ncan run any tests against it.\n\nYou need create a deployment input file and run use command bellow:\n\n.. code-block::\n\n rally deployment create --file <one_of_files_from_this_dir> --name my_cloud\n\nBelow you can find samples of supported configurations.\n\nexisting.json\n-------------\n\nRegister existing OpenStack cluster.\n\nexisting-keystone-v3.json\n-------------------------\n\nRegister existing OpenStack cluster that uses Keystone v3.\n\nexisting-with-predefined-users.json\n------------------------------------\n\nIf you are using read-only backend in Keystone like LDAP, AD then\nyou need this sample. If you don't specify \"users\" rally will use already\nexisting users that you provide.\n\nexisting-keystone-v3-user.json\n------------------------------\n\nRegister an exisitng OpenStack cluster that uses Keystone v3 and a non-privileged user. The use of an admin account is optional because most tests can use a normal account.\n\nexisting-api.json\n-----------------\n\nIf you expect to specify version of some clients, you could register existing\nOpenstack cluster like this sample.\n" }, { "alpha_fraction": 0.5823429822921753, "alphanum_fraction": 0.5867289304733276, "avg_line_length": 34.164180755615234, "blob_id": "4efaec0a158a6851d939d35a102fc1f679dce6cc", "content_id": "0dd41cc766f5102c33eabb7f57062ad9c01d46a1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7068, "license_type": "permissive", "max_line_length": 78, "num_lines": 201, "path": "/tests/unit/task/contexts/vm/test_custom_image.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally.task import context\n\nfrom rally_openstack.task.contexts.vm import custom_image\nfrom tests.unit import test\n\n\nBASE = \"rally_openstack.task.contexts.vm.custom_image\"\n\n\[email protected](name=\"test_custom_image\", order=500)\nclass FakeImageGenerator(custom_image.BaseCustomImageGenerator):\n def _customize_image(self, *args):\n pass\n\n\nclass BaseCustomImageContextVMTestCase(test.TestCase):\n\n def setUp(self):\n super(BaseCustomImageContextVMTestCase, self).setUp()\n\n self.context = test.get_test_context()\n self.context.update({\n \"config\": {\n \"test_custom_image\": {\n \"image\": {\"name\": \"image\"},\n \"flavor\": {\"name\": \"flavor\"},\n \"username\": \"fedora\",\n \"floating_network\": \"floating\",\n \"port\": 1022,\n }\n },\n \"admin\": {\n \"credential\": mock.Mock(),\n },\n \"users\": [\n {\"tenant_id\": \"tenant_id0\"},\n {\"tenant_id\": \"tenant_id1\"},\n {\"tenant_id\": \"tenant_id2\"}\n ],\n \"tenants\": {\n \"tenant_id0\": {},\n \"tenant_id1\": {},\n \"tenant_id2\": {}\n }\n })\n\n @mock.patch(\"%s.osclients.Clients\" % BASE)\n @mock.patch(\"%s.types.GlanceImage\" % BASE)\n @mock.patch(\"%s.types.Flavor\" % BASE)\n @mock.patch(\"%s.vmtasks.BootRuncommandDelete\" % BASE)\n def test_create_one_image(\n self, mock_boot_runcommand_delete, mock_flavor,\n mock_glance_image, mock_clients):\n mock_flavor.return_value.pre_process.return_value = \"flavor\"\n mock_glance_image.return_value.pre_process.return_value = \"image\"\n ip = {\"ip\": \"foo_ip\", \"id\": \"foo_id\", \"is_floating\": 
True}\n fake_server = mock.Mock()\n\n fake_image = {\"id\": \"image\"}\n\n scenario = mock_boot_runcommand_delete.return_value = mock.MagicMock(\n _create_image=mock.MagicMock(return_value=fake_image),\n _boot_server_with_fip=mock.MagicMock(\n return_value=(fake_server, ip))\n )\n generator_ctx = FakeImageGenerator(self.context)\n generator_ctx._customize_image = mock.MagicMock()\n\n user = {\n \"credential\": \"credential\",\n \"keypair\": {\"name\": \"keypair_name\"},\n \"secgroup\": {\"name\": \"secgroup_name\"}\n }\n\n custom_image = generator_ctx.create_one_image(user,\n foo_arg=\"foo_value\")\n self.assertEqual({\"id\": \"image\"}, custom_image)\n\n mock_flavor.assert_called_once_with(self.context)\n mock_flavor.return_value.pre_process.assert_called_once_with(\n resource_spec={\"name\": \"flavor\"}, config={})\n mock_glance_image.assert_called_once_with(self.context)\n mock_glance_image.return_value.pre_process.assert_called_once_with(\n resource_spec={\"name\": \"image\"}, config={})\n mock_boot_runcommand_delete.assert_called_once_with(\n self.context, clients=mock_clients.return_value)\n\n scenario._boot_server_with_fip.assert_called_once_with(\n image=\"image\", flavor=\"flavor\",\n floating_network=\"floating\",\n key_name=\"keypair_name\", security_groups=[\"secgroup_name\"],\n userdata=None, foo_arg=\"foo_value\")\n\n scenario._stop_server.assert_called_once_with(fake_server)\n\n generator_ctx._customize_image.assert_called_once_with(\n fake_server, ip, user)\n\n scenario._create_image.assert_called_once_with(fake_server)\n\n scenario._delete_server_with_fip.assert_called_once_with(\n fake_server, ip)\n\n @mock.patch(\"%s.image.Image\" % BASE)\n def test_delete_one_image(self, mock_image):\n generator_ctx = FakeImageGenerator(self.context)\n\n credential = mock.Mock()\n user = {\"credential\": credential,\n \"keypair\": {\"name\": \"keypair_name\"}}\n custom_image = mock.Mock(id=\"image\")\n\n generator_ctx.delete_one_image(user, custom_image)\n\n 
mock_image.return_value.delete_image.assert_called_once_with(\"image\")\n\n @mock.patch(\"%s.image.Image\" % BASE)\n def test_setup_admin(self, mock_image):\n self.context[\"tenants\"][\"tenant_id0\"][\"networks\"] = [\n {\"id\": \"network_id\"}]\n\n generator_ctx = FakeImageGenerator(self.context)\n\n image = mock.Mock(id=\"custom_image\")\n\n generator_ctx.create_one_image = mock.Mock(return_value=image)\n\n generator_ctx.setup()\n\n mock_image.return_value.set_visibility.assert_called_once_with(\n image.id)\n\n generator_ctx.create_one_image.assert_called_once_with(\n self.context[\"users\"][0], nics=[{\"net-id\": \"network_id\"}])\n\n def test_cleanup_admin(self):\n tenant = self.context[\"tenants\"][\"tenant_id0\"]\n custom_image = tenant[\"custom_image\"] = {\"id\": \"image\"}\n\n generator_ctx = FakeImageGenerator(self.context)\n\n generator_ctx.delete_one_image = mock.Mock()\n\n generator_ctx.cleanup()\n\n generator_ctx.delete_one_image.assert_called_once_with(\n self.context[\"users\"][0], custom_image)\n\n def test_setup(self):\n self.context.pop(\"admin\")\n\n generator_ctx = FakeImageGenerator(self.context)\n\n generator_ctx.create_one_image = mock.Mock(\n side_effect=[\"custom_image0\", \"custom_image1\", \"custom_image2\"])\n\n generator_ctx.setup()\n\n self.assertEqual(\n [mock.call(user) for user in self.context[\"users\"]],\n generator_ctx.create_one_image.mock_calls)\n\n for i in range(3):\n self.assertEqual(\n \"custom_image%d\" % i,\n self.context[\"tenants\"][\"tenant_id%d\" % i][\"custom_image\"]\n )\n\n def test_cleanup(self):\n self.context.pop(\"admin\")\n\n for i in range(3):\n self.context[\"tenants\"][\"tenant_id%d\" % i][\"custom_image\"] = {\n \"id\": \"custom_image%d\" % i}\n\n generator_ctx = FakeImageGenerator(self.context)\n generator_ctx.delete_one_image = mock.Mock()\n\n generator_ctx.cleanup()\n\n self.assertEqual(\n [mock.call(self.context[\"users\"][i],\n {\"id\": \"custom_image%d\" % i}) for i in range(3)],\n 
generator_ctx.delete_one_image.mock_calls)\n" }, { "alpha_fraction": 0.5767054557800293, "alphanum_fraction": 0.5781201124191284, "avg_line_length": 40.045162200927734, "blob_id": "64b2c0fc0fb4a100ccd48ad79749b660434c0134", "content_id": "b3d4b43e19c2ec1e6e7f51e781c8819c91ec295a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6362, "license_type": "permissive", "max_line_length": 79, "num_lines": 155, "path": "/tests/unit/task/scenarios/swift/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Cisco Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.scenarios.swift import utils\nfrom tests.unit import test\n\nSWIFT_UTILS = \"rally_openstack.task.scenarios.swift.utils\"\n\n\[email protected]\nclass SwiftScenarioTestCase(test.ScenarioTestCase):\n\n def test__list_containers(self):\n headers_dict = mock.MagicMock()\n containers_list = mock.MagicMock()\n self.clients(\"swift\").get_account.return_value = (headers_dict,\n containers_list)\n scenario = utils.SwiftScenario(context=self.context)\n\n self.assertEqual((headers_dict, containers_list),\n scenario._list_containers(fargs=\"f\"))\n kw = {\"full_listing\": True, \"fargs\": \"f\"}\n self.clients(\"swift\").get_account.assert_called_once_with(**kw)\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"swift.list_containers\")\n\n @ddt.data(\n {},\n {\"headers\": {\"X-fake-name\": \"fake-value\"}},\n {\"public\": False,\n \"headers\": {\"X-fake-name\": \"fake-value\"}},\n {\"public\": False})\n @ddt.unpack\n def test__create_container(self, public=True, kwargs=None, headers=None):\n if kwargs is None:\n kwargs = {\"fakearg\": \"fake\"}\n if headers is None:\n headers = {}\n scenario = utils.SwiftScenario(self.context)\n scenario.generate_random_name = mock.MagicMock()\n\n container = scenario._create_container(public=public,\n headers=headers,\n **kwargs)\n self.assertEqual(container,\n scenario.generate_random_name.return_value)\n kwargs[\"headers\"] = headers\n kwargs[\"headers\"][\"X-Container-Read\"] = \".r:*,.rlistings\"\n self.clients(\"swift\").put_container.assert_called_once_with(\n scenario.generate_random_name.return_value,\n **kwargs)\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"swift.create_container\")\n\n def test__delete_container(self):\n container_name = mock.MagicMock()\n scenario = utils.SwiftScenario(context=self.context)\n 
scenario._delete_container(container_name, fargs=\"f\")\n\n kw = {\"fargs\": \"f\"}\n self.clients(\"swift\").delete_container.assert_called_once_with(\n container_name,\n **kw)\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"swift.delete_container\")\n\n def test__list_objects(self):\n container_name = mock.MagicMock()\n headers_dict = mock.MagicMock()\n objects_list = mock.MagicMock()\n self.clients(\"swift\").get_container.return_value = (headers_dict,\n objects_list)\n scenario = utils.SwiftScenario(context=self.context)\n\n self.assertEqual((headers_dict, objects_list),\n scenario._list_objects(container_name, fargs=\"f\"))\n kw = {\"full_listing\": True, \"fargs\": \"f\"}\n self.clients(\"swift\").get_container.assert_called_once_with(\n container_name,\n **kw)\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"swift.list_objects\")\n\n def test__upload_object(self):\n container_name = mock.MagicMock()\n content = mock.MagicMock()\n etag = mock.MagicMock()\n self.clients(\"swift\").put_object.return_value = etag\n scenario = utils.SwiftScenario(self.context)\n scenario.generate_random_name = mock.MagicMock()\n\n self.clients(\"swift\").put_object.reset_mock()\n self.assertEqual((etag, scenario.generate_random_name.return_value),\n scenario._upload_object(container_name, content,\n fargs=\"f\"))\n kw = {\"fargs\": \"f\"}\n self.clients(\"swift\").put_object.assert_called_once_with(\n container_name, scenario.generate_random_name.return_value,\n content, **kw)\n self.assertEqual(1, scenario.generate_random_name.call_count)\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"swift.upload_object\")\n\n def test__download_object(self):\n container_name = mock.MagicMock()\n object_name = mock.MagicMock()\n headers_dict = mock.MagicMock()\n content = mock.MagicMock()\n self.clients(\"swift\").get_object.return_value = (headers_dict, content)\n scenario = utils.SwiftScenario(context=self.context)\n\n 
self.assertEqual((headers_dict, content),\n scenario._download_object(container_name, object_name,\n fargs=\"f\"))\n kw = {\"fargs\": \"f\"}\n self.clients(\"swift\").get_object.assert_called_once_with(\n container_name, object_name,\n **kw)\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"swift.download_object\")\n\n def test__delete_object(self):\n container_name = mock.MagicMock()\n object_name = mock.MagicMock()\n scenario = utils.SwiftScenario(context=self.context)\n scenario._delete_object(container_name, object_name, fargs=\"f\")\n\n kw = {\"fargs\": \"f\"}\n self.clients(\"swift\").delete_object.assert_called_once_with(\n container_name, object_name,\n **kw)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"swift.delete_object\")\n" }, { "alpha_fraction": 0.6629674434661865, "alphanum_fraction": 0.6670687794685364, "avg_line_length": 37.37963104248047, "blob_id": "774ea329542ba0b215025db3a2bef091606d2c8e", "content_id": "67be7cebb1f13e5597554fcb2c4183ca049833a2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4145, "license_type": "permissive", "max_line_length": 78, "num_lines": 108, "path": "/tests/unit/task/scenarios/barbican/test_secrets.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018 Red Hat Inc\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.barbican import secrets\nfrom tests.unit import fakes\nfrom tests.unit import test\n\n\nclass BarbicanSecretsTestCase(test.ScenarioTestCase):\n\n def get_test_context(self):\n context = super(BarbicanSecretsTestCase, self).get_test_context()\n context.update({\n \"admin\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake\"}\n })\n return context\n\n def setUp(self):\n super(BarbicanSecretsTestCase, self).setUp()\n m = \"rally_openstack.common.services.key_manager.barbican\"\n patch = mock.patch(\"%s.BarbicanService\" % m)\n self.addCleanup(patch.stop)\n self.mock_secrets = patch.start()\n\n def test_list_secrets(self):\n secrets_service = self.mock_secrets.return_value\n scenario = secrets.BarbicanSecretsList(self.context)\n scenario.run()\n secrets_service.list_secrets.assert_called_once_with()\n\n def test_create_secret(self):\n secrets_service = self.mock_secrets.return_value\n scenario = secrets.BarbicanSecretsCreate(self.context)\n scenario.run()\n secrets_service.create_secret.assert_called_once_with()\n\n def test_create_and_delete_secret(self):\n secrets_service = self.mock_secrets.return_value\n scenario = secrets.BarbicanSecretsCreateAndDelete(self.context)\n scenario.run()\n\n secrets_service.create_secret.assert_called_once_with()\n self.assertEqual(1, secrets_service.delete_secret.call_count)\n\n def test_create_and_get_secret(self):\n secrets_service = self.mock_secrets.return_value\n fake_secret = fakes.FakeSecret(id=1, name=\"secretxxx\")\n secrets_service.create_secret.return_value = fake_secret\n fake_secret_info = fakes.FakeSecret(id=1, name=\"secret1xxx\")\n secrets_service.get_secret.return_value = fake_secret_info\n scenario = 
secrets.BarbicanSecretsCreateAndGet(self.context)\n scenario.run()\n\n secrets_service.create_secret.assert_called_once_with()\n\n def test_get_secret(self):\n secrets_service = self.mock_secrets.return_value\n scenario = secrets.BarbicanSecretsGet(self.context)\n scenario.run()\n\n secrets_service.create_secret.assert_called_once_with()\n\n def test_get_secret_with_secret(self):\n secret = mock.Mock()\n secret.secret_ref = mock.Mock()\n secrets_service = self.mock_secrets.return_value\n scenario = secrets.BarbicanSecretsGet(self.context)\n scenario.run()\n\n self.assertEqual(1, secrets_service.get_secret.call_count)\n\n def test_create_and_list_secret(self):\n secrets_service = self.mock_secrets.return_value\n scenario = secrets.BarbicanSecretsCreateAndList(self.context)\n scenario.run()\n secrets_service.create_secret.assert_called_once_with()\n secrets_service.list_secrets.assert_called_once_with()\n\n def test_create_and_delete_symmetric_secret(self):\n secrets_service = self.mock_secrets.return_value\n scenario = secrets.BarbicanSecretsCreateSymmetricAndDelete(\n self.context)\n scenario.run(\n payload=\"rally_data\", algorithm=\"aes\", bit_length=256,\n mode=\"cbc\")\n self.assertEqual(1, secrets_service.create_secret.call_count)\n" }, { "alpha_fraction": 0.5807533264160156, "alphanum_fraction": 0.5852240920066833, "avg_line_length": 31.953960418701172, "blob_id": "ba85737f7acf925f70096d32828b12def6aae40c", "content_id": "2a12ffb9f8a14eccc3ecb049bc39855946f9b370", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17894, "license_type": "permissive", "max_line_length": 79, "num_lines": 543, "path": "/tests/ci/rally_verify.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport argparse\nimport collections\nimport gzip\nimport json\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\nimport uuid\n\nimport jinja2\n\nfrom rally import api\nfrom rally.env import env_mgr\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import credential\n\nLOG = logging.getLogger(\"verify-job\")\nLOG.setLevel(logging.DEBUG)\n\n# NOTE(andreykurilin): this variable is used to generate output file names\n# with prefix ${CALL_COUNT}_ .\n_call_count = 0\n\n\nclass Status(object):\n PASS = \"success\"\n ERROR = \"error\"\n SKIPPED = \"skip\"\n FAILURE = \"fail\"\n\n\nclass Step(object):\n COMMAND = None\n DEPENDS_ON = None\n CALL_ARGS = {}\n\n BASE_DIR = \"rally-verify\"\n HTML_TEMPLATE = (\"<span class=\\\"%(status)s\\\">[%(status)s]</span>\\n\"\n \"<a href=\\\"%(output_file)s\\\">%(doc)s</a>\\n\"\n \"<code>$ %(cmd)s</code>\")\n\n def __init__(self, args, rapi):\n self.args = args\n self.rapi = rapi\n self.result = {\"status\": Status.PASS,\n \"doc\": self.__doc__,\n \"cmd\": \"None command found\"}\n\n @property\n def name(self):\n return \" \".join(re.findall(\"[A-Z][^A-Z]*\",\n self.__class__.__name__)).lower()\n\n def check(self, results):\n \"\"\"Check weather this step should be executed or skipped.\"\"\"\n if self.DEPENDS_ON is not None:\n if results[self.DEPENDS_ON].result[\"status\"] in (\n Status.PASS, Status.FAILURE):\n return True\n else:\n self.result[\"status\"] = Status.SKIPPED\n msg = (\"Step '%s' is skipped, since depends on step '%s' is \"\n \"skipped or finished with an error.\" %\n 
(self.name, results[self.DEPENDS_ON].name))\n stdout_file = self._generate_path(\n \"%s.txt\" % self.__class__.__name__)\n\n self.result[\"output_file\"] = self._write_file(\n stdout_file, msg, compress=False)\n return False\n return True\n\n def setUp(self):\n \"\"\"Obtain variables required for execution\"\"\"\n pass\n\n def run(self):\n \"\"\"Execute step. The default action - execute the command\"\"\"\n self.setUp()\n\n cmd = \"rally --rally-debug %s\" % (self.COMMAND % self.CALL_ARGS)\n self.result[\"cmd\"] = cmd\n self.result[\"status\"], self.result[\"output\"] = self.call_rally(cmd)\n\n stdout_file = self._generate_path(\"%s.txt\" % cmd)\n self.result[\"output_file\"] = self._write_file(\n stdout_file, self.result[\"output\"], compress=False)\n\n @classmethod\n def _generate_path(cls, root):\n global _call_count\n _call_count += 1\n\n root = root.replace(\"<\", \"\").replace(\">\", \"\").replace(\"/\", \"_\")\n parts = [\"%s\" % _call_count]\n for path in root.split(\" \"):\n if path.startswith(cls.BASE_DIR):\n path = path[len(cls.BASE_DIR) + 1:]\n parts.append(path)\n return os.path.join(cls.BASE_DIR, \"_\".join(parts))\n\n @classmethod\n def _write_file(cls, path, data, compress=False):\n \"\"\"Create a file and write some data to it.\"\"\"\n if compress:\n with gzip.open(path, \"w\") as f:\n if not isinstance(data, bytes):\n data = data.encode()\n f.write(data)\n else:\n with open(path, \"w\") as f:\n f.write(data)\n return path\n\n @staticmethod\n def call_rally(command):\n \"\"\"Execute a Rally verify command.\"\"\"\n try:\n LOG.info(\"Start `%s` command.\" % command)\n stdout = subprocess.check_output(command.split(),\n stderr=subprocess.STDOUT).decode()\n except subprocess.CalledProcessError as e:\n LOG.error(\"Command `%s` failed.\" % command)\n return Status.ERROR, e.output.decode()\n else:\n return Status.PASS, stdout\n\n def to_html(self):\n if self.result[\"status\"] == Status.SKIPPED:\n return \"\"\n else:\n return self.HTML_TEMPLATE % 
self.result\n\n\nclass SetUpStep(Step):\n \"\"\"Validate deployment, create required resources and directories.\"\"\"\n\n ENV_NAME = \"tempest\"\n\n def run(self):\n if not os.path.exists(\"%s/extra\" % self.BASE_DIR):\n os.makedirs(\"%s/extra\" % self.BASE_DIR)\n\n # ensure that environment exit and check it\n env = env_mgr.EnvManager.get(self.ENV_NAME)\n for p_name, status in env.check_health().items():\n if not status[\"available\"]:\n self.result[\"status\"] = Status.ERROR\n return\n\n try:\n subprocess.check_call(\n [\"rally\", \"env\", \"use\", \"--env\", self.ENV_NAME],\n stdout=sys.stdout)\n except subprocess.CalledProcessError:\n self.result[\"status\"] = Status.ERROR\n return\n\n openstack_platform = env.data[\"platforms\"][\"openstack\"]\n admin_creds = credential.OpenStackCredential(\n permission=consts.EndpointPermission.ADMIN,\n **openstack_platform[\"platform_data\"][\"admin\"])\n clients = admin_creds.clients()\n\n if self.args.ctx_create_resources:\n # If the 'ctx-create-resources' arg is provided, delete images and\n # flavors, and also create a shared network to make Tempest context\n # create needed resources.\n LOG.info(\"The 'ctx-create-resources' arg is provided. 
Deleting \"\n \"images and flavors, and also creating a shared network \"\n \"to make Tempest context create needed resources.\")\n\n LOG.info(\"Deleting images.\")\n for image in clients.glance().images.list():\n clients.glance().images.delete(image.id)\n\n LOG.info(\"Deleting flavors.\")\n for flavor in clients.nova().flavors.list():\n clients.nova().flavors.delete(flavor.id)\n\n LOG.info(\"Creating a shared network.\")\n net_body = {\n \"network\": {\n \"name\": \"shared-net-%s\" % str(uuid.uuid4()),\n \"tenant_id\": clients.keystone.auth_ref.project_id,\n \"shared\": True\n }\n }\n clients.neutron().create_network(net_body)\n else:\n # Otherwise, just in case create only flavors with the following\n # properties: RAM = 64MB and 128MB, VCPUs = 1, disk = 0GB to make\n # Tempest context discover them.\n LOG.info(\"The 'ctx-create-resources' arg is not provided. \"\n \"Creating flavors to make Tempest context discover them.\")\n for flv_ram in [64, 128]:\n params = {\n \"name\": \"flavor-%s\" % str(uuid.uuid4()),\n \"ram\": flv_ram,\n \"vcpus\": 1,\n \"disk\": 0\n }\n LOG.info(\"Creating flavor '%s' with the following properties: \"\n \"RAM = %dMB, VCPUs = 1, disk = 0GB\" %\n (params[\"name\"], flv_ram))\n clients.nova().flavors.create(**params)\n\n def to_html(self):\n return \"\"\n\n\nclass ListPlugins(Step):\n \"\"\"List plugins for verifiers management.\"\"\"\n\n COMMAND = \"verify list-plugins\"\n DEPENDS_ON = SetUpStep\n\n\nclass CreateVerifier(Step):\n \"\"\"Create a Tempest verifier.\"\"\"\n\n COMMAND = (\"verify create-verifier --type %(type)s --name %(name)s \"\n \"--source %(source)s\")\n DEPENDS_ON = ListPlugins\n CALL_ARGS = {\"type\": \"tempest\",\n \"name\": \"my-verifier\",\n \"source\": \"https://opendev.org/openstack/tempest\"}\n\n\nclass ShowVerifier(Step):\n \"\"\"Show information about the created verifier.\"\"\"\n\n COMMAND = \"verify show-verifier\"\n DEPENDS_ON = CreateVerifier\n\n\nclass ListVerifiers(Step):\n \"\"\"List all installed 
verifiers.\"\"\"\n\n COMMAND = \"verify list-verifiers\"\n DEPENDS_ON = CreateVerifier\n\n\nclass UpdateVerifier(Step):\n \"\"\"Switch the verifier to the penultimate version.\"\"\"\n\n COMMAND = \"verify update-verifier --version %(version)s --update-venv\"\n DEPENDS_ON = CreateVerifier\n\n def setUp(self):\n \"\"\"Obtain penultimate verifier commit for downgrading to it\"\"\"\n verifier_id = self.rapi.verifier.list()[0][\"uuid\"]\n verifications_dir = os.path.join(\n os.path.expanduser(\"~\"),\n \".rally/verification/verifier-%s/repo\" % verifier_id)\n # Get the penultimate verifier commit ID\n p_commit_id = subprocess.check_output(\n [\"git\", \"log\", \"-n\", \"1\", \"--pretty=format:%H\"],\n cwd=verifications_dir).decode().strip()\n self.CALL_ARGS = {\"version\": p_commit_id}\n\n\nclass ConfigureVerifier(Step):\n \"\"\"Generate and show the verifier config file.\"\"\"\n\n COMMAND = \"verify configure-verifier --show\"\n DEPENDS_ON = CreateVerifier\n\n\nclass ExtendVerifier(Step):\n \"\"\"Extend verifier with keystone integration tests.\"\"\"\n\n COMMAND = \"verify add-verifier-ext --source %(source)s\"\n DEPENDS_ON = CreateVerifier\n CALL_ARGS = {\"source\": \"https://opendev.org/openstack/\"\n \"keystone-tempest-plugin\"}\n\n\nclass ListVerifierExtensions(Step):\n \"\"\"List all extensions of verifier.\"\"\"\n\n COMMAND = \"verify list-verifier-exts\"\n DEPENDS_ON = ExtendVerifier\n\n\nclass ListVerifierTests(Step):\n \"\"\"List all tests of specific verifier.\"\"\"\n\n COMMAND = \"verify list-verifier-tests\"\n DEPENDS_ON = CreateVerifier\n\n\nclass RunVerification(Step):\n \"\"\"Run a verification.\"\"\"\n\n DEPENDS_ON = ConfigureVerifier\n COMMAND = (\"verify start --pattern set=%(set)s --skip-list %(skip_tests)s \"\n \"--xfail-list %(xfail_tests)s --tag %(tag)s %(set)s-set \"\n \"--detailed\")\n SKIP_TESTS = {\n \"tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.\"\n \"test_get_flavor[id-1f12046b-753d-40d2-abb6-d8eb8b30cb2f,smoke]\":\n \"This 
test was skipped intentionally\",\n }\n XFAIL_TESTS = {\n \"tempest.scenario.test_dashboard_basic_ops\"\n \".TestDashboardBasicOps.test_basic_scenario\"\n \"[dashboard,id-4f8851b1-0e69-482b-b63b-84c6e76f6c80,smoke]\":\n \"Fails for unknown reason\",\n }\n\n def setUp(self):\n self.CALL_ARGS[\"tag\"] = \"tag-1 tag-2\"\n self.CALL_ARGS[\"set\"] = \"full\" if self.args.mode == \"full\" else \"smoke\"\n # Start a verification, show results and generate reports\n skip_tests = json.dumps(self.SKIP_TESTS)\n xfail_tests = json.dumps(self.XFAIL_TESTS)\n self.CALL_ARGS[\"skip_tests\"] = self._write_file(\n self._generate_path(\"skip-list.json\"), skip_tests)\n self.CALL_ARGS[\"xfail_tests\"] = self._write_file(\n self._generate_path(\"xfail-list.json\"), xfail_tests)\n\n def run(self):\n super(RunVerification, self).run()\n if \"Success: 0\" in self.result[\"output\"]:\n self.result[\"status\"] = Status.FAILURE\n\n\nclass ReRunVerification(RunVerification):\n \"\"\"Re-Run previous verification.\"\"\"\n\n COMMAND = \"verify rerun --tag one-more-attempt\"\n\n\nclass ShowVerification(Step):\n \"\"\"Show results of verification.\"\"\"\n\n COMMAND = \"verify show\"\n DEPENDS_ON = RunVerification\n\n\nclass ShowSecondVerification(ShowVerification):\n \"\"\"Show results of verification.\"\"\"\n\n DEPENDS_ON = ReRunVerification\n\n\nclass ShowDetailedVerification(Step):\n \"\"\"Show detailed results of verification.\"\"\"\n\n COMMAND = \"verify show --detailed\"\n DEPENDS_ON = RunVerification\n\n\nclass ShowDetailedSecondVerification(ShowDetailedVerification):\n \"\"\"Show detailed results of verification.\"\"\"\n\n DEPENDS_ON = ReRunVerification\n\n\nclass ReportVerificationMixin(Step):\n \"\"\"Mixin for obtaining reports of verifications.\"\"\"\n\n COMMAND = \"verify report --uuid %(uuids)s --type %(type)s --to %(out)s\"\n\n HTML_TEMPLATE = (\"<span class=\\\"%(status)s\\\">[%(status)s]</span>\\n\"\n \"<a href=\\\"%(out)s\\\">%(doc)s</a> \"\n \"[<a 
href=\\\"%(output_file)s\\\">Output from CLI</a>]\\n\"\n \"<code>$ %(cmd)s</code>\")\n\n def setUp(self):\n self.CALL_ARGS[\"out\"] = \"<path>\"\n self.CALL_ARGS[\"uuids\"] = \"<uuid-1> <uuid-2>\"\n cmd = self.COMMAND % self.CALL_ARGS\n report = \"%s.%s\" % (cmd.replace(\"/\", \"_\").replace(\" \", \"_\"),\n self.CALL_ARGS[\"type\"])\n print(report)\n self.CALL_ARGS[\"out\"] = self._generate_path(report)\n self.CALL_ARGS[\"uuids\"] = \" \".join(\n [v[\"uuid\"] for v in self.rapi.verification.list()])\n print(self.COMMAND % self.CALL_ARGS)\n self.result[\"out\"] = \"<None>\"\n\n\nclass HtmlVerificationReport(ReportVerificationMixin):\n \"\"\"Generate HTML report for verification(s).\"\"\"\n\n CALL_ARGS = {\"type\": \"html-static\"}\n DEPENDS_ON = RunVerification\n\n def setUp(self):\n super(HtmlVerificationReport, self).setUp()\n self.CALL_ARGS[\"out\"] = self.CALL_ARGS[\"out\"][:-7]\n\n\nclass JsonVerificationReport(ReportVerificationMixin):\n \"\"\"Generate JSON report for verification(s).\"\"\"\n\n CALL_ARGS = {\"type\": \"json\"}\n DEPENDS_ON = RunVerification\n\n\nclass JunitVerificationReport(ReportVerificationMixin):\n \"\"\"Generate JUNIT report for verification(s).\"\"\"\n\n CALL_ARGS = {\"type\": \"junit-xml\"}\n DEPENDS_ON = RunVerification\n\n\nclass ListVerifications(Step):\n \"\"\"List all verifications.\"\"\"\n\n COMMAND = \"verify list\"\n DEPENDS_ON = CreateVerifier\n\n\nclass DeleteVerifierExtension(Step):\n \"\"\"Delete keystone extension.\"\"\"\n\n COMMAND = \"verify delete-verifier-ext --name %(name)s\"\n CALL_ARGS = {\"name\": \"keystone_tests\"}\n DEPENDS_ON = ExtendVerifier\n\n\nclass DeleteVerifier(Step):\n \"\"\"Delete only Tempest verifier.\n\n all verifications will be delete when destroy deployment.\n\n \"\"\"\n COMMAND = \"verify delete-verifier --id %(id)s --force\"\n CALL_ARGS = {\"id\": CreateVerifier.CALL_ARGS[\"name\"]}\n DEPENDS_ON = CreateVerifier\n\n\nclass DestroyDeployment(Step):\n \"\"\"Delete the deployment, and 
verifications of this deployment.\"\"\"\n\n COMMAND = \"deployment destroy --deployment %(id)s\"\n CALL_ARGS = {\"id\": SetUpStep.ENV_NAME}\n DEPENDS_ON = SetUpStep\n\n\ndef run(args):\n\n steps = [SetUpStep,\n ListPlugins,\n CreateVerifier,\n ShowVerifier,\n ListVerifiers,\n UpdateVerifier,\n ConfigureVerifier,\n ExtendVerifier,\n ListVerifierExtensions,\n ListVerifierTests,\n RunVerification,\n ShowVerification,\n ShowDetailedVerification,\n HtmlVerificationReport,\n JsonVerificationReport,\n JunitVerificationReport,\n ListVerifications,\n DeleteVerifierExtension,\n DestroyDeployment,\n DeleteVerifier]\n\n if args.compare:\n # need to launch one more verification\n place_to_insert = steps.index(ShowDetailedVerification) + 1\n # insert steps in reverse order to be able to use the same index\n steps.insert(place_to_insert, ShowDetailedSecondVerification)\n steps.insert(place_to_insert, ShowSecondVerification)\n steps.insert(place_to_insert, ReRunVerification)\n\n results = collections.OrderedDict()\n rapi = api.API()\n for step_cls in steps:\n step = step_cls(args, rapi=rapi)\n if step.check(results):\n step.run()\n results[step_cls] = step\n\n return results.values()\n\n\ndef create_report(results):\n template_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"pages\")\n loader = jinja2.FileSystemLoader(template_dir)\n env = jinja2.Environment(loader=loader)\n template = env.get_template(\"verify-index.html\")\n with open(os.path.join(Step.BASE_DIR, \"extra/index.html\"), \"w\") as f:\n f.write(template.render(steps=results))\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Launch rally-verify job.\")\n parser.add_argument(\"--mode\", type=str, default=\"light\",\n help=\"Mode of job. The 'full' mode corresponds to the \"\n \"full set of verifier tests. 
The 'light' mode \"\n \"corresponds to the smoke set of verifier tests.\",\n choices=[\"light\", \"full\"])\n parser.add_argument(\"--compare\", action=\"store_true\",\n help=\"Start the second verification to generate a \"\n \"trends report for two verifications.\")\n # TODO(ylobankov): Remove hard-coded Tempest related things and make it\n # configurable.\n parser.add_argument(\"--ctx-create-resources\", action=\"store_true\",\n help=\"Make Tempest context create needed resources \"\n \"for the tests.\")\n\n args = parser.parse_args()\n\n steps = run(args)\n results = [step.to_html() for step in steps]\n\n create_report(results)\n\n if len([None for step in steps\n if step.result[\"status\"] == Status.PASS]) == len(steps):\n return 0\n return 1\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n" }, { "alpha_fraction": 0.5659277439117432, "alphanum_fraction": 0.5695868730545044, "avg_line_length": 39.87478256225586, "blob_id": "19d72b8ca06d8660fcb1860a95005cf75582357c", "content_id": "f10fcf80d4a46e3a803ea38f65fdeb523f42e227", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23503, "license_type": "permissive", "max_line_length": 79, "num_lines": 575, "path": "/tests/unit/task/scenarios/manila/test_shares.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally import exceptions\nfrom rally_openstack.task.scenarios.manila import shares\nfrom tests.unit import test\n\n\[email protected]\nclass ManilaSharesTestCase(test.ScenarioTestCase):\n\n @ddt.data(\n {\"share_proto\": \"nfs\", \"size\": 3},\n {\"share_proto\": \"cifs\", \"size\": 4,\n \"share_network\": \"foo\", \"share_type\": \"bar\"},\n )\n def test_create_and_delete_share(self, params):\n fake_share = mock.MagicMock()\n scenario = shares.CreateAndDeleteShare(self.context)\n scenario._create_share = mock.MagicMock(return_value=fake_share)\n scenario.sleep_between = mock.MagicMock()\n scenario._delete_share = mock.MagicMock()\n\n scenario.run(min_sleep=3, max_sleep=4, **params)\n\n scenario._create_share.assert_called_once_with(**params)\n scenario.sleep_between.assert_called_once_with(3, 4)\n scenario._delete_share.assert_called_once_with(fake_share)\n\n def create_env(self, scenario):\n fake_share = mock.MagicMock()\n scenario = shares.CreateShareAndAccessFromVM(self.context)\n self.ip = {\"id\": \"foo_id\", \"ip\": \"foo_ip\", \"is_floating\": True}\n scenario._boot_server_with_fip = mock.Mock(\n return_value=(\"foo_server\", self.ip))\n scenario._delete_server_with_fip = mock.Mock()\n scenario._run_command = mock.MagicMock(\n return_value=(0, \"{\\\"foo\\\": 42}\", \"foo_err\"))\n scenario.add_output = mock.Mock()\n self.context.update({\"user\": {\"keypair\": {\"name\": \"keypair_name\"},\n \"credential\": mock.MagicMock()}})\n scenario._create_share = mock.MagicMock(return_value=fake_share)\n scenario._delete_share = mock.MagicMock()\n scenario._export_location = mock.MagicMock(return_value=\"fake\")\n scenario._allow_access_share = mock.MagicMock()\n\n return scenario, fake_share\n\n @ddt.data(\n {\"image\": \"some_image\",\n \"flavor\": \"m1.small\", \"username\": \"chuck norris\"}\n )\n 
@mock.patch(\"rally.task.utils.get_from_manager\")\n @mock.patch(\"rally.task.utils.wait_for_status\")\n def test_create_share_and_access_from_vm(\n self,\n params,\n mock_rally_task_utils_wait_for_status,\n mock_rally_task_utils_get_from_manager):\n scenario, fake_share = self.create_env(\n shares.CreateShareAndAccessFromVM(self.context))\n scenario.run(**params)\n\n scenario._create_share.assert_called_once_with(\n share_proto=\"nfs\", size=1)\n scenario._delete_share.assert_called_once_with(fake_share)\n scenario._allow_access_share.assert_called_once_with(\n fake_share, \"ip\", \"foo_ip\", \"rw\")\n scenario._export_location.assert_called_once_with(fake_share)\n scenario._boot_server_with_fip.assert_called_once_with(\n \"some_image\", \"m1.small\", use_floating_ip=True,\n floating_network=None, key_name=\"keypair_name\",\n userdata=\"#cloud-config\\npackages:\\n - nfs-common\")\n mock_rally_task_utils_wait_for_status.assert_called_once_with(\n \"foo_server\", ready_statuses=[\"ACTIVE\"], update_resource=mock.ANY)\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", {\"id\": \"foo_id\", \"ip\": \"foo_ip\",\n \"is_floating\": True},\n force_delete=False)\n scenario.add_output.assert_called_with(\n complete={\"chart_plugin\": \"TextArea\",\n \"data\": [\n \"foo_err\"],\n \"title\": \"Script StdErr\"})\n\n @ddt.data(\n {\"image\": \"some_image\",\n \"flavor\": \"m1.small\", \"username\": \"chuck norris\"}\n )\n @mock.patch(\"rally.task.utils.get_from_manager\")\n @mock.patch(\"rally.task.utils.wait_for_status\")\n def test_create_share_and_access_from_vm_command_timeout(\n self,\n params,\n mock_rally_task_utils_wait_for_status,\n mock_rally_task_utils_get_from_manager):\n scenario, fake_share = self.create_env(\n shares.CreateShareAndAccessFromVM(self.context))\n\n scenario._run_command.side_effect = exceptions.SSHTimeout()\n self.assertRaises(exceptions.SSHTimeout,\n scenario.run,\n \"foo_flavor\", \"foo_image\", \"foo_interpreter\",\n 
\"foo_script\", \"foo_username\")\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", self.ip, force_delete=False)\n self.assertFalse(scenario.add_output.called)\n scenario._delete_share.assert_called_once_with(fake_share)\n\n @ddt.data(\n {\"image\": \"some_image\",\n \"flavor\": \"m1.small\", \"username\": \"chuck norris\"}\n )\n @mock.patch(\"rally.task.utils.get_from_manager\")\n @mock.patch(\"rally.task.utils.wait_for_status\")\n def test_create_share_and_access_from_vm_wait_timeout(\n self,\n params,\n mock_rally_task_utils_wait_for_status,\n mock_rally_task_utils_get_from_manager):\n scenario, fake_share = self.create_env(\n shares.CreateShareAndAccessFromVM(self.context))\n\n mock_rally_task_utils_wait_for_status.side_effect = \\\n exceptions.TimeoutException(\n resource_type=\"foo_resource\",\n resource_name=\"foo_name\",\n resource_id=\"foo_id\",\n desired_status=\"foo_desired_status\",\n resource_status=\"foo_resource_status\",\n timeout=2)\n self.assertRaises(exceptions.TimeoutException,\n scenario.run,\n \"foo_flavor\", \"foo_image\", \"foo_interpreter\",\n \"foo_script\", \"foo_username\")\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\", self.ip, force_delete=False)\n self.assertFalse(scenario.add_output.called)\n scenario._delete_share.assert_called_once_with(fake_share)\n\n @ddt.data(\n {\"output\": (0, \"\", \"\"),\n \"expected\": [{\"complete\": {\"chart_plugin\": \"TextArea\",\n \"data\": [\"\"],\n \"title\": \"Script StdOut\"}}]},\n {\"output\": (1, \"x y z\", \"error message\"),\n \"raises\": exceptions.ScriptError},\n {\"output\": (0, \"[1, 2, 3, 4]\", \"\"), \"expected\": []}\n )\n @ddt.unpack\n def test_create_share_and_access_from_vm_add_output(self, output,\n expected=None,\n raises=None):\n scenario, fake_share = self.create_env(\n shares.CreateShareAndAccessFromVM(self.context))\n\n scenario._run_command.return_value = output\n kwargs = {\"flavor\": \"foo_flavor\",\n \"image\": 
\"foo_image\",\n \"username\": \"foo_username\",\n \"password\": \"foo_password\",\n \"use_floating_ip\": \"use_fip\",\n \"floating_network\": \"ext_network\",\n \"force_delete\": \"foo_force\"}\n if raises:\n self.assertRaises(raises, scenario.run, **kwargs)\n self.assertFalse(scenario.add_output.called)\n else:\n scenario.run(**kwargs)\n calls = [mock.call(**kw) for kw in expected]\n scenario.add_output.assert_has_calls(calls, any_order=True)\n\n scenario._create_share.assert_called_once_with(\n share_proto=\"nfs\", size=1)\n scenario._delete_share.assert_called_once_with(fake_share)\n scenario._allow_access_share.assert_called_once_with(\n fake_share, \"ip\", \"foo_ip\", \"rw\")\n scenario._export_location.assert_called_once_with(fake_share)\n scenario._boot_server_with_fip.assert_called_once_with(\n \"foo_image\", \"foo_flavor\", use_floating_ip=\"use_fip\",\n floating_network=\"ext_network\", key_name=\"keypair_name\",\n userdata=\"#cloud-config\\npackages:\\n - nfs-common\")\n scenario._delete_server_with_fip.assert_called_once_with(\n \"foo_server\",\n {\"id\": \"foo_id\", \"ip\": \"foo_ip\", \"is_floating\": True},\n force_delete=\"foo_force\")\n\n @ddt.data(\n {},\n {\"detailed\": True},\n {\"detailed\": False},\n {\"search_opts\": None},\n {\"search_opts\": {}},\n {\"search_opts\": {\"foo\": \"bar\"}},\n {\"detailed\": True, \"search_opts\": None},\n {\"detailed\": False, \"search_opts\": None},\n {\"detailed\": True, \"search_opts\": {\"foo\": \"bar\"}},\n {\"detailed\": False, \"search_opts\": {\"quuz\": \"foo\"}},\n )\n @ddt.unpack\n def test_list_shares(self, detailed=True, search_opts=None):\n scenario = shares.ListShares(self.context)\n scenario._list_shares = mock.MagicMock()\n\n scenario.run(detailed=detailed, search_opts=search_opts)\n\n scenario._list_shares.assert_called_once_with(\n detailed=detailed, search_opts=search_opts)\n\n @ddt.data(\n {\"params\": {\"share_proto\": \"nfs\"}, \"new_size\": 4},\n {\n \"params\": {\n \"share_proto\": 
\"cifs\",\n \"size\": 4,\n \"snapshot_id\": \"snapshot_foo\",\n \"description\": \"foo_description\",\n \"metadata\": {\"foo_metadata\": \"foo\"},\n \"share_network\": \"foo_network\",\n \"share_type\": \"foo_type\",\n \"is_public\": True,\n \"availability_zone\": \"foo_avz\",\n \"share_group_id\": \"foo_group_id\"\n },\n \"new_size\": 8\n }\n )\n @ddt.unpack\n def test_create_and_extend_shares(self, params, new_size):\n size = params.get(\"size\", 1)\n share_group_id = params.get(\"share_group_id\", None)\n snapshot_id = params.get(\"snapshot_id\", None)\n description = params.get(\"description\", None)\n metadata = params.get(\"metadata\", None)\n share_network = params.get(\"share_network\", None)\n share_type = params.get(\"share_type\", None)\n is_public = params.get(\"is_public\", False)\n availability_zone = params.get(\"availability_zone\", None)\n\n fake_share = mock.MagicMock()\n scenario = shares.CreateAndExtendShare(self.context)\n scenario._create_share = mock.MagicMock(return_value=fake_share)\n scenario._extend_share = mock.MagicMock()\n\n scenario.run(new_size=new_size, **params)\n\n scenario._create_share.assert_called_with(\n share_proto=params[\"share_proto\"],\n size=size,\n snapshot_id=snapshot_id,\n description=description,\n metadata=metadata,\n share_network=share_network,\n share_type=share_type,\n is_public=is_public,\n availability_zone=availability_zone,\n share_group_id=share_group_id\n )\n scenario._extend_share.assert_called_with(fake_share, new_size)\n\n @ddt.data(\n {\"params\": {\"share_proto\": \"nfs\"}, \"new_size\": 4},\n {\n \"params\": {\n \"share_proto\": \"cifs\",\n \"size\": 4,\n \"snapshot_id\": \"snapshot_foo\",\n \"description\": \"foo_description\",\n \"metadata\": {\"foo_metadata\": \"foo\"},\n \"share_network\": \"foo_network\",\n \"share_type\": \"foo_type\",\n \"is_public\": True,\n \"availability_zone\": \"foo_avz\",\n \"share_group_id\": \"foo_group_id\"\n },\n \"new_size\": 8\n }\n )\n @ddt.unpack\n def 
test_create_and_shrink_shares(self, params, new_size):\n size = params.get(\"size\", 2)\n share_group_id = params.get(\"share_group_id\", None)\n snapshot_id = params.get(\"snapshot_id\", None)\n description = params.get(\"description\", None)\n metadata = params.get(\"metadata\", None)\n share_network = params.get(\"share_network\", None)\n share_type = params.get(\"share_type\", None)\n is_public = params.get(\"is_public\", False)\n availability_zone = params.get(\"availability_zone\", None)\n\n fake_share = mock.MagicMock()\n scenario = shares.CreateAndShrinkShare(self.context)\n scenario._create_share = mock.MagicMock(return_value=fake_share)\n scenario._shrink_share = mock.MagicMock()\n\n scenario.run(new_size=new_size, **params)\n\n scenario._create_share.assert_called_with(\n share_proto=params[\"share_proto\"],\n size=size,\n snapshot_id=snapshot_id,\n description=description,\n metadata=metadata,\n share_network=share_network,\n share_type=share_type,\n is_public=is_public,\n availability_zone=availability_zone,\n share_group_id=share_group_id\n )\n scenario._shrink_share.assert_called_with(fake_share, new_size)\n\n @ddt.data(\n {\n \"share_proto\": \"nfs\",\n \"size\": 3,\n \"access\": \"127.0.0.1\",\n \"access_type\": \"ip\"\n },\n {\n \"access\": \"1.2.3.4\",\n \"access_type\": \"ip\",\n \"access_level\": \"ro\",\n \"share_proto\": \"cifs\",\n \"size\": 4,\n \"snapshot_id\": \"snapshot_foo\",\n \"description\": \"foo_description\",\n \"metadata\": {\"foo_metadata\": \"foo\"},\n \"share_network\": \"foo_network\",\n \"share_type\": \"foo_type\",\n \"is_public\": True,\n \"availability_zone\": \"foo_avz\",\n \"share_group_id\": \"foo_group_id\"\n }\n )\n def test_create_share_and_allow_and_deny_access(self, params):\n access = params[\"access\"]\n access_type = params[\"access_type\"]\n access_level = params.get(\"access_level\", \"rw\")\n size = params.get(\"size\", 1)\n share_group_id = params.get(\"share_group_id\", None)\n snapshot_id = 
params.get(\"snapshot_id\", None)\n description = params.get(\"description\", None)\n metadata = params.get(\"metadata\", None)\n share_network = params.get(\"share_network\", None)\n share_type = params.get(\"share_type\", None)\n is_public = params.get(\"is_public\", False)\n availability_zone = params.get(\"availability_zone\", None)\n fake_share = mock.MagicMock()\n fake_access = {\"id\": \"foo\"}\n\n scenario = shares.CreateShareThenAllowAndDenyAccess(self.context)\n scenario._create_share = mock.MagicMock(return_value=fake_share)\n scenario._allow_access_share = mock.MagicMock(return_value=fake_access)\n scenario._deny_access_share = mock.MagicMock()\n\n scenario.run(**params)\n\n scenario._create_share.assert_called_with(\n share_proto=params[\"share_proto\"],\n size=size,\n snapshot_id=snapshot_id,\n description=description,\n metadata=metadata,\n share_network=share_network,\n share_type=share_type,\n is_public=is_public,\n availability_zone=availability_zone,\n share_group_id=share_group_id\n )\n scenario._allow_access_share.assert_called_with(\n fake_share, access_type, access, access_level)\n scenario._deny_access_share.assert_called_with(\n fake_share, fake_access[\"id\"])\n\n @ddt.data(\n {},\n {\"description\": \"foo_description\"},\n {\"neutron_net_id\": \"foo_neutron_net_id\"},\n {\"neutron_subnet_id\": \"foo_neutron_subnet_id\"},\n {\"nova_net_id\": \"foo_nova_net_id\"},\n {\"description\": \"foo_description\",\n \"neutron_net_id\": \"foo_neutron_net_id\",\n \"neutron_subnet_id\": \"foo_neutron_subnet_id\",\n \"nova_net_id\": \"foo_nova_net_id\"},\n )\n def test_create_share_network_and_delete(self, params):\n fake_sn = mock.MagicMock()\n scenario = shares.CreateShareNetworkAndDelete(self.context)\n scenario._create_share_network = mock.MagicMock(return_value=fake_sn)\n scenario._delete_share_network = mock.MagicMock()\n expected_params = {\n \"description\": None,\n \"neutron_net_id\": None,\n \"neutron_subnet_id\": None,\n \"nova_net_id\": 
None,\n }\n expected_params.update(params)\n\n scenario.run(**params)\n\n scenario._create_share_network.assert_called_once_with(\n **expected_params)\n scenario._delete_share_network.assert_called_once_with(fake_sn)\n\n @ddt.data(\n {},\n {\"description\": \"foo_description\"},\n {\"neutron_net_id\": \"foo_neutron_net_id\"},\n {\"neutron_subnet_id\": \"foo_neutron_subnet_id\"},\n {\"nova_net_id\": \"foo_nova_net_id\"},\n {\"description\": \"foo_description\",\n \"neutron_net_id\": \"foo_neutron_net_id\",\n \"neutron_subnet_id\": \"foo_neutron_subnet_id\",\n \"nova_net_id\": \"foo_nova_net_id\"},\n )\n def test_create_share_network_and_list(self, params):\n scenario = shares.CreateShareNetworkAndList(self.context)\n fake_network = mock.Mock()\n scenario._create_share_network = mock.Mock(\n return_value=fake_network)\n scenario._list_share_networks = mock.Mock(\n return_value=[fake_network,\n mock.Mock(),\n mock.Mock()])\n expected_create_params = {\n \"description\": params.get(\"description\"),\n \"neutron_net_id\": params.get(\"neutron_net_id\"),\n \"neutron_subnet_id\": params.get(\"neutron_subnet_id\"),\n \"nova_net_id\": params.get(\"nova_net_id\"),\n }\n expected_list_params = {\n \"detailed\": params.get(\"detailed\", True),\n \"search_opts\": params.get(\"search_opts\"),\n }\n expected_create_params.update(params)\n\n scenario.run(**params)\n\n scenario._create_share_network.assert_called_once_with(\n **expected_create_params)\n scenario._list_share_networks.assert_called_once_with(\n **expected_list_params)\n\n @ddt.data(\n {},\n {\"search_opts\": None},\n {\"search_opts\": {}},\n {\"search_opts\": {\"foo\": \"bar\"}},\n )\n def test_list_share_servers(self, search_opts):\n scenario = shares.ListShareServers(self.context)\n scenario.context = {\"admin\": {\"credential\": \"fake_credential\"}}\n scenario._list_share_servers = mock.MagicMock()\n\n scenario.run(search_opts=search_opts)\n\n scenario._list_share_servers.assert_called_once_with(\n 
search_opts=search_opts)\n\n @ddt.data(\n {\"security_service_type\": \"fake_type\"},\n {\"security_service_type\": \"fake_type\",\n \"dns_ip\": \"fake_dns_ip\",\n \"server\": \"fake_server\",\n \"domain\": \"fake_domain\",\n \"user\": \"fake_user\",\n \"password\": \"fake_password\",\n \"description\": \"fake_description\"},\n )\n def test_create_security_service_and_delete(self, params):\n fake_ss = mock.MagicMock()\n scenario = shares.CreateSecurityServiceAndDelete(self.context)\n scenario._create_security_service = mock.MagicMock(\n return_value=fake_ss)\n scenario._delete_security_service = mock.MagicMock()\n expected_params = {\n \"security_service_type\": params.get(\"security_service_type\"),\n \"dns_ip\": params.get(\"dns_ip\"),\n \"server\": params.get(\"server\"),\n \"domain\": params.get(\"domain\"),\n \"user\": params.get(\"user\"),\n \"password\": params.get(\"password\"),\n \"description\": params.get(\"description\"),\n }\n\n scenario.run(**params)\n\n scenario._create_security_service.assert_called_once_with(\n **expected_params)\n scenario._delete_security_service.assert_called_once_with(fake_ss)\n\n @ddt.data(\"ldap\", \"kerberos\", \"active_directory\")\n def test_attach_security_service_to_share_network(self,\n security_service_type):\n scenario = shares.AttachSecurityServiceToShareNetwork(self.context)\n scenario._create_share_network = mock.MagicMock()\n scenario._create_security_service = mock.MagicMock()\n scenario._add_security_service_to_share_network = mock.MagicMock()\n\n scenario.run(security_service_type=security_service_type)\n\n scenario._create_share_network.assert_called_once_with()\n scenario._create_security_service.assert_called_once_with(\n security_service_type=security_service_type)\n scenario._add_security_service_to_share_network.assert_has_calls([\n mock.call(scenario._create_share_network.return_value,\n scenario._create_security_service.return_value)])\n\n @ddt.data(\n {\"share_proto\": \"nfs\", \"size\": 3, 
\"detailed\": True},\n {\"share_proto\": \"cifs\", \"size\": 4, \"detailed\": False,\n \"share_network\": \"foo\", \"share_type\": \"bar\"},\n )\n def test_create_and_list_share(self, params):\n scenario = shares.CreateAndListShare()\n scenario._create_share = mock.MagicMock()\n scenario.sleep_between = mock.MagicMock()\n scenario._list_shares = mock.MagicMock()\n\n scenario.run(min_sleep=3, max_sleep=4, **params)\n\n detailed = params.pop(\"detailed\")\n scenario._create_share.assert_called_once_with(**params)\n scenario.sleep_between.assert_called_once_with(3, 4)\n scenario._list_shares.assert_called_once_with(detailed=detailed)\n\n @ddt.data(\n ({}, 0, 0),\n ({}, 1, 1),\n ({}, 2, 2),\n ({}, 3, 0),\n ({\"sets\": 5, \"set_size\": 8, \"delete_size\": 10}, 1, 1),\n )\n @ddt.unpack\n def test_set_and_delete_metadata(self, params, iteration, share_number):\n scenario = shares.SetAndDeleteMetadata()\n share_list = [{\"id\": \"fake_share_%s_id\" % d} for d in range(3)]\n scenario.context = {\"tenant\": {\"shares\": share_list}}\n scenario.context[\"iteration\"] = iteration\n scenario._set_metadata = mock.MagicMock()\n scenario._delete_metadata = mock.MagicMock()\n expected_set_params = {\n \"share\": share_list[share_number],\n \"sets\": params.get(\"sets\", 10),\n \"set_size\": params.get(\"set_size\", 3),\n \"key_min_length\": params.get(\"key_min_length\", 1),\n \"key_max_length\": params.get(\"key_max_length\", 256),\n \"value_min_length\": params.get(\"value_min_length\", 1),\n \"value_max_length\": params.get(\"value_max_length\", 1024),\n }\n\n scenario.run(**params)\n\n scenario._set_metadata.assert_called_once_with(**expected_set_params)\n scenario._delete_metadata.assert_called_once_with(\n share=share_list[share_number],\n keys=scenario._set_metadata.return_value,\n delete_size=params.get(\"delete_size\", 3),\n )\n" }, { "alpha_fraction": 0.5815725326538086, "alphanum_fraction": 0.5829328298568726, "avg_line_length": 38.10283660888672, "blob_id": 
"714bc0bd21fe9f6c6fc256c42955efd92d083f71", "content_id": "ba8991d5c94b18046f19f86a822fd5b2424261bf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11027, "license_type": "permissive", "max_line_length": 78, "num_lines": 282, "path": "/rally_openstack/task/cleanup/manager.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport time\n\nfrom rally.common import broker\nfrom rally.common import logging\nfrom rally.common.plugin import discover\nfrom rally.common.plugin import plugin\nfrom rally.common import utils as rutils\nfrom rally_openstack.task.cleanup import base\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass SeekAndDestroy(object):\n\n def __init__(self, manager_cls, admin, users,\n resource_classes=None, task_id=None):\n \"\"\"Resource deletion class.\n\n This class contains method exterminate() that finds and deletes\n all resources created by Rally.\n\n :param manager_cls: subclass of base.ResourceManager\n :param admin: admin credential like in context[\"admin\"]\n :param users: users credentials like in context[\"users\"]\n :param resource_classes: Resource classes to match resource names\n against\n :param task_id: The UUID of task to match resource names against\n \"\"\"\n self.manager_cls = manager_cls\n self.admin = admin\n self.users = 
users or []\n self.resource_classes = resource_classes or [\n rutils.RandomNameGeneratorMixin]\n self.task_id = task_id\n\n def _get_cached_client(self, user):\n \"\"\"Simplifies initialization and caching OpenStack clients.\"\"\"\n if not user:\n return None\n # NOTE(astudenov): Credential now supports caching by default\n return user[\"credential\"].clients()\n\n def _delete_single_resource(self, resource):\n \"\"\"Safe resource deletion with retries and timeouts.\n\n Send request to delete resource, in case of failures repeat it few\n times. After that pull status of resource until it's deleted.\n\n Writes in LOG warning with UUID of resource that wasn't deleted\n\n :param resource: instance of resource manager initiated with resource\n that should be deleted.\n \"\"\"\n\n msg_kw = {\n \"uuid\": resource.id(),\n \"name\": resource.name() or \"\",\n \"service\": resource._service,\n \"resource\": resource._resource\n }\n\n LOG.debug(\n \"Deleting %(service)s.%(resource)s object %(name)s (%(uuid)s)\"\n % msg_kw)\n\n try:\n rutils.retry(resource._max_attempts, resource.delete)\n except Exception as e:\n msg = (\"Resource deletion failed, max retries exceeded for \"\n \"%(service)s.%(resource)s: %(uuid)s.\") % msg_kw\n\n if logging.is_debug():\n LOG.exception(msg)\n else:\n LOG.warning(\"%(msg)s Reason: %(e)s\" % {\"msg\": msg, \"e\": e})\n else:\n started = time.time()\n failures_count = 0\n while time.time() - started < resource._timeout:\n try:\n if resource.is_deleted():\n return\n except Exception:\n LOG.exception(\n \"Seems like %s.%s.is_deleted(self) method is broken \"\n \"It shouldn't raise any exceptions.\"\n % (resource.__module__, type(resource).__name__))\n\n # NOTE(boris-42): Avoid LOG spamming in case of bad\n # is_deleted() method\n failures_count += 1\n if failures_count > resource._max_attempts:\n break\n\n finally:\n rutils.interruptable_sleep(resource._interval)\n\n LOG.warning(\"Resource deletion failed, timeout occurred for \"\n 
\"%(service)s.%(resource)s: %(uuid)s.\" % msg_kw)\n\n def _publisher(self, queue):\n \"\"\"Publisher for deletion jobs.\n\n This method iterates over all users, lists all resources\n (using manager_cls) and puts jobs for deletion.\n\n Every deletion job contains tuple with two values: user and resource\n uuid that should be deleted.\n\n In case of tenant based resource, uuids are fetched only from one user\n per tenant.\n \"\"\"\n def _publish(admin, user, manager):\n try:\n for raw_resource in rutils.retry(3, manager.list):\n queue.append((admin, user, raw_resource))\n except Exception:\n LOG.exception(\n \"Seems like %s.%s.list(self) method is broken. \"\n \"It shouldn't raise any exceptions.\"\n % (manager.__module__, type(manager).__name__))\n\n if self.admin and (not self.users\n or self.manager_cls._perform_for_admin_only):\n manager = self.manager_cls(\n admin=self._get_cached_client(self.admin))\n _publish(self.admin, None, manager)\n\n else:\n visited_tenants = set()\n admin_client = self._get_cached_client(self.admin)\n for user in self.users:\n if (self.manager_cls._tenant_resource\n and user[\"tenant_id\"] in visited_tenants):\n continue\n\n visited_tenants.add(user[\"tenant_id\"])\n manager = self.manager_cls(\n admin=admin_client,\n user=self._get_cached_client(user),\n tenant_uuid=user[\"tenant_id\"])\n _publish(self.admin, user, manager)\n\n def _consumer(self, cache, args):\n \"\"\"Method that consumes single deletion job.\"\"\"\n admin, user, raw_resource = args\n\n manager = self.manager_cls(\n resource=raw_resource,\n admin=self._get_cached_client(admin),\n user=self._get_cached_client(user),\n tenant_uuid=user and user[\"tenant_id\"])\n\n if (isinstance(manager.name(), base.NoName)\n or rutils.name_matches_object(\n manager.name(), *self.resource_classes,\n task_id=self.task_id, exact=False)):\n self._delete_single_resource(manager)\n\n def exterminate(self):\n \"\"\"Delete all resources for passed users, admin and resource_mgr.\"\"\"\n\n 
broker.run(self._publisher, self._consumer,\n consumers_count=self.manager_cls._threads)\n\n\ndef list_resource_names(admin_required=None):\n \"\"\"List all resource managers names.\n\n Returns all service names and all combination of service.resource names.\n\n :param admin_required: None -> returns all ResourceManagers\n True -> returns only admin ResourceManagers\n False -> returns only non admin ResourceManagers\n \"\"\"\n res_mgrs = discover.itersubclasses(base.ResourceManager)\n if admin_required is not None:\n res_mgrs = filter(lambda cls: cls._admin_required == admin_required,\n res_mgrs)\n\n names = set()\n for cls in res_mgrs:\n names.add(cls._service)\n names.add(\"%s.%s\" % (cls._service, cls._resource))\n\n return names\n\n\ndef find_resource_managers(names=None, admin_required=None):\n \"\"\"Returns resource managers.\n\n :param names: List of names in format <service> or <service>.<resource>\n that is used for filtering resource manager classes\n :param admin_required: None -> returns all ResourceManagers\n True -> returns only admin ResourceManagers\n False -> returns only non admin ResourceManagers\n \"\"\"\n names = set(names or [])\n\n resource_managers = []\n for manager in discover.itersubclasses(base.ResourceManager):\n if admin_required is not None:\n if admin_required != manager._admin_required:\n continue\n\n if (manager._service in names\n or \"%s.%s\" % (manager._service, manager._resource) in names):\n resource_managers.append(manager)\n\n resource_managers.sort(key=lambda x: x._order)\n\n found_names = set()\n for mgr in resource_managers:\n found_names.add(mgr._service)\n found_names.add(\"%s.%s\" % (mgr._service, mgr._resource))\n\n missing = names - found_names\n if missing:\n LOG.warning(\"Missing resource managers: %s\" % \", \".join(missing))\n\n return resource_managers\n\n\ndef cleanup(names=None, admin_required=None, admin=None, users=None,\n superclass=plugin.Plugin, task_id=None):\n \"\"\"Generic cleaner.\n\n This method goes 
through all plugins. Filter those and left only plugins\n with _service from services or _resource from resources.\n\n Then goes through all passed users and using cleaners cleans all related\n resources.\n\n :param names: Use only resource managers that have names in this list.\n There are in as _service or\n (%s.%s % (_service, _resource)) from\n :param admin_required: If None -> return all plugins\n If True -> return only admin plugins\n If False -> return only non admin plugins\n :param admin: rally.deployment.credential.Credential that corresponds to\n OpenStack admin.\n :param users: List of OpenStack users that was used during testing.\n Every user has next structure:\n {\n \"id\": <uuid1>,\n \"tenant_id\": <uuid2>,\n \"credential\": <rally.deployment.credential.Credential>\n }\n :param superclass: The plugin superclass to perform cleanup\n for. E.g., this could be\n ``rally.task.scenario.Scenario`` to cleanup all\n Scenario resources.\n :param task_id: The UUID of task\n \"\"\"\n resource_classes = [cls for cls in discover.itersubclasses(superclass)\n if issubclass(cls, rutils.RandomNameGeneratorMixin)]\n if not resource_classes and issubclass(superclass,\n rutils.RandomNameGeneratorMixin):\n resource_classes.append(superclass)\n for manager in find_resource_managers(names, admin_required):\n LOG.debug(\"Cleaning up %(service)s %(resource)s objects\"\n % {\"service\": manager._service,\n \"resource\": manager._resource})\n SeekAndDestroy(manager, admin, users,\n resource_classes=resource_classes,\n task_id=task_id).exterminate()\n" }, { "alpha_fraction": 0.6330887079238892, "alphanum_fraction": 0.6377584934234619, "avg_line_length": 41.82857131958008, "blob_id": "39d16d6bd5a37d122340d1475f4407a6535e17b2", "content_id": "96192c647ac6fed42c36f175301d1854cc683025", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2998, "license_type": "permissive", "max_line_length": 78, "num_lines": 70, 
"path": "/tests/unit/task/scenarios/ironic/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.ironic import utils\nfrom tests.unit import test\n\nIRONIC_UTILS = \"rally_openstack.task.scenarios.ironic.utils\"\n\n\nclass IronicScenarioTestCase(test.ScenarioTestCase):\n\n @mock.patch(\"%s.utils.wait_for_status\" % IRONIC_UTILS)\n def test__create_node(self, mock_wait_for_status):\n self.admin_clients(\"ironic\").node.create.return_value = \"fake_node\"\n scenario = utils.IronicScenario(self.context)\n scenario.generate_random_name = mock.Mock()\n\n scenario._create_node(driver=\"fake\", properties=\"fake_prop\",\n fake_param=\"foo\")\n\n self.admin_clients(\"ironic\").node.create.assert_called_once_with(\n driver=\"fake\", properties=\"fake_prop\", fake_param=\"foo\",\n name=scenario.generate_random_name.return_value)\n self.assertTrue(mock_wait_for_status.called)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"ironic.create_node\")\n\n @mock.patch(\"%s.utils.wait_for_status\" % IRONIC_UTILS)\n def test__delete_node(self, mock_wait_for_status):\n mock_node_delete = mock.Mock()\n self.admin_clients(\"ironic\").node.delete = mock_node_delete\n scenario = utils.IronicScenario(self.context)\n scenario._delete_node(mock.Mock(uuid=\"fake_id\"))\n 
self.assertTrue(mock_wait_for_status.called)\n\n self.admin_clients(\"ironic\").node.delete.assert_called_once_with(\n \"fake_id\")\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"ironic.delete_node\")\n\n def test__list_nodes(self):\n self.admin_clients(\"ironic\").node.list.return_value = [\"fake\"]\n scenario = utils.IronicScenario(self.context)\n fake_params = {\n \"sort_dir\": \"foo1\",\n \"associated\": \"foo2\",\n \"detail\": True,\n \"maintenance\": \"foo5\"\n }\n return_nodes_list = scenario._list_nodes(**fake_params)\n self.assertEqual([\"fake\"], return_nodes_list)\n self.admin_clients(\"ironic\").node.list.assert_called_once_with(\n sort_dir=\"foo1\", associated=\"foo2\", detail=True,\n maintenance=\"foo5\")\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"ironic.list_nodes\")\n" }, { "alpha_fraction": 0.574908435344696, "alphanum_fraction": 0.5816850066184998, "avg_line_length": 39.74626922607422, "blob_id": "629d060a4f4cd61ac330be1b06f5d53f4e3f6795", "content_id": "4505400647a433dc0d6e24b3f00419b3896513e8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5460, "license_type": "permissive", "max_line_length": 77, "num_lines": 134, "path": "/rally_openstack/task/contexts/magnum/ca_certs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\n\nfrom rally.common import utils as rutils\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.magnum import utils as magnum_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"ca_certs\", platform=\"openstack\", order=490)\nclass CaCertGenerator(context.OpenStackContext):\n \"\"\"Creates ca certs.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"directory\": {\n \"type\": \"string\",\n }\n },\n \"additionalProperties\": False\n }\n\n def _generate_csr_and_key(self):\n \"\"\"Return a dict with a new csr and key.\"\"\"\n from cryptography.hazmat import backends\n from cryptography.hazmat.primitives.asymmetric import rsa\n from cryptography.hazmat.primitives import hashes\n from cryptography.hazmat.primitives import serialization\n from cryptography import x509\n from cryptography.x509.oid import NameOID\n\n key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=backends.default_backend())\n\n csr = x509.CertificateSigningRequestBuilder().subject_name(\n x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, u\"admin\"),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME,\n u\"system:masters\")\n ])).sign(key, hashes.SHA256(), backends.default_backend())\n\n result = {\n \"csr\": csr.public_bytes(encoding=serialization.Encoding.PEM),\n \"key\": key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()),\n }\n\n return result\n\n def setup(self):\n for user, tenant_id in self._iterate_per_tenants():\n\n magnum_scenario = magnum_utils.MagnumScenario({\n \"user\": user,\n \"task\": 
self.context[\"task\"],\n \"config\": {\"api_versions\": self.context[\"config\"].get(\n \"api_versions\", [])}\n })\n\n # get the cluster and cluster_template\n cluster_uuid = str(self.context[\"tenants\"][tenant_id][\"cluster\"])\n cluster = magnum_scenario._get_cluster(cluster_uuid)\n cluster_template = magnum_scenario._get_cluster_template(\n cluster.cluster_template_id)\n\n if not cluster_template.tls_disabled:\n tls = self._generate_csr_and_key()\n dir = \"\"\n if self.config.get(\"directory\") is not None:\n dir = self.config.get(\"directory\")\n self.context[\"ca_certs_directory\"] = dir\n fname = os.path.join(dir, cluster_uuid + \".key\")\n with open(fname, \"w\") as key_file:\n key_file.write(tls[\"key\"])\n # get CA certificate for this cluster\n ca_cert = magnum_scenario._get_ca_certificate(cluster_uuid)\n fname = os.path.join(dir, cluster_uuid + \"_ca.crt\")\n with open(fname, \"w\") as ca_cert_file:\n ca_cert_file.write(ca_cert.pem)\n # send csr to Magnum to have it signed\n csr_req = {\"cluster_uuid\": cluster_uuid,\n \"csr\": tls[\"csr\"]}\n cert = magnum_scenario._create_ca_certificate(csr_req)\n fname = os.path.join(dir, cluster_uuid + \".crt\")\n with open(fname, \"w\") as cert_file:\n cert_file.write(cert.pem)\n\n def cleanup(self):\n for user, tenant_id in rutils.iterate_per_tenants(\n self.context[\"users\"]):\n\n magnum_scenario = magnum_utils.MagnumScenario({\n \"user\": user,\n \"task\": self.context[\"task\"],\n \"config\": {\"api_versions\": self.context[\"config\"].get(\n \"api_versions\", [])}\n })\n\n # get the cluster and cluster_template\n cluster_uuid = str(self.context[\"tenants\"][tenant_id][\"cluster\"])\n cluster = magnum_scenario._get_cluster(cluster_uuid)\n cluster_template = magnum_scenario._get_cluster_template(\n cluster.cluster_template_id)\n\n if not cluster_template.tls_disabled:\n dir = self.context[\"ca_certs_directory\"]\n fname = os.path.join(dir, cluster_uuid + \".key\")\n os.remove(fname)\n fname = 
os.path.join(dir, cluster_uuid + \"_ca.crt\")\n os.remove(fname)\n fname = os.path.join(dir, cluster_uuid + \".crt\")\n os.remove(fname)\n" }, { "alpha_fraction": 0.7137296795845032, "alphanum_fraction": 0.7165604829788208, "avg_line_length": 42.476924896240234, "blob_id": "bf8696ff5da2190a352a4c4408ccaa7c93c8dc90", "content_id": "37b22555df0d5b868a9eaaa37b41f33ca2e51e82", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2826, "license_type": "permissive", "max_line_length": 78, "num_lines": 65, "path": "/tests/unit/task/scenarios/nova/test_hypervisors.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013 Cisco Systems Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.nova import hypervisors\nfrom tests.unit import test\n\n\nclass NovaHypervisorsTestCase(test.ScenarioTestCase):\n def test_list_hypervisors(self):\n scenario = hypervisors.ListHypervisors(self.context)\n scenario._list_hypervisors = mock.Mock()\n scenario.run(detailed=False)\n scenario._list_hypervisors.assert_called_once_with(False)\n\n def test_list_and_get_hypervisors(self):\n scenario = hypervisors.ListAndGetHypervisors(self.context)\n scenario._list_hypervisors = mock.MagicMock(detailed=False)\n scenario._get_hypervisor = mock.MagicMock()\n scenario.run(detailed=False)\n\n scenario._list_hypervisors.assert_called_once_with(False)\n for hypervisor in scenario._list_hypervisors.return_value:\n scenario._get_hypervisor.assert_called_once_with(hypervisor)\n\n def test_statistics_hypervisors(self):\n scenario = hypervisors.StatisticsHypervisors(self.context)\n scenario._statistics_hypervisors = mock.Mock()\n scenario.run()\n scenario._statistics_hypervisors.assert_called_once_with()\n\n def test_list_and_get_uptime_hypervisors(self):\n scenario = hypervisors.ListAndGetUptimeHypervisors(self.context)\n scenario._list_hypervisors = mock.MagicMock(detailed=False)\n scenario._uptime_hypervisor = mock.MagicMock()\n scenario.run(detailed=False)\n\n scenario._list_hypervisors.assert_called_once_with(False)\n for hypervisor in scenario._list_hypervisors.return_value:\n scenario._uptime_hypervisor.assert_called_once_with(hypervisor)\n\n def test_list_and_search_hypervisors(self):\n fake_hypervisors = [mock.Mock(hypervisor_hostname=\"fake_hostname\")]\n scenario = hypervisors.ListAndSearchHypervisors(self.context)\n scenario._list_hypervisors = mock.MagicMock(\n return_value=fake_hypervisors)\n scenario._search_hypervisors = mock.MagicMock()\n scenario.run(detailed=False)\n\n 
scenario._list_hypervisors.assert_called_once_with(False)\n scenario._search_hypervisors.assert_called_once_with(\n \"fake_hostname\")\n" }, { "alpha_fraction": 0.6752136945724487, "alphanum_fraction": 0.6771129965782166, "avg_line_length": 30.909090042114258, "blob_id": "2b8e65d86e613df736b8f08fe99ec02fe0ec73e4", "content_id": "3d078aaa741d1d11d5c61beeb3fe5ebaf07e934e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1053, "license_type": "permissive", "max_line_length": 78, "num_lines": 33, "path": "/.zuul.d/README.rst", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "=====================\nZuul V3 configuration\n=====================\n\nZuul is a pipeline-oriented project gating system. It facilitates running\ntests and automated tasks in response to Code Review events.\n\nSee `official doc\n<https://docs.openstack.org/infra/system-config/zuulv3.html>`_ for more\ndetails.\n\nWhat do we have in this dir?\n---------------------------------\n\n.. note:: Do not document all files and jobs here. It will (for sure) become\n outdated at some point.\n\n* **zuul.yaml** - the main configuration file. It contains a list of jobs\n which should be launched at CI for rally-openstack project\n\n* **base.yaml** - the second by importance file. 
It contains basic parent\n jobs.\n\n* All other files are named as like a job for which they include definition.\n\nWhere are the actual job playbooks?\n-----------------------------------\n\nUnfortunately, Zuul defines *zuul.d* (as like *.zuul.d*) as a directory for\nproject configuration and job definitions.\n\nAnsible roles, tasks cannot be here, so we placed them at *tests/ci/playbooks*\ndirectory.\n" }, { "alpha_fraction": 0.5184653997421265, "alphanum_fraction": 0.521154522895813, "avg_line_length": 37.73611068725586, "blob_id": "8a78c4a316c08ecc0ee9ce825af391ea11921386", "content_id": "d7c739f077929e885fca53981dd082a5387039bf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5578, "license_type": "permissive", "max_line_length": 78, "num_lines": 144, "path": "/tests/unit/task/contexts/sahara/test_sahara_job_binaries.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.sahara import sahara_job_binaries\nfrom rally_openstack.task.scenarios.sahara import utils as sahara_utils\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.sahara\"\n\n\nclass SaharaJobBinariesTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(SaharaJobBinariesTestCase, self).setUp()\n self.tenants_num = 2\n self.users_per_tenant = 2\n self.users = self.tenants_num * self.users_per_tenant\n self.task = mock.MagicMock()\n\n self.tenants = {}\n self.users_key = []\n\n for i in range(self.tenants_num):\n self.tenants[str(i)] = {\"id\": str(i), \"name\": str(i),\n \"sahara\": {\"image\": \"42\"}}\n for j in range(self.users_per_tenant):\n self.users_key.append({\"id\": \"%s_%s\" % (str(i), str(j)),\n \"tenant_id\": str(i),\n \"credential\": \"credential\"})\n\n self.user_key = [{\"id\": i, \"tenant_id\": j, \"credential\": \"credential\"}\n for j in range(self.tenants_num)\n for i in range(self.users_per_tenant)]\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": self.tenants_num,\n \"users_per_tenant\": self.users_per_tenant,\n },\n \"sahara_job_binaries\": {\n \"libs\": [\n {\n \"name\": \"test.jar\",\n \"download_url\": \"http://example.com/test.jar\"\n }\n ],\n \"mains\": [\n {\n \"name\": \"test.jar\",\n \"download_url\": \"http://example.com/test.jar\"\n }\n ]\n },\n },\n \"admin\": {\"credential\": mock.MagicMock()},\n \"task\": mock.MagicMock(),\n \"users\": self.users_key,\n \"tenants\": self.tenants\n })\n\n @mock.patch(\"%s.sahara_job_binaries.resource_manager.cleanup\" % CTX)\n @mock.patch((\"%s.sahara_job_binaries.SaharaJobBinaries.\"\n \"download_and_save_lib\") % CTX)\n @mock.patch(\"%s.sahara_job_binaries.osclients\" % CTX)\n def test_setup_and_cleanup(\n self,\n mock_osclients,\n mock_sahara_job_binaries_download_and_save_lib,\n 
mock_cleanup):\n\n mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()\n\n sahara_ctx = sahara_job_binaries.SaharaJobBinaries(self.context)\n\n download_calls = []\n\n for i in range(self.tenants_num):\n download_calls.append(mock.call(\n sahara=mock_sahara,\n lib_type=\"mains\",\n name=\"test.jar\",\n download_url=\"http://example.com/test.jar\",\n tenant_id=str(i)))\n download_calls.append(mock.call(\n sahara=mock_sahara,\n lib_type=\"libs\",\n name=\"test.jar\",\n download_url=\"http://example.com/test.jar\",\n tenant_id=str(i)))\n\n sahara_ctx.setup()\n\n (mock_sahara_job_binaries_download_and_save_lib.\n assert_has_calls(download_calls))\n\n sahara_ctx.cleanup()\n\n mock_cleanup.assert_called_once_with(\n names=[\"sahara.job_binary_internals\", \"sahara.job_binaries\"],\n users=self.context[\"users\"],\n superclass=sahara_utils.SaharaScenario,\n task_id=self.context[\"task\"][\"uuid\"])\n\n @mock.patch(\"%s.sahara_job_binaries.requests\" % CTX)\n @mock.patch(\"%s.sahara_job_binaries.osclients\" % CTX)\n def test_download_and_save_lib(self, mock_osclients, mock_requests):\n\n mock_requests.get.content.return_value = \"some_binary_content\"\n mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()\n mock_sahara.job_binary_internals.create.return_value = (\n mock.MagicMock(id=42))\n\n sahara_ctx = sahara_job_binaries.SaharaJobBinaries(self.context)\n\n sahara_ctx.context[\"tenants\"][\"0\"][\"sahara\"] = {\"mains\": []}\n sahara_ctx.context[\"tenants\"][\"0\"][\"sahara\"][\"libs\"] = []\n\n sahara_ctx.download_and_save_lib(sahara=mock_sahara,\n lib_type=\"mains\",\n name=\"test_binary\",\n download_url=\"http://somewhere\",\n tenant_id=\"0\")\n\n sahara_ctx.download_and_save_lib(sahara=mock_sahara,\n lib_type=\"libs\",\n name=\"test_binary_2\",\n download_url=\"http://somewhere\",\n tenant_id=\"0\")\n\n mock_requests.get.assert_called_once_with(\"http://somewhere\")\n" }, { "alpha_fraction": 0.5356897711753845, "alphanum_fraction": 
0.5398077964782715, "avg_line_length": 35.42499923706055, "blob_id": "af0f08f52a49dd1243b163d0ce118ae45bd0be98", "content_id": "aae983f319e83e197f443f076aed6f24269609dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2914, "license_type": "permissive", "max_line_length": 78, "num_lines": 80, "path": "/tests/functional/test_cli_task.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport unittest\n\nfrom tests.functional import utils\n\n\nclass TaskTestCase(unittest.TestCase):\n\n def test_specify_version_by_deployment(self):\n rally = utils.Rally()\n deployment = rally(\"deployment config\", getjson=True)\n deployment[\"openstack\"][\"api_info\"] = {\n \"fakedummy\": {\n \"version\": \"2\",\n \"service_type\": \"dummyv2\"\n }\n }\n deployment = utils.JsonTempFile(deployment)\n rally(\"deployment create --name t_create_with_api_info \"\n \"--filename %s\" % deployment.filename)\n self.assertIn(\"t_create_with_api_info\", rally(\"deployment list\"))\n\n config = {\n \"FakeDummy.openstack_api\": [\n {\n \"runner\": {\n \"type\": \"constant\",\n \"times\": 1,\n \"concurrency\": 1\n }\n }\n ]\n }\n config = utils.TaskConfig(config)\n plugins = \"tests/functional/extra/fake_dir/fake_plugin.py\"\n rally(\"--plugin-paths %s task start --task %s\" % (\n plugins, config.filename))\n\n def 
test_specify_version_by_deployment_with_existing_users(self):\n rally = utils.Rally()\n deployment = rally(\"deployment config\", getjson=True)\n deployment[\"openstack\"][\"users\"] = [deployment[\"openstack\"][\"admin\"]]\n deployment[\"openstack\"][\"api_info\"] = {\n \"fakedummy\": {\n \"version\": \"2\",\n \"service_type\": \"dummyv2\"\n }\n }\n deployment = utils.JsonTempFile(deployment)\n rally(\"deployment create --name t_create_with_api_info \"\n \"--filename %s\" % deployment.filename)\n self.assertIn(\"t_create_with_api_info\", rally(\"deployment list\"))\n config = {\n \"FakeDummy.openstack_api\": [\n {\n \"runner\": {\n \"type\": \"constant\",\n \"times\": 1,\n \"concurrency\": 1\n }\n }\n ]\n }\n config = utils.TaskConfig(config)\n plugins = \"tests/functional/extra/fake_dir/fake_plugin.py\"\n rally(\"--plugin-paths %s task start --task %s\" % (\n plugins, config.filename))\n" }, { "alpha_fraction": 0.6528158783912659, "alphanum_fraction": 0.654081404209137, "avg_line_length": 36.0390625, "blob_id": "b53f96496aa0cd2f1e737753a700815001fcb528", "content_id": "12fb05ab7bf459e08b597cec259a78359aeeca41", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4741, "license_type": "permissive", "max_line_length": 78, "num_lines": 128, "path": "/tests/unit/common/services/image/test_glance_common.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\nimport uuid\n\nfrom glanceclient import exc as glance_exc\n\nfrom rally import exceptions\n\nfrom rally_openstack.common import service\nfrom rally_openstack.common.services.image import glance_common\nfrom rally_openstack.common.services.image import image\nfrom tests.unit import test\n\n\nclass FullGlance(service.Service, glance_common.GlanceMixin):\n \"\"\"Implementation of GlanceMixin with Service base class.\"\"\"\n pass\n\n\nclass GlanceMixinTestCase(test.TestCase):\n def setUp(self):\n super(GlanceMixinTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.glance = self.clients.glance.return_value\n self.name_generator = mock.MagicMock()\n self.version = \"some\"\n self.service = FullGlance(\n clients=self.clients, name_generator=self.name_generator)\n self.service.version = self.version\n\n def test__get_client(self):\n self.assertEqual(self.glance,\n self.service._get_client())\n\n def test_get_image(self):\n image = \"image_id\"\n self.assertEqual(self.glance.images.get.return_value,\n self.service.get_image(image))\n self.glance.images.get.assert_called_once_with(image)\n\n def test_get_image_exception(self):\n image_id = \"image_id\"\n self.glance.images.get.side_effect = glance_exc.HTTPNotFound\n\n self.assertRaises(exceptions.GetResourceNotFound,\n self.service.get_image, image_id)\n\n def test_delete_image(self):\n image = \"image_id\"\n self.service.delete_image(image)\n self.glance.images.delete.assert_called_once_with(image)\n\n def test_download_image(self):\n image_id = \"image_id\"\n self.service.download_image(image_id)\n self.glance.images.data.assert_called_once_with(image_id,\n do_checksum=True)\n\n\nclass FullUnifiedGlance(glance_common.UnifiedGlanceMixin,\n service.Service):\n \"\"\"Implementation of UnifiedGlanceMixin with Service base class.\"\"\"\n pass\n\n\nclass 
UnifiedGlanceMixinTestCase(test.TestCase):\n def setUp(self):\n super(UnifiedGlanceMixinTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.name_generator = mock.MagicMock()\n self.impl = mock.MagicMock()\n self.version = \"some\"\n self.service = FullUnifiedGlance(\n clients=self.clients, name_generator=self.name_generator)\n self.service._impl = self.impl\n self.service.version = self.version\n\n def test__unify_image(self):\n class Image(object):\n def __init__(self, visibility=None, is_public=None, status=None):\n self.id = uuid.uuid4()\n self.name = str(uuid.uuid4())\n self.visibility = visibility\n self.is_public = is_public\n self.status = status\n\n visibility = \"private\"\n image_obj = Image(visibility=visibility)\n unified_image = self.service._unify_image(image_obj)\n self.assertIsInstance(unified_image, image.UnifiedImage)\n self.assertEqual(image_obj.id, unified_image.id)\n self.assertEqual(image_obj.visibility, unified_image.visibility)\n\n image_obj = Image(is_public=\"public\")\n del image_obj.visibility\n unified_image = self.service._unify_image(image_obj)\n self.assertEqual(image_obj.id, unified_image.id)\n self.assertEqual(image_obj.is_public, unified_image.visibility)\n\n def test_get_image(self):\n image_id = \"image_id\"\n self.service.get_image(image=image_id)\n self.service._impl.get_image.assert_called_once_with(image=image_id)\n\n def test_delete_image(self):\n image_id = \"image_id\"\n self.service.delete_image(image_id)\n self.service._impl.delete_image.assert_called_once_with(\n image_id=image_id)\n\n def test_download_image(self):\n image_id = \"image_id\"\n self.service.download_image(image_id)\n self.service._impl.download_image.assert_called_once_with(\n image_id, do_checksum=True)\n" }, { "alpha_fraction": 0.7151810526847839, "alphanum_fraction": 0.7207520604133606, "avg_line_length": 30.217391967773438, "blob_id": "8fb47359cd08d85dfc830ca58ac5dabc94dea9d4", "content_id": "fa35bc2694313563b3d79ba4dd437ec75c3c16a3", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1436, "license_type": "permissive", "max_line_length": 78, "num_lines": 46, "path": "/rally_openstack/verification/tempest/consts.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import utils\n\n\nclass _TempestApiTestSets(utils.ImmutableMixin, utils.EnumMixin):\n BAREMETAL = \"baremetal\"\n CLUSTERING = \"clustering\"\n COMPUTE = \"compute\"\n DATA_PROCESSING = \"data_processing\"\n DATABASE = \"database\"\n IDENTITY = \"identity\"\n IMAGE = \"image\"\n MESSAGING = \"messaging\"\n NETWORK = \"network\"\n OBJECT_STORAGE = \"object_storage\"\n ORCHESTRATION = \"orchestration\"\n TELEMETRY = \"telemetry\"\n VOLUME = \"volume\"\n\n\nclass _TempestScenarioTestSets(utils.ImmutableMixin, utils.EnumMixin):\n SCENARIO = \"scenario\"\n\n\nclass _TempestTestSets(utils.ImmutableMixin, utils.EnumMixin):\n FULL = \"full\"\n SMOKE = \"smoke\"\n\n\nTempestApiTestSets = _TempestApiTestSets()\nTempestScenarioTestSets = _TempestScenarioTestSets()\nTempestTestSets = _TempestTestSets()\n" }, { "alpha_fraction": 0.5797807574272156, "alphanum_fraction": 0.5879009366035461, "avg_line_length": 40.39495849609375, "blob_id": "df9e28353d9017b3bac6aa5b76b590f4b37dd566", "content_id": 
"9c24378ed3e6d4635728c98c624ca2cf547958c5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4926, "license_type": "permissive", "max_line_length": 75, "num_lines": 119, "path": "/tests/unit/task/scenarios/designate/test_basic.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014 Hewlett-Packard Development Company, L.P.\n#\n# Author: Endre Karlson <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\nfrom rally_openstack.task.scenarios.designate import basic\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.scenarios.designate.basic\"\n\n\nclass DesignateBasicTestCase(test.ScenarioTestCase):\n\n @mock.patch(\"%s.CreateAndListZones._list_zones\" % BASE)\n @mock.patch(\"%s.CreateAndListZones._create_zone\" % BASE)\n def test_create_and_list_zones(self,\n mock__create_zone,\n mock__list_zones):\n mock__create_zone.return_value = \"Area_51\"\n mock__list_zones.return_value = [\"Area_51\",\n \"Siachen\",\n \"Bagram\"]\n # Positive case:\n basic.CreateAndListZones(self.context).run()\n mock__create_zone.assert_called_once_with()\n mock__list_zones.assert_called_once_with()\n\n # Negative case: zone isn't created\n mock__create_zone.return_value = None\n self.assertRaises(exceptions.RallyAssertionError,\n basic.CreateAndListZones(self.context).run)\n 
mock__create_zone.assert_called_with()\n\n # Negative case: created zone not in the list of available zones\n mock__create_zone.return_value = \"HAARP\"\n self.assertRaises(exceptions.RallyAssertionError,\n basic.CreateAndListZones(self.context).run)\n mock__create_zone.assert_called_with()\n mock__list_zones.assert_called_with()\n\n @mock.patch(\"%s.CreateAndDeleteZone._delete_zone\" % BASE)\n @mock.patch(\"%s.CreateAndDeleteZone._create_zone\" % BASE,\n return_value={\"id\": \"123\"})\n def test_create_and_delete_zone(self,\n mock__create_zone,\n mock__delete_zone):\n basic.CreateAndDeleteZone(self.context).run()\n\n mock__create_zone.assert_called_once_with()\n mock__delete_zone.assert_called_once_with(\"123\")\n\n @mock.patch(\"%s.ListZones._list_zones\" % BASE)\n def test_list_zones(self, mock_list_zones__list_zones):\n basic.ListZones(self.context).run()\n mock_list_zones__list_zones.assert_called_once_with()\n\n @mock.patch(\"%s.ListRecordsets._list_recordsets\" % BASE)\n def test_list_recordsets(self, mock__list_recordsets):\n basic.ListRecordsets(self.context).run(\"123\")\n mock__list_recordsets.assert_called_once_with(\"123\")\n\n @mock.patch(\"%s.CreateAndDeleteRecordsets._delete_recordset\" % BASE)\n @mock.patch(\"%s.CreateAndDeleteRecordsets._create_recordset\" % BASE,\n return_value={\"id\": \"321\"})\n def test_create_and_delete_recordsets(self,\n mock__create_recordset,\n mock__delete_recordset):\n zone = {\"id\": \"1234\"}\n self.context.update({\n \"tenant\": {\n \"zones\": [zone]\n }\n })\n\n recordsets_per_zone = 5\n\n basic.CreateAndDeleteRecordsets(self.context).run(\n recordsets_per_zone=recordsets_per_zone)\n self.assertEqual(mock__create_recordset.mock_calls,\n [mock.call(zone)]\n * recordsets_per_zone)\n self.assertEqual(mock__delete_recordset.mock_calls,\n [mock.call(zone[\"id\"],\n \"321\")]\n * recordsets_per_zone)\n\n @mock.patch(\"%s.CreateAndListRecordsets._list_recordsets\" % BASE)\n 
@mock.patch(\"%s.CreateAndListRecordsets._create_recordset\" % BASE)\n def test_create_and_list_recordsets(self,\n mock__create_recordset,\n mock__list_recordsets):\n zone = {\"id\": \"1234\"}\n self.context.update({\n \"tenant\": {\n \"zones\": [zone]\n }\n })\n recordsets_per_zone = 5\n\n basic.CreateAndListRecordsets(self.context).run(\n recordsets_per_zone=recordsets_per_zone)\n self.assertEqual(mock__create_recordset.mock_calls,\n [mock.call(zone)]\n * recordsets_per_zone)\n mock__list_recordsets.assert_called_once_with(zone[\"id\"])\n" }, { "alpha_fraction": 0.7159420251846313, "alphanum_fraction": 0.7223188281059265, "avg_line_length": 37.33333206176758, "blob_id": "2df79b66104953a6cd6a7ffb7dcf348d10fd93c2", "content_id": "f773fb4e899a7b6c34ad0cf1befa43d2a79f1ade", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1725, "license_type": "permissive", "max_line_length": 79, "num_lines": 45, "path": "/rally_openstack/task/contexts/network/networking_agents.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2019 Ericsson Software Technology\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task import context\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"networking_agents\", platform=\"openstack\", order=349)\nclass NetworkingAgents(context.OpenStackContext):\n \"\"\"This context supports querying Neutron agents in Rally.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"additionalProperties\": False,\n }\n\n def setup(self):\n nc = osclients.Clients(self.context[\"admin\"][\"credential\"]).neutron()\n agents = nc.list_agents()[\"agents\"]\n # NOTE(bence romsics): If you ever add input parameters to this context\n # beware that here we use the same key in self.context as is used for\n # parameter passing, so we'll overwrite it.\n self.context[\"networking_agents\"] = agents\n\n def cleanup(self):\n \"\"\"Neutron agents were not created by Rally, so nothing to do.\"\"\"\n" }, { "alpha_fraction": 0.6957268714904785, "alphanum_fraction": 0.6974446773529053, "avg_line_length": 36.55644989013672, "blob_id": "983a45b9ee42ffdfd20966a1db6e5b391bafa08b", "content_id": "6769dc2898f34098a4417729c8d8dbf0240f063c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4657, "license_type": "permissive", "max_line_length": 79, "num_lines": 124, "path": "/rally_openstack/task/scenarios/nova/hypervisors.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Cisco Systems Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.nova import utils\n\n\n\"\"\"Scenarios for Nova hypervisors.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"NovaHypervisors.list_hypervisors\",\n platform=\"openstack\")\nclass ListHypervisors(utils.NovaScenario):\n\n def run(self, detailed=True):\n \"\"\"List hypervisors.\n\n Measure the \"nova hypervisor-list\" command performance.\n\n :param detailed: True if the hypervisor listing should contain\n detailed information about all of them\n \"\"\"\n self._list_hypervisors(detailed)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"NovaHypervisors.list_and_get_hypervisors\",\n platform=\"openstack\")\nclass ListAndGetHypervisors(utils.NovaScenario):\n\n def run(self, detailed=True):\n \"\"\"List and Get hypervisors.\n\n The scenario first lists all hypervisors, then get detailed information\n of the listed hypervisors in turn.\n\n Measure the \"nova hypervisor-show\" command performance.\n\n :param detailed: True if the hypervisor listing should contain\n detailed information about all of them\n \"\"\"\n hypervisors = self._list_hypervisors(detailed)\n\n for hypervisor in hypervisors:\n self._get_hypervisor(hypervisor)\n\n\[email 
protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"NovaHypervisors.statistics_hypervisors\",\n platform=\"openstack\")\nclass StatisticsHypervisors(utils.NovaScenario):\n\n def run(self):\n \"\"\"Get hypervisor statistics over all compute nodes.\n\n Measure the \"nova hypervisor-stats\" command performance.\n \"\"\"\n self._statistics_hypervisors()\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"NovaHypervisors.list_and_get_uptime_hypervisors\",\n platform=\"openstack\")\nclass ListAndGetUptimeHypervisors(utils.NovaScenario):\n\n def run(self, detailed=True):\n \"\"\"List hypervisors,then display the uptime of it.\n\n The scenario first list all hypervisors,then display\n the uptime of the listed hypervisors in turn.\n\n Measure the \"nova hypervisor-uptime\" command performance.\n\n :param detailed: True if the hypervisor listing should contain\n detailed information about all of them\n \"\"\"\n hypervisors = self._list_hypervisors(detailed)\n\n for hypervisor in hypervisors:\n if hypervisor.state == \"up\":\n self._uptime_hypervisor(hypervisor)\n\n\[email protected](\"required_services\", services=[consts.Service.NOVA])\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"NovaHypervisors.list_and_search_hypervisors\",\n platform=\"openstack\")\nclass ListAndSearchHypervisors(utils.NovaScenario):\n\n def run(self, detailed=True):\n \"\"\"List all servers belonging to specific hypervisor.\n\n The scenario first list all hypervisors,then find its hostname,\n then list all servers belonging to the hypervisor\n\n Measure the \"nova hypervisor-servers <hostname>\" command performance.\n\n :param detailed: True if the hypervisor listing should contain\n detailed information 
about all of them\n \"\"\"\n hypervisors = self._list_hypervisors(detailed)\n\n for hypervisor in hypervisors:\n self._search_hypervisors(hypervisor.hypervisor_hostname)\n" }, { "alpha_fraction": 0.5413323640823364, "alphanum_fraction": 0.5553274750709534, "avg_line_length": 46.84821319580078, "blob_id": "541f72aec56113ab55083f90d848027e10114561", "content_id": "b8979f4d8acb982f776d89b1e84086cf075b32e9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5359, "license_type": "permissive", "max_line_length": 79, "num_lines": 112, "path": "/rally_openstack/common/cfg/heat.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\n\nOPTS = {\"openstack\": [\n cfg.FloatOpt(\"heat_stack_create_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time(in sec) to sleep after creating a resource before \"\n \"polling for it status.\"),\n cfg.FloatOpt(\"heat_stack_create_timeout\",\n default=3600.0,\n deprecated_group=\"benchmark\",\n help=\"Time(in sec) to wait for heat stack to be created.\"),\n cfg.FloatOpt(\"heat_stack_create_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time interval(in sec) between checks when waiting for \"\n \"stack creation.\"),\n cfg.FloatOpt(\"heat_stack_delete_timeout\",\n default=3600.0,\n deprecated_group=\"benchmark\",\n help=\"Time(in sec) to wait for heat stack to be deleted.\"),\n cfg.FloatOpt(\"heat_stack_delete_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time interval(in sec) between checks when waiting for \"\n \"stack deletion.\"),\n cfg.FloatOpt(\"heat_stack_check_timeout\",\n default=3600.0,\n deprecated_group=\"benchmark\",\n help=\"Time(in sec) to wait for stack to be checked.\"),\n cfg.FloatOpt(\"heat_stack_check_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time interval(in sec) between checks when waiting for \"\n \"stack checking.\"),\n cfg.FloatOpt(\"heat_stack_update_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time(in sec) to sleep after updating a resource before \"\n \"polling for it status.\"),\n cfg.FloatOpt(\"heat_stack_update_timeout\",\n default=3600.0,\n deprecated_group=\"benchmark\",\n help=\"Time(in sec) to wait for stack to be updated.\"),\n cfg.FloatOpt(\"heat_stack_update_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time interval(in sec) between checks when waiting for \"\n \"stack update.\"),\n 
cfg.FloatOpt(\"heat_stack_suspend_timeout\",\n default=3600.0,\n deprecated_group=\"benchmark\",\n help=\"Time(in sec) to wait for stack to be suspended.\"),\n cfg.FloatOpt(\"heat_stack_suspend_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time interval(in sec) between checks when waiting for \"\n \"stack suspend.\"),\n cfg.FloatOpt(\"heat_stack_resume_timeout\",\n default=3600.0,\n deprecated_group=\"benchmark\",\n help=\"Time(in sec) to wait for stack to be resumed.\"),\n cfg.FloatOpt(\"heat_stack_resume_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time interval(in sec) between checks when waiting for \"\n \"stack resume.\"),\n cfg.FloatOpt(\"heat_stack_snapshot_timeout\",\n default=3600.0,\n deprecated_group=\"benchmark\",\n help=\"Time(in sec) to wait for stack snapshot to \"\n \"be created.\"),\n cfg.FloatOpt(\"heat_stack_snapshot_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time interval(in sec) between checks when waiting for \"\n \"stack snapshot to be created.\"),\n cfg.FloatOpt(\"heat_stack_restore_timeout\",\n default=3600.0,\n deprecated_group=\"benchmark\",\n help=\"Time(in sec) to wait for stack to be restored from \"\n \"snapshot.\"),\n cfg.FloatOpt(\"heat_stack_restore_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time interval(in sec) between checks when waiting for \"\n \"stack to be restored.\"),\n cfg.FloatOpt(\"heat_stack_scale_timeout\",\n default=3600.0,\n deprecated_group=\"benchmark\",\n help=\"Time (in sec) to wait for stack to scale up or down.\"),\n cfg.FloatOpt(\"heat_stack_scale_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time interval (in sec) between checks when waiting for \"\n \"a stack to scale up or down.\")\n]}\n" }, { "alpha_fraction": 0.6683893203735352, "alphanum_fraction": 0.6706861853599548, "avg_line_length": 39.5, "blob_id": "485078f4a91514fdd68f8866468a98b452699909", 
"content_id": "cccc2ef771b2a841a36d8bd2f7ce3336c96e8864", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3483, "license_type": "permissive", "max_line_length": 78, "num_lines": 86, "path": "/tests/unit/task/scenarios/murano/test_packages.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.murano import packages\nfrom tests.unit import test\n\nMURANO_SCENARIO = (\"rally_openstack.task.scenarios.murano.\"\n \"packages.MuranoPackages\")\n\n\nclass MuranoPackagesTestCase(test.TestCase):\n\n def setUp(self):\n super(MuranoPackagesTestCase, self).setUp()\n self.mock_remove = mock.patch(\"os.remove\")\n self.mock_remove.start()\n\n def tearDown(self):\n super(MuranoPackagesTestCase, self).tearDown()\n self.mock_remove.stop()\n\n def mock_modules(self, scenario):\n scenario._import_package = mock.Mock()\n scenario._zip_package = mock.Mock()\n scenario._list_packages = mock.Mock()\n scenario._delete_package = mock.Mock()\n scenario._update_package = mock.Mock()\n scenario._filter_applications = mock.Mock()\n\n def test_make_zip_import_and_list_packages(self):\n scenario = packages.ImportAndListPackages()\n self.mock_modules(scenario)\n scenario.run(\"foo_package.zip\")\n 
scenario._import_package.assert_called_once_with(\n scenario._zip_package.return_value)\n scenario._zip_package.assert_called_once_with(\"foo_package.zip\")\n scenario._list_packages.assert_called_once_with(\n include_disabled=False)\n\n def test_import_and_delete_package(self):\n scenario = packages.ImportAndDeletePackage()\n self.mock_modules(scenario)\n fake_package = mock.Mock()\n scenario._import_package.return_value = fake_package\n scenario.run(\"foo_package.zip\")\n scenario._import_package.assert_called_once_with(\n scenario._zip_package.return_value)\n scenario._delete_package.assert_called_once_with(fake_package)\n\n def test_package_lifecycle(self):\n scenario = packages.PackageLifecycle()\n self.mock_modules(scenario)\n fake_package = mock.Mock()\n scenario._import_package.return_value = fake_package\n scenario.run(\"foo_package.zip\", {\"category\": \"Web\"}, \"add\")\n scenario._import_package.assert_called_once_with(\n scenario._zip_package.return_value)\n scenario._update_package.assert_called_once_with(\n fake_package, {\"category\": \"Web\"}, \"add\")\n scenario._delete_package.assert_called_once_with(fake_package)\n\n def test_import_and_filter_applications(self):\n scenario = packages.ImportAndFilterApplications()\n self.mock_modules(scenario)\n fake_package = mock.Mock()\n scenario._import_package.return_value = fake_package\n scenario.run(\"foo_package.zip\", {\"category\": \"Web\"})\n scenario._import_package.assert_called_once_with(\n scenario._zip_package.return_value)\n scenario._filter_applications.assert_called_once_with(\n {\"category\": \"Web\"}\n )\n" }, { "alpha_fraction": 0.5345073938369751, "alphanum_fraction": 0.5386323928833008, "avg_line_length": 36.07646942138672, "blob_id": "ee075d9d762f7795be7dbab8ad33e23571e5cd02", "content_id": "655c89981f483868e287b3b9b5e0e09ac98de8fa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6303, "license_type": 
"permissive", "max_line_length": 78, "num_lines": 170, "path": "/tests/unit/task/scenarios/sahara/test_clusters.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.sahara import clusters\nfrom tests.unit import test\n\nBASE = \"rally_openstack.task.scenarios.sahara.clusters\"\n\n\nclass SaharaClustersTestCase(test.ScenarioTestCase):\n\n @mock.patch(\"%s.CreateAndDeleteCluster._delete_cluster\" % BASE)\n @mock.patch(\"%s.CreateAndDeleteCluster._launch_cluster\" % BASE,\n return_value=mock.MagicMock(id=42))\n def test_create_and_delete_cluster(self,\n mock_launch_cluster,\n mock_delete_cluster):\n scenario = clusters.CreateAndDeleteCluster(self.context)\n\n scenario.context = {\n \"tenant\": {\n \"sahara\": {\n \"image\": \"test_image\",\n }\n }\n }\n\n scenario.run(master_flavor=\"test_flavor_m\",\n worker_flavor=\"test_flavor_w\",\n workers_count=5,\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\")\n\n mock_launch_cluster.assert_called_once_with(\n flavor_id=None,\n master_flavor_id=\"test_flavor_m\",\n worker_flavor_id=\"test_flavor_w\",\n image_id=\"test_image\",\n workers_count=5,\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n floating_ip_pool=None,\n volumes_per_node=None,\n volumes_size=None,\n auto_security_group=None,\n 
security_groups=None,\n node_configs=None,\n cluster_configs=None,\n enable_anti_affinity=False,\n enable_proxy=False,\n use_autoconfig=True)\n\n mock_delete_cluster.assert_called_once_with(\n mock_launch_cluster.return_value)\n\n @mock.patch(\"%s.CreateAndDeleteCluster._delete_cluster\" % BASE)\n @mock.patch(\"%s.CreateAndDeleteCluster._launch_cluster\" % BASE,\n return_value=mock.MagicMock(id=42))\n def test_create_and_delete_cluster_deprecated_flavor(self,\n mock_launch_cluster,\n mock_delete_cluster):\n scenario = clusters.CreateAndDeleteCluster(self.context)\n\n scenario.context = {\n \"tenant\": {\n \"sahara\": {\n \"image\": \"test_image\",\n }\n }\n }\n scenario.run(flavor=\"test_deprecated_arg\",\n master_flavor=None,\n worker_flavor=None,\n workers_count=5,\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\")\n\n mock_launch_cluster.assert_called_once_with(\n flavor_id=\"test_deprecated_arg\",\n master_flavor_id=None,\n worker_flavor_id=None,\n image_id=\"test_image\",\n workers_count=5,\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n floating_ip_pool=None,\n volumes_per_node=None,\n volumes_size=None,\n auto_security_group=None,\n security_groups=None,\n node_configs=None,\n cluster_configs=None,\n enable_anti_affinity=False,\n enable_proxy=False,\n use_autoconfig=True)\n\n mock_delete_cluster.assert_called_once_with(\n mock_launch_cluster.return_value)\n\n @mock.patch(\"%s.CreateScaleDeleteCluster._delete_cluster\" % BASE)\n @mock.patch(\"%s.CreateScaleDeleteCluster._scale_cluster\" % BASE)\n @mock.patch(\"%s.CreateScaleDeleteCluster._launch_cluster\" % BASE,\n return_value=mock.MagicMock(id=42))\n def test_create_scale_delete_cluster(self,\n mock_launch_cluster,\n mock_scale_cluster,\n mock_delete_cluster):\n self.clients(\"sahara\").clusters.get.return_value = mock.MagicMock(\n id=42, status=\"active\"\n )\n scenario = clusters.CreateScaleDeleteCluster(self.context)\n\n scenario.context = {\n \"tenant\": {\n 
\"sahara\": {\n \"image\": \"test_image\",\n }\n }\n }\n scenario.run(master_flavor=\"test_flavor_m\",\n worker_flavor=\"test_flavor_w\",\n workers_count=5,\n deltas=[1, -1],\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\")\n\n mock_launch_cluster.assert_called_once_with(\n flavor_id=None,\n master_flavor_id=\"test_flavor_m\",\n worker_flavor_id=\"test_flavor_w\",\n image_id=\"test_image\",\n workers_count=5,\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n floating_ip_pool=None,\n volumes_per_node=None,\n volumes_size=None,\n auto_security_group=None,\n security_groups=None,\n node_configs=None,\n cluster_configs=None,\n enable_anti_affinity=False,\n enable_proxy=False,\n use_autoconfig=True)\n\n mock_scale_cluster.assert_has_calls([\n mock.call(\n self.clients(\"sahara\").clusters.get.return_value,\n 1),\n mock.call(\n self.clients(\"sahara\").clusters.get.return_value,\n -1),\n ])\n\n mock_delete_cluster.assert_called_once_with(\n self.clients(\"sahara\").clusters.get.return_value)\n" }, { "alpha_fraction": 0.5464926362037659, "alphanum_fraction": 0.5538336038589478, "avg_line_length": 34.708736419677734, "blob_id": "bcfa3f1b3f7d8bdd529b49d179dab197daeafe99", "content_id": "2aca146302cd105f0bea44386888c6b598604571", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3678, "license_type": "permissive", "max_line_length": 78, "num_lines": 103, "path": "/tests/unit/task/contexts/nova/test_keypairs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Rackspace UK\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.nova import keypairs\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.nova\"\n\n\nclass KeyPairContextTestCase(test.TestCase):\n\n def setUp(self):\n super(KeyPairContextTestCase, self).setUp()\n self.users = 2\n\n task = {\"uuid\": \"foo_task_id\"}\n self.ctx_with_keys = {\n \"users\": [\n {\n \"keypair\": {\n \"id\": \"key_id_1\",\n \"key\": \"key_1\",\n \"name\": \"key_name_1\"\n },\n \"credential\": \"credential_1\"\n },\n {\n \"keypair\": {\n \"id\": \"key_id_2\",\n \"key\": \"key_2\",\n \"name\": \"key_name_2\"\n },\n \"credential\": \"credential_2\"\n },\n ],\n \"task\": task\n }\n self.ctx_without_keys = {\n \"users\": [{\"credential\": \"credential_1\"},\n {\"credential\": \"credential_2\"}],\n \"task\": task\n }\n\n def test_keypair_setup(self):\n keypair_ctx = keypairs.Keypair(self.ctx_without_keys)\n keypair_ctx._generate_keypair = mock.Mock(side_effect=[\n {\"id\": \"key_id_1\", \"key\": \"key_1\", \"name\": \"key_name_1\"},\n {\"id\": \"key_id_2\", \"key\": \"key_2\", \"name\": \"key_name_2\"},\n ])\n\n keypair_ctx.setup()\n self.assertEqual(keypair_ctx.context, self.ctx_with_keys)\n\n keypair_ctx._generate_keypair.assert_has_calls(\n [mock.call(\"credential_1\"), mock.call(\"credential_2\")])\n\n @mock.patch(\"%s.keypairs.resource_manager.cleanup\" % CTX)\n def test_keypair_cleanup(self, mock_cleanup):\n keypair_ctx = keypairs.Keypair(self.ctx_with_keys)\n keypair_ctx.cleanup()\n mock_cleanup.assert_called_once_with(\n names=[\"nova.keypairs\"],\n 
users=self.ctx_with_keys[\"users\"],\n superclass=keypairs.Keypair,\n task_id=self.ctx_with_keys[\"task\"][\"uuid\"])\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_keypair_generate(self, mock_clients):\n mock_keypairs = mock_clients.return_value.nova.return_value.keypairs\n mock_keypair = mock_keypairs.create.return_value\n mock_keypair.public_key = \"public_key\"\n mock_keypair.private_key = \"private_key\"\n mock_keypair.id = \"key_id\"\n keypair_ctx = keypairs.Keypair(self.ctx_without_keys)\n keypair_ctx.generate_random_name = mock.Mock()\n\n key = keypair_ctx._generate_keypair(\"credential\")\n\n self.assertEqual({\n \"id\": \"key_id\",\n \"name\": keypair_ctx.generate_random_name.return_value,\n \"private\": \"private_key\",\n \"public\": \"public_key\"\n }, key)\n\n mock_clients.assert_has_calls([\n mock.call().nova().keypairs.create(\n keypair_ctx.generate_random_name.return_value),\n ])\n" }, { "alpha_fraction": 0.599564254283905, "alphanum_fraction": 0.6027233004570007, "avg_line_length": 41.30414581298828, "blob_id": "5296b7aac5246b6c8955710ffa7c21cb97a701d3", "content_id": "911d9df1dbf8324f1df5ed22814d1875d524a282", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9180, "license_type": "permissive", "max_line_length": 78, "num_lines": 217, "path": "/rally_openstack/verification/tempest/manager.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport re\nimport shutil\nimport subprocess\n\nimport yaml\n\nfrom rally import exceptions\nfrom rally.plugins.verification import testr\nfrom rally.verification import manager\nfrom rally.verification import utils\n\nfrom rally_openstack.verification.tempest import config\nfrom rally_openstack.verification.tempest import consts\n\n\nAVAILABLE_SETS = (list(consts.TempestTestSets)\n + list(consts.TempestApiTestSets)\n + list(consts.TempestScenarioTestSets))\n\n\[email protected](name=\"tempest\", platform=\"openstack\",\n default_repo=\"https://opendev.org/openstack/tempest\",\n context={\"tempest\": {}, \"testr\": {}})\nclass TempestManager(testr.TestrLauncher):\n \"\"\"Tempest verifier.\n\n **Description**:\n\n Quote from official documentation:\n\n This is a set of integration tests to be run against a live OpenStack\n cluster. Tempest has batteries of tests for OpenStack API validation,\n Scenarios, and other specific tests useful in validating an OpenStack\n deployment.\n\n Rally supports features listed below:\n\n * *cloning Tempest*: repository and version can be specified\n * *installation*: system-wide with checking existence of required\n packages or in virtual environment\n * *configuration*: options are discovered via OpenStack API, but you can\n override them if you need\n * *running*: pre-creating all required resources(i.e images, tenants,\n etc), prepare arguments, launching Tempest, live-progress output\n * *results*: all verifications are stored in db, you can built reports,\n compare verification at whatever you want time.\n\n Appeared in Rally 0.8.0 *(actually, it appeared long time ago with first\n revision of Verification Component, but 0.8.0 is mentioned since it is\n first release after Verification Component redesign)*\n \"\"\"\n\n RUN_ARGS = {\"set\": \"Name of predefined set of tests. 
Known names: %s\"\n % \", \".join(AVAILABLE_SETS)}\n\n @property\n def run_environ(self):\n env = super(TempestManager, self).run_environ\n env[\"TEMPEST_CONFIG_DIR\"] = os.path.dirname(self.configfile)\n env[\"TEMPEST_CONFIG\"] = os.path.basename(self.configfile)\n # TODO(andreykurilin): move it to Testr base class\n env[\"OS_TEST_PATH\"] = os.path.join(self.repo_dir,\n \"tempest/test_discover\")\n return env\n\n @property\n def configfile(self):\n return os.path.join(self.home_dir, \"tempest.conf\")\n\n def validate_args(self, args):\n \"\"\"Validate given arguments.\"\"\"\n super(TempestManager, self).validate_args(args)\n\n if args.get(\"pattern\"):\n pattern = args[\"pattern\"].split(\"=\", 1)\n if len(pattern) == 1:\n pass # it is just a regex\n elif pattern[0] == \"set\":\n if pattern[1] not in AVAILABLE_SETS:\n raise exceptions.ValidationError(\n \"Test set '%s' not found in available \"\n \"Tempest test sets. Available sets are '%s'.\"\n % (pattern[1], \"', '\".join(AVAILABLE_SETS)))\n else:\n raise exceptions.ValidationError(\n \"'pattern' argument should be a regexp or set name \"\n \"(format: 'tempest.api.identity.v3', 'set=smoke').\")\n\n def configure(self, extra_options=None):\n \"\"\"Configure Tempest.\"\"\"\n utils.create_dir(self.home_dir)\n tcm = config.TempestConfigfileManager(self.verifier.env)\n return tcm.create(self.configfile, extra_options)\n\n def is_configured(self):\n \"\"\"Check whether Tempest is configured or not.\"\"\"\n return os.path.exists(self.configfile)\n\n def get_configuration(self):\n \"\"\"Get Tempest configuration.\"\"\"\n with open(self.configfile) as f:\n return f.read()\n\n def extend_configuration(self, extra_options):\n \"\"\"Extend Tempest configuration with extra options.\"\"\"\n return utils.extend_configfile(extra_options, self.configfile)\n\n def override_configuration(self, new_configuration):\n \"\"\"Override Tempest configuration by new configuration.\"\"\"\n with open(self.configfile, \"w\") as f:\n 
f.write(new_configuration)\n\n def install_extension(self, source, version=None, extra_settings=None):\n \"\"\"Install a Tempest plugin.\"\"\"\n if extra_settings:\n raise NotImplementedError(\n \"'%s' verifiers don't support extra installation settings \"\n \"for extensions.\" % self.get_name())\n version = version or \"master\"\n egg = re.sub(r\"\\.git$\", \"\", os.path.basename(source.strip(\"/\")))\n full_source = \"git+{0}@{1}#egg={2}\".format(source, version, egg)\n # NOTE(ylobankov): Use 'develop mode' installation to provide an\n # ability to advanced users to change tests or\n # develop new ones in verifier repo on the fly.\n cmd = [\"pip\", \"install\",\n \"--src\", os.path.join(self.base_dir, \"extensions\"),\n \"-e\", full_source]\n if self.verifier.system_wide:\n cmd.insert(2, \"--no-deps\")\n utils.check_output(cmd, cwd=self.base_dir, env=self.environ)\n\n # Very often Tempest plugins are inside projects and requirements\n # for plugins are listed in the test-requirements.txt file.\n test_reqs_path = os.path.join(self.base_dir, \"extensions\",\n egg, \"test-requirements.txt\")\n if os.path.exists(test_reqs_path):\n if not self.verifier.system_wide:\n utils.check_output([\"pip\", \"install\", \"-r\", test_reqs_path],\n cwd=self.base_dir, env=self.environ)\n else:\n self.check_system_wide(reqs_file_path=test_reqs_path)\n\n def list_extensions(self):\n \"\"\"List all installed Tempest plugins.\"\"\"\n # TODO(andreykurilin): find a better way to list tempest plugins\n cmd = (\"from tempest.test_discover import plugins; \"\n \"plugins_manager = plugins.TempestTestPluginManager(); \"\n \"plugins_map = plugins_manager.get_plugin_load_tests_tuple(); \"\n \"plugins_list = [\"\n \" {'name': p.name, \"\n \" 'entry_point': p.entry_point_target, \"\n \" 'location': plugins_map[p.name][1]} \"\n \" for p in plugins_manager.ext_plugins.extensions]; \"\n \"print(plugins_list)\")\n try:\n output = utils.check_output([\"python\", \"-c\", cmd],\n cwd=self.base_dir, 
env=self.environ,\n debug_output=False).strip()\n except subprocess.CalledProcessError:\n raise exceptions.RallyException(\n \"Cannot list installed Tempest plugins for verifier %s.\" %\n self.verifier)\n\n return yaml.safe_load(output)\n\n def uninstall_extension(self, name):\n \"\"\"Uninstall a Tempest plugin.\"\"\"\n for ext in self.list_extensions():\n if ext[\"name\"] == name and os.path.exists(ext[\"location\"]):\n shutil.rmtree(ext[\"location\"])\n break\n else:\n raise exceptions.RallyException(\n \"There is no Tempest plugin with name '%s'. \"\n \"Are you sure that it was installed?\" % name)\n\n def list_tests(self, pattern=\"\"):\n \"\"\"List all Tempest tests.\"\"\"\n if pattern:\n pattern = self._transform_pattern(pattern)\n return super(TempestManager, self).list_tests(pattern)\n\n def prepare_run_args(self, run_args):\n \"\"\"Prepare 'run_args' for testr context.\"\"\"\n if run_args.get(\"pattern\"):\n run_args[\"pattern\"] = self._transform_pattern(run_args[\"pattern\"])\n return run_args\n\n @staticmethod\n def _transform_pattern(pattern):\n \"\"\"Transform pattern into Tempest-specific pattern.\"\"\"\n parsed_pattern = pattern.split(\"=\", 1)\n if len(parsed_pattern) == 2:\n if parsed_pattern[0] == \"set\":\n if parsed_pattern[1] in consts.TempestTestSets:\n return \"smoke\" if parsed_pattern[1] == \"smoke\" else \"\"\n elif parsed_pattern[1] in consts.TempestApiTestSets:\n return \"tempest.api.%s\" % parsed_pattern[1]\n else:\n return \"tempest.%s\" % parsed_pattern[1]\n\n return pattern # it is just a regex\n" }, { "alpha_fraction": 0.6001790761947632, "alphanum_fraction": 0.6048802137374878, "avg_line_length": 41.141510009765625, "blob_id": "6d46731c1e6a4e966e9b24182526abf00fce1168", "content_id": "74442f2bb3671882afc2f8e4da7bf3c851de0c4f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4467, "license_type": "permissive", "max_line_length": 78, "num_lines": 106, "path": 
"/tests/unit/common/services/heat/test_main.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.common.services.heat import main\nfrom tests.unit import test\n\n\nclass Stack(main.Stack):\n def __init__(self):\n self.scenario = mock.Mock()\n\n\nclass StackTestCase(test.ScenarioTestCase):\n\n @mock.patch(\"rally_openstack.common.services.heat.main.open\",\n create=True)\n def test___init__(self, mock_open):\n reads = [mock.Mock(), mock.Mock()]\n reads[0].read.return_value = \"template_contents\"\n reads[1].read.return_value = \"file1_contents\"\n mock_open.side_effect = reads\n stack = main.Stack(\"scenario\", \"task\", \"template\",\n parameters=\"parameters\",\n files={\"f1_name\": \"f1_path\"})\n self.assertEqual(\"template_contents\", stack.template)\n self.assertEqual({\"f1_name\": \"file1_contents\"}, stack.files)\n self.assertEqual([mock.call(\"template\"), mock.call(\"f1_path\")],\n mock_open.mock_calls)\n reads[0].read.assert_called_once_with()\n reads[1].read.assert_called_once_with()\n\n @mock.patch(\"rally_openstack.common.services.heat.main.utils\")\n def test__wait(self, mock_utils):\n fake_stack = mock.Mock()\n stack = Stack()\n stack.stack = fake_stack = mock.Mock()\n stack._wait([\"ready_statuses\"], [\"failure_statuses\"])\n mock_utils.wait_for_status.assert_called_once_with(\n fake_stack, check_interval=1.0,\n 
ready_statuses=[\"ready_statuses\"],\n failure_statuses=[\"failure_statuses\"],\n timeout=3600.0,\n update_resource=mock_utils.get_from_manager())\n\n @mock.patch(\"rally.task.atomic\")\n @mock.patch(\"rally_openstack.common.services.heat.main.open\")\n @mock.patch(\"rally_openstack.common.services.heat.main.Stack._wait\")\n def test_create(self, mock_stack__wait, mock_open, mock_task_atomic):\n mock_scenario = mock.MagicMock(_atomic_actions=[])\n mock_scenario.generate_random_name.return_value = \"fake_name\"\n mock_open().read.return_value = \"fake_content\"\n mock_new_stack = {\n \"stack\": {\n \"id\": \"fake_id\"\n }\n }\n mock_scenario.clients(\"heat\").stacks.create.return_value = (\n mock_new_stack)\n\n stack = main.Stack(\n scenario=mock_scenario, task=mock.Mock(),\n template=mock.Mock(), files={}\n )\n stack.create()\n mock_scenario.clients(\"heat\").stacks.create.assert_called_once_with(\n files={}, parameters=None, stack_name=\"fake_name\",\n template=\"fake_content\"\n )\n mock_scenario.clients(\"heat\").stacks.get.assert_called_once_with(\n \"fake_id\")\n mock_stack__wait.assert_called_once_with([\"CREATE_COMPLETE\"],\n [\"CREATE_FAILED\"])\n\n @mock.patch(\"rally.task.atomic\")\n @mock.patch(\"rally_openstack.common.services.heat.main.open\")\n @mock.patch(\"rally_openstack.common.services.heat.main.Stack._wait\")\n def test_update(self, mock_stack__wait, mock_open, mock_task_atomic):\n mock_scenario = mock.MagicMock(\n stack_id=\"fake_id\", _atomic_actions=[])\n mock_parameters = mock.Mock()\n mock_open().read.return_value = \"fake_content\"\n stack = main.Stack(\n scenario=mock_scenario, task=mock.Mock(),\n template=None, files={}, parameters=mock_parameters\n )\n stack.stack_id = \"fake_id\"\n stack.parameters = mock_parameters\n stack.update({\"foo\": \"bar\"})\n mock_scenario.clients(\"heat\").stacks.update.assert_called_once_with(\n \"fake_id\", files={}, template=\"fake_content\",\n parameters=mock_parameters\n )\n 
mock_stack__wait.assert_called_once_with([\"UPDATE_COMPLETE\"],\n [\"UPDATE_FAILED\"])\n" }, { "alpha_fraction": 0.6628034710884094, "alphanum_fraction": 0.6645516157150269, "avg_line_length": 39.22135543823242, "blob_id": "54ae0592869b86206f44bbc24aaf8d8191be8bb1", "content_id": "8e022d5dbdeae320d02a98a679fc5672f06f6886", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15445, "license_type": "permissive", "max_line_length": 79, "num_lines": 384, "path": "/tests/unit/task/scenarios/keystone/test_basic.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally import exceptions\nfrom rally_openstack.task.scenarios.keystone import basic\nfrom tests.unit import test\n\n\[email protected]\nclass KeystoneBasicTestCase(test.ScenarioTestCase):\n\n def get_test_context(self):\n context = super(KeystoneBasicTestCase, self).get_test_context()\n context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake_tenant_id\",\n \"name\": \"fake_tenant_name\"}\n })\n return context\n\n def setUp(self):\n super(KeystoneBasicTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.identity.identity.Identity\")\n self.addCleanup(patch.stop)\n self.mock_identity = patch.start()\n\n def test_create_user(self):\n scenario = basic.CreateUser(self.context)\n\n scenario.run(password=\"tttt\", project_id=\"id\")\n self.mock_identity.return_value.create_user.assert_called_once_with(\n password=\"tttt\", project_id=\"id\")\n\n def test_create_delete_user(self):\n identity_service = self.mock_identity.return_value\n\n fake_email = \"abcd\"\n fake_user = identity_service.create_user.return_value\n\n scenario = basic.CreateDeleteUser(self.context)\n\n scenario.run(email=fake_email, enabled=True)\n\n identity_service.create_user.assert_called_once_with(\n email=fake_email, enabled=True)\n identity_service.delete_user.assert_called_once_with(fake_user.id)\n\n def test_create_user_set_enabled_and_delete(self):\n identity_service = self.mock_identity.return_value\n\n scenario = basic.CreateUserSetEnabledAndDelete(self.context)\n\n fake_email = \"abcd\"\n fake_user = identity_service.create_user.return_value\n scenario.run(enabled=True, email=fake_email)\n\n identity_service.create_user.assert_called_once_with(\n 
email=fake_email, enabled=True)\n identity_service.update_user.assert_called_once_with(\n fake_user.id, enabled=False)\n identity_service.delete_user.assert_called_once_with(fake_user.id)\n\n def test_user_authenticate_and_validate_token(self):\n identity_service = self.mock_identity.return_value\n scenario = basic.AuthenticateUserAndValidateToken(self.context)\n\n fake_token = identity_service.fetch_token.return_value\n\n scenario.run()\n\n identity_service.fetch_token.assert_called_once_with()\n identity_service.validate_token.assert_called_once_with(fake_token)\n\n def test_create_tenant(self):\n scenario = basic.CreateTenant(self.context)\n\n scenario.run(enabled=True)\n\n self.mock_identity.return_value.create_project.assert_called_once_with(\n enabled=True)\n\n def test_create_tenant_with_users(self):\n identity_service = self.mock_identity.return_value\n\n fake_project = identity_service.create_project.return_value\n number_of_users = 1\n\n scenario = basic.CreateTenantWithUsers(self.context)\n\n scenario.run(users_per_tenant=number_of_users, enabled=True)\n\n identity_service.create_project.assert_called_once_with(enabled=True)\n identity_service.create_users.assert_called_once_with(\n fake_project.id, number_of_users=number_of_users)\n\n def test_create_and_list_users(self):\n scenario = basic.CreateAndListUsers(self.context)\n\n passwd = \"tttt\"\n project_id = \"id\"\n\n scenario.run(password=passwd, project_id=project_id)\n self.mock_identity.return_value.create_user.assert_called_once_with(\n password=passwd, project_id=project_id)\n self.mock_identity.return_value.list_users.assert_called_once_with()\n\n def test_create_and_list_tenants(self):\n identity_service = self.mock_identity.return_value\n scenario = basic.CreateAndListTenants(self.context)\n scenario.run(enabled=True)\n identity_service.create_project.assert_called_once_with(enabled=True)\n identity_service.list_projects.assert_called_once_with()\n\n def 
test_assign_and_remove_user_role(self):\n fake_tenant = self.context[\"tenant\"][\"id\"]\n fake_user = self.context[\"user\"][\"id\"]\n fake_role = mock.MagicMock()\n\n self.mock_identity.return_value.create_role.return_value = fake_role\n\n scenario = basic.AddAndRemoveUserRole(self.context)\n scenario.run()\n\n self.mock_identity.return_value.create_role.assert_called_once_with()\n self.mock_identity.return_value.add_role.assert_called_once_with(\n role_id=fake_role.id, user_id=fake_user, project_id=fake_tenant)\n\n self.mock_identity.return_value.revoke_role.assert_called_once_with(\n fake_role.id, user_id=fake_user, project_id=fake_tenant)\n\n def test_create_and_delete_role(self):\n fake_role = mock.MagicMock()\n self.mock_identity.return_value.create_role.return_value = fake_role\n\n scenario = basic.CreateAndDeleteRole(self.context)\n scenario.run()\n\n self.mock_identity.return_value.create_role.assert_called_once_with()\n self.mock_identity.return_value.delete_role.assert_called_once_with(\n fake_role.id)\n\n def test_create_and_get_role(self):\n fake_role = mock.MagicMock()\n self.mock_identity.return_value.create_role.return_value = fake_role\n\n scenario = basic.CreateAndGetRole(self.context)\n scenario.run()\n\n self.mock_identity.return_value.create_role.assert_called_once_with()\n self.mock_identity.return_value.get_role.assert_called_once_with(\n fake_role.id)\n\n def test_create_and_list_user_roles(self):\n scenario = basic.CreateAddAndListUserRoles(self.context)\n fake_tenant = self.context[\"tenant\"][\"id\"]\n fake_user = self.context[\"user\"][\"id\"]\n fake_role = mock.MagicMock()\n self.mock_identity.return_value.create_role.return_value = fake_role\n\n scenario.run()\n\n self.mock_identity.return_value.create_role.assert_called_once_with()\n self.mock_identity.return_value.add_role.assert_called_once_with(\n user_id=fake_user, role_id=fake_role.id, project_id=fake_tenant)\n 
self.mock_identity.return_value.list_roles.assert_called_once_with(\n user_id=fake_user, project_id=fake_tenant)\n\n def test_create_and_list_roles(self):\n # Positive case\n scenario = basic.CreateAddListRoles(self.context)\n create_kwargs = {\"fakewargs\": \"name\"}\n list_kwargs = {\"fakewargs\": \"f\"}\n self.mock_identity.return_value.create_role = mock.Mock(\n return_value=\"role1\")\n self.mock_identity.return_value.list_roles = mock.Mock(\n return_value=(\"role1\", \"role2\"))\n scenario.run(create_role_kwargs=create_kwargs,\n list_role_kwargs=list_kwargs)\n self.mock_identity.return_value.create_role.assert_called_once_with(\n **create_kwargs)\n self.mock_identity.return_value.list_roles.assert_called_once_with(\n **list_kwargs)\n\n # Negative case 1: role isn't created\n self.mock_identity.return_value.create_role.return_value = None\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, create_role_kwargs=create_kwargs,\n list_role_kwargs=list_kwargs)\n self.mock_identity.return_value.create_role.assert_called_with(\n **create_kwargs)\n\n # Negative case 2: role was created but included into list\n self.mock_identity.return_value.create_role.return_value = \"role3\"\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, create_role_kwargs=create_kwargs,\n list_role_kwargs=list_kwargs)\n self.mock_identity.return_value.create_role.assert_called_with(\n **create_kwargs)\n self.mock_identity.return_value.list_roles.assert_called_with(\n **list_kwargs)\n\n @ddt.data(None, \"keystone\", \"fooservice\")\n def test_get_entities(self, service_name):\n identity_service = self.mock_identity.return_value\n\n fake_project = identity_service.create_project.return_value\n fake_user = identity_service.create_user.return_value\n fake_role = identity_service.create_role.return_value\n fake_service = identity_service.create_service.return_value\n\n scenario = basic.GetEntities(self.context)\n\n scenario.run(service_name)\n\n 
identity_service.create_project.assert_called_once_with()\n identity_service.create_user.assert_called_once_with(\n project_id=fake_project.id)\n identity_service.create_role.assert_called_once_with()\n\n identity_service.get_project.assert_called_once_with(fake_project.id)\n identity_service.get_user.assert_called_once_with(fake_user.id)\n identity_service.get_role.assert_called_once_with(fake_role.id)\n\n if service_name is None:\n identity_service.create_service.assert_called_once_with()\n self.assertFalse(identity_service.get_service_by_name.called)\n identity_service.get_service.assert_called_once_with(\n fake_service.id)\n else:\n identity_service.get_service_by_name.assert_called_once_with(\n service_name)\n self.assertFalse(identity_service.create_service.called)\n identity_service.get_service.assert_called_once_with(\n identity_service.get_service_by_name.return_value.id)\n\n def test_create_and_delete_service(self):\n identity_service = self.mock_identity.return_value\n scenario = basic.CreateAndDeleteService(self.context)\n\n service_type = \"test_service_type\"\n description = \"test_description\"\n fake_service = identity_service.create_service.return_value\n\n scenario.run(service_type=service_type, description=description)\n\n identity_service.create_service.assert_called_once_with(\n service_type=service_type, description=description)\n identity_service.delete_service.assert_called_once_with(\n fake_service.id)\n\n def test_create_update_and_delete_tenant(self):\n identity_service = self.mock_identity.return_value\n\n scenario = basic.CreateUpdateAndDeleteTenant(self.context)\n\n gen_name = mock.MagicMock()\n basic.CreateUpdateAndDeleteTenant.generate_random_name = gen_name\n fake_project = identity_service.create_project.return_value\n\n scenario.run()\n\n identity_service.create_project.assert_called_once_with()\n identity_service.update_project.assert_called_once_with(\n fake_project.id, description=gen_name.return_value,\n 
name=gen_name.return_value)\n identity_service.delete_project(fake_project.id)\n\n def test_create_user_update_password(self):\n identity_service = self.mock_identity.return_value\n\n scenario = basic.CreateUserUpdatePassword(self.context)\n\n fake_password = \"pswd\"\n fake_user = identity_service.create_user.return_value\n scenario.generate_random_name = mock.MagicMock(\n return_value=fake_password)\n\n scenario.run()\n\n scenario.generate_random_name.assert_called_once_with()\n identity_service.create_user.assert_called_once_with()\n identity_service.update_user.assert_called_once_with(\n fake_user.id, password=fake_password)\n\n def test_create_and_update_user(self):\n identity_service = self.mock_identity.return_value\n\n scenario = basic.CreateAndUpdateUser(self.context)\n scenario.admin_clients(\"keystone\").users.get = mock.MagicMock()\n fake_user = identity_service.create_user.return_value\n\n create_args = {\"fakearg1\": \"f\"}\n update_args = {\"fakearg1\": \"fakearg\"}\n setattr(self.admin_clients(\"keystone\").users.get.return_value,\n \"fakearg1\", \"fakearg\")\n\n scenario.run(create_user_kwargs=create_args,\n update_user_kwargs=update_args)\n\n identity_service.create_user.assert_called_once_with(**create_args)\n identity_service.update_user.assert_called_once_with(\n fake_user.id, **update_args)\n\n def test_create_and_list_services(self):\n identity_service = self.mock_identity.return_value\n\n scenario = basic.CreateAndListServices(self.context)\n service_type = \"test_service_type\"\n description = \"test_description\"\n\n scenario.run(service_type=service_type, description=description)\n\n identity_service.create_service.assert_called_once_with(\n service_type=service_type, description=description)\n identity_service.list_services.assert_called_once_with()\n\n def test_create_and_list_ec2credentials(self):\n identity_service = self.mock_identity.return_value\n\n scenario = basic.CreateAndListEc2Credentials(self.context)\n\n scenario.run()\n\n 
identity_service.create_ec2credentials.assert_called_once_with(\n self.context[\"user\"][\"id\"],\n project_id=self.context[\"tenant\"][\"id\"])\n identity_service.list_ec2credentials.assert_called_with(\n self.context[\"user\"][\"id\"])\n\n def test_create_and_delete_ec2credential(self):\n identity_service = self.mock_identity.return_value\n\n fake_creds = identity_service.create_ec2credentials.return_value\n\n scenario = basic.CreateAndDeleteEc2Credential(self.context)\n\n scenario.run()\n\n identity_service.create_ec2credentials.assert_called_once_with(\n self.context[\"user\"][\"id\"],\n project_id=self.context[\"tenant\"][\"id\"])\n identity_service.delete_ec2credential.assert_called_once_with(\n self.context[\"user\"][\"id\"], access=fake_creds.access)\n\n def test_add_and_remove_user_role(self):\n context = self.context\n tenant_id = context[\"tenant\"][\"id\"]\n user_id = context[\"user\"][\"id\"]\n\n fake_role = mock.MagicMock()\n self.mock_identity.return_value.create_role.return_value = fake_role\n\n scenario = basic.AddAndRemoveUserRole(context)\n scenario.run()\n\n self.mock_identity.return_value.create_role.assert_called_once_with()\n self.mock_identity.return_value.add_role.assert_called_once_with(\n role_id=fake_role.id, user_id=user_id, project_id=tenant_id)\n self.mock_identity.return_value.revoke_role.assert_called_once_with(\n fake_role.id, user_id=user_id, project_id=tenant_id)\n" }, { "alpha_fraction": 0.645116925239563, "alphanum_fraction": 0.6574965715408325, "avg_line_length": 41.764705657958984, "blob_id": "9f1b72188842676d246ade72000ffd9cc5a3d50e", "content_id": "996254f2793feab2b24f97df85b657981f352c64", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1454, "license_type": "permissive", "max_line_length": 75, "num_lines": 34, "path": "/tests/unit/task/scenarios/senlin/test_clusters.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# 
Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.senlin import clusters\nfrom tests.unit import test\n\n\nclass SenlinClustersTestCase(test.ScenarioTestCase):\n\n def test_create_and_delete_cluster(self):\n mock_cluster = mock.Mock()\n self.context[\"tenant\"] = {\"profile\": \"fake_profile_id\"}\n scenario = clusters.CreateAndDeleteCluster(self.context)\n scenario._create_cluster = mock.Mock(return_value=mock_cluster)\n scenario._delete_cluster = mock.Mock()\n\n scenario.run(desired_capacity=1, min_size=0,\n max_size=3, timeout=60, metadata={\"k2\": \"v2\"})\n\n scenario._create_cluster.assert_called_once_with(\"fake_profile_id\",\n 1, 0, 3, 60,\n {\"k2\": \"v2\"})\n scenario._delete_cluster.assert_called_once_with(mock_cluster)\n" }, { "alpha_fraction": 0.5366576313972473, "alphanum_fraction": 0.5391857624053955, "avg_line_length": 39.82206344604492, "blob_id": "c88da7b0bd3378de8fff96cf92f39fe36109fdf2", "content_id": "1e2d5099a8048c973fce62c25e45e95db36eb9d9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11471, "license_type": "permissive", "max_line_length": 79, "num_lines": 281, "path": "/tests/unit/task/contexts/quotas/test_quotas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Dassault Systemes\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nfrom unittest import mock\n\nimport ddt\nfrom rally.common import logging\nfrom rally.task import context\n\nfrom rally_openstack.task.contexts.quotas import quotas\nfrom tests.unit import test\n\nQUOTAS_PATH = \"rally_openstack.task.contexts.quotas\"\n\n\[email protected]\nclass QuotasTestCase(test.TestCase):\n\n def setUp(self):\n super(QuotasTestCase, self).setUp()\n self.unlimited = -1\n self.context = {\n \"config\": {\n },\n \"tenants\": {\n \"t1\": {\"credential\": mock.MagicMock()},\n \"t2\": {\"credential\": mock.MagicMock()}},\n \"admin\": {\"credential\": mock.MagicMock()},\n \"task\": mock.MagicMock()\n }\n\n @ddt.data((\"cinder\", \"backup_gigabytes\"),\n (\"cinder\", \"backups\"),\n (\"cinder\", \"gigabytes\"),\n (\"cinder\", \"snapshots\"),\n (\"cinder\", \"volumes\"),\n (\"manila\", \"gigabytes\"),\n (\"manila\", \"share_networks\"),\n (\"manila\", \"shares\"),\n (\"manila\", \"snapshot_gigabytes\"),\n (\"manila\", \"snapshots\"),\n (\"neutron\", \"floatingip\"),\n (\"neutron\", \"health_monitor\"),\n (\"neutron\", \"network\"),\n (\"neutron\", \"pool\"),\n (\"neutron\", \"port\"),\n (\"neutron\", \"router\"),\n (\"neutron\", \"security_group\"),\n (\"neutron\", \"security_group_rule\"),\n (\"neutron\", \"subnet\"),\n (\"neutron\", \"vip\"),\n (\"nova\", \"cores\"),\n (\"nova\", \"fixed_ips\"),\n (\"nova\", \"floating_ips\"),\n (\"nova\", \"injected_file_content_bytes\"),\n (\"nova\", \"injected_file_path_bytes\"),\n (\"nova\", 
\"injected_files\"),\n (\"nova\", \"instances\"),\n (\"nova\", \"key_pairs\"),\n (\"nova\", \"metadata_items\"),\n (\"nova\", \"ram\"),\n (\"nova\", \"security_group_rules\"),\n (\"nova\", \"security_groups\"),\n (\"nova\", \"server_group_members\"),\n (\"nova\", \"server_groups\"))\n @ddt.unpack\n def test_validate(self, group, parameter):\n configs = [\n ({group: {parameter: self.unlimited}}, True),\n ({group: {parameter: 0}}, True),\n ({group: {parameter: 10000}}, True),\n ({group: {parameter: 2.5}}, False),\n ({group: {parameter: \"-1\"}}, False),\n ({group: {parameter: -2}}, False),\n ]\n for config, valid in configs:\n results = context.Context.validate(\n \"quotas\", None, None, config, vtype=\"syntax\")\n if valid:\n self.assertEqual([], results)\n else:\n self.assertGreater(len(results), 0)\n\n @mock.patch(\"%s.quotas.osclients.Clients\" % QUOTAS_PATH)\n @mock.patch(\"%s.cinder_quotas.CinderQuotas\" % QUOTAS_PATH)\n @ddt.data(True, False)\n def test_cinder_quotas(self, ex_users, mock_cinder_quotas, mock_clients):\n cinder_quo = mock_cinder_quotas.return_value\n ctx = copy.deepcopy(self.context)\n if ex_users:\n ctx[\"existing_users\"] = None\n ctx[\"config\"][\"quotas\"] = {\n \"cinder\": {\n \"volumes\": self.unlimited,\n \"snapshots\": self.unlimited,\n \"gigabytes\": self.unlimited\n }\n }\n\n tenants = ctx[\"tenants\"]\n cinder_quotas = ctx[\"config\"][\"quotas\"][\"cinder\"]\n cinder_quo.get.return_value = cinder_quotas\n with quotas.Quotas(ctx) as quotas_ctx:\n quotas_ctx.setup()\n if ex_users:\n self.assertEqual([mock.call(tenant) for tenant in tenants],\n cinder_quo.get.call_args_list)\n self.assertEqual([mock.call(tenant, **cinder_quotas)\n for tenant in tenants],\n cinder_quo.update.call_args_list)\n mock_cinder_quotas.reset_mock()\n\n if ex_users:\n self.assertEqual([mock.call(tenant, **cinder_quotas)\n for tenant in tenants],\n cinder_quo.update.call_args_list)\n else:\n self.assertEqual([mock.call(tenant) for tenant in tenants],\n 
cinder_quo.delete.call_args_list)\n\n @mock.patch(\"%s.quotas.osclients.Clients\" % QUOTAS_PATH)\n @mock.patch(\"%s.nova_quotas.NovaQuotas\" % QUOTAS_PATH)\n @ddt.data(True, False)\n def test_nova_quotas(self, ex_users, mock_nova_quotas, mock_clients):\n nova_quo = mock_nova_quotas.return_value\n ctx = copy.deepcopy(self.context)\n if ex_users:\n ctx[\"existing_users\"] = None\n\n ctx[\"config\"][\"quotas\"] = {\n \"nova\": {\n \"instances\": self.unlimited,\n \"cores\": self.unlimited,\n \"ram\": self.unlimited,\n \"floating-ips\": self.unlimited,\n \"fixed-ips\": self.unlimited,\n \"metadata_items\": self.unlimited,\n \"injected_files\": self.unlimited,\n \"injected_file_content_bytes\": self.unlimited,\n \"injected_file_path_bytes\": self.unlimited,\n \"key_pairs\": self.unlimited,\n \"security_groups\": self.unlimited,\n \"security_group_rules\": self.unlimited,\n }\n }\n\n tenants = ctx[\"tenants\"]\n nova_quotas = ctx[\"config\"][\"quotas\"][\"nova\"]\n nova_quo.get.return_value = nova_quotas\n with quotas.Quotas(ctx) as quotas_ctx:\n quotas_ctx.setup()\n if ex_users:\n self.assertEqual([mock.call(tenant) for tenant in tenants],\n nova_quo.get.call_args_list)\n self.assertEqual([mock.call(tenant, **nova_quotas)\n for tenant in tenants],\n nova_quo.update.call_args_list)\n mock_nova_quotas.reset_mock()\n\n if ex_users:\n self.assertEqual([mock.call(tenant, **nova_quotas)\n for tenant in tenants],\n nova_quo.update.call_args_list)\n else:\n self.assertEqual([mock.call(tenant) for tenant in tenants],\n nova_quo.delete.call_args_list)\n\n @mock.patch(\"%s.quotas.osclients.Clients\" % QUOTAS_PATH)\n @mock.patch(\"%s.neutron_quotas.NeutronQuotas\" % QUOTAS_PATH)\n @ddt.data(True, False)\n def test_neutron_quotas(self, ex_users, mock_neutron_quotas, mock_clients):\n neutron_quo = mock_neutron_quotas.return_value\n ctx = copy.deepcopy(self.context)\n if ex_users:\n ctx[\"existing_users\"] = None\n\n ctx[\"config\"][\"quotas\"] = {\n \"neutron\": {\n \"network\": 
self.unlimited,\n \"subnet\": self.unlimited,\n \"port\": self.unlimited,\n \"router\": self.unlimited,\n \"floatingip\": self.unlimited,\n \"security_group\": self.unlimited,\n \"security_group_rule\": self.unlimited\n }\n }\n\n tenants = ctx[\"tenants\"]\n neutron_quotas = ctx[\"config\"][\"quotas\"][\"neutron\"]\n neutron_quo.get.return_value = neutron_quotas\n with quotas.Quotas(ctx) as quotas_ctx:\n quotas_ctx.setup()\n if ex_users:\n self.assertEqual([mock.call(tenant) for tenant in tenants],\n neutron_quo.get.call_args_list)\n self.assertEqual([mock.call(tenant, **neutron_quotas)\n for tenant in tenants],\n neutron_quo.update.call_args_list)\n neutron_quo.reset_mock()\n\n if ex_users:\n self.assertEqual([mock.call(tenant, **neutron_quotas)\n for tenant in tenants],\n neutron_quo.update.call_args_list)\n else:\n self.assertEqual([mock.call(tenant) for tenant in tenants],\n neutron_quo.delete.call_args_list)\n\n @mock.patch(\"rally_openstack.task.contexts.\"\n \"quotas.quotas.osclients.Clients\")\n @mock.patch(\"rally_openstack.task.contexts.\"\n \"quotas.nova_quotas.NovaQuotas\")\n @mock.patch(\"rally_openstack.task.contexts.\"\n \"quotas.cinder_quotas.CinderQuotas\")\n @mock.patch(\"rally_openstack.task.contexts.\"\n \"quotas.neutron_quotas.NeutronQuotas\")\n def test_no_quotas(self, mock_neutron_quotas, mock_cinder_quotas,\n mock_nova_quotas, mock_clients):\n ctx = copy.deepcopy(self.context)\n if \"quotas\" in ctx[\"config\"]:\n del ctx[\"config\"][\"quotas\"]\n\n with quotas.Quotas(ctx) as quotas_ctx:\n quotas_ctx.setup()\n self.assertFalse(mock_cinder_quotas.update.called)\n self.assertFalse(mock_nova_quotas.update.called)\n self.assertFalse(mock_neutron_quotas.update.called)\n\n self.assertFalse(mock_cinder_quotas.delete.called)\n self.assertFalse(mock_nova_quotas.delete.called)\n self.assertFalse(mock_neutron_quotas.delete.called)\n\n @ddt.data(\n {\"quotas_ctxt\": {\"nova\": {\"cpu\": 1}},\n \"quotas_class_path\": \"nova_quotas.NovaQuotas\"},\n 
{\"quotas_ctxt\": {\"neutron\": {\"network\": 2}},\n \"quotas_class_path\": \"neutron_quotas.NeutronQuotas\"},\n {\"quotas_ctxt\": {\"cinder\": {\"volumes\": 3}},\n \"quotas_class_path\": \"cinder_quotas.CinderQuotas\"},\n {\"quotas_ctxt\": {\"manila\": {\"shares\": 4}},\n \"quotas_class_path\": \"manila_quotas.ManilaQuotas\"},\n {\"quotas_ctxt\": {\"designate\": {\"domains\": 5}},\n \"quotas_class_path\": \"designate_quotas.DesignateQuotas\"},\n )\n @ddt.unpack\n def test_exception_during_cleanup(self, quotas_ctxt, quotas_class_path):\n quotas_path = \"%s.%s\" % (QUOTAS_PATH, quotas_class_path)\n with mock.patch(quotas_path) as mock_quotas:\n mock_quotas.return_value.update.side_effect = Exception\n\n ctx = copy.deepcopy(self.context)\n ctx[\"config\"][\"quotas\"] = quotas_ctxt\n\n quotas_instance = quotas.Quotas(ctx)\n quotas_instance.original_quotas = []\n for service in quotas_ctxt:\n for tenant in self.context[\"tenants\"]:\n quotas_instance.original_quotas.append(\n (service, tenant, quotas_ctxt[service]))\n # NOTE(boris-42): ensure that cleanup didn't raise exceptions.\n with logging.LogCatcher(quotas.LOG) as log:\n quotas_instance.cleanup()\n\n log.assertInLogs(\"Failed to restore quotas for tenant\")\n\n self.assertEqual(mock_quotas.return_value.update.call_count,\n len(self.context[\"tenants\"]))\n" }, { "alpha_fraction": 0.5296711921691895, "alphanum_fraction": 0.535685658454895, "avg_line_length": 32.93197250366211, "blob_id": "a5cdc8f975d9ec9c979bfbf7dc3fc04d2d33d02b", "content_id": "551c4373a61bf6ccea452f7883d121f872fefd48", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4988, "license_type": "permissive", "max_line_length": 77, "num_lines": 147, "path": "/tests/unit/task/contexts/cinder/test_volumes.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you 
may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nfrom unittest import mock\n\nimport ddt\n\nfrom rally.task import context\n\nfrom rally_openstack.task.contexts.cinder import volumes\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts\"\nSERVICE = \"rally_openstack.common.services.storage\"\n\n\[email protected]\nclass VolumeGeneratorTestCase(test.ScenarioTestCase):\n\n def _gen_tenants(self, count):\n tenants = {}\n for id_ in range(count):\n tenants[str(id_)] = {\"name\": str(id_)}\n return tenants\n\n def test_init(self):\n self.context.update({\n \"config\": {\n \"volumes\": {\n \"size\": 1,\n \"volumes_per_tenant\": 5,\n }\n }\n })\n\n inst = volumes.VolumeGenerator(self.context)\n self.assertEqual(inst.config, self.context[\"config\"][\"volumes\"])\n\n @ddt.data({\"config\": {\"size\": 1, \"volumes_per_tenant\": 5}},\n {\"config\": {\"size\": 1, \"type\": None, \"volumes_per_tenant\": 5}},\n {\"config\": {\"size\": 1, \"type\": -1, \"volumes_per_tenant\": 5},\n \"valid\": False})\n @ddt.unpack\n @mock.patch(\"%s.block.BlockStorage\" % SERVICE)\n def test_setup(self, mock_block_storage, config, valid=True):\n results = context.Context.validate(\"volumes\", None, None, config)\n if valid:\n self.assertEqual([], results)\n else:\n self.assertEqual(1, len(results))\n\n from rally_openstack.common.services.storage import block\n created_volume = block.Volume(id=\"uuid\", size=config[\"size\"],\n name=\"vol\", status=\"avaiable\")\n\n mock_service = mock_block_storage.return_value\n 
mock_service.create_volume.return_value = created_volume\n users_per_tenant = 5\n volumes_per_tenant = config.get(\"volumes_per_tenant\", 5)\n tenants = self._gen_tenants(2)\n users = []\n for id_ in tenants:\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": id_,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": 2,\n \"users_per_tenant\": 5,\n \"concurrent\": 10,\n },\n \"volumes\": config\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n new_context = copy.deepcopy(self.context)\n for id_ in tenants.keys():\n new_context[\"tenants\"][id_].setdefault(\"volumes\", [])\n for i in range(volumes_per_tenant):\n new_context[\"tenants\"][id_][\"volumes\"].append(\n mock_service.create_volume.return_value._as_dict())\n\n volumes_ctx = volumes.VolumeGenerator(self.context)\n volumes_ctx.setup()\n self.assertEqual(new_context, self.context)\n\n @mock.patch(\"%s.cinder.volumes.resource_manager.cleanup\" % CTX)\n def test_cleanup(self, mock_cleanup):\n tenants_count = 2\n users_per_tenant = 5\n volumes_per_tenant = 5\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for id_ in tenants.keys():\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": id_,\n \"credential\": \"credential\"})\n tenants[id_].setdefault(\"volumes\", [])\n for j in range(volumes_per_tenant):\n tenants[id_][\"volumes\"].append({\"id\": \"uuid\"})\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": 2,\n \"users_per_tenant\": 5,\n \"concurrent\": 10,\n },\n \"volumes\": {\n \"size\": 1,\n \"volumes_per_tenant\": 5,\n }\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n volumes_ctx = volumes.VolumeGenerator(self.context)\n volumes_ctx.cleanup()\n\n mock_cleanup.assert_called_once_with(\n names=[\"cinder.volumes\"], users=self.context[\"users\"],\n 
superclass=volumes_ctx.__class__,\n task_id=self.context[\"owner_id\"])\n" }, { "alpha_fraction": 0.6744997501373291, "alphanum_fraction": 0.6796184182167053, "avg_line_length": 40.32692337036133, "blob_id": "c73d249156807e52bcaa2851ea0388f385e8c060", "content_id": "b420ec8e7606d355232974903c22c071d344ecd3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4298, "license_type": "permissive", "max_line_length": 78, "num_lines": 104, "path": "/tests/unit/task/scenarios/nova/test_keypairs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\nfrom rally_openstack.task.scenarios.nova import keypairs\nfrom tests.unit import fakes\nfrom tests.unit import test\n\n\nclass NovaKeypairTestCase(test.ScenarioTestCase):\n\n def test_create_and_list_keypairs(self):\n\n fake_nova_client = fakes.FakeNovaClient()\n fake_nova_client.keypairs.create(\"keypair\")\n fake_keypair = list(fake_nova_client.keypairs.cache.values())[0]\n\n scenario = keypairs.CreateAndListKeypairs(self.context)\n scenario._create_keypair = mock.MagicMock()\n scenario._list_keypairs = mock.MagicMock()\n\n scenario._list_keypairs.return_value = [fake_keypair] * 3\n # Positive case:\n scenario._create_keypair.return_value = fake_keypair.id\n scenario.run(fakearg=\"fakearg\")\n\n scenario._create_keypair.assert_called_once_with(fakearg=\"fakearg\")\n scenario._list_keypairs.assert_called_once_with()\n\n # Negative case1: keypair isn't created\n scenario._create_keypair.return_value = None\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, fakearg=\"fakearg\")\n scenario._create_keypair.assert_called_with(fakearg=\"fakearg\")\n\n # Negative case2: new keypair not in the list of keypairs\n scenario._create_keypair.return_value = \"fake_keypair\"\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, fakearg=\"fakearg\")\n scenario._create_keypair.assert_called_with(fakearg=\"fakearg\")\n scenario._list_keypairs.assert_called_with()\n\n def test_create_and_get_keypair(self):\n scenario = keypairs.CreateAndGetKeypair(self.context)\n fake_keypair = mock.MagicMock()\n scenario._create_keypair = mock.MagicMock()\n scenario._get_keypair = mock.MagicMock()\n\n scenario._create_keypair.return_value = fake_keypair\n scenario.run(fakearg=\"fakearg\")\n\n scenario._create_keypair.assert_called_once_with(fakearg=\"fakearg\")\n 
scenario._get_keypair.assert_called_once_with(fake_keypair)\n\n def test_create_and_delete_keypair(self):\n scenario = keypairs.CreateAndDeleteKeypair(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._create_keypair = mock.MagicMock(return_value=\"foo_keypair\")\n scenario._delete_keypair = mock.MagicMock()\n\n scenario.run(fakearg=\"fakearg\")\n\n scenario._create_keypair.assert_called_once_with(fakearg=\"fakearg\")\n scenario._delete_keypair.assert_called_once_with(\"foo_keypair\")\n\n def test_boot_and_delete_server_with_keypair(self):\n scenario = keypairs.BootAndDeleteServerWithKeypair(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario._create_keypair = mock.MagicMock(return_value=\"foo_keypair\")\n scenario._boot_server = mock.MagicMock(return_value=\"foo_server\")\n scenario._delete_server = mock.MagicMock()\n scenario._delete_keypair = mock.MagicMock()\n\n fake_server_args = {\n \"foo\": 1,\n \"bar\": 2,\n }\n\n scenario.run(\"img\", 1, boot_server_kwargs=fake_server_args,\n fake_arg1=\"foo\", fake_arg2=\"bar\")\n\n scenario._create_keypair.assert_called_once_with(\n fake_arg1=\"foo\", fake_arg2=\"bar\")\n\n scenario._boot_server.assert_called_once_with(\n \"img\", 1, foo=1, bar=2, key_name=\"foo_keypair\")\n\n scenario._delete_server.assert_called_once_with(\"foo_server\")\n\n scenario._delete_keypair.assert_called_once_with(\"foo_keypair\")\n" }, { "alpha_fraction": 0.5851365923881531, "alphanum_fraction": 0.5889583230018616, "avg_line_length": 44.45305252075195, "blob_id": "80ac429a2758e012ed34c572a9c850dacfa55795", "content_id": "0618b17a6b45a4f09fcf1b1f872fc2ff31368c3f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19363, "license_type": "permissive", "max_line_length": 78, "num_lines": 426, "path": "/tests/unit/common/services/storage/test_cinder_v2.py", "repo_name": 
"openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally.common import cfg\n\nfrom rally_openstack.common.services.storage import cinder_v2\nfrom tests.unit import fakes\nfrom tests.unit import test\n\nBASE_PATH = \"rally_openstack.common.services.storage\"\nCONF = cfg.CONF\n\n\nclass CinderV2ServiceTestCase(test.ScenarioTestCase):\n def setUp(self):\n super(CinderV2ServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.cinder = self.clients.cinder.return_value\n self.name_generator = mock.MagicMock()\n self.service = cinder_v2.CinderV2Service(\n self.clients, name_generator=self.name_generator)\n\n def atomic_actions(self):\n return self.service._atomic_actions\n\n def test_create_volume(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"volume\")\n self.service._wait_available_volume = mock.MagicMock()\n self.service._wait_available_volume.return_value = fakes.FakeVolume()\n\n return_volume = self.service.create_volume(1)\n\n kwargs = {\"name\": \"volume\",\n \"description\": None,\n \"consistencygroup_id\": None,\n \"snapshot_id\": None,\n \"source_volid\": None,\n \"volume_type\": None,\n \"availability_zone\": None,\n \"metadata\": None,\n \"imageRef\": None,\n \"scheduler_hints\": None}\n self.cinder.volumes.create.assert_called_once_with(1, **kwargs)\n 
self.service._wait_available_volume.assert_called_once_with(\n self.cinder.volumes.create.return_value)\n self.assertEqual(self.service._wait_available_volume.return_value,\n return_volume)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.create_volume\")\n\n @mock.patch(\"%s.cinder_v2.random\" % BASE_PATH)\n def test_create_volume_with_size_range(self, mock_random):\n mock_random.randint.return_value = 3\n self.service._wait_available_volume = mock.MagicMock()\n self.service._wait_available_volume.return_value = fakes.FakeVolume()\n\n return_volume = self.service.create_volume(\n size={\"min\": 1, \"max\": 5}, name=\"volume\")\n\n kwargs = {\"name\": \"volume\",\n \"description\": None,\n \"consistencygroup_id\": None,\n \"snapshot_id\": None,\n \"source_volid\": None,\n \"volume_type\": None,\n \"availability_zone\": None,\n \"metadata\": None,\n \"imageRef\": None,\n \"scheduler_hints\": None}\n self.cinder.volumes.create.assert_called_once_with(\n 3, **kwargs)\n self.service._wait_available_volume.assert_called_once_with(\n self.cinder.volumes.create.return_value)\n self.assertEqual(self.service._wait_available_volume.return_value,\n return_volume)\n\n def test_update_volume(self):\n return_value = {\"volume\": fakes.FakeVolume()}\n self.cinder.volumes.update.return_value = return_value\n\n self.assertEqual(return_value[\"volume\"],\n self.service.update_volume(1))\n self.cinder.volumes.update.assert_called_once_with(1)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.update_volume\")\n\n def test_update_volume_with_name_description(self):\n return_value = {\"volume\": fakes.FakeVolume()}\n self.cinder.volumes.update.return_value = return_value\n\n return_volume = self.service.update_volume(\n 1, name=\"volume\", description=\"fake\")\n\n self.cinder.volumes.update.assert_called_once_with(\n 1, name=\"volume\", description=\"fake\")\n self.assertEqual(return_value[\"volume\"], return_volume)\n 
self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.update_volume\")\n\n def test_list_volumes(self):\n self.assertEqual(self.cinder.volumes.list.return_value,\n self.service.list_volumes(\n detailed=False, search_opts=None, limit=1,\n marker=None, sort=None\n ))\n self.cinder.volumes.list.assert_called_once_with(\n detailed=False, search_opts=None, limit=1,\n marker=None, sort=None\n )\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.list_volumes\")\n\n def test_list_types(self):\n self.assertEqual(self.cinder.volume_types.list.return_value,\n self.service.list_types(search_opts=None,\n is_public=None))\n\n self.cinder.volume_types.list.assert_called_once_with(\n search_opts=None, is_public=None)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.list_types\")\n\n def test_create_snapshot(self):\n self.service._wait_available_volume = mock.MagicMock()\n self.service._wait_available_volume.return_value = fakes.FakeVolume()\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"snapshot\")\n\n return_snapshot = self.service.create_snapshot(1)\n\n self.cinder.volume_snapshots.create.assert_called_once_with(\n 1, name=\"snapshot\", description=None, force=False,\n metadata=None)\n self.service._wait_available_volume.assert_called_once_with(\n self.cinder.volume_snapshots.create.return_value)\n self.assertEqual(self.service._wait_available_volume.return_value,\n return_snapshot)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.create_snapshot\")\n\n def test_create_snapshot_with_name(self):\n self.service._wait_available_volume = mock.MagicMock()\n self.service._wait_available_volume.return_value = fakes.FakeVolume()\n\n return_snapshot = self.service.create_snapshot(1, name=\"snapshot\")\n\n self.cinder.volume_snapshots.create.assert_called_once_with(\n 1, name=\"snapshot\", description=None, force=False,\n metadata=None)\n 
self.service._wait_available_volume.assert_called_once_with(\n self.cinder.volume_snapshots.create.return_value)\n self.assertEqual(self.service._wait_available_volume.return_value,\n return_snapshot)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.create_snapshot\")\n\n def test_create_backup(self):\n self.service._wait_available_volume = mock.MagicMock()\n self.service._wait_available_volume.return_value = fakes.FakeVolume()\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"backup\")\n\n return_backup = self.service.create_backup(1)\n\n self.cinder.backups.create.assert_called_once_with(\n 1, name=\"backup\", description=None, container=None,\n incremental=False, force=False, snapshot_id=None)\n self.service._wait_available_volume.assert_called_once_with(\n self.cinder.backups.create.return_value)\n self.assertEqual(self.service._wait_available_volume.return_value,\n return_backup)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.create_backup\")\n\n def test_create_backup_with_name(self):\n self.service._wait_available_volume = mock.MagicMock()\n self.service._wait_available_volume.return_value = fakes.FakeVolume()\n\n return_backup = self.service.create_backup(1, name=\"backup\")\n\n self.cinder.backups.create.assert_called_once_with(\n 1, name=\"backup\", description=None, container=None,\n incremental=False, force=False, snapshot_id=None)\n self.service._wait_available_volume.assert_called_once_with(\n self.cinder.backups.create.return_value)\n self.assertEqual(self.service._wait_available_volume.return_value,\n return_backup)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.create_backup\")\n\n def test_create_volume_type(self):\n self.service.generate_random_name = mock.MagicMock(\n return_value=\"volume_type\")\n return_type = self.service.create_volume_type(name=None,\n description=None,\n is_public=True)\n\n self.cinder.volume_types.create.assert_called_once_with(\n 
name=\"volume_type\", description=None, is_public=True)\n self.assertEqual(self.cinder.volume_types.create.return_value,\n return_type)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.create_volume_type\")\n\n def test_create_volume_type_with_name_(self):\n return_type = self.service.create_volume_type(name=\"type\",\n description=None,\n is_public=True)\n\n self.cinder.volume_types.create.assert_called_once_with(\n name=\"type\", description=None, is_public=True)\n self.assertEqual(self.cinder.volume_types.create.return_value,\n return_type)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.create_volume_type\")\n\n def test_update_volume_type(self):\n volume_type = mock.Mock()\n name = \"random_name\"\n self.service.generate_random_name = mock.MagicMock(\n return_value=name)\n description = \"test update\"\n\n result = self.service.update_volume_type(\n volume_type,\n description=description,\n name=self.service.generate_random_name(),\n is_public=None\n )\n self.assertEqual(\n self.cinder.volume_types.update.return_value,\n result)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.update_volume_type\")\n\n def test_add_type_access(self):\n volume_type = mock.Mock()\n project = mock.Mock()\n type_access = self.service.add_type_access(volume_type,\n project=project)\n add_project_access = self.cinder.volume_type_access.add_project_access\n add_project_access.assert_called_once_with(\n volume_type, project)\n self.assertEqual(add_project_access.return_value,\n type_access)\n self._test_atomic_action_timer(self.atomic_actions(),\n \"cinder_v2.add_type_access\")\n\n def test_list_type_access(self):\n volume_type = mock.Mock()\n type_access = self.service.list_type_access(volume_type)\n self.cinder.volume_type_access.list.assert_called_once_with(\n volume_type)\n self.assertEqual(self.cinder.volume_type_access.list.return_value,\n type_access)\n self._test_atomic_action_timer(self.atomic_actions(),\n 
\"cinder_v2.list_type_access\")\n\n\nclass UnifiedCinderV2ServiceTestCase(test.TestCase):\n def setUp(self):\n super(UnifiedCinderV2ServiceTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.service = cinder_v2.UnifiedCinderV2Service(self.clients)\n self.service._impl = mock.MagicMock()\n\n def test__unify_volume(self):\n class SomeVolume(object):\n id = 1\n name = \"volume\"\n size = 1\n status = \"st\"\n volume = self.service._unify_volume(SomeVolume())\n self.assertEqual(1, volume.id)\n self.assertEqual(\"volume\", volume.name)\n self.assertEqual(1, volume.size)\n self.assertEqual(\"st\", volume.status)\n\n def test__unify_volume_with_dict(self):\n some_volume = {\"name\": \"volume\", \"id\": 1, \"size\": 1, \"status\": \"st\"}\n volume = self.service._unify_volume(some_volume)\n self.assertEqual(1, volume.id)\n self.assertEqual(\"volume\", volume.name)\n self.assertEqual(1, volume.size)\n self.assertEqual(\"st\", volume.status)\n\n def test__unify_snapshot(self):\n class SomeSnapshot(object):\n id = 1\n name = \"snapshot\"\n volume_id = \"volume\"\n status = \"st\"\n snapshot = self.service._unify_snapshot(SomeSnapshot())\n self.assertEqual(1, snapshot.id)\n self.assertEqual(\"snapshot\", snapshot.name)\n self.assertEqual(\"volume\", snapshot.volume_id)\n self.assertEqual(\"st\", snapshot.status)\n\n def test_create_volume(self):\n self.service._unify_volume = mock.MagicMock()\n self.assertEqual(self.service._unify_volume.return_value,\n self.service.create_volume(1))\n self.service._impl.create_volume.assert_called_once_with(\n 1, availability_zone=None, consistencygroup_id=None,\n description=None, imageRef=None,\n metadata=None, name=None,\n scheduler_hints=None, snapshot_id=None,\n source_volid=None, volume_type=None)\n self.service._unify_volume.assert_called_once_with(\n self.service._impl.create_volume.return_value)\n\n def test_list_volumes(self):\n self.service._unify_volume = mock.MagicMock()\n self.service._impl.list_volumes.return_value 
= [\"vol\"]\n self.assertEqual([self.service._unify_volume.return_value],\n self.service.list_volumes(detailed=True))\n self.service._impl.list_volumes.assert_called_once_with(\n detailed=True, limit=None, marker=None, search_opts=None,\n sort=None)\n self.service._unify_volume.assert_called_once_with(\"vol\")\n\n def test_get_volume(self):\n self.service._unify_volume = mock.MagicMock()\n self.assertEqual(self.service._unify_volume.return_value,\n self.service.get_volume(1))\n self.service._impl.get_volume.assert_called_once_with(1)\n self.service._unify_volume.assert_called_once_with(\n self.service._impl.get_volume.return_value)\n\n def test_extend_volume(self):\n self.service._unify_volume = mock.MagicMock()\n self.assertEqual(self.service._unify_volume.return_value,\n self.service.extend_volume(\"volume\", new_size=1))\n self.service._impl.extend_volume.assert_called_once_with(\"volume\",\n new_size=1)\n self.service._unify_volume.assert_called_once_with(\n self.service._impl.extend_volume.return_value)\n\n def test_update_volume(self):\n self.service._unify_volume = mock.MagicMock()\n self.assertEqual(\n self.service._unify_volume.return_value,\n self.service.update_volume(1, name=\"volume\",\n description=\"fake\"))\n self.service._impl.update_volume.assert_called_once_with(\n 1, description=\"fake\", name=\"volume\")\n self.service._unify_volume.assert_called_once_with(\n self.service._impl.update_volume.return_value)\n\n def test_list_types(self):\n self.assertEqual(\n self.service._impl.list_types.return_value,\n self.service.list_types(search_opts=None, is_public=True))\n self.service._impl.list_types.assert_called_once_with(\n search_opts=None, is_public=True)\n\n def test_create_snapshot(self):\n self.service._unify_snapshot = mock.MagicMock()\n self.assertEqual(\n self.service._unify_snapshot.return_value,\n self.service.create_snapshot(1, force=False,\n name=None,\n description=None,\n metadata=None))\n 
self.service._impl.create_snapshot.assert_called_once_with(\n 1, force=False, name=None, description=None, metadata=None)\n self.service._unify_snapshot.assert_called_once_with(\n self.service._impl.create_snapshot.return_value)\n\n def test_list_snapshots(self):\n self.service._unify_snapshot = mock.MagicMock()\n self.service._impl.list_snapshots.return_value = [\"snapshot\"]\n self.assertEqual([self.service._unify_snapshot.return_value],\n self.service.list_snapshots(detailed=True))\n self.service._impl.list_snapshots.assert_called_once_with(\n detailed=True)\n self.service._unify_snapshot.assert_called_once_with(\n \"snapshot\")\n\n def test_create_backup(self):\n self.service._unify_backup = mock.MagicMock()\n self.assertEqual(\n self.service._unify_backup.return_value,\n self.service.create_backup(1, container=None,\n name=None,\n description=None,\n incremental=False,\n force=False,\n snapshot_id=None))\n self.service._impl.create_backup.assert_called_once_with(\n 1, container=None, name=None, description=None,\n incremental=False, force=False, snapshot_id=None)\n self.service._unify_backup(\n self.service._impl.create_backup.return_value)\n\n def test_create_volume_type(self):\n self.assertEqual(\n self.service._impl.create_volume_type.return_value,\n self.service.create_volume_type(name=\"type\",\n description=\"desp\",\n is_public=True))\n self.service._impl.create_volume_type.assert_called_once_with(\n name=\"type\", description=\"desp\", is_public=True)\n\n def test_restore_backup(self):\n self.service._unify_volume = mock.MagicMock()\n self.assertEqual(self.service._unify_volume.return_value,\n self.service.restore_backup(1, volume_id=1))\n self.service._impl.restore_backup.assert_called_once_with(1,\n volume_id=1)\n self.service._unify_volume.assert_called_once_with(\n self.service._impl.restore_backup.return_value)\n" }, { "alpha_fraction": 0.4182509481906891, "alphanum_fraction": 0.4334600865840912, "avg_line_length": 27.178571701049805, "blob_id": 
"cb5554dddd008652634061d5dc0debc9a1b06219", "content_id": "c084c0f8092cb8b7302a019535648b049e55eb8b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1578, "license_type": "permissive", "max_line_length": 109, "num_lines": 56, "path": "/rally-jobs/extra/hook_example_script.sh", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nrand_int() {\n od -An -tu -N1 /dev/urandom | tr -d ' '\n}\n\ncat << EOF\n{\n \"additive\": [\n {\n \"title\": \"Statistics table from Hook\",\n \"chart_plugin\": \"StatsTable\",\n \"data\": [\n [\"Alice\", $(rand_int)],\n [\"Bob\", $(rand_int)],\n [\"Carol\", $(rand_int)]]\n },\n {\n \"title\": \"StackedArea chart from Hook\",\n \"description\": \"This is generated by ${0}\",\n \"chart_plugin\": \"StackedArea\",\n \"data\": [\n [\"Alpha\", $(rand_int)],\n [\"Beta\", $(rand_int)],\n [\"Gamma\", $(rand_int)]]\n }\n ],\n \"complete\": [\n {\n \"title\": \"Lines chart from Hook\",\n \"description\": \"Random data generated by ${0}\",\n \"chart_plugin\": \"Lines\",\n \"axis_label\": \"X-axis label\",\n \"label\": \"Y-axis label\",\n \"data\": [\n [\"Foo\", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]],\n [\"Bar\", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]],\n [\"Spam\", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]],\n [\"Quiz\", [[1, $(rand_int)], [2, $(rand_int)], [3, $(rand_int)], [4, $(rand_int)], [5, $(rand_int)]]]\n ]\n },\n {\n \"title\": \"Pie chart from Hook\",\n \"description\": \"Yet another data generated by ${0}\",\n \"chart_plugin\": \"Pie\",\n \"data\": [\n [\"Cat\", $(rand_int)],\n [\"Tiger\", $(rand_int)],\n [\"Jaguar\", $(rand_int)],\n [\"Panther\", $(rand_int)],\n [\"Lynx\", $(rand_int)]\n ]\n }\n ]\n}\nEOF\n" }, { "alpha_fraction": 0.7472256422042847, "alphanum_fraction": 
0.7533908486366272, "avg_line_length": 44.05555725097656, "blob_id": "5fea3640c1f49136a7f5b2af040681d2b91f2849", "content_id": "e77a6c624b817171dce48c5f36d198f0b32d22ce", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 811, "license_type": "permissive", "max_line_length": 133, "num_lines": 18, "path": "/Dockerfile", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "FROM xrally/xrally:3.4.0\n\n# \"rally\" user (which is selected by-default) is owner of \"/rally\" directory,\n# so there is no need to call chown or switch the user\nCOPY . /rally/xrally_openstack\nWORKDIR /rally/xrally_openstack\n\n# to install package system-wide, we need to temporary switch to root user\nUSER root\n# ensure that we have latest ca-certs\nRUN apt update && apt install --reinstall ca-certificates --yes\n# ensure that we have all system dependencies installed\nRUN pip3 install --no-cache-dir -U bindep && DEBIAN_FRONTEND=noninteractive apt install --yes $(bindep -b | tr '\\n' ' ') && apt clean\n# disabling cache since we do not expect to install other packages\nRUN pip3 install . 
--no-cache-dir --constraint ./upper-constraints.txt\n\n# switch back to rally user for avoid permission conflicts\nUSER rally\n" }, { "alpha_fraction": 0.5971208810806274, "alphanum_fraction": 0.6038256883621216, "avg_line_length": 39.56800079345703, "blob_id": "2342cebb76403a1f9d6f5796b04b48bf8d270f0c", "content_id": "ff9e3853a55ecc7280a82d40936397f47aba76c3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5071, "license_type": "permissive", "max_line_length": 79, "num_lines": 125, "path": "/tests/unit/task/scenarios/magnum/test_clusters.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally import exceptions\nfrom rally_openstack.task.scenarios.magnum import clusters\nfrom tests.unit import test\n\n\[email protected]\nclass MagnumClustersTestCase(test.ScenarioTestCase):\n\n @staticmethod\n def _get_context():\n context = test.get_test_context()\n context.update({\n \"tenant\": {\n \"id\": \"rally_tenant_id\"\n },\n \"user\": {\"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()},\n \"config\": {}\n })\n return context\n\n @ddt.data(\n {\"kwargs\": {}},\n {\"kwargs\": {\"fakearg\": \"f\"}})\n def test_list_clusters(self, kwargs):\n scenario = clusters.ListClusters()\n scenario._list_clusters = mock.Mock()\n\n scenario.run(**kwargs)\n\n scenario._list_clusters.assert_called_once_with(**kwargs)\n\n def test_create_cluster_with_existing_ct_and_list_clusters(self):\n context = self._get_context()\n scenario = clusters.CreateAndListClusters(context)\n kwargs = {\"fakearg\": \"f\"}\n fake_cluster1 = mock.Mock(uuid=\"a\")\n fake_cluster2 = mock.Mock(uuid=\"b\")\n fake_cluster3 = mock.Mock(uuid=\"c\")\n scenario._create_cluster = mock.Mock(return_value=fake_cluster1)\n scenario._list_clusters = mock.Mock(return_value=[fake_cluster1,\n fake_cluster2,\n fake_cluster3])\n\n run_kwargs = kwargs.copy()\n run_kwargs[\"cluster_template_uuid\"] = \"existing_cluster_template_uuid\"\n # Positive case\n scenario.run(2, **run_kwargs)\n\n scenario._create_cluster.assert_called_once_with(\n \"existing_cluster_template_uuid\", 2, keypair=mock.ANY, **kwargs)\n scenario._list_clusters.assert_called_once_with(**kwargs)\n\n # Negative case1: cluster isn't created\n scenario._create_cluster.return_value = None\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, 2, **run_kwargs)\n scenario._create_cluster.assert_called_with(\n \"existing_cluster_template_uuid\", 2, keypair=mock.ANY, **kwargs)\n\n # 
Negative case2: created cluster not in the list of available clusters\n scenario._create_cluster.return_value = mock.Mock(uuid=\"foo\")\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, 2, **run_kwargs)\n scenario._create_cluster.assert_called_with(\n \"existing_cluster_template_uuid\", 2, keypair=mock.ANY, **kwargs)\n scenario._list_clusters.assert_called_with(**kwargs)\n\n def test_create_and_list_clusters(self):\n context = self._get_context()\n context.update({\n \"tenant\": {\n \"cluster_template\": \"rally_cluster_template_uuid\"\n }\n })\n\n scenario = clusters.CreateAndListClusters(context)\n fake_cluster1 = mock.Mock(uuid=\"a\")\n fake_cluster2 = mock.Mock(uuid=\"b\")\n fake_cluster3 = mock.Mock(uuid=\"c\")\n kwargs = {\"fakearg\": \"f\"}\n scenario._create_cluster = mock.Mock(return_value=fake_cluster1)\n scenario._list_clusters = mock.Mock(return_value=[fake_cluster1,\n fake_cluster2,\n fake_cluster3])\n\n # Positive case\n scenario.run(2, **kwargs)\n\n scenario._create_cluster.assert_called_once_with(\n \"rally_cluster_template_uuid\", 2, keypair=mock.ANY, **kwargs)\n scenario._list_clusters.assert_called_once_with(**kwargs)\n\n # Negative case1: cluster isn't created\n scenario._create_cluster.return_value = None\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, 2, **kwargs)\n scenario._create_cluster.assert_called_with(\n \"rally_cluster_template_uuid\", 2, keypair=mock.ANY, **kwargs)\n\n # Negative case2: created cluster not in the list of available clusters\n scenario._create_cluster.return_value = mock.Mock(uuid=\"foo\")\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, 2, **kwargs)\n scenario._create_cluster.assert_called_with(\n \"rally_cluster_template_uuid\", 2, keypair=mock.ANY, **kwargs)\n scenario._list_clusters.assert_called_with(**kwargs)\n" }, { "alpha_fraction": 0.4581982493400574, "alphanum_fraction": 0.4694302976131439, "avg_line_length": 37.38222122192383, "blob_id": 
"5253cdd2e1677a82294424dffd895f53e162d6f9", "content_id": "efaa8900e5f24447cb4b50b3b241d70f71116cb2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8636, "license_type": "permissive", "max_line_length": 78, "num_lines": 225, "path": "/tests/unit/task/contexts/swift/test_objects.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Cisco Systems, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\nfrom rally_openstack.task.contexts.swift import objects\nfrom tests.unit import test\n\n\nclass SwiftObjectGeneratorTestCase(test.TestCase):\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_setup(self, mock_clients):\n containers_per_tenant = 2\n objects_per_container = 7\n context = test.get_test_context()\n context.update({\n \"config\": {\n \"swift_objects\": {\n \"containers_per_tenant\": containers_per_tenant,\n \"objects_per_container\": objects_per_container,\n \"object_size\": 1024,\n \"resource_management_workers\": 10\n }\n },\n \"tenants\": {\n \"t1\": {\"name\": \"t1_name\"},\n \"t2\": {\"name\": \"t2_name\"}\n },\n \"users\": [\n {\n \"id\": \"u1\",\n \"tenant_id\": \"t1\",\n \"credential\": mock.MagicMock()\n },\n {\n \"id\": \"u2\",\n \"tenant_id\": \"t2\",\n \"credential\": mock.MagicMock()\n }\n ]\n })\n\n objects_ctx = 
objects.SwiftObjectGenerator(context)\n objects_ctx.setup()\n\n for tenant_id in context[\"tenants\"]:\n containers = context[\"tenants\"][tenant_id][\"containers\"]\n self.assertEqual(containers_per_tenant, len(containers))\n for container in containers:\n self.assertEqual(objects_per_container,\n len(container[\"objects\"]))\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n @mock.patch(\"rally_openstack.task.contexts.swift.utils.\"\n \"swift_utils.SwiftScenario\")\n def test_cleanup(self, mock_swift_scenario, mock_clients):\n context = test.get_test_context()\n context.update({\n \"config\": {\n \"swift_objects\": {\n \"resource_management_workers\": 1\n }\n },\n \"tenants\": {\n \"t1\": {\n \"name\": \"t1_name\",\n \"containers\": [\n {\"user\": {\"id\": \"u1\", \"tenant_id\": \"t1\",\n \"credential\": \"c1\"},\n \"container\": \"c1\",\n \"objects\": [\"o1\", \"o2\", \"o3\"]}\n ]\n },\n \"t2\": {\n \"name\": \"t2_name\",\n \"containers\": [\n {\"user\": {\"id\": \"u2\", \"tenant_id\": \"t2\",\n \"credential\": \"c2\"},\n \"container\": \"c2\",\n \"objects\": [\"o4\", \"o5\", \"o6\"]}\n ]\n }\n }\n })\n\n objects_ctx = objects.SwiftObjectGenerator(context)\n objects_ctx.cleanup()\n\n expected_containers = [\"c1\", \"c2\"]\n mock_swift_scenario.return_value._delete_container.assert_has_calls(\n [mock.call(con) for con in expected_containers], any_order=True)\n\n expected_objects = [(\"c1\", \"o1\"), (\"c1\", \"o2\"), (\"c1\", \"o3\"),\n (\"c2\", \"o4\"), (\"c2\", \"o5\"), (\"c2\", \"o6\")]\n mock_swift_scenario.return_value._delete_object.assert_has_calls(\n [mock.call(con, obj) for con, obj in expected_objects],\n any_order=True)\n\n for tenant_id in context[\"tenants\"]:\n self.assertEqual(0,\n len(context[\"tenants\"][tenant_id][\"containers\"]))\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_setup_failure_clients_put_container(self, mock_clients):\n context = test.get_test_context()\n context.update({\n \"config\": {\n 
\"swift_objects\": {\n \"containers_per_tenant\": 2,\n \"object_size\": 10,\n \"resource_management_workers\": 5\n }\n },\n \"tenants\": {\n \"t1\": {\"name\": \"t1_name\"},\n \"t2\": {\"name\": \"t2_name\"}\n },\n \"users\": [\n {\n \"id\": \"u1\",\n \"tenant_id\": \"t1\",\n \"credential\": mock.MagicMock()\n },\n {\n \"id\": \"u2\",\n \"tenant_id\": \"t2\",\n \"credential\": mock.MagicMock()\n }\n ]\n })\n mock_swift = mock_clients.return_value.swift.return_value\n mock_swift.put_container.side_effect = [Exception, True,\n Exception, Exception]\n objects_ctx = objects.SwiftObjectGenerator(context)\n self.assertRaisesRegex(exceptions.ContextSetupFailure,\n \"containers, expected 4 but got 1\",\n objects_ctx.setup)\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_setup_failure_clients_put_object(self, mock_clients):\n context = test.get_test_context()\n context.update({\n \"tenants\": {\n \"t1\": {\"name\": \"t1_name\"},\n \"t2\": {\"name\": \"t2_name\"}\n },\n \"users\": [\n {\n \"id\": \"u1\",\n \"tenant_id\": \"t1\",\n \"credential\": mock.MagicMock()\n },\n {\n \"id\": \"u2\",\n \"tenant_id\": \"t2\",\n \"credential\": mock.MagicMock()\n }\n ]\n })\n mock_swift = mock_clients.return_value.swift.return_value\n mock_swift.put_object.side_effect = [Exception, True]\n objects_ctx = objects.SwiftObjectGenerator(context)\n self.assertRaisesRegex(exceptions.ContextSetupFailure,\n \"objects, expected 2 but got 1\",\n objects_ctx.setup)\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_cleanup_failure_clients_delete_container(self, mock_clients):\n context = test.get_test_context()\n context.update({\n \"tenants\": {\n \"t1\": {\n \"name\": \"t1_name\",\n \"containers\": [\n {\"user\": {\"id\": \"u1\", \"tenant_id\": \"t1\",\n \"credential\": mock.MagicMock()},\n \"container\": \"coooon\",\n \"objects\": []}] * 3\n }\n }\n })\n mock_swift = mock_clients.return_value.swift.return_value\n 
mock_swift.delete_container.side_effect = [True, True, Exception]\n objects_ctx = objects.SwiftObjectGenerator(context)\n objects_ctx.cleanup()\n self.assertEqual(1, len(context[\"tenants\"][\"t1\"][\"containers\"]))\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_cleanup_failure_clients_delete_object(self, mock_clients):\n context = test.get_test_context()\n context.update({\n \"tenants\": {\n \"t1\": {\n \"name\": \"t1_name\",\n \"containers\": [\n {\"user\": {\"id\": \"u1\", \"tenant_id\": \"t1\",\n \"credential\": mock.MagicMock()},\n \"container\": \"c1\",\n \"objects\": [\"oooo\"] * 3}\n ]\n }\n }\n })\n mock_swift = mock_clients.return_value.swift.return_value\n mock_swift.delete_object.side_effect = [True, Exception, True]\n objects_ctx = objects.SwiftObjectGenerator(context)\n objects_ctx._delete_containers = mock.MagicMock()\n objects_ctx.cleanup()\n self.assertEqual(\n 1, sum([len(container[\"objects\"])\n for container in context[\"tenants\"][\"t1\"][\"containers\"]]))\n" }, { "alpha_fraction": 0.6062701344490051, "alphanum_fraction": 0.6110166907310486, "avg_line_length": 36.796234130859375, "blob_id": "82778b92eea489f9c62af751763ab0c3fb48eb0d", "content_id": "71bb4d0da85474839431506aebd1be1725e09320", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34130, "license_type": "permissive", "max_line_length": 113, "num_lines": 903, "path": "/rally_openstack/common/osclients.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\nimport os\nfrom urllib.parse import urlparse\nfrom urllib.parse import urlunparse\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.common.plugin import plugin\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import credential as oscred\n\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\n\n\nclass AuthenticationFailed(exceptions.AuthenticationFailed):\n error_code = 220\n\n msg_fmt = (\"Failed to authenticate to %(url)s for user '%(username)s'\"\n \" in project '%(project)s': %(message)s\")\n msg_fmt_2 = \"%(message)s\"\n\n def __init__(self, error, url, username, project):\n kwargs = {\n \"error\": error,\n \"url\": url,\n \"username\": username,\n \"project\": project\n }\n self._helpful_trace = False\n\n from keystoneauth1 import exceptions as ks_exc\n\n if isinstance(error, (ks_exc.ConnectionError,\n ks_exc.DiscoveryFailure)):\n # this type of errors is general for all users no need to include\n # username, project name. 
The original error message should be\n # self-sufficient\n self.msg_fmt = self.msg_fmt_2\n message = error.message\n if (message.startswith(\"Unable to establish connection to\")\n or isinstance(error, ks_exc.DiscoveryFailure)):\n if \"Max retries exceeded with url\" in message:\n if \"HTTPConnectionPool\" in message:\n splitter = \": HTTPConnectionPool\"\n else:\n splitter = \": HTTPSConnectionPool\"\n message = message.split(splitter, 1)[0]\n elif isinstance(error, ks_exc.Unauthorized):\n message = error.message.split(\" (HTTP 401)\", 1)[0]\n else:\n # something unexpected. include exception class as well.\n self._helpful_trace = True\n message = \"[%s] %s\" % (error.__class__.__name__, str(error))\n super(AuthenticationFailed, self).__init__(message=message, **kwargs)\n\n def is_trace_helpful(self):\n return self._helpful_trace\n\n\ndef configure(name, default_version=None, default_service_type=None,\n supported_versions=None):\n \"\"\"OpenStack client class wrapper.\n\n Each client class has to be wrapped by configure() wrapper. It\n sets essential configuration of client classes.\n\n :param name: Name of the client\n :param default_version: Default version for client\n :param default_service_type: Default service type of endpoint(If this\n variable is not specified, validation will assume that your client\n doesn't allow to specify service type.\n :param supported_versions: List of supported versions(If this variable is\n not specified, `OSClient.validate_version` method will raise an\n exception that client doesn't support setting any versions. 
If this\n logic is wrong for your client, you should override `validate_version`\n in client object)\n \"\"\"\n def wrapper(cls):\n cls = plugin.configure(name=name, platform=\"openstack\")(cls)\n cls._meta_set(\"default_version\", default_version)\n cls._meta_set(\"default_service_type\", default_service_type)\n cls._meta_set(\"supported_versions\", supported_versions or [])\n return cls\n\n return wrapper\n\n\[email protected]()\nclass OSClient(plugin.Plugin):\n \"\"\"Base class for OpenStack clients\"\"\"\n\n def __init__(self, credential, cache_obj=None):\n self.credential = credential\n if not isinstance(self.credential, oscred.OpenStackCredential):\n self.credential = oscred.OpenStackCredential(**self.credential)\n self.cache = cache_obj if cache_obj is not None else {}\n\n def choose_version(self, version=None):\n \"\"\"Return version string.\n\n Choose version between transmitted(preferable value if present),\n version from api_info(configured from a context) and default.\n \"\"\"\n # NOTE(andreykurilin): The result of choose is converted to string,\n # since most of clients contain map for versioned modules, where a key\n # is a string value of version. 
Example of map and its usage:\n #\n # from oslo_utils import importutils\n # ...\n # version_map = {\"1\": \"someclient.v1.client.Client\",\n # \"2\": \"someclient.v2.client.Client\"}\n #\n # def Client(version, *args, **kwargs):\n # cls = importutils.import_class(version_map[version])\n # return cls(*args, **kwargs)\n #\n # That is why type of version so important and we should ensure that\n # version is a string object.\n # For those clients which doesn't accept string value(for example\n # zaqarclient), this method should be overridden.\n version = (version\n or self.credential.api_info.get(self.get_name(), {}).get(\n \"version\") or self._meta_get(\"default_version\"))\n if version is not None:\n version = str(version)\n return version\n\n @classmethod\n def get_supported_versions(cls):\n return cls._meta_get(\"supported_versions\")\n\n @classmethod\n def validate_version(cls, version):\n supported_versions = cls.get_supported_versions()\n if supported_versions:\n if str(version) not in supported_versions:\n raise exceptions.ValidationError(\n \"'%(vers)s' is not supported. Should be one of \"\n \"'%(supported)s'\"\n % {\"vers\": version, \"supported\": supported_versions})\n else:\n raise exceptions.RallyException(\"Setting version is not supported\")\n try:\n float(version)\n except ValueError:\n raise exceptions.ValidationError(\n \"'%s' is invalid. 
Should be numeric value.\" % version\n ) from None\n\n def choose_service_type(self, service_type=None):\n \"\"\"Return service_type string.\n\n Choose service type between transmitted(preferable value if present),\n service type from api_info(configured from a context) and default.\n \"\"\"\n return (service_type\n or self.credential.api_info.get(self.get_name(), {}).get(\n \"service_type\") or self._meta_get(\"default_service_type\"))\n\n @classmethod\n def is_service_type_configurable(cls):\n \"\"\"Just checks that client supports setting service type.\"\"\"\n if cls._meta_get(\"default_service_type\") is None:\n raise exceptions.RallyException(\n \"Setting service type is not supported.\")\n\n @property\n def keystone(self):\n return OSClient.get(\"keystone\")(self.credential, self.cache)\n\n def _get_endpoint(self, service_type=None):\n kw = {\"service_type\": self.choose_service_type(service_type),\n \"region_name\": self.credential.region_name}\n if self.credential.endpoint_type:\n kw[\"interface\"] = self.credential.endpoint_type\n api_url = self.keystone.service_catalog.url_for(**kw)\n return api_url\n\n def _get_auth_info(self, user_key=\"username\",\n password_key=\"password\",\n auth_url_key=\"auth_url\",\n project_name_key=\"project_id\",\n domain_name_key=\"domain_name\",\n user_domain_name_key=\"user_domain_name\",\n project_domain_name_key=\"project_domain_name\",\n cacert_key=\"cacert\",\n endpoint_type=\"endpoint_type\",\n ):\n kw = {\n user_key: self.credential.username,\n password_key: self.credential.password,\n auth_url_key: self.credential.auth_url,\n cacert_key: self.credential.https_cacert,\n }\n if project_name_key:\n kw.update({project_name_key: self.credential.tenant_name})\n\n if \"v2.0\" not in self.credential.auth_url:\n kw.update({\n domain_name_key: self.credential.domain_name})\n kw.update({\n user_domain_name_key:\n self.credential.user_domain_name or \"Default\"})\n kw.update({\n project_domain_name_key:\n 
self.credential.project_domain_name or \"Default\"})\n if self.credential.endpoint_type:\n kw[endpoint_type] = self.credential.endpoint_type\n return kw\n\n @abc.abstractmethod\n def create_client(self, *args, **kwargs):\n \"\"\"Create new instance of client.\"\"\"\n\n def __call__(self, *args, **kwargs):\n \"\"\"Return initialized client instance.\"\"\"\n key = \"{0}{1}{2}\".format(self.get_name(),\n str(args) if args else \"\",\n str(kwargs) if kwargs else \"\")\n if key not in self.cache:\n self.cache[key] = self.create_client(*args, **kwargs)\n return self.cache[key]\n\n @classmethod\n def get(cls, name, **kwargs):\n # NOTE(boris-42): Remove this after we finish rename refactoring.\n kwargs.pop(\"platform\", None)\n kwargs.pop(\"namespace\", None)\n return super(OSClient, cls).get(name, platform=\"openstack\", **kwargs)\n\n\n@configure(\"keystone\", supported_versions=(\"2\", \"3\"))\nclass Keystone(OSClient):\n \"\"\"Wrapper for KeystoneClient which hides OpenStack auth details.\"\"\"\n\n @property\n def keystone(self):\n raise exceptions.RallyException(\n \"Method 'keystone' is restricted for keystoneclient. 
:)\")\n\n @property\n def service_catalog(self):\n return self.auth_ref.service_catalog\n\n @property\n def auth_ref(self):\n try:\n if \"keystone_auth_ref\" not in self.cache:\n sess, plugin = self.get_session()\n self.cache[\"keystone_auth_ref\"] = plugin.get_access(sess)\n except Exception as original_e:\n e = AuthenticationFailed(\n error=original_e,\n username=self.credential.username,\n project=self.credential.tenant_name,\n url=self.credential.auth_url\n )\n if logging.is_debug() and e.is_trace_helpful():\n LOG.exception(\"Unable to authenticate for user\"\n \" %(username)s in project\"\n \" %(tenant_name)s\" %\n {\"username\": self.credential.username,\n \"tenant_name\": self.credential.tenant_name})\n\n raise e from None\n return self.cache[\"keystone_auth_ref\"]\n\n def get_session(self, version=None):\n key = \"keystone_session_and_plugin_%s\" % version\n if key not in self.cache:\n from keystoneauth1 import discover\n from keystoneauth1 import identity\n from keystoneauth1 import session\n\n version = self.choose_version(version)\n auth_url = self.credential.auth_url\n if version is not None:\n auth_url = self._remove_url_version()\n\n password_args = {\n \"auth_url\": auth_url,\n \"username\": self.credential.username,\n \"password\": self.credential.password,\n \"tenant_name\": self.credential.tenant_name\n }\n\n if version is None:\n # NOTE(rvasilets): If version not specified than we discover\n # available version with the smallest number. 
To be able to\n # discover versions we need session\n temp_session = session.Session(\n verify=(self.credential.https_cacert\n or not self.credential.https_insecure),\n cert=self.credential.https_cert,\n timeout=CONF.openstack_client_http_timeout)\n version = str(discover.Discover(\n temp_session,\n password_args[\"auth_url\"]).version_data()[0][\"version\"][0])\n temp_session.session.close()\n\n if \"v2.0\" not in password_args[\"auth_url\"] and version != \"2\":\n password_args.update({\n \"user_domain_name\": self.credential.user_domain_name,\n \"domain_name\": self.credential.domain_name,\n \"project_domain_name\": self.credential.project_domain_name\n })\n identity_plugin = identity.Password(**password_args)\n sess = session.Session(\n auth=identity_plugin,\n verify=(self.credential.https_cacert\n or not self.credential.https_insecure),\n cert=self.credential.https_cert,\n timeout=CONF.openstack_client_http_timeout)\n self.cache[key] = (sess, identity_plugin)\n return self.cache[key]\n\n def _remove_url_version(self):\n \"\"\"Remove any version from the auth_url.\n\n The keystone Client code requires that auth_url be the root url\n if a version override is used.\n \"\"\"\n url = urlparse(self.credential.auth_url)\n path = url.path.rstrip(\"/\")\n if path.endswith(\"v2.0\") or path.endswith(\"v3\"):\n path = os.path.join(*os.path.split(path)[:-1])\n parts = (url.scheme, url.netloc, path, url.params, url.query,\n url.fragment)\n return urlunparse(parts)\n return self.credential.auth_url\n\n def create_client(self, version=None):\n \"\"\"Return a keystone client.\n\n :param version: Keystone API version, can be one of:\n (\"2\", \"3\")\n\n If this object was constructed with a version in the api_info\n then that will be used unless the version parameter is passed.\n \"\"\"\n import keystoneclient\n from keystoneclient import client\n\n # Use the version in the api_info if provided, otherwise fall\n # back to the passed version (which may be None, in which case\n 
# keystoneclient chooses).\n version = self.choose_version(version)\n\n sess, auth_plugin = self.get_session(version=version)\n\n kw = {\"version\": version, \"session\": sess,\n \"timeout\": CONF.openstack_client_http_timeout}\n # check for keystone version\n if auth_plugin._user_domain_name and self.credential.region_name:\n kw[\"region_name\"] = self.credential.region_name\n\n if keystoneclient.__version__[0] == \"1\":\n # NOTE(andreykurilin): let's leave this hack for envs which uses\n # old(<2.0.0) keystoneclient version. Upstream fix:\n # https://github.com/openstack/python-keystoneclient/commit/d9031c252848d89270a543b67109a46f9c505c86\n from keystoneauth1 import plugin\n kw[\"auth_url\"] = sess.get_endpoint(interface=plugin.AUTH_INTERFACE)\n if self.credential.endpoint_type:\n kw[\"interface\"] = self.credential.endpoint_type\n\n # NOTE(amyge):\n # In auth_ref(), plugin.get_access(sess) only returns a auth_ref object\n # and won't check the authentication access until it is actually being\n # called. 
To catch the authentication failure in auth_ref(), we will\n # have to call self.auth_ref.auth_token here to actually use auth_ref.\n self.auth_ref # noqa\n\n return client.Client(**kw)\n\n\n@configure(\"nova\", default_version=\"2\", default_service_type=\"compute\")\nclass Nova(OSClient):\n \"\"\"Wrapper for NovaClient which returns a authenticated native client.\"\"\"\n\n @classmethod\n def validate_version(cls, version):\n from novaclient import api_versions\n from novaclient import exceptions as nova_exc\n\n try:\n api_versions.get_api_version(version)\n except nova_exc.UnsupportedVersion:\n raise exceptions.RallyException(\n \"Version string '%s' is unsupported.\" % version) from None\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return nova client.\"\"\"\n from novaclient import client as nova\n\n client = nova.Client(\n session=self.keystone.get_session()[0],\n version=self.choose_version(version),\n endpoint_override=self._get_endpoint(service_type))\n return client\n\n\n@configure(\"neutron\", default_version=\"2.0\", default_service_type=\"network\",\n supported_versions=[\"2.0\"])\nclass Neutron(OSClient):\n \"\"\"Wrapper for NeutronClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return neutron client.\"\"\"\n from neutronclient.neutron import client as neutron\n\n kw_args = {}\n if self.credential.endpoint_type:\n kw_args[\"endpoint_type\"] = self.credential.endpoint_type\n\n client = neutron.Client(\n self.choose_version(version),\n session=self.keystone.get_session()[0],\n endpoint_override=self._get_endpoint(service_type),\n **kw_args)\n return client\n\n\n@configure(\"octavia\", default_version=\"2\",\n default_service_type=\"load-balancer\", supported_versions=[\"2\"])\nclass Octavia(OSClient):\n \"\"\"Wrapper for OctaviaClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, 
service_type=None):\n \"\"\"Return octavia client.\"\"\"\n from octaviaclient.api.v2 import octavia\n\n kw_args = {}\n if self.credential.endpoint_type:\n kw_args[\"endpoint_type\"] = self.credential.endpoint_type\n\n client = octavia.OctaviaAPI(\n endpoint=self._get_endpoint(service_type),\n session=self.keystone.get_session()[0],\n **kw_args)\n return client\n\n\n@configure(\"glance\", default_version=\"2\", default_service_type=\"image\",\n supported_versions=[\"1\", \"2\"])\nclass Glance(OSClient):\n \"\"\"Wrapper for GlanceClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return glance client.\"\"\"\n import glanceclient as glance\n\n session = self.keystone.get_session()[0]\n client = glance.Client(\n version=self.choose_version(version),\n endpoint_override=self._get_endpoint(service_type),\n session=session)\n return client\n\n\n@configure(\"heat\", default_version=\"1\", default_service_type=\"orchestration\",\n supported_versions=[\"1\"])\nclass Heat(OSClient):\n \"\"\"Wrapper for HeatClient which returns an authenticated native client.\"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return heat client.\"\"\"\n from heatclient import client as heat\n\n # ToDo: Remove explicit endpoint_type or interface initialization\n # when heatclient no longer uses it.\n kw_args = {}\n if self.credential.endpoint_type:\n kw_args[\"interface\"] = self.credential.endpoint_type\n\n client = heat.Client(\n self.choose_version(version),\n session=self.keystone.get_session()[0],\n endpoint_override=self._get_endpoint(service_type),\n **kw_args)\n return client\n\n\n@configure(\"cinder\", default_version=\"3\", default_service_type=\"block-storage\",\n supported_versions=[\"1\", \"2\", \"3\"])\nclass Cinder(OSClient):\n \"\"\"Wrapper for CinderClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, 
service_type=None):\n \"\"\"Return cinder client.\"\"\"\n from cinderclient import client as cinder\n\n client = cinder.Client(\n self.choose_version(version),\n session=self.keystone.get_session()[0],\n endpoint_override=self._get_endpoint(service_type))\n return client\n\n\n@configure(\"manila\", default_version=\"1\", default_service_type=\"share\")\nclass Manila(OSClient):\n \"\"\"Wrapper for ManilaClient which returns an authenticated native client.\n\n \"\"\"\n @classmethod\n def validate_version(cls, version):\n from manilaclient import api_versions\n from manilaclient import exceptions as manila_exc\n\n try:\n api_versions.get_api_version(version)\n except manila_exc.UnsupportedVersion:\n raise exceptions.RallyException(\n \"Version string '%s' is unsupported.\" % version) from None\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return manila client.\"\"\"\n from manilaclient import client as manila\n manila_client = manila.Client(\n self.choose_version(version),\n insecure=self.credential.https_insecure,\n session=self.keystone.get_session()[0],\n service_catalog_url=self._get_endpoint(service_type))\n return manila_client\n\n\n@configure(\"gnocchi\", default_service_type=\"metric\", default_version=\"1\",\n supported_versions=[\"1\"])\nclass Gnocchi(OSClient):\n \"\"\"Wrapper for GnocchiClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return gnocchi client.\"\"\"\n # NOTE(sumantmurke): gnocchiclient requires keystoneauth1 for\n # authenticating and creating a session.\n from gnocchiclient import client as gnocchi\n\n service_type = self.choose_service_type(service_type)\n sess = self.keystone.get_session()[0]\n gclient = gnocchi.Client(\n version=self.choose_version(version), session=sess,\n adapter_options={\"service_type\": service_type,\n \"interface\": self.credential.endpoint_type})\n return gclient\n\n\n@configure(\"ironic\", 
default_version=\"1\", default_service_type=\"baremetal\",\n supported_versions=[\"1\"])\nclass Ironic(OSClient):\n \"\"\"Wrapper for IronicClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return Ironic client.\"\"\"\n from ironicclient import client as ironic\n\n client = ironic.get_client(\n self.choose_version(version),\n session=self.keystone.get_session()[0],\n endpoint=self._get_endpoint(service_type))\n return client\n\n\n@configure(\"sahara\", default_version=\"1.1\", supported_versions=[\"1.0\", \"1.1\"],\n default_service_type=\"data-processing\")\nclass Sahara(OSClient):\n \"\"\"Wrapper for SaharaClient which returns an authenticated native client.\n\n \"\"\"\n\n # NOTE(andreykurilin): saharaclient supports \"1.0\" version and doesn't\n # support \"1\". `choose_version` and `validate_version` methods are written\n # as a hack to covert 1 -> 1.0, which can simplify setting saharaclient\n # for end-users.\n def choose_version(self, version=None):\n return float(super(Sahara, self).choose_version(version))\n\n @classmethod\n def validate_version(cls, version):\n super(Sahara, cls).validate_version(float(version))\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return Sahara client.\"\"\"\n from saharaclient import client as sahara\n\n client = sahara.Client(\n self.choose_version(version),\n session=self.keystone.get_session()[0],\n sahara_url=self._get_endpoint(service_type))\n\n return client\n\n\n@configure(\"zaqar\", default_version=\"1.1\", default_service_type=\"messaging\",\n supported_versions=[\"1\", \"1.1\"])\nclass Zaqar(OSClient):\n \"\"\"Wrapper for ZaqarClient which returns an authenticated native client.\n\n \"\"\"\n\n def choose_version(self, version=None):\n # zaqarclient accepts only int or float obj as version\n return float(super(Zaqar, self).choose_version(version))\n\n def create_client(self, version=None, service_type=None):\n 
\"\"\"Return Zaqar client.\"\"\"\n from zaqarclient.queues import client as zaqar\n client = zaqar.Client(url=self._get_endpoint(),\n version=self.choose_version(version),\n session=self.keystone.get_session()[0])\n return client\n\n\n@configure(\"murano\", default_version=\"1\",\n default_service_type=\"application-catalog\",\n supported_versions=[\"1\"])\nclass Murano(OSClient):\n \"\"\"Wrapper for MuranoClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return Murano client.\"\"\"\n from muranoclient import client as murano\n\n client = murano.Client(self.choose_version(version),\n endpoint=self._get_endpoint(service_type),\n token=self.keystone.auth_ref.auth_token)\n\n return client\n\n\n@configure(\"designate\", default_version=\"2\", default_service_type=\"dns\",\n supported_versions=[\"2\"])\nclass Designate(OSClient):\n \"\"\"Wrapper for DesignateClient which returns authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return designate client.\"\"\"\n from designateclient import client\n\n version = self.choose_version(version)\n\n api_url = self._get_endpoint(service_type)\n api_url += \"/v%s\" % version\n\n session = self.keystone.get_session()[0]\n return client.Client(version, session=session,\n endpoint_override=api_url)\n\n\n@configure(\"trove\", default_version=\"1.0\", supported_versions=[\"1.0\"],\n default_service_type=\"database\")\nclass Trove(OSClient):\n \"\"\"Wrapper for TroveClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Returns trove client.\"\"\"\n from troveclient import client as trove\n\n client = trove.Client(self.choose_version(version),\n session=self.keystone.get_session()[0],\n endpoint=self._get_endpoint(service_type))\n return client\n\n\n@configure(\"mistral\", default_service_type=\"workflowv2\")\nclass 
Mistral(OSClient):\n \"\"\"Wrapper for MistralClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, service_type=None):\n \"\"\"Return Mistral client.\"\"\"\n from mistralclient.api import client as mistral\n\n client = mistral.client(\n mistral_url=self._get_endpoint(service_type),\n service_type=self.choose_service_type(service_type),\n auth_token=self.keystone.auth_ref.auth_token)\n return client\n\n\n@configure(\"swift\", default_service_type=\"object-store\")\nclass Swift(OSClient):\n \"\"\"Wrapper for SwiftClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, service_type=None):\n \"\"\"Return swift client.\"\"\"\n from swiftclient import client as swift\n\n auth_token = self.keystone.auth_ref.auth_token\n client = swift.Connection(retries=1,\n preauthurl=self._get_endpoint(service_type),\n preauthtoken=auth_token,\n insecure=self.credential.https_insecure,\n cacert=self.credential.https_cacert,\n user=self.credential.username,\n tenant_name=self.credential.tenant_name,\n )\n return client\n\n\n@configure(\"monasca\", default_version=\"2_0\",\n default_service_type=\"monitoring\", supported_versions=[\"2_0\"])\nclass Monasca(OSClient):\n \"\"\"Wrapper for MonascaClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return monasca client.\"\"\"\n from monascaclient import client as monasca\n\n # Change this to use session once it's supported by monascaclient\n client = monasca.Client(\n self.choose_version(version),\n self._get_endpoint(service_type),\n token=self.keystone.auth_ref.auth_token,\n timeout=CONF.openstack_client_http_timeout,\n insecure=self.credential.https_insecure,\n **self._get_auth_info(project_name_key=\"tenant_name\"))\n return client\n\n\n@configure(\"senlin\", default_version=\"1\", default_service_type=\"clustering\",\n supported_versions=[\"1\"])\nclass Senlin(OSClient):\n 
\"\"\"Wrapper for SenlinClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return senlin client.\"\"\"\n from senlinclient import client as senlin\n\n return senlin.Client(\n self.choose_version(version),\n **self._get_auth_info(project_name_key=\"project_name\",\n cacert_key=\"cert\",\n endpoint_type=\"interface\"))\n\n\n@configure(\"magnum\", default_version=\"1\", supported_versions=[\"1\"],\n default_service_type=\"container-infra\",)\nclass Magnum(OSClient):\n \"\"\"Wrapper for MagnumClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return magnum client.\"\"\"\n from magnumclient import client as magnum\n\n api_url = self._get_endpoint(service_type)\n session = self.keystone.get_session()[0]\n\n return magnum.Client(\n session=session,\n interface=self.credential.endpoint_type,\n magnum_url=api_url)\n\n\n@configure(\"watcher\", default_version=\"1\", default_service_type=\"infra-optim\",\n supported_versions=[\"1\"])\nclass Watcher(OSClient):\n \"\"\"Wrapper for WatcherClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return watcher client.\"\"\"\n from watcherclient import client as watcher_client\n watcher_api_url = self._get_endpoint(\n self.choose_service_type(service_type))\n client = watcher_client.Client(\n self.choose_version(version),\n endpoint=watcher_api_url,\n session=self.keystone.get_session()[0])\n return client\n\n\n@configure(\"barbican\", default_version=\"1\", default_service_type=\"key-manager\")\nclass Barbican(OSClient):\n \"\"\"Wrapper for BarbicanClient which returns an authenticated native client.\n\n \"\"\"\n\n def create_client(self, version=None, service_type=None):\n \"\"\"Return Barbican client.\"\"\"\n from barbicanclient import client as barbican_client\n\n version = \"v%s\" % 
self.choose_version(version)\n\n client = barbican_client.Client(\n version=self.choose_version(version),\n session=self.keystone.get_session()[0])\n\n return client\n\n\nclass Clients(object):\n \"\"\"This class simplify and unify work with OpenStack python clients.\"\"\"\n\n def __init__(self, credential, cache=None):\n self.credential = credential\n self.cache = cache or {}\n\n def __getattr__(self, client_name):\n \"\"\"Lazy load of clients.\"\"\"\n return OSClient.get(client_name)(self.credential, self.cache)\n\n @classmethod\n def create_from_env(cls):\n from rally_openstack.common import credential\n from rally_openstack.environment.platforms import existing\n\n spec = existing.OpenStack.create_spec_from_sys_environ(os.environ)\n if not spec[\"available\"]:\n raise ValueError(spec[\"message\"]) from None\n\n creds = spec[\"spec\"]\n oscred = credential.OpenStackCredential(\n auth_url=creds[\"auth_url\"],\n username=creds[\"admin\"][\"username\"],\n password=creds[\"admin\"][\"password\"],\n tenant_name=creds[\"admin\"].get(\n \"tenant_name\", creds[\"admin\"].get(\"project_name\")),\n endpoint_type=creds[\"endpoint_type\"],\n user_domain_name=creds[\"admin\"].get(\"user_domain_name\"),\n project_domain_name=creds[\"admin\"].get(\"project_domain_name\"),\n region_name=creds[\"region_name\"],\n https_cacert=creds[\"https_cacert\"],\n https_insecure=creds[\"https_insecure\"])\n return cls(oscred)\n\n def clear(self):\n \"\"\"Remove all cached client handles.\"\"\"\n self.cache = {}\n\n def verified_keystone(self):\n \"\"\"Ensure keystone endpoints are valid and then authenticate\n\n :returns: Keystone Client\n \"\"\"\n # Ensure that user is admin\n if \"admin\" not in [role.lower() for role in\n self.keystone.auth_ref.role_names]:\n raise exceptions.InvalidAdminException(\n username=self.credential.username)\n return self.keystone()\n\n def services(self):\n \"\"\"Return available services names and types.\n\n :returns: dict, {\"service_type\": 
\"service_name\", ...}\n \"\"\"\n if \"services_data\" not in self.cache:\n services_data = {}\n available_services = self.keystone.service_catalog.get_endpoints()\n for stype in available_services.keys():\n if stype in consts.ServiceType:\n services_data[stype] = consts.ServiceType[stype]\n else:\n services_data[stype] = \"__unknown__\"\n self.cache[\"services_data\"] = services_data\n\n return self.cache[\"services_data\"]\n" }, { "alpha_fraction": 0.5585689544677734, "alphanum_fraction": 0.5606847405433655, "avg_line_length": 35.35664367675781, "blob_id": "b2186f3fcfd5aa71cf0e91460fd490eb4d827f51", "content_id": "2eab24f3670b8b9f05b5d7eb9f2727bdbcc5d5e3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5199, "license_type": "permissive", "max_line_length": 79, "num_lines": 143, "path": "/rally_openstack/task/ui/charts/osprofilerchart.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport os\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.common import opts\nfrom rally.common.plugin import plugin\nfrom rally.task.processing import charts\n\n\nOPTS = {\n \"openstack\": [\n cfg.StrOpt(\n \"osprofiler_chart_mode\",\n default=None,\n help=\"Mode of embedding OSProfiler's chart. 
Can be 'text' \"\n \"(embed only trace id), 'raw' (embed raw osprofiler's native \"\n \"report) or a path to directory (raw osprofiler's native \"\n \"reports for each iteration will be saved separately there \"\n \"to decrease the size of rally report itself)\")\n ]\n}\n\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\n\n\ndef _datetime_json_serialize(obj):\n if hasattr(obj, \"isoformat\"):\n return obj.isoformat()\n else:\n return obj\n\n\[email protected](name=\"OSProfiler\")\nclass OSProfilerChart(charts.OutputEmbeddedChart,\n charts.OutputEmbeddedExternalChart,\n charts.OutputTextArea):\n \"\"\"Chart for embedding OSProfiler data.\"\"\"\n\n @classmethod\n def _fetch_osprofiler_data(cls, connection_str, trace_id):\n from osprofiler.drivers import base\n from osprofiler import opts as osprofiler_opts\n\n opts.register_opts(osprofiler_opts.list_opts()) # noqa\n\n try: # noqa\n engine = base.get_driver(connection_str)\n except Exception:\n msg = \"Error while fetching OSProfiler results.\"\n if logging.is_debug():\n LOG.exception(msg)\n else:\n LOG.error(msg)\n return None\n\n return engine.get_report(trace_id)\n\n @classmethod\n def _generate_osprofiler_report(cls, osp_data):\n from osprofiler import cmd\n\n path = \"%s/template.html\" % os.path.dirname(cmd.__file__)\n with open(path) as f:\n html_obj = f.read()\n\n osp_data = json.dumps(osp_data,\n indent=4,\n separators=(\",\", \": \"),\n default=_datetime_json_serialize)\n return html_obj.replace(\"$DATA\", osp_data).replace(\"$LOCAL\", \"false\")\n\n @classmethod\n def _return_raw_response_for_complete_data(cls, data):\n return charts.OutputTextArea.render_complete_data({\n \"title\": data[\"title\"],\n \"widget\": \"TextArea\",\n \"data\": [data[\"data\"][\"trace_id\"]]\n })\n\n @classmethod\n def render_complete_data(cls, data):\n mode = CONF.openstack.osprofiler_chart_mode\n\n if isinstance(data[\"data\"][\"trace_id\"], list):\n # NOTE(andreykurilin): it is an adoption for the format that we\n # used 
before rally-openstack 1.5.0 .\n data[\"data\"][\"trace_id\"] = data[\"data\"][\"trace_id\"][0]\n\n if data[\"data\"].get(\"conn_str\") and mode != \"text\":\n osp_data = cls._fetch_osprofiler_data(\n data[\"data\"][\"conn_str\"],\n trace_id=data[\"data\"][\"trace_id\"]\n )\n if not osp_data:\n # for some reasons we failed to fetch data from OSProfiler's\n # backend. in this case we can display just trace ID\n return cls._return_raw_response_for_complete_data(data)\n\n osp_report = cls._generate_osprofiler_report(osp_data)\n title = \"{0} : {1}\".format(data[\"title\"],\n data[\"data\"][\"trace_id\"])\n\n if (mode and mode != \"raw\") and \"workload_uuid\" in data[\"data\"]:\n # NOTE(andreykurilin): we need to rework our charts plugin\n # mechanism so it is available out of box\n workload_uuid = data[\"data\"][\"workload_uuid\"]\n iteration = data[\"data\"][\"iteration\"]\n file_name = \"w_%s-%s.html\" % (workload_uuid, iteration)\n path = os.path.join(mode, file_name)\n with open(path, \"w\") as f:\n f.write(osp_report)\n return charts.OutputEmbeddedExternalChart.render_complete_data(\n {\n \"title\": title,\n \"widget\": \"EmbeddedChart\",\n \"data\": path\n }\n )\n else:\n return charts.OutputEmbeddedChart.render_complete_data(\n {\"title\": title,\n \"widget\": \"EmbeddedChart\",\n \"data\": osp_report}\n )\n\n return cls._return_raw_response_for_complete_data(data)\n" }, { "alpha_fraction": 0.4479255974292755, "alphanum_fraction": 0.465059369802475, "avg_line_length": 34.52608871459961, "blob_id": "1371e1e8df9ec7bcd623ce6bdd436c8ae38d6ae6", "content_id": "868854ac04d2921a7a179ac19a28bd8188dc33b6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8171, "license_type": "permissive", "max_line_length": 78, "num_lines": 230, "path": "/tests/unit/task/scenarios/sahara/test_jobs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All 
Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally.common import cfg\nfrom rally_openstack.task.scenarios.sahara import jobs\nfrom tests.unit import test\n\nCONF = cfg.CONF\n\nBASE = \"rally_openstack.task.scenarios.sahara.jobs\"\n\n\nclass SaharaJobTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(SaharaJobTestCase, self).setUp()\n\n self.context = test.get_test_context()\n CONF.set_override(\"sahara_cluster_check_interval\", 0, \"openstack\")\n CONF.set_override(\"sahara_job_check_interval\", 0, \"openstack\")\n\n @mock.patch(\"%s.CreateLaunchJob._run_job_execution\" % BASE)\n def test_create_launch_job_java(self, mock_run_job):\n self.clients(\"sahara\").jobs.create.return_value = mock.MagicMock(\n id=\"42\")\n\n self.context.update({\n \"tenant\": {\n \"sahara\": {\n \"image\": \"test_image\",\n \"mains\": [\"main_42\"],\n \"libs\": [\"lib_42\"],\n \"cluster\": \"cl_42\",\n \"input\": \"in_42\"\n }\n }\n })\n scenario = jobs.CreateLaunchJob(self.context)\n scenario.generate_random_name = mock.Mock(\n return_value=\"job_42\")\n\n scenario.run(job_type=\"java\",\n configs={\"conf_key\": \"conf_val\"},\n job_idx=0)\n self.clients(\"sahara\").jobs.create.assert_called_once_with(\n name=\"job_42\",\n type=\"java\",\n description=\"\",\n mains=[\"main_42\"],\n libs=[\"lib_42\"]\n )\n\n mock_run_job.assert_called_once_with(\n job_id=\"42\",\n cluster_id=\"cl_42\",\n input_id=None,\n output_id=None,\n 
configs={\"conf_key\": \"conf_val\"},\n job_idx=0\n )\n\n @mock.patch(\"%s.CreateLaunchJob._run_job_execution\" % BASE)\n @mock.patch(\"%s.CreateLaunchJob._create_output_ds\" % BASE,\n return_value=mock.MagicMock(id=\"out_42\"))\n def test_create_launch_job_pig(self,\n mock_create_output,\n mock_run_job):\n self.clients(\"sahara\").jobs.create.return_value = mock.MagicMock(\n id=\"42\")\n\n self.context.update({\n \"tenant\": {\n \"sahara\": {\n \"image\": \"test_image\",\n \"mains\": [\"main_42\"],\n \"libs\": [\"lib_42\"],\n \"cluster\": \"cl_42\",\n \"input\": \"in_42\"\n }\n }\n })\n scenario = jobs.CreateLaunchJob(self.context)\n scenario.generate_random_name = mock.Mock(return_value=\"job_42\")\n\n scenario.run(job_type=\"pig\",\n configs={\"conf_key\": \"conf_val\"},\n job_idx=0)\n self.clients(\"sahara\").jobs.create.assert_called_once_with(\n name=\"job_42\",\n type=\"pig\",\n description=\"\",\n mains=[\"main_42\"],\n libs=[\"lib_42\"]\n )\n\n mock_run_job.assert_called_once_with(\n job_id=\"42\",\n cluster_id=\"cl_42\",\n input_id=\"in_42\",\n output_id=\"out_42\",\n configs={\"conf_key\": \"conf_val\"},\n job_idx=0\n )\n\n @mock.patch(\"%s.CreateLaunchJob._run_job_execution\" % BASE)\n @mock.patch(\"%s.CreateLaunchJob.generate_random_name\" % BASE,\n return_value=\"job_42\")\n def test_create_launch_job_sequence(self,\n mock__random_name,\n mock_run_job):\n self.clients(\"sahara\").jobs.create.return_value = mock.MagicMock(\n id=\"42\")\n\n self.context.update({\n \"tenant\": {\n \"sahara\": {\n \"image\": \"test_image\",\n \"mains\": [\"main_42\"],\n \"libs\": [\"lib_42\"],\n \"cluster\": \"cl_42\",\n \"input\": \"in_42\"\n }\n }\n })\n scenario = jobs.CreateLaunchJobSequence(self.context)\n\n scenario.run(\n jobs=[\n {\n \"job_type\": \"java\",\n \"configs\": {\"conf_key\": \"conf_val\"}\n }, {\n \"job_type\": \"java\",\n \"configs\": {\"conf_key2\": \"conf_val2\"}\n }])\n\n jobs_create_call = mock.call(name=\"job_42\",\n type=\"java\",\n 
description=\"\",\n mains=[\"main_42\"],\n libs=[\"lib_42\"])\n\n self.clients(\"sahara\").jobs.create.assert_has_calls(\n [jobs_create_call, jobs_create_call])\n\n mock_run_job.assert_has_calls([\n mock.call(job_id=\"42\",\n cluster_id=\"cl_42\",\n input_id=None,\n output_id=None,\n configs={\"conf_key\": \"conf_val\"},\n job_idx=0),\n mock.call(job_id=\"42\",\n cluster_id=\"cl_42\",\n input_id=None,\n output_id=None,\n configs={\"conf_key2\": \"conf_val2\"},\n job_idx=1)\n ])\n\n @mock.patch(\"%s.CreateLaunchJob.generate_random_name\" % BASE,\n return_value=\"job_42\")\n @mock.patch(\"%s.CreateLaunchJobSequenceWithScaling\"\n \"._scale_cluster\" % BASE)\n @mock.patch(\"%s.CreateLaunchJob._run_job_execution\" % BASE)\n def test_create_launch_job_sequence_with_scaling(\n self,\n mock_run_job,\n mock_create_launch_job_sequence_with_scaling__scale_cluster,\n mock_create_launch_job_generate_random_name\n ):\n self.clients(\"sahara\").jobs.create.return_value = mock.MagicMock(\n id=\"42\")\n self.clients(\"sahara\").clusters.get.return_value = mock.MagicMock(\n id=\"cl_42\", status=\"active\")\n\n self.context.update({\n \"tenant\": {\n \"sahara\": {\n \"image\": \"test_image\",\n \"mains\": [\"main_42\"],\n \"libs\": [\"lib_42\"],\n \"cluster\": \"cl_42\",\n \"input\": \"in_42\"\n }\n }\n })\n scenario = jobs.CreateLaunchJobSequenceWithScaling(self.context)\n\n scenario.run(\n jobs=[\n {\n \"job_type\": \"java\",\n \"configs\": {\"conf_key\": \"conf_val\"}\n }, {\n \"job_type\": \"java\",\n \"configs\": {\"conf_key2\": \"conf_val2\"}\n }],\n deltas=[1, -1])\n\n jobs_create_call = mock.call(name=\"job_42\",\n type=\"java\",\n description=\"\",\n mains=[\"main_42\"],\n libs=[\"lib_42\"])\n\n self.clients(\"sahara\").jobs.create.assert_has_calls(\n [jobs_create_call, jobs_create_call])\n\n je_0 = mock.call(job_id=\"42\", cluster_id=\"cl_42\", input_id=None,\n output_id=None, configs={\"conf_key\": \"conf_val\"},\n job_idx=0)\n je_1 = mock.call(job_id=\"42\", 
cluster_id=\"cl_42\", input_id=None,\n output_id=None,\n configs={\"conf_key2\": \"conf_val2\"}, job_idx=1)\n mock_run_job.assert_has_calls([je_0, je_1, je_0, je_1, je_0, je_1])\n" }, { "alpha_fraction": 0.6670736074447632, "alphanum_fraction": 0.6695151925086975, "avg_line_length": 34.39506149291992, "blob_id": "a32ade650fb38e69c40aa615ca73022e7ed83c20", "content_id": "33092ba7f5a56200cf04dced0e7b86b1bbde5a18", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5734, "license_type": "permissive", "max_line_length": 77, "num_lines": 162, "path": "/rally_openstack/task/scenarios/designate/basic.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014 Hewlett-Packard Development Company, L.P.\n#\n# Author: Endre Karlson <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.designate import utils\n\n\n\"\"\"Basic scenarios for Designate.\"\"\"\n\n\[email protected](\"required_services\",\n services=[consts.Service.DESIGNATE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"designate\"]},\n name=\"DesignateBasic.create_and_list_zones\",\n platform=\"openstack\")\nclass CreateAndListZones(utils.DesignateScenario):\n\n def run(self):\n \"\"\"Create a zone and list all zones.\n\n Measure the \"openstack zone list\" command performance.\n\n If you have only 1 user in your context, you will\n add 1 zone on every iteration. So you will have more\n and more zone and will be able to measure the\n performance of the \"openstack zone list\" command depending on\n the number of zones owned by users.\n \"\"\"\n zone = self._create_zone()\n self.assertTrue(zone)\n list_zones = self._list_zones()\n self.assertIn(zone, list_zones)\n\n\[email protected](\"required_services\",\n services=[consts.Service.DESIGNATE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"DesignateBasic.list_zones\", platform=\"openstack\")\nclass ListZones(utils.DesignateScenario):\n\n def run(self):\n \"\"\"List Designate zones.\n\n This simple scenario tests the openstack zone list command by listing\n all the zones.\n \"\"\"\n\n self._list_zones()\n\n\[email protected](\"required_services\",\n services=[consts.Service.DESIGNATE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](context={\"cleanup@openstack\": [\"designate\"]},\n name=\"DesignateBasic.create_and_delete_zone\",\n platform=\"openstack\")\nclass 
CreateAndDeleteZone(utils.DesignateScenario):\n\n def run(self):\n \"\"\"Create and then delete a zone.\n\n Measure the performance of creating and deleting zones\n with different level of load.\n \"\"\"\n zone = self._create_zone()\n self._delete_zone(zone[\"id\"])\n\n\[email protected](\"required_services\",\n services=[consts.Service.DESIGNATE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"DesignateBasic.list_recordsets\",\n platform=\"openstack\")\nclass ListRecordsets(utils.DesignateScenario):\n\n def run(self, zone_id):\n \"\"\"List Designate recordsets.\n\n This simple scenario tests the openstack recordset list command by\n listing all the recordsets in a zone.\n\n :param zone_id: Zone ID\n \"\"\"\n\n self._list_recordsets(zone_id)\n\n\[email protected](\"required_services\",\n services=[consts.Service.DESIGNATE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=(\"zones\"))\[email protected](context={\"cleanup@openstack\": [\"designate\"]},\n name=\"DesignateBasic.create_and_delete_recordsets\",\n platform=\"openstack\")\nclass CreateAndDeleteRecordsets(utils.DesignateScenario):\n\n def run(self, recordsets_per_zone=5):\n \"\"\"Create and then delete recordsets.\n\n Measure the performance of creating and deleting recordsets\n with different level of load.\n\n :param recordsets_per_zone: recordsets to create pr zone.\n \"\"\"\n zone = random.choice(self.context[\"tenant\"][\"zones\"])\n\n recordsets = []\n\n for i in range(recordsets_per_zone):\n recordset = self._create_recordset(zone)\n recordsets.append(recordset)\n\n for recordset in recordsets:\n self._delete_recordset(\n zone[\"id\"], recordset[\"id\"])\n\n\[email protected](\"required_services\",\n services=[consts.Service.DESIGNATE])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", 
contexts=(\"zones\"))\[email protected](context={\"cleanup@openstack\": [\"designate\"]},\n name=\"DesignateBasic.create_and_list_recordsets\",\n platform=\"openstack\")\nclass CreateAndListRecordsets(utils.DesignateScenario):\n\n def run(self, recordsets_per_zone=5):\n \"\"\"Create and then list recordsets.\n\n If you have only 1 user in your context, you will\n add 1 recordset on every iteration. So you will have more\n and more recordsets and will be able to measure the\n performance of the \"openstack recordset list\" command depending on\n the number of zones/recordsets owned by users.\n\n :param recordsets_per_zone: recordsets to create pr zone.\n \"\"\"\n zone = random.choice(self.context[\"tenant\"][\"zones\"])\n\n for i in range(recordsets_per_zone):\n self._create_recordset(zone)\n\n self._list_recordsets(zone[\"id\"])\n" }, { "alpha_fraction": 0.5654296875, "alphanum_fraction": 0.572998046875, "avg_line_length": 45.54545593261719, "blob_id": "eb878ba3ac43d57e9bc2dbb5d77f3363faabcf6e", "content_id": "b2fa25113aac591685c487b6e80e8abd6b4d214d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4096, "license_type": "permissive", "max_line_length": 79, "num_lines": 88, "path": "/rally_openstack/common/cfg/tempest.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\n\nOPTS = {\"openstack\": [\n cfg.StrOpt(\"img_url\",\n default=\"http://download.cirros-cloud.net/\"\n \"0.5.2/cirros-0.5.2-x86_64-disk.img\",\n deprecated_group=\"tempest\",\n help=\"image URL\"),\n cfg.StrOpt(\"img_disk_format\",\n default=\"qcow2\",\n deprecated_group=\"tempest\",\n help=\"Image disk format to use when creating the image\"),\n cfg.StrOpt(\"img_container_format\",\n default=\"bare\",\n deprecated_group=\"tempest\",\n help=\"Image container format to use when creating the image\"),\n cfg.StrOpt(\"img_name_regex\",\n default=\"^.*(cirros|testvm).*$\",\n deprecated_group=\"tempest\",\n help=\"Regular expression for name of a public image to \"\n \"discover it in the cloud and use it for the tests. \"\n \"Note that when Rally is searching for the image, case \"\n \"insensitive matching is performed. Specify nothing \"\n \"('img_name_regex =') if you want to disable discovering. 
\"\n \"In this case Rally will create needed resources by \"\n \"itself if the values for the corresponding config \"\n \"options are not specified in the Tempest config file\"),\n cfg.StrOpt(\"swift_operator_role\",\n default=\"member\",\n deprecated_group=\"tempest\",\n help=\"Role required for users \"\n \"to be able to create Swift containers\"),\n cfg.StrOpt(\"swift_reseller_admin_role\",\n default=\"ResellerAdmin\",\n deprecated_group=\"tempest\",\n help=\"User role that has reseller admin\"),\n cfg.StrOpt(\"heat_stack_owner_role\",\n default=\"heat_stack_owner\",\n deprecated_group=\"tempest\",\n help=\"Role required for users \"\n \"to be able to manage Heat stacks\"),\n cfg.StrOpt(\"heat_stack_user_role\",\n default=\"heat_stack_user\",\n deprecated_group=\"tempest\",\n help=\"Role for Heat template-defined users\"),\n cfg.IntOpt(\"flavor_ref_ram\",\n default=\"128\",\n deprecated_group=\"tempest\",\n help=\"Primary flavor RAM size used by most of the test cases\"),\n cfg.IntOpt(\"flavor_ref_alt_ram\",\n default=\"192\",\n deprecated_group=\"tempest\",\n help=\"Alternate reference flavor RAM size used by test that \"\n \"need two flavors, like those that resize an instance\"),\n cfg.IntOpt(\"flavor_ref_disk\",\n default=\"5\",\n help=\"Primary flavor disk size in GiB used by most of the test \"\n \"cases\"),\n cfg.IntOpt(\"flavor_ref_alt_disk\",\n default=\"5\",\n help=\"Alternate reference flavor disk size in GiB used by \"\n \"tests that need two flavors, like those that resize an \"\n \"instance\"),\n cfg.IntOpt(\"heat_instance_type_ram\",\n default=\"128\",\n deprecated_group=\"tempest\",\n help=\"RAM size flavor used for orchestration test cases\"),\n cfg.IntOpt(\"heat_instance_type_disk\",\n default=\"5\",\n deprecated_group=\"tempest\",\n help=\"Disk size requirement in GiB flavor used for \"\n \"orchestration test cases\"),\n]}\n" }, { "alpha_fraction": 0.587949275970459, "alphanum_fraction": 0.5954158902168274, "avg_line_length": 
33.692771911621094, "blob_id": "47d7a6f6269d04f46c4560f3628fe39b9e95d874", "content_id": "9dfbcc1aee107a9b477afbbef29123a4c8ceff32", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5759, "license_type": "permissive", "max_line_length": 76, "num_lines": 166, "path": "/tests/unit/task/scenarios/designate/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014 Hewlett-Packard Development Company, L.P.\n#\n# Author: Endre Karlson <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.scenarios.designate import utils\nfrom tests.unit import test\n\nDESIGNATE_UTILS = \"rally_openstack.task.scenarios.designate.utils.\"\n\n\[email protected]\nclass DesignateScenarioTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(DesignateScenarioTestCase, self).setUp()\n self.domain = mock.Mock()\n self.zone = mock.Mock()\n self.server = mock.Mock()\n\n self.client = self.clients(\"designate\", version=\"2\")\n\n @ddt.data(\n {},\n {\"email\": \"[email protected]\"})\n @ddt.data(\n {},\n {\"data\": \"127.0.0.1\"})\n # NOTE: API V2\n @ddt.data(\n {},\n {\"email\": \"[email protected]\"},\n {\"name\": \"example.name.\"},\n {\n \"email\": \"[email protected]\",\n \"name\": \"example.name.\"\n })\n def test_create_zone(self, zone_data):\n scenario = utils.DesignateScenario()\n\n random_name = \"foo\"\n\n scenario = utils.DesignateScenario(context=self.context)\n scenario.generate_random_name = mock.Mock(return_value=random_name)\n self.client.zones.create.return_value = self.zone\n\n expected = {\n \"email\": \"[email protected]\",\n \"name\": \"%s.name.\" % random_name,\n \"type_\": \"PRIMARY\"\n }\n expected.update(zone_data)\n\n # Check that the defaults / randoms are used if nothing is specified\n zone = scenario._create_zone(**zone_data)\n self.client.zones.create.assert_called_once_with(\n description=None,\n ttl=None,\n **expected)\n self.assertEqual(self.zone, zone)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"designate.create_zone\")\n\n def test_list_zones(self):\n scenario = utils.DesignateScenario(context=self.context)\n return_zones_list = scenario._list_zones()\n self.assertEqual(self.client.zones.list.return_value,\n return_zones_list)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"designate.list_zones\")\n\n def 
test_delete_zone(self):\n scenario = utils.DesignateScenario(context=self.context)\n\n zone = scenario._create_zone()\n scenario._delete_zone(zone[\"id\"])\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"designate.delete_zone\")\n\n def test_list_recordsets(self):\n scenario = utils.DesignateScenario(context=self.context)\n return_recordsets_list = scenario._list_recordsets(\"123\")\n self.assertEqual(\n self.client.recordsets.list.return_value,\n return_recordsets_list)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"designate.list_recordsets\")\n\n @ddt.data(\n {},\n {\"data\": \"127.0.0.1\"})\n def test_create_recordset(self, recordset_data):\n scenario = utils.DesignateScenario()\n\n random_name = \"foo\"\n zone_name = \"zone.name.\"\n random_recordset_name = \"%s.%s\" % (random_name, zone_name)\n\n scenario = utils.DesignateScenario(context=self.context)\n scenario.generate_random_name = mock.Mock(return_value=random_name)\n\n zone = {\"name\": zone_name, \"id\": \"123\"}\n\n # Create with randoms (name and type)\n scenario._create_recordset(zone)\n\n self.client.recordsets.create.assert_called_once_with(\n zone[\"id\"],\n name=random_recordset_name,\n type_=\"A\",\n records=[\"10.0.0.1\"])\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"designate.create_recordset\")\n\n self.client.recordsets.create.reset_mock()\n\n # Specify name\n recordset = {\"name\": \"www.zone.name.\", \"type_\": \"ASD\"}\n scenario._create_recordset(zone, recordset)\n self.client.recordsets.create.assert_called_once_with(\n zone[\"id\"],\n name=\"www.zone.name.\",\n type_=\"ASD\",\n records=[\"10.0.0.1\"])\n\n self.client.recordsets.create.reset_mock()\n\n # Specify type without underscore\n scenario._create_recordset(zone, {\"type\": \"A\"})\n self.client.recordsets.create.assert_called_once_with(\n zone[\"id\"],\n name=\"foo.zone.name.\",\n type_=\"A\",\n records=[\"10.0.0.1\"])\n\n def test_delete_recordset(self):\n scenario = 
utils.DesignateScenario(context=self.context)\n\n zone_id = mock.Mock()\n recordset_id = mock.Mock()\n scenario._delete_recordset(zone_id, recordset_id)\n self.client.recordsets.delete.assert_called_once_with(\n zone_id, recordset_id)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"designate.delete_recordset\")\n\n self.client.recordsets.delete.reset_mock()\n scenario._delete_recordset(zone_id, recordset_id)\n self.client.recordsets.delete.assert_called_once_with(\n zone_id, recordset_id)\n" }, { "alpha_fraction": 0.6484174728393555, "alphanum_fraction": 0.6522669196128845, "avg_line_length": 37.96666717529297, "blob_id": "7239868e86d15cc4c89a1d4d345b3c20bf232e61", "content_id": "bc0f9482d5f77bfd77f3e29551510fe5736556cd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2338, "license_type": "permissive", "max_line_length": 78, "num_lines": 60, "path": "/tests/unit/task/scenarios/gnocchi/test_resource.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.gnocchi import resource\nfrom tests.unit import test\n\n\nclass GnocchiResourceTestCase(test.ScenarioTestCase):\n\n def get_test_context(self):\n context = super(GnocchiResourceTestCase, self).get_test_context()\n context.update({\n \"admin\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\n \"user_id\": \"fake\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake\"}\n })\n return context\n\n def setUp(self):\n super(GnocchiResourceTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.gnocchi.metric.GnocchiService\")\n self.addCleanup(patch.stop)\n self.mock_metric = patch.start()\n\n def test_create_resource(self):\n resource_service = self.mock_metric.return_value\n scenario = resource.CreateResource(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario.run(resource_type=\"foo\")\n resource_service.create_resource.assert_called_once_with(\n \"name\", resource_type=\"foo\")\n\n def test_create_delete_resource(self):\n resource_service = self.mock_metric.return_value\n scenario = resource.CreateDeleteResource(self.context)\n scenario.generate_random_name = mock.MagicMock(return_value=\"name\")\n scenario.run(resource_type=\"foo\")\n resource_service.create_resource.assert_called_once_with(\n \"name\", resource_type=\"foo\")\n self.assertEqual(1, resource_service.delete_resource.call_count)\n" }, { "alpha_fraction": 0.5432415008544922, "alphanum_fraction": 0.5475122928619385, "avg_line_length": 33.94776153564453, "blob_id": "32beccedf373b0a023c79ec24f118930bbc7297b", "content_id": "2a1b341266cd75e11dc92e0614aa8f2efa0c3c1c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4683, "license_type": 
"permissive", "max_line_length": 79, "num_lines": 134, "path": "/rally_openstack/task/contexts/nova/flavors.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\nfrom rally.common import utils as rutils\nfrom rally.common import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected](\"required_platform\", platform=\"openstack\", admin=True)\[email protected](name=\"flavors\", platform=\"openstack\", order=340)\nclass FlavorsGenerator(context.OpenStackContext):\n \"\"\"Context creates a list of flavors.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"array\",\n \"$schema\": consts.JSON_SCHEMA,\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\",\n },\n \"ram\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"vcpus\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"disk\": {\n \"type\": \"integer\",\n \"minimum\": 0\n },\n \"swap\": {\n \"type\": \"integer\",\n \"minimum\": 0\n },\n \"ephemeral\": {\n \"type\": \"integer\",\n \"minimum\": 0\n },\n \"extra_specs\": {\n \"type\": \"object\",\n \"additionalProperties\": {\n \"type\": 
\"string\"\n }\n }\n },\n \"additionalProperties\": False,\n \"required\": [\"name\", \"ram\"]\n }\n }\n\n def setup(self):\n \"\"\"Create list of flavors.\"\"\"\n from novaclient import exceptions as nova_exceptions\n\n self.context[\"flavors\"] = {}\n\n clients = osclients.Clients(self.context[\"admin\"][\"credential\"])\n for flavor_config in self.config:\n\n extra_specs = flavor_config.get(\"extra_specs\")\n\n flavor_config = FlavorConfig(**flavor_config)\n try:\n flavor = clients.nova().flavors.create(**flavor_config)\n except nova_exceptions.Conflict:\n msg = \"Using existing flavor %s\" % flavor_config[\"name\"]\n if logging.is_debug():\n LOG.exception(msg)\n else:\n LOG.warning(msg)\n continue\n\n if extra_specs:\n flavor.set_keys(extra_specs)\n\n self.context[\"flavors\"][flavor_config[\"name\"]] = flavor.to_dict()\n LOG.debug(\"Created flavor with id '%s'\" % flavor.id)\n\n def cleanup(self):\n \"\"\"Delete created flavors.\"\"\"\n mather = rutils.make_name_matcher(*[f[\"name\"] for f in self.config])\n resource_manager.cleanup(\n names=[\"nova.flavors\"],\n admin=self.context[\"admin\"],\n superclass=mather,\n task_id=self.get_owner_id())\n\n\nclass FlavorConfig(dict):\n def __init__(self, name, ram, vcpus=1, disk=0, swap=0, ephemeral=0,\n extra_specs=None):\n \"\"\"Flavor configuration for context and flavor & image validation code.\n\n Context code uses this code to provide default values for flavor\n creation. 
Validation code uses this class as a Flavor instance to\n check image validity against a flavor that is to be created by\n the context.\n\n :param name: name of the newly created flavor\n :param ram: RAM amount for the flavor (MBs)\n :param vcpus: VCPUs amount for the flavor\n :param disk: disk amount for the flavor (GBs)\n :param swap: swap amount for the flavor (MBs)\n :param ephemeral: ephemeral disk amount for the flavor (GBs)\n :param extra_specs: is ignored\n \"\"\"\n super(FlavorConfig, self).__init__(\n name=name, ram=ram, vcpus=vcpus, disk=disk,\n swap=swap, ephemeral=ephemeral)\n self.__dict__.update(self)\n" }, { "alpha_fraction": 0.5131286382675171, "alphanum_fraction": 0.515353798866272, "avg_line_length": 35.83606719970703, "blob_id": "fc3cac9f408afe32432ca676188f166fd067f62a", "content_id": "b4684ed6bf7ae82471a5741b705cd58b0df6909f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6741, "license_type": "permissive", "max_line_length": 78, "num_lines": 183, "path": "/rally_openstack/task/contexts/sahara/sahara_cluster.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import validation\nfrom rally import exceptions\nfrom rally.task import utils as bench_utils\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.sahara import utils\n\n\nCONF = cfg.CONF\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"sahara_cluster\", platform=\"openstack\", order=441)\nclass SaharaCluster(context.OpenStackContext):\n \"\"\"Context class for setting up the Cluster an EDP job.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"plugin_name\": {\n \"type\": \"string\"\n },\n \"hadoop_version\": {\n \"type\": \"string\",\n },\n \"workers_count\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"flavor_id\": {\n \"type\": \"string\",\n },\n \"master_flavor_id\": {\n \"type\": \"string\",\n },\n \"worker_flavor_id\": {\n \"type\": \"string\",\n },\n \"floating_ip_pool\": {\n \"type\": \"string\",\n },\n \"volumes_per_node\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"volumes_size\": {\n \"type\": \"integer\",\n \"minimum\": 1\n },\n \"auto_security_group\": {\n \"type\": \"boolean\",\n },\n \"security_groups\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n },\n \"node_configs\": {\n \"type\": \"object\",\n \"additionalProperties\": True\n },\n \"cluster_configs\": {\n \"type\": \"object\",\n \"additionalProperties\": True\n },\n \"enable_anti_affinity\": {\n \"type\": \"boolean\"\n },\n \"enable_proxy\": {\n \"type\": \"boolean\"\n },\n \"use_autoconfig\": {\n \"type\": \"boolean\"\n },\n },\n \"additionalProperties\": False,\n \"required\": [\"plugin_name\", \"hadoop_version\", \"workers_count\",\n 
\"master_flavor_id\", \"worker_flavor_id\"]\n }\n\n def setup(self):\n utils.init_sahara_context(self)\n self.context[\"sahara\"][\"clusters\"] = {}\n\n wait_dict = {}\n\n for user, tenant_id in self._iterate_per_tenants():\n\n image_id = self.context[\"tenants\"][tenant_id][\"sahara\"][\"image\"]\n\n floating_ip_pool = self.config.get(\"floating_ip_pool\")\n\n temporary_context = {\n \"user\": user,\n \"tenant\": self.context[\"tenants\"][tenant_id],\n \"task\": self.context[\"task\"],\n \"owner_id\": self.context[\"owner_id\"]\n }\n scenario = utils.SaharaScenario(context=temporary_context)\n\n cluster = scenario._launch_cluster(\n plugin_name=self.config[\"plugin_name\"],\n hadoop_version=self.config[\"hadoop_version\"],\n flavor_id=self.config.get(\"flavor_id\"),\n master_flavor_id=self.config[\"master_flavor_id\"],\n worker_flavor_id=self.config[\"worker_flavor_id\"],\n workers_count=self.config[\"workers_count\"],\n image_id=image_id,\n floating_ip_pool=floating_ip_pool,\n volumes_per_node=self.config.get(\"volumes_per_node\"),\n volumes_size=self.config.get(\"volumes_size\", 1),\n auto_security_group=self.config.get(\"auto_security_group\",\n True),\n security_groups=self.config.get(\"security_groups\"),\n node_configs=self.config.get(\"node_configs\"),\n cluster_configs=self.config.get(\"cluster_configs\"),\n enable_anti_affinity=self.config.get(\"enable_anti_affinity\",\n False),\n enable_proxy=self.config.get(\"enable_proxy\", False),\n wait_active=False,\n use_autoconfig=self.config.get(\"use_autoconfig\", True)\n )\n\n self.context[\"tenants\"][tenant_id][\"sahara\"][\"cluster\"] = (\n cluster.id)\n\n # Need to save the client instance to poll for active status\n wait_dict[cluster] = scenario.clients(\"sahara\")\n\n bench_utils.wait_for(\n resource=wait_dict,\n update_resource=self.update_clusters_dict,\n is_ready=self.all_clusters_active,\n timeout=CONF.openstack.sahara_cluster_create_timeout,\n 
check_interval=CONF.openstack.sahara_cluster_check_interval)\n\n def update_clusters_dict(self, dct):\n new_dct = {}\n for cluster, client in dct.items():\n new_cl = client.clusters.get(cluster.id)\n new_dct[new_cl] = client\n\n return new_dct\n\n def all_clusters_active(self, dct):\n for cluster, client in dct.items():\n cluster_status = cluster.status.lower()\n if cluster_status == \"error\":\n msg = (\"Sahara cluster %(name)s has failed to\"\n \" %(action)s. Reason: '%(reason)s'\"\n % {\"name\": cluster.name, \"action\": \"start\",\n \"reason\": cluster.status_description})\n raise exceptions.ContextSetupFailure(ctx_name=self.get_name(),\n msg=msg)\n elif cluster_status != \"active\":\n return False\n return True\n\n def cleanup(self):\n resource_manager.cleanup(names=[\"sahara.clusters\"],\n users=self.context.get(\"users\", []),\n superclass=utils.SaharaScenario,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.5969556570053101, "alphanum_fraction": 0.5996029376983643, "avg_line_length": 36.30864334106445, "blob_id": "2fa85ff556b008396509fe54e55a1798706b3c7c", "content_id": "38df21af39651d576f4798de54bb1e722a2f3f3e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3022, "license_type": "permissive", "max_line_length": 78, "num_lines": 81, "path": "/rally_openstack/common/credential.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import logging\n\nLOG = logging.getLogger(__file__)\n\n\nclass OpenStackCredential(dict):\n \"\"\"Credential for OpenStack.\"\"\"\n\n def __init__(self, auth_url, username, password, tenant_name=None,\n project_name=None,\n permission=None,\n region_name=None, endpoint_type=None,\n domain_name=None, endpoint=None, user_domain_name=None,\n project_domain_name=None,\n https_insecure=False, https_cacert=None,\n https_cert=None, https_key=None,\n profiler_hmac_key=None, profiler_conn_str=None,\n api_info=None, **kwargs):\n if kwargs:\n raise TypeError(\"%s\" % kwargs)\n\n # TODO(andreykurilin): deprecate permission and endpoint\n\n if https_cert and https_key:\n https_cert = (https_cert, https_key)\n\n super(OpenStackCredential, self).__init__([\n (\"auth_url\", auth_url),\n (\"username\", username),\n (\"password\", password),\n (\"tenant_name\", (tenant_name or project_name)),\n (\"permission\", permission),\n (\"endpoint\", endpoint),\n (\"region_name\", region_name),\n (\"endpoint_type\", endpoint_type),\n (\"domain_name\", domain_name),\n (\"user_domain_name\", user_domain_name),\n (\"project_domain_name\", project_domain_name),\n (\"https_insecure\", https_insecure),\n (\"https_cacert\", https_cacert),\n (\"https_cert\", https_cert),\n (\"profiler_hmac_key\", profiler_hmac_key),\n (\"profiler_conn_str\", profiler_conn_str),\n (\"api_info\", api_info or {})\n ])\n\n self._clients_cache = {}\n\n def __getattr__(self, attr, default=None):\n # TODO(andreykurilin): print warning to force everyone to use this\n # object as raw dict as soon as we clean over code.\n return self.get(attr, default)\n\n def to_dict(self):\n return dict(self)\n\n def __deepcopy__(self, memodict=None):\n import copy\n return self.__class__(**copy.deepcopy(self.to_dict()))\n\n # this method is mostly used by validation step. 
let's refactor it and\n # deprecated this\n def clients(self):\n from rally_openstack.common import osclients\n\n return osclients.Clients(self, cache=self._clients_cache)\n" }, { "alpha_fraction": 0.6071075797080994, "alphanum_fraction": 0.6107271909713745, "avg_line_length": 37.96154022216797, "blob_id": "c2996fde47bf08609477346adb3d93de56dad02f", "content_id": "3e78cd2bfaac1b5b2f1e5ccc7f59f0c248c65230", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3039, "license_type": "permissive", "max_line_length": 78, "num_lines": 78, "path": "/rally_openstack/task/contexts/murano/murano_packages.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport io\nimport os\nimport zipfile\n\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import osclients\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.scenarios.murano import utils as mutils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"murano_packages\", platform=\"openstack\", order=401)\nclass PackageGenerator(context.OpenStackContext):\n \"\"\"Context class for uploading applications for murano.\"\"\"\n\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": consts.JSON_SCHEMA,\n \"properties\": {\n \"app_package\": {\n \"type\": \"string\",\n }\n },\n \"required\": [\"app_package\"],\n \"additionalProperties\": False\n }\n\n def setup(self):\n is_config_app_dir = False\n pckg_path = os.path.expanduser(self.config[\"app_package\"])\n if zipfile.is_zipfile(pckg_path):\n zip_name = pckg_path\n elif os.path.isdir(pckg_path):\n is_config_app_dir = True\n zip_name = mutils.pack_dir(pckg_path)\n else:\n msg = \"There is no zip archive or directory by this path: %s\"\n raise exceptions.ContextSetupFailure(msg=msg % pckg_path,\n ctx_name=self.get_name())\n\n for user, tenant_id in self._iterate_per_tenants():\n clients = osclients.Clients(user[\"credential\"])\n self.context[\"tenants\"][tenant_id][\"packages\"] = []\n if is_config_app_dir:\n self.context[\"tenants\"][tenant_id][\"murano_ctx\"] = zip_name\n # TODO(astudenov): use self.generate_random_name()\n with open(zip_name, \"rb\") as f:\n file = io.BytesIO(f.read())\n package = clients.murano().packages.create(\n {\"categories\": [\"Web\"], \"tags\": [\"tag\"]},\n {\"file\": file})\n\n self.context[\"tenants\"][tenant_id][\"packages\"].append(package)\n\n def 
cleanup(self):\n resource_manager.cleanup(names=[\"murano.packages\"],\n users=self.context.get(\"users\", []),\n superclass=self.__class__,\n task_id=self.get_owner_id())\n" }, { "alpha_fraction": 0.5811315178871155, "alphanum_fraction": 0.5857266187667847, "avg_line_length": 39.488372802734375, "blob_id": "f1eb878fc47906eac6a652fbc609037c028ac3d8", "content_id": "f838e1dd396068bc52c0e84a8979b09ddbbc520d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6964, "license_type": "permissive", "max_line_length": 78, "num_lines": 172, "path": "/tests/unit/task/test_scenario.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\nimport fixtures\n\nfrom rally_openstack.common.credential import OpenStackCredential\nfrom rally_openstack.task import scenario as base_scenario\nfrom tests.unit import test\n\n\nCREDENTIAL_WITHOUT_HMAC = OpenStackCredential(\n \"auth_url\",\n \"username\",\n \"password\")\n\nCREDENTIAL_WITH_HMAC = OpenStackCredential(\n \"auth_url\",\n \"username\",\n \"password\",\n profiler_hmac_key=\"test_profiler_hmac_key\")\n\n\[email protected]\nclass OpenStackScenarioTestCase(test.TestCase):\n def setUp(self):\n super(OpenStackScenarioTestCase, self).setUp()\n self.osclients = fixtures.MockPatch(\n \"rally_openstack.common.osclients.Clients\")\n self.useFixture(self.osclients)\n self.context = test.get_test_context()\n self.context.update({\"foo\": \"bar\"})\n\n def test_init(self):\n scenario = base_scenario.OpenStackScenario(self.context)\n self.assertEqual(self.context, scenario.context)\n\n def test_init_admin_context(self):\n self.context[\"admin\"] = {\"credential\": mock.Mock()}\n scenario = base_scenario.OpenStackScenario(self.context)\n self.assertEqual(self.context, scenario.context)\n self.osclients.mock.assert_called_once_with(\n self.context[\"admin\"][\"credential\"])\n\n def test_init_admin_clients(self):\n scenario = base_scenario.OpenStackScenario(\n self.context, admin_clients=\"foobar\")\n self.assertEqual(self.context, scenario.context)\n\n self.assertEqual(\"foobar\", scenario._admin_clients)\n\n def test_init_user_context(self):\n user = {\"credential\": mock.Mock(), \"tenant_id\": \"foo\"}\n self.context[\"users\"] = [user]\n self.context[\"tenants\"] = {\"foo\": {\"name\": \"bar\"}}\n self.context[\"user_choice_method\"] = \"random\"\n\n scenario = base_scenario.OpenStackScenario(self.context)\n\n self.assertEqual(user, scenario.context[\"user\"])\n self.assertEqual(self.context[\"tenants\"][\"foo\"],\n 
scenario.context[\"tenant\"])\n\n self.osclients.mock.assert_called_once_with(user[\"credential\"])\n\n def test_init_clients(self):\n scenario = base_scenario.OpenStackScenario(self.context,\n admin_clients=\"spam\",\n clients=\"ham\")\n self.assertEqual(\"spam\", scenario._admin_clients)\n self.assertEqual(\"ham\", scenario._clients)\n\n def test_init_user_clients(self):\n scenario = base_scenario.OpenStackScenario(\n self.context, clients=\"foobar\")\n self.assertEqual(self.context, scenario.context)\n\n self.assertEqual(\"foobar\", scenario._clients)\n\n @ddt.data(([], 0),\n ([(\"admin\", CREDENTIAL_WITHOUT_HMAC)], 0),\n ([(\"user\", CREDENTIAL_WITHOUT_HMAC)], 0),\n ([(\"admin\", CREDENTIAL_WITH_HMAC)], 1),\n ([(\"user\", CREDENTIAL_WITH_HMAC)], 1),\n ([(\"admin\", CREDENTIAL_WITH_HMAC),\n (\"user\", CREDENTIAL_WITH_HMAC)], 1),\n ([(\"admin\", CREDENTIAL_WITHOUT_HMAC),\n (\"user\", CREDENTIAL_WITH_HMAC)], 1),\n ([(\"admin\", CREDENTIAL_WITH_HMAC),\n (\"user\", CREDENTIAL_WITHOUT_HMAC)], 1),\n ([(\"admin\", CREDENTIAL_WITHOUT_HMAC),\n (\"user\", CREDENTIAL_WITHOUT_HMAC)], 0))\n @ddt.unpack\n @mock.patch(\"rally_openstack.task.scenario.profiler.init\")\n @mock.patch(\"rally_openstack.task.scenario.profiler.get\")\n def test_profiler_init(self, users_credentials,\n expected_call_count,\n mock_profiler_get,\n mock_profiler_init):\n for user, credential in users_credentials:\n self.context.update({user: {\"credential\": credential},\n \"iteration\": 0})\n base_scenario.OpenStackScenario(self.context)\n\n if expected_call_count:\n mock_profiler_init.assert_called_once_with(\n CREDENTIAL_WITH_HMAC[\"profiler_hmac_key\"])\n mock_profiler_get.assert_called_once_with()\n else:\n self.assertFalse(mock_profiler_init.called)\n self.assertFalse(mock_profiler_get.called)\n\n def test__choose_user_random(self):\n users = [{\"credential\": mock.Mock(), \"tenant_id\": \"foo\"}\n for _ in range(5)]\n self.context[\"users\"] = users\n self.context[\"tenants\"] = {\"foo\": 
{\"name\": \"bar\"},\n \"baz\": {\"name\": \"spam\"}}\n self.context[\"user_choice_method\"] = \"random\"\n\n scenario = base_scenario.OpenStackScenario()\n scenario._choose_user(self.context)\n self.assertIn(\"user\", self.context)\n self.assertIn(self.context[\"user\"], self.context[\"users\"])\n self.assertIn(\"tenant\", self.context)\n tenant_id = self.context[\"user\"][\"tenant_id\"]\n self.assertEqual(self.context[\"tenants\"][tenant_id],\n self.context[\"tenant\"])\n\n @ddt.data((1, \"0\", \"bar\"),\n (2, \"0\", \"foo\"),\n (3, \"1\", \"bar\"),\n (4, \"1\", \"foo\"),\n (5, \"0\", \"bar\"),\n (6, \"0\", \"foo\"),\n (7, \"1\", \"bar\"),\n (8, \"1\", \"foo\"))\n @ddt.unpack\n def test__choose_user_round_robin(self, iteration,\n expected_user_id, expected_tenant_id):\n self.context[\"iteration\"] = iteration\n self.context[\"user_choice_method\"] = \"round_robin\"\n self.context[\"users\"] = []\n self.context[\"tenants\"] = {}\n for tid in (\"foo\", \"bar\"):\n users = [{\"id\": str(i), \"tenant_id\": tid} for i in range(2)]\n self.context[\"users\"] += users\n self.context[\"tenants\"][tid] = {\"name\": tid, \"users\": users}\n\n scenario = base_scenario.OpenStackScenario()\n scenario._choose_user(self.context)\n self.assertIn(\"user\", self.context)\n self.assertIn(self.context[\"user\"], self.context[\"users\"])\n self.assertEqual(expected_user_id, self.context[\"user\"][\"id\"])\n self.assertIn(\"tenant\", self.context)\n tenant_id = self.context[\"user\"][\"tenant_id\"]\n self.assertEqual(self.context[\"tenants\"][tenant_id],\n self.context[\"tenant\"])\n self.assertEqual(expected_tenant_id, tenant_id)\n" }, { "alpha_fraction": 0.6524624228477478, "alphanum_fraction": 0.6538596153259277, "avg_line_length": 37.68918991088867, "blob_id": "ac1614e83b16eaba6973b9df49d4005ce6349488", "content_id": "6c6e390e7a5ff4ac93e96ac04c29a9c6f53eaeee", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
2863, "license_type": "permissive", "max_line_length": 78, "num_lines": 74, "path": "/rally_openstack/task/scenarios/watcher/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.task import atomic\nfrom rally.task import utils\n\nfrom rally_openstack.task import scenario\n\n\nCONF = cfg.CONF\n\n\nclass WatcherScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Watcher scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"watcher.create_audit_template\")\n def _create_audit_template(self, goal_id, strategy_id):\n \"\"\"Create Audit Template in DB\n\n :param goal_id: UUID Goal\n :param strategy_id: UUID Strategy\n :return: Audit Template object\n \"\"\"\n return self.admin_clients(\"watcher\").audit_template.create(\n goal=goal_id,\n strategy=strategy_id,\n name=self.generate_random_name())\n\n @atomic.action_timer(\"watcher.delete_audit_template\")\n def _delete_audit_template(self, audit_template):\n \"\"\"Delete Audit Template from DB\n\n :param audit_template: Audit Template object\n \"\"\"\n self.admin_clients(\"watcher\").audit_template.delete(audit_template)\n\n @atomic.action_timer(\"watcher.list_audit_templates\")\n def _list_audit_templates(self, name=None, goal=None, strategy=None,\n limit=None, sort_key=None, sort_dir=None,\n detail=False):\n return self.admin_clients(\"watcher\").audit_template.list(\n 
name=name, goal=goal, strategy=strategy, limit=limit,\n sort_key=sort_key, sort_dir=sort_dir, detail=detail)\n\n @atomic.action_timer(\"watcher.create_audit\")\n def _create_audit(self, audit_template_uuid):\n audit = self.admin_clients(\"watcher\").audit.create(\n audit_template_uuid=audit_template_uuid,\n audit_type=\"ONESHOT\")\n utils.wait_for_status(\n audit,\n ready_statuses=[\"SUCCEEDED\"],\n failure_statuses=[\"FAILED\"],\n status_attr=\"state\",\n update_resource=utils.get_from_manager(),\n timeout=CONF.openstack.watcher_audit_launch_timeout,\n check_interval=CONF.openstack.watcher_audit_launch_poll_interval,\n id_attr=\"uuid\"\n )\n return audit\n\n @atomic.action_timer(\"watcher.delete_audit\")\n def _delete_audit(self, audit):\n self.admin_clients(\"watcher\").audit.delete(audit.uuid)\n" }, { "alpha_fraction": 0.5893868207931519, "alphanum_fraction": 0.5915939211845398, "avg_line_length": 39.08076858520508, "blob_id": "0c3ff5e3a800489d8d5513338822bfa2f4e7ed14", "content_id": "3f1ccfe65a5c6405b5b41fc63c8f64f3f67a60e1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10421, "license_type": "permissive", "max_line_length": 79, "num_lines": 260, "path": "/rally_openstack/task/types.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport operator\nimport re\n\nfrom rally.common import logging\nfrom rally.common.plugin import plugin\nfrom rally import exceptions\nfrom rally.task import types\n\nfrom rally_openstack.common import osclients\nfrom rally_openstack.common.services.image import image\nfrom rally_openstack.common.services.storage import block\n\n\nLOG = logging.getLogger(__name__)\n\n\nconfigure = plugin.configure\n\n\nclass OpenStackResourceType(types.ResourceType):\n \"\"\"A base class for OpenStack ResourceTypes plugins with help-methods\"\"\"\n\n def __init__(self, context=None, cache=None):\n super(OpenStackResourceType, self).__init__(context, cache)\n\n self._clients = None\n if self._context.get(\"admin\"):\n self._clients = osclients.Clients(\n self._context[\"admin\"][\"credential\"])\n elif self._context.get(\"users\"):\n self._clients = osclients.Clients(\n self._context[\"users\"][0][\"credential\"])\n\n def _find_resource(self, resource_spec, resources):\n \"\"\"Return the resource whose name matches the pattern.\n\n .. note:: This method is a modified version of\n `rally.task.types.obj_from_name`. The difference is supporting the\n case of returning the latest version of resource in case of\n `accurate=False` option.\n\n :param resource_spec: resource specification to find.\n Expected keys:\n\n * name - The exact name of resource to search. If no exact match\n and value of *accurate* key is False (default behaviour), name\n will be interpreted as a regexp\n * regexp - a regexp of resource name to match. 
If several resources\n match and value of *accurate* key is False (default behaviour),\n the latest resource will be returned.\n :param resources: iterable containing all resources\n :raises InvalidScenarioArgument: if the pattern does\n not match anything.\n\n :returns: resource object mapped to `name` or `regex`\n \"\"\"\n if \"name\" in resource_spec:\n # In a case of pattern string exactly matches resource name\n matching_exact = [resource for resource in resources\n if resource.name == resource_spec[\"name\"]]\n if len(matching_exact) == 1:\n return matching_exact[0]\n elif len(matching_exact) > 1:\n raise exceptions.InvalidScenarioArgument(\n \"%(typename)s with name '%(pattern)s' \"\n \"is ambiguous, possible matches \"\n \"by id: %(ids)s\" % {\n \"typename\": self.get_name().title(),\n \"pattern\": resource_spec[\"name\"],\n \"ids\": \", \".join(map(operator.attrgetter(\"id\"),\n matching_exact))})\n if resource_spec.get(\"accurate\", False):\n raise exceptions.InvalidScenarioArgument(\n \"%(typename)s with name '%(name)s' not found\" % {\n \"typename\": self.get_name().title(),\n \"name\": resource_spec[\"name\"]})\n # Else look up as regex\n patternstr = resource_spec[\"name\"]\n elif \"regex\" in resource_spec:\n patternstr = resource_spec[\"regex\"]\n else:\n raise exceptions.InvalidScenarioArgument(\n \"%(typename)s 'id', 'name', or 'regex' not found \"\n \"in '%(resource_spec)s' \" % {\n \"typename\": self.get_name().title(),\n \"resource_spec\": resource_spec})\n\n pattern = re.compile(patternstr)\n matching = [resource for resource in resources\n if re.search(pattern, resource.name or \"\")]\n if not matching:\n raise exceptions.InvalidScenarioArgument(\n \"%(typename)s with pattern '%(pattern)s' not found\" % {\n \"typename\": self.get_name().title(),\n \"pattern\": pattern.pattern})\n elif len(matching) > 1:\n if not resource_spec.get(\"accurate\", False):\n return sorted(matching, key=lambda o: o.name or \"\")[-1]\n\n raise 
exceptions.InvalidScenarioArgument(\n \"%(typename)s with name '%(pattern)s' is ambiguous, possible \"\n \"matches by id: %(ids)s\" % {\n \"typename\": self.get_name().title(),\n \"pattern\": pattern.pattern,\n \"ids\": \", \".join(map(operator.attrgetter(\"id\"),\n matching))})\n return matching[0]\n\n\[email protected](name=\"nova_flavor\")\nclass Flavor(OpenStackResourceType):\n \"\"\"Find Nova's flavor ID by name or regexp.\"\"\"\n\n def pre_process(self, resource_spec, config):\n resource_id = resource_spec.get(\"id\")\n if not resource_id:\n novaclient = self._clients.nova()\n resource_id = types._id_from_name(\n resource_config=resource_spec,\n resources=novaclient.flavors.list(),\n typename=\"flavor\")\n return resource_id\n\n\[email protected](name=\"glance_image\")\nclass GlanceImage(OpenStackResourceType):\n \"\"\"Find Glance's image ID by name or regexp.\"\"\"\n\n def pre_process(self, resource_spec, config):\n resource_id = resource_spec.get(\"id\")\n list_kwargs = resource_spec.get(\"list_kwargs\", {})\n\n if not resource_id:\n cache_id = hash(frozenset(list_kwargs.items()))\n if cache_id not in self._cache:\n glance = image.Image(self._clients)\n self._cache[cache_id] = glance.list_images(**list_kwargs)\n images = self._cache[cache_id]\n resource = self._find_resource(resource_spec, images)\n return resource.id\n return resource_id\n\n\[email protected](name=\"glance_image_args\")\nclass GlanceImageArguments(OpenStackResourceType):\n \"\"\"Process Glance image create options to look similar in case of V1/V2.\"\"\"\n def pre_process(self, resource_spec, config):\n resource_spec = copy.deepcopy(resource_spec)\n if \"is_public\" in resource_spec:\n if \"visibility\" in resource_spec:\n resource_spec.pop(\"is_public\")\n else:\n visibility = (\"public\" if resource_spec.pop(\"is_public\")\n else \"private\")\n resource_spec[\"visibility\"] = visibility\n return resource_spec\n\n\[email protected](name=\"ec2_image\")\nclass 
EC2Image(OpenStackResourceType):\n \"\"\"Find EC2 image ID.\"\"\"\n\n def pre_process(self, resource_spec, config):\n if \"name\" not in resource_spec and \"regex\" not in resource_spec:\n # NOTE(wtakase): gets resource name from OpenStack id\n glanceclient = self._clients.glance()\n resource_name = types._name_from_id(\n resource_config=resource_spec,\n resources=list(glanceclient.images.list()),\n typename=\"image\")\n resource_spec[\"name\"] = resource_name\n\n # NOTE(wtakase): gets EC2 resource id from name or regex\n ec2client = self._clients.ec2()\n resource_ec2_id = types._id_from_name(\n resource_config=resource_spec,\n resources=list(ec2client.get_all_images()),\n typename=\"ec2_image\")\n return resource_ec2_id\n\n\[email protected](name=\"cinder_volume_type\")\nclass VolumeType(OpenStackResourceType):\n \"\"\"Find Cinder volume type ID by name or regexp.\"\"\"\n\n def pre_process(self, resource_spec, config):\n resource_id = resource_spec.get(\"id\")\n if not resource_id:\n cinder = block.BlockStorage(self._clients)\n resource_id = types._id_from_name(\n resource_config=resource_spec,\n resources=cinder.list_types(),\n typename=\"volume_type\")\n return resource_id\n\n\[email protected](name=\"neutron_network\")\nclass NeutronNetwork(OpenStackResourceType):\n \"\"\"Find Neutron network ID by it's name.\"\"\"\n def pre_process(self, resource_spec, config):\n resource_id = resource_spec.get(\"id\")\n if resource_id:\n return resource_id\n else:\n neutronclient = self._clients.neutron()\n for net in neutronclient.list_networks()[\"networks\"]:\n if net[\"name\"] == resource_spec.get(\"name\"):\n return net[\"id\"]\n\n raise exceptions.InvalidScenarioArgument(\n \"Neutron network with name '{name}' not found\".format(\n name=resource_spec.get(\"name\")))\n\n\[email protected](name=\"watcher_strategy\")\nclass WatcherStrategy(OpenStackResourceType):\n \"\"\"Find Watcher strategy ID by it's name.\"\"\"\n\n def pre_process(self, resource_spec, config):\n 
resource_id = resource_spec.get(\"id\")\n if not resource_id:\n watcherclient = self._clients.watcher()\n resource_id = types._id_from_name(\n resource_config=resource_spec,\n resources=[watcherclient.strategy.get(\n resource_spec.get(\"name\"))],\n typename=\"strategy\",\n id_attr=\"uuid\")\n return resource_id\n\n\[email protected](name=\"watcher_goal\")\nclass WatcherGoal(OpenStackResourceType):\n \"\"\"Find Watcher goal ID by it's name.\"\"\"\n\n def pre_process(self, resource_spec, config):\n resource_id = resource_spec.get(\"id\")\n if not resource_id:\n watcherclient = self._clients.watcher()\n resource_id = types._id_from_name(\n resource_config=resource_spec,\n resources=[watcherclient.goal.get(resource_spec.get(\"name\"))],\n typename=\"goal\",\n id_attr=\"uuid\")\n return resource_id\n" }, { "alpha_fraction": 0.6433092355728149, "alphanum_fraction": 0.64647376537323, "avg_line_length": 37.13793182373047, "blob_id": "421e84898697217cda146c368fb5fdff40dfcee8", "content_id": "da912f4bbc9babdfcdf7977f3467b9ea74d378c2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2212, "license_type": "permissive", "max_line_length": 78, "num_lines": 58, "path": "/tests/unit/task/scenarios/cinder/test_volume_backups.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.scenarios.cinder import volume_backups\nfrom tests.unit import test\n\n\nclass CinderBackupTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(CinderBackupTestCase, self).setUp()\n patch = mock.patch(\n \"rally_openstack.common.services.storage.block.BlockStorage\")\n self.addCleanup(patch.stop)\n self.mock_cinder = patch.start()\n\n def _get_context(self):\n context = test.get_test_context()\n context.update({\n \"admin\": {\n \"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()\n },\n \"user\": {\"id\": \"fake_user_id\",\n \"credential\": mock.MagicMock()},\n \"tenant\": {\"id\": \"fake\", \"name\": \"fake\"}})\n return context\n\n def test_create_incremental_volume_backup(self):\n mock_service = self.mock_cinder.return_value\n scenario = volume_backups.CreateIncrementalVolumeBackup(\n self._get_context())\n\n volume_kwargs = {\"some_var\": \"zaq\"}\n backup_kwargs = {\"incremental\": True}\n\n scenario.run(1, do_delete=True, create_volume_kwargs=volume_kwargs,\n create_backup_kwargs=backup_kwargs)\n\n self.assertEqual(2, mock_service.create_backup.call_count)\n mock_service.create_volume.assert_called_once_with(1, **volume_kwargs)\n mock_service.delete_backup.assert_has_calls(\n mock_service.create_backup.return_value)\n mock_service.delete_volume.assert_called_once_with(\n mock_service.create_volume.return_value)\n" }, { "alpha_fraction": 0.6241691708564758, "alphanum_fraction": 0.6314199566841125, "avg_line_length": 29.090909957885742, "blob_id": "f85da14ed172541618d806a00535871924a56a06", "content_id": "8b40235d87b310a18cb4a97f33a513b216685afa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1655, "license_type": "permissive", "max_line_length": 78, "num_lines": 55, "path": 
"/rally_openstack/task/scenarios/vm/workloads/siege.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\n\nSIEGE_RE = re.compile(r\"^(Throughput|Transaction rate):\\s+(\\d+\\.\\d+)\\s+.*\")\n\n\ndef get_instances():\n outputs = json.load(sys.stdin)\n for output in outputs:\n if output[\"output_key\"] == \"wp_nodes\":\n for node in output[\"output_value\"].values():\n yield node[\"wordpress-network\"][0]\n\n\ndef generate_urls_list(instances):\n urls = tempfile.NamedTemporaryFile(delete=False)\n with urls:\n for inst in instances:\n for i in range(1, 1000):\n urls.write(\"http://%s/wordpress/index.php/%d/\\n\" % (inst, i))\n return urls.name\n\n\ndef run():\n instances = list(get_instances())\n urls = generate_urls_list(instances)\n out = subprocess.check_output(\n [\"siege\", \"-q\", \"-t\", \"60S\", \"-b\", \"-f\", urls],\n stderr=subprocess.STDOUT)\n for line in out.splitlines():\n m = SIEGE_RE.match(line)\n if m:\n sys.stdout.write(\"%s:%s\\n\" % m.groups())\n\n\nif __name__ == \"__main__\":\n sys.exit(run())\n" }, { "alpha_fraction": 0.5414982438087463, "alphanum_fraction": 0.5441141724586487, "avg_line_length": 37.227272033691406, "blob_id": "68f132da022e090d560d11b4eb74a296f5e04f6c", "content_id": "66124b8b9e6e30ff760f056e1ec268c90429c095", "detected_licenses": [ "Apache-2.0" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4205, "license_type": "permissive", "max_line_length": 77, "num_lines": 110, "path": "/rally_openstack/task/contexts/vm/image_command_customizer.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\n\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.task import context\nfrom rally_openstack.task.contexts.vm import custom_image\nfrom rally_openstack.task.scenarios.vm import utils as vm_utils\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=\"image_command_customizer\", platform=\"openstack\",\n order=501)\nclass ImageCommandCustomizerContext(custom_image.BaseCustomImageGenerator):\n \"\"\"Context class for generating image customized by a command execution.\n\n Run a command specified by configuration to prepare image.\n\n Use this script e.g. 
to download and install something.\n \"\"\"\n\n CONFIG_SCHEMA = copy.deepcopy(\n custom_image.BaseCustomImageGenerator.CONFIG_SCHEMA)\n CONFIG_SCHEMA[\"definitions\"] = {\n \"stringOrStringList\": {\n \"anyOf\": [\n {\"type\": \"string\", \"description\": \"just a string\"},\n {\n \"type\": \"array\", \"description\": \"just a list of strings\",\n \"items\": {\"type\": \"string\"}\n }\n ]\n },\n \"scriptFile\": {\n \"type\": \"object\",\n \"properties\": {\n \"script_file\": {\"$ref\": \"#/definitions/stringOrStringList\"},\n \"interpreter\": {\"$ref\": \"#/definitions/stringOrStringList\"},\n \"command_args\": {\"$ref\": \"#/definitions/stringOrStringList\"}\n },\n \"required\": [\"script_file\", \"interpreter\"],\n \"additionalProperties\": False,\n },\n \"scriptInline\": {\n \"type\": \"object\",\n \"properties\": {\n \"script_inline\": {\"type\": \"string\"},\n \"interpreter\": {\"$ref\": \"#/definitions/stringOrStringList\"},\n \"command_args\": {\"$ref\": \"#/definitions/stringOrStringList\"}\n },\n \"required\": [\"script_inline\", \"interpreter\"],\n \"additionalProperties\": False,\n },\n \"commandPath\": {\n \"type\": \"object\",\n \"properties\": {\n \"remote_path\": {\"$ref\": \"#/definitions/stringOrStringList\"},\n \"local_path\": {\"type\": \"string\"},\n \"command_args\": {\"$ref\": \"#/definitions/stringOrStringList\"}\n },\n \"required\": [\"remote_path\"],\n \"additionalProperties\": False,\n },\n \"commandDict\": {\n \"oneOf\": [\n {\"$ref\": \"#/definitions/scriptFile\"},\n {\"$ref\": \"#/definitions/scriptInline\"},\n {\"$ref\": \"#/definitions/commandPath\"},\n ],\n }\n }\n CONFIG_SCHEMA[\"properties\"][\"command\"] = {\n \"$ref\": \"#/definitions/commandDict\"\n }\n\n def _customize_image(self, server, fip, user):\n code, out, err = vm_utils.VMScenario(self.context)._run_command(\n fip[\"ip\"], self.config[\"port\"],\n self.config[\"username\"], self.config.get(\"password\"),\n command=self.config[\"command\"],\n 
pkey=user[\"keypair\"][\"private\"])\n\n if code:\n raise exceptions.ScriptError(\n message=\"Command `%(command)s' execution failed,\"\n \" code %(code)d:\\n\"\n \"STDOUT:\\n============================\\n\"\n \"%(out)s\\n\"\n \"STDERR:\\n============================\\n\"\n \"%(err)s\\n\"\n \"============================\\n\"\n % {\"command\": self.config[\"command\"], \"code\": code,\n \"out\": out, \"err\": err})\n\n return code, out, err\n" }, { "alpha_fraction": 0.6564597487449646, "alphanum_fraction": 0.6581375598907471, "avg_line_length": 38.081966400146484, "blob_id": "db5912737b012cfb68d7e7fa99ae437cc7fdc551", "content_id": "c6f24ec4d5160be85d7fac0ac4465759c6b81382", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2384, "license_type": "permissive", "max_line_length": 75, "num_lines": 61, "path": "/tests/unit/task/contexts/cinder/test_volume_types.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.cinder import volume_types\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.cinder.volume_types\"\nSERVICE = \"rally_openstack.common.services.storage\"\n\n\nclass VolumeTypeGeneratorTestCase(test.ContextTestCase):\n def setUp(self):\n super(VolumeTypeGeneratorTestCase, self).setUp()\n self.context.update({\"admin\": {\"credential\": \"admin_creds\"}})\n\n @mock.patch(\"%s.block.BlockStorage\" % SERVICE)\n def test_setup(self, mock_block_storage):\n self.context.update({\"config\": {\"volume_types\": [\"foo\", \"bar\"]}})\n mock_service = mock_block_storage.return_value\n mock_service.create_volume_type.side_effect = (\n mock.Mock(id=\"foo-id\"), mock.Mock(id=\"bar-id\"))\n\n vtype_ctx = volume_types.VolumeTypeGenerator(self.context)\n vtype_ctx.setup()\n\n mock_service.create_volume_type.assert_has_calls(\n [mock.call(\"foo\"), mock.call(\"bar\")])\n self.assertEqual(self.context[\"volume_types\"],\n [{\"id\": \"foo-id\", \"name\": \"foo\"},\n {\"id\": \"bar-id\", \"name\": \"bar\"}])\n\n @mock.patch(\"%s.utils.make_name_matcher\" % CTX)\n @mock.patch(\"%s.resource_manager.cleanup\" % CTX)\n def test_cleanup(self, mock_cleanup, mock_make_name_matcher):\n self.context.update({\n \"config\": {\"volume_types\": [\"foo\", \"bar\"]}})\n\n vtype_ctx = volume_types.VolumeTypeGenerator(self.context)\n\n vtype_ctx.cleanup()\n\n mock_cleanup.assert_called_once_with(\n names=[\"cinder.volume_types\"],\n admin=self.context[\"admin\"],\n superclass=mock_make_name_matcher.return_value,\n task_id=vtype_ctx.get_owner_id())\n\n mock_make_name_matcher.assert_called_once_with(\"foo\", \"bar\")\n" }, { "alpha_fraction": 0.5862318873405457, "alphanum_fraction": 0.5891304612159729, "avg_line_length": 37.87323760986328, "blob_id": "e62bc972e7577940cc50f6f21fccd0e2c37d413c", "content_id": 
"6ce01b5667d04782bd459ad4e921d66ca14f816f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5520, "license_type": "permissive", "max_line_length": 78, "num_lines": 142, "path": "/rally_openstack/task/scenario.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport functools\nimport random\n\nfrom osprofiler import profiler\nfrom rally.common import cfg\nfrom rally.common.plugin import plugin\nfrom rally.task import context\nfrom rally.task import scenario\n\nfrom rally_openstack.common import osclients\n\n\nconfigure = functools.partial(scenario.configure, platform=\"openstack\")\n\nCONF = cfg.CONF\n\n\[email protected]_default_context(\"users@openstack\", {})\[email protected]_meta(inherit=True)\nclass OpenStackScenario(scenario.Scenario):\n \"\"\"Base class for all OpenStack scenarios.\"\"\"\n\n def __init__(self, context=None, admin_clients=None, clients=None):\n super(OpenStackScenario, self).__init__(context)\n if context:\n if admin_clients is None and \"admin\" in context:\n self._admin_clients = osclients.Clients(\n context[\"admin\"][\"credential\"])\n if clients is None:\n if \"users\" in context and \"user\" not in context:\n self._choose_user(context)\n\n if \"user\" in context:\n self._clients = osclients.Clients(\n 
context[\"user\"][\"credential\"])\n\n if admin_clients:\n self._admin_clients = admin_clients\n\n if clients:\n self._clients = clients\n\n self._init_profiler(context)\n\n def _choose_user(self, context):\n \"\"\"Choose one user from users context\n\n We are choosing on each iteration one user\n\n \"\"\"\n if context[\"user_choice_method\"] == \"random\":\n user = random.choice(context[\"users\"])\n tenant = context[\"tenants\"][user[\"tenant_id\"]]\n else:\n # Second and last case - 'round_robin'.\n tenants_amount = len(context[\"tenants\"])\n # NOTE(amaretskiy): iteration is subtracted by `1' because it\n # starts from `1' but we count from `0'\n iteration = context[\"iteration\"] - 1\n tenant_index = int(iteration % tenants_amount)\n tenant_id = sorted(context[\"tenants\"].keys())[tenant_index]\n tenant = context[\"tenants\"][tenant_id]\n users = context[\"tenants\"][tenant_id][\"users\"]\n user_index = int((iteration / tenants_amount) % len(users))\n user = users[user_index]\n\n context[\"user\"], context[\"tenant\"] = user, tenant\n\n def clients(self, client_type, version=None):\n \"\"\"Returns a python openstack client of the requested type.\n\n Only one non-admin user is used per every run of scenario.\n\n :param client_type: Client type (\"nova\"/\"glance\" etc.)\n :param version: client version (\"1\"/\"2\" etc.)\n\n :returns: Standard python OpenStack client instance\n \"\"\"\n client = getattr(self._clients, client_type)\n\n return client(version) if version is not None else client()\n\n def admin_clients(self, client_type, version=None):\n \"\"\"Returns a python admin openstack client of the requested type.\n\n :param client_type: Client type (\"nova\"/\"glance\" etc.)\n :param version: client version (\"1\"/\"2\" etc.)\n\n :returns: Python openstack client object\n \"\"\"\n client = getattr(self._admin_clients, client_type)\n\n return client(version) if version is not None else client()\n\n def _init_profiler(self, context):\n \"\"\"Inits the 
profiler.\"\"\"\n if not CONF.openstack.enable_profiler:\n return\n\n # False statement here means that Scenario class is used outside the\n # runner as some kind of utils\n if context is not None and \"iteration\" in context:\n\n profiler_hmac_key = None\n profiler_conn_str = None\n if context.get(\"admin\"):\n cred = context[\"admin\"][\"credential\"]\n if cred.profiler_hmac_key is not None:\n profiler_hmac_key = cred.profiler_hmac_key\n profiler_conn_str = cred.profiler_conn_str\n if context.get(\"user\"):\n cred = context[\"user\"][\"credential\"]\n if cred.profiler_hmac_key is not None:\n profiler_hmac_key = cred.profiler_hmac_key\n profiler_conn_str = cred.profiler_conn_str\n if profiler_hmac_key is None:\n return\n profiler.init(profiler_hmac_key)\n trace_id = profiler.get().get_base_id()\n complete_data = {\"title\": \"OSProfiler Trace-ID\",\n \"chart_plugin\": \"OSProfiler\",\n \"data\": {\"trace_id\": trace_id,\n \"conn_str\": profiler_conn_str,\n \"taskID\": context[\"task\"][\"uuid\"],\n \"workload_uuid\": context[\"owner_id\"],\n \"iteration\": context[\"iteration\"]}}\n self.add_output(complete=complete_data)\n" }, { "alpha_fraction": 0.5695561170578003, "alphanum_fraction": 0.5757647752761841, "avg_line_length": 39.11259460449219, "blob_id": "e887f72869e070ef9b458ca29dc3293002f00c5d", "content_id": "b7b29b31e1ff0b7c8fbb0991a8321b08d92f8f79", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42038, "license_type": "permissive", "max_line_length": 79, "num_lines": 1048, "path": "/tests/unit/common/test_validators.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2017: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport ddt\nfrom unittest import mock\n\nfrom glanceclient import exc as glance_exc\nfrom novaclient import exceptions as nova_exc\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import validators\nfrom tests.unit import test\n\n\nPATH = \"rally_openstack.common.validators\"\n\n\ncontext = {\n \"admin\": mock.MagicMock(),\n \"users\": [mock.MagicMock()],\n}\n\nconfig = dict(args={\"image\": {\"id\": \"fake_id\",\n \"min_ram\": 10,\n \"size\": 1024 ** 3,\n \"min_disk\": 10.0 * (1024 ** 3),\n \"image_name\": \"foo_image\"},\n \"flavor\": {\"id\": \"fake_flavor_id\",\n \"name\": \"test\"},\n \"foo_image\": {\"id\": \"fake_image_id\"}\n },\n context={\"images\": {\"image_name\": \"foo_image\"},\n \"api_versions@openstack\": mock.MagicMock(),\n \"zones\": {\"set_zone_in_network\": True}}\n )\n\n\[email protected](\"rally_openstack.task.contexts.keystone.roles.RoleGenerator\")\ndef test_with_roles_ctx(mock_role_generator):\n\n @validators.with_roles_ctx()\n def func(config, context):\n pass\n\n config = {\"contexts\": {}}\n context = {\"admin\": {\"credential\": mock.MagicMock()},\n \"task\": mock.MagicMock()}\n func(config, context)\n mock_role_generator().setup.assert_not_called()\n\n config = {\"contexts\": {\"roles\": \"admin\"}}\n func(config, context)\n mock_role_generator().setup.assert_called_once_with()\n\n\nclass RequiredOpenStackValidatorTestCase(test.TestCase):\n def validate(self):\n validator = validators.RequiredOpenStackValidator(admin=True)\n validator.validate(\n 
{\"platforms\": {\"openstack\": {\"admin\": \"foo\"}}}, {}, None, None)\n\n validator = validators.RequiredOpenStackValidator(users=True)\n validator.validate(\n {\"platforms\": {\"openstack\": {\"admin\": \"foo\"}}}, {}, None, None)\n\n validator = validators.RequiredOpenStackValidator(users=True)\n validator.validate(\n {\"platforms\": {\"openstack\": {\"users\": [\"foo\"]}}}, {}, None, None)\n\n def test_validate_failed(self):\n # case #1: wrong configuration of validator\n validator = validators.RequiredOpenStackValidator()\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, {}, {}, None, None)\n self.assertEqual(\n \"You should specify admin=True or users=True or both.\",\n e.message)\n\n # case #2: admin is not present\n validator = validators.RequiredOpenStackValidator(admin=True)\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate,\n {\"platforms\": {\"openstack\": {}}}, {}, None, None)\n self.assertEqual(\"No admin credentials for openstack\",\n e.message)\n\n # case #3: users are not present\n validator = validators.RequiredOpenStackValidator(users=True)\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate,\n {\"platforms\": {\"openstack\": {}}}, {}, None, None)\n self.assertEqual(\"No user credentials for openstack\",\n e.message)\n\n\[email protected]\nclass ImageExistsValidatorTestCase(test.TestCase):\n\n def setUp(self):\n super(ImageExistsValidatorTestCase, self).setUp()\n self.validator = validators.ImageExistsValidator(\"image\", True)\n self.config = copy.deepcopy(config)\n self.context = copy.deepcopy(context)\n\n @ddt.unpack\n @ddt.data(\n {\"param_name\": \"fake_param\", \"nullable\": True, \"err_msg\": None},\n {\"param_name\": \"fake_param\", \"nullable\": False,\n \"err_msg\": \"Parameter fake_param is not specified.\"},\n {\"param_name\": \"image\", \"nullable\": True, \"err_msg\": None},\n )\n def test_validator(self, param_name, 
nullable, err_msg, ex=False):\n validator = validators.ImageExistsValidator(param_name,\n nullable)\n\n clients = self.context[\"users\"][0].clients.return_value\n\n clients.glance().images.get = mock.Mock()\n if ex:\n clients.glance().images.get.side_effect = ex\n\n if err_msg:\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, self.config, None, None)\n self.assertEqual(err_msg, e.message)\n else:\n result = validator.validate(self.config, self.context, None,\n None)\n self.assertIsNone(result)\n\n def test_validator_image_from_context(self):\n config = {\n \"args\": {\"image\": {\"regex\": r\"^foo$\"}},\n \"contexts\": {\"images\": {\"image_name\": \"foo\"}}}\n\n self.validator.validate(self.context, config, None, None)\n\n @mock.patch(\"%s.openstack_types.GlanceImage\" % PATH)\n def test_validator_image_not_in_context(self, mock_glance_image):\n mock_glance_image.return_value.pre_process.return_value = \"image_id\"\n config = {\n \"args\": {\"image\": \"fake_image\"},\n \"contexts\": {\n \"images\": {\"fake_image_name\": \"foo\"}}}\n\n clients = self.context[\n \"users\"][0][\"credential\"].clients.return_value\n clients.glance().images.get = mock.Mock()\n\n result = self.validator.validate(self.context, config, None, None)\n self.assertIsNone(result)\n\n mock_glance_image.assert_called_once_with(\n context={\"admin\": {\n \"credential\": self.context[\"users\"][0][\"credential\"]}})\n mock_glance_image.return_value.pre_process.assert_called_once_with(\n config[\"args\"][\"image\"], config={})\n clients.glance().images.get.assert_called_with(\"image_id\")\n\n exs = [exceptions.InvalidScenarioArgument(),\n glance_exc.HTTPNotFound()]\n for ex in exs:\n clients.glance().images.get.side_effect = ex\n\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator.validate, self.context, config, None, None)\n\n self.assertEqual(\"Image 'fake_image' not found\", e.message)\n\n\[email 
protected]\nclass ExternalNetworkExistsValidatorTestCase(test.TestCase):\n\n def setUp(self):\n super(ExternalNetworkExistsValidatorTestCase, self).setUp()\n self.validator = validators.ExternalNetworkExistsValidator(\"net\")\n self.config = copy.deepcopy(config)\n self.context = copy.deepcopy(context)\n\n @ddt.unpack\n @ddt.data(\n {\"foo_conf\": {}},\n {\"foo_conf\": {\"args\": {\"net\": \"custom\"}}},\n {\"foo_conf\": {\"args\": {\"net\": \"non_exist\"}},\n \"err_msg\": \"External (floating) network with name non_exist\"\n \" not found by user {}. Available networks:\"\n \" [{}, {}]\"},\n {\"foo_conf\": {\"args\": {\"net\": \"custom\"}},\n \"net1_name\": {\"name\": {\"net\": \"public\"}},\n \"net2_name\": {\"name\": {\"net\": \"custom\"}},\n \"err_msg\": \"External (floating) network with name custom\"\n \" not found by user {}. Available networks:\"\n \" [{}, {}]\"}\n )\n def test_validator(self, foo_conf, net1_name=\"public\", net2_name=\"custom\",\n err_msg=\"\"):\n\n user = self.context[\"users\"][0]\n\n net1 = {\"name\": net1_name, \"router:external\": True}\n net2 = {\"name\": net2_name, \"router:external\": True}\n\n user[\"credential\"].clients().neutron().list_networks.return_value = {\n \"networks\": [net1, net2]}\n if err_msg:\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator.validate, self.context, foo_conf,\n None, None)\n self.assertEqual(\n err_msg.format(user[\"credential\"].username, net1, net2),\n e.message)\n else:\n result = self.validator.validate(self.context, foo_conf,\n None, None)\n self.assertIsNone(result, \"Unexpected result '%s'\" % result)\n\n\[email protected]\nclass RequiredNeutronExtensionsValidatorTestCase(test.TestCase):\n\n def setUp(self):\n super(RequiredNeutronExtensionsValidatorTestCase, self).setUp()\n self.config = copy.deepcopy(config)\n self.context = copy.deepcopy(context)\n\n def test_validator(self):\n validator = validators.RequiredNeutronExtensionsValidator(\n 
\"existing_extension\")\n clients = self.context[\"users\"][0][\"credential\"].clients()\n\n clients.neutron().list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"existing_extension\"}]}\n\n validator.validate(self.context, {}, None, None)\n\n def test_validator_failed(self):\n err_msg = \"Neutron extension absent_extension is not configured\"\n validator = validators.RequiredNeutronExtensionsValidator(\n \"absent_extension\")\n clients = self.context[\"users\"][0][\"credential\"].clients()\n\n clients.neutron().list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"existing_extension\"}]}\n\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, {}, None, None)\n self.assertEqual(err_msg, e.message)\n\n\nclass FlavorExistsValidatorTestCase(test.TestCase):\n\n def setUp(self):\n super(FlavorExistsValidatorTestCase, self).setUp()\n self.validator = validators.FlavorExistsValidator(\n param_name=\"foo_flavor\")\n self.config = copy.deepcopy(config)\n self.context = copy.deepcopy(context)\n\n def test__get_validated_flavor_wrong_value_in_config(self):\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator._get_validated_flavor, self.config,\n mock.MagicMock(), \"foo_flavor\")\n self.assertEqual(\"Parameter foo_flavor is not specified.\",\n e.message)\n\n @mock.patch(\"%s.openstack_types.Flavor\" % PATH)\n def test__get_validated_flavor(self, mock_flavor):\n mock_flavor.return_value.pre_process.return_value = \"flavor_id\"\n\n clients = mock.Mock()\n clients.nova().flavors.get.return_value = \"flavor\"\n\n result = self.validator._get_validated_flavor(self.config,\n clients,\n \"flavor\")\n self.assertEqual(\"flavor\", result)\n\n mock_flavor.assert_called_once_with(\n context={\"admin\": {\"credential\": clients.credential}}\n )\n mock_flavor_obj = mock_flavor.return_value\n mock_flavor_obj.pre_process.assert_called_once_with(\n self.config[\"args\"][\"flavor\"], 
config={})\n clients.nova().flavors.get.assert_called_once_with(flavor=\"flavor_id\")\n mock_flavor_obj.pre_process.reset_mock()\n\n clients.side_effect = exceptions.InvalidScenarioArgument(\"\")\n result = self.validator._get_validated_flavor(\n self.config, clients, \"flavor\")\n self.assertEqual(\"flavor\", result)\n mock_flavor_obj.pre_process.assert_called_once_with(\n self.config[\"args\"][\"flavor\"], config={})\n clients.nova().flavors.get.assert_called_with(flavor=\"flavor_id\")\n\n @mock.patch(\"%s.openstack_types.Flavor\" % PATH)\n def test__get_validated_flavor_not_found(self, mock_flavor):\n mock_flavor.return_value.pre_process.return_value = \"flavor_id\"\n\n clients = mock.MagicMock()\n clients.nova().flavors.get.side_effect = nova_exc.NotFound(\"\")\n\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator._get_validated_flavor,\n self.config, clients, \"flavor\")\n self.assertEqual(\"Flavor '%s' not found\" %\n self.config[\"args\"][\"flavor\"],\n e.message)\n mock_flavor_obj = mock_flavor.return_value\n mock_flavor_obj.pre_process.assert_called_once_with(\n self.config[\"args\"][\"flavor\"], config={})\n\n @mock.patch(\"%s.types.obj_from_name\" % PATH)\n @mock.patch(\"%s.flavors_ctx.FlavorConfig\" % PATH)\n def test__get_flavor_from_context(self, mock_flavor_config,\n mock_obj_from_name):\n config = {\n \"contexts\": {\"images\": {\"fake_parameter_name\": \"foo_image\"}}}\n\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator._get_flavor_from_context,\n config, \"foo_flavor\")\n self.assertEqual(\"No flavors context\", e.message)\n\n config = {\"contexts\": {\"images\": {\"fake_parameter_name\": \"foo_image\"},\n \"flavors\": [{\"flavor1\": \"fake_flavor1\"}]}}\n result = self.validator._get_flavor_from_context(config, \"foo_flavor\")\n self.assertEqual(\"<context flavor: %s>\" % result.name, result.id)\n\n def test_validate(self):\n expected_e = 
validators.validation.ValidationError(\"fpp\")\n self.validator._get_validated_flavor = mock.Mock(\n side_effect=expected_e)\n\n config = {}\n ctx = mock.MagicMock()\n actual_e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator.validate, ctx, config, None, None)\n self.assertEqual(expected_e, actual_e)\n self.validator._get_validated_flavor.assert_called_once_with(\n config=config,\n clients=ctx[\"users\"][0][\"credential\"].clients(),\n param_name=self.validator.param_name)\n\n\[email protected]\nclass ImageValidOnFlavorValidatorTestCase(test.TestCase):\n\n def setUp(self):\n super(ImageValidOnFlavorValidatorTestCase, self).setUp()\n self.validator = validators.ImageValidOnFlavorValidator(\"foo_flavor\",\n \"image\")\n self.config = copy.deepcopy(config)\n self.context = copy.deepcopy(context)\n\n @ddt.data(\n {\"validate_disk\": True, \"flavor_disk\": True},\n {\"validate_disk\": False, \"flavor_disk\": True},\n {\"validate_disk\": False, \"flavor_disk\": False}\n )\n @ddt.unpack\n def test_validate(self, validate_disk, flavor_disk):\n validator = validators.ImageValidOnFlavorValidator(\n flavor_param=\"foo_flavor\",\n image_param=\"image\",\n fail_on_404_image=False,\n validate_disk=validate_disk)\n\n min_ram = 2048\n disk = 10\n fake_image = {\"min_ram\": min_ram,\n \"size\": disk * (1024 ** 3),\n \"min_disk\": disk}\n fake_flavor = mock.Mock(disk=None, ram=min_ram * 2)\n if flavor_disk:\n fake_flavor.disk = disk * 2\n\n validator._get_validated_flavor = mock.Mock(\n return_value=fake_flavor)\n\n # case 1: no image, but it is ok, since fail_on_404_image is False\n validator._get_validated_image = mock.Mock(\n side_effect=validators.validation.ValidationError(\"!!!\"))\n validator.validate(self.context, {}, None, None)\n\n # case 2: there is an image\n validator._get_validated_image = mock.Mock(\n return_value=fake_image)\n validator.validate(self.context, {}, None, None)\n\n # case 3: check caching of the flavor\n 
self.context[\"users\"].append(self.context[\"users\"][0])\n validator._get_validated_image.reset_mock()\n validator._get_validated_flavor.reset_mock()\n\n validator.validate(self.context, {}, None, None)\n\n self.assertEqual(1, validator._get_validated_flavor.call_count)\n self.assertEqual(2, validator._get_validated_image.call_count)\n\n def test_validate_failed(self):\n validator = validators.ImageValidOnFlavorValidator(\n flavor_param=\"foo_flavor\",\n image_param=\"image\",\n fail_on_404_image=True,\n validate_disk=True)\n\n min_ram = 2048\n disk = 10\n fake_flavor = mock.Mock(disk=disk, ram=min_ram)\n fake_flavor.id = \"flavor_id\"\n\n validator._get_validated_flavor = mock.Mock(\n return_value=fake_flavor)\n\n # case 1: there is no image and fail_on_404_image flag is True\n expected_e = validators.validation.ValidationError(\"!!!\")\n validator._get_validated_image = mock.Mock(\n side_effect=expected_e)\n actual_e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, {}, None, None\n )\n self.assertEqual(expected_e, actual_e)\n\n # case 2: there is no right flavor\n expected_e = KeyError(\"Ooops\")\n validator._get_validated_flavor.side_effect = expected_e\n actual_e = self.assertRaises(\n KeyError,\n validator.validate, self.context, {}, None, None\n )\n self.assertEqual(expected_e, actual_e)\n\n # case 3: ram of a flavor is less than min_ram of an image\n validator._get_validated_flavor = mock.Mock(\n return_value=fake_flavor)\n\n fake_image = {\"min_ram\": min_ram * 2, \"id\": \"image_id\"}\n validator._get_validated_image = mock.Mock(\n return_value=fake_image)\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, {}, None, None\n )\n self.assertEqual(\n \"The memory size for flavor 'flavor_id' is too small for \"\n \"requested image 'image_id'.\", e.message)\n\n # case 4: disk of a flavor is less than size of an image\n fake_image = {\"min_ram\": min_ram / 2.0,\n 
\"size\": disk * (1024 ** 3) * 3,\n \"id\": \"image_id\"}\n validator._get_validated_image = mock.Mock(\n return_value=fake_image)\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, {}, None, None\n )\n self.assertEqual(\n \"The disk size for flavor 'flavor_id' is too small for \"\n \"requested image 'image_id'.\", e.message)\n\n # case 5: disk of a flavor is less than size of an image\n fake_image = {\"min_ram\": min_ram,\n \"size\": disk * (1024 ** 3),\n \"min_disk\": disk * 2,\n \"id\": \"image_id\"}\n validator._get_validated_image = mock.Mock(\n return_value=fake_image)\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, {}, None, None\n )\n self.assertEqual(\n \"The minimal disk size for flavor 'flavor_id' is too small for \"\n \"requested image 'image_id'.\", e.message)\n\n # case 6: _get_validated_image raises an unexpected error,\n # fail_on_404_image=False should not work in this case\n expected_e = KeyError(\"Foo!\")\n validator = validators.ImageValidOnFlavorValidator(\n flavor_param=\"foo_flavor\",\n image_param=\"image\",\n fail_on_404_image=False,\n validate_disk=True)\n validator._get_validated_image = mock.Mock(\n side_effect=expected_e)\n validator._get_validated_flavor = mock.Mock()\n\n actual_e = self.assertRaises(\n KeyError,\n validator.validate, self.context, {}, None, None\n )\n\n self.assertEqual(expected_e, actual_e)\n\n @mock.patch(\"%s.openstack_types.GlanceImage\" % PATH)\n def test__get_validated_image(self, mock_glance_image):\n mock_glance_image.return_value.pre_process.return_value = \"image_id\"\n image = {\n \"size\": 0,\n \"min_ram\": 0,\n \"min_disk\": 0\n }\n # Get image name from context\n result = self.validator._get_validated_image({\n \"args\": {\n \"image\": {\"regex\": r\"^foo$\"}},\n \"contexts\": {\n \"images\": {\"image_name\": \"foo\"}}},\n mock.Mock(), \"image\")\n self.assertEqual(image, result)\n\n clients = 
mock.Mock()\n clients.glance().images.get().to_dict.return_value = {\n \"image\": \"image_id\"}\n image[\"image\"] = \"image_id\"\n\n result = self.validator._get_validated_image(self.config,\n clients,\n \"image\")\n self.assertEqual(image, result)\n mock_glance_image.assert_called_once_with(\n context={\"admin\": {\"credential\": clients.credential}})\n mock_glance_image.return_value.pre_process.assert_called_once_with(\n config[\"args\"][\"image\"], config={})\n clients.glance().images.get.assert_called_with(\"image_id\")\n\n @mock.patch(\"%s.openstack_types.GlanceImage\" % PATH)\n def test__get_validated_image_incorrect_param(self, mock_glance_image):\n mock_glance_image.return_value.pre_process.return_value = \"image_id\"\n # Wrong 'param_name'\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator._get_validated_image, self.config,\n mock.Mock(), \"fake_param\")\n self.assertEqual(\"Parameter fake_param is not specified.\",\n e.message)\n\n # 'image_name' is not in 'image_context'\n image = {\"id\": \"image_id\", \"size\": 1024,\n \"min_ram\": 256, \"min_disk\": 512}\n\n clients = mock.Mock()\n clients.glance().images.get().to_dict.return_value = image\n config = {\"args\": {\"image\": \"foo_image\",\n \"context\": {\"images\": {\n \"fake_parameter_name\": \"foo_image\"}\n }}\n }\n result = self.validator._get_validated_image(config, clients, \"image\")\n self.assertEqual(image, result)\n\n mock_glance_image.assert_called_once_with(\n context={\"admin\": {\"credential\": clients.credential}})\n mock_glance_image.return_value.pre_process.assert_called_once_with(\n config[\"args\"][\"image\"], config={})\n clients.glance().images.get.assert_called_with(\"image_id\")\n\n @mock.patch(\"%s.openstack_types.GlanceImage\" % PATH)\n def test__get_validated_image_exceptions(self, mock_glance_image):\n mock_glance_image.return_value.pre_process.return_value = \"image_id\"\n clients = mock.Mock()\n clients.glance().images.get.side_effect = 
glance_exc.HTTPNotFound(\"\")\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator._get_validated_image,\n config, clients, \"image\")\n self.assertEqual(\"Image '%s' not found\" % config[\"args\"][\"image\"],\n e.message)\n\n mock_glance_image.assert_called_once_with(\n context={\"admin\": {\"credential\": clients.credential}})\n mock_glance_image.return_value.pre_process.assert_called_once_with(\n config[\"args\"][\"image\"], config={})\n clients.glance().images.get.assert_called_with(\"image_id\")\n mock_glance_image.return_value.pre_process.reset_mock()\n\n clients.side_effect = exceptions.InvalidScenarioArgument(\"\")\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator._get_validated_image, config, clients, \"image\")\n self.assertEqual(\"Image '%s' not found\" % config[\"args\"][\"image\"],\n e.message)\n mock_glance_image.return_value.pre_process.assert_called_once_with(\n config[\"args\"][\"image\"], config={})\n clients.glance().images.get.assert_called_with(\"image_id\")\n\n\nclass RequiredServicesValidatorTestCase(test.TestCase):\n\n def setUp(self):\n super(RequiredServicesValidatorTestCase, self).setUp()\n self.validator = validators.RequiredServicesValidator([\n consts.Service.KEYSTONE,\n consts.Service.NOVA])\n self.config = config\n self.context = context\n\n def test_validator(self):\n\n self.config[\"context\"][\"api_versions@openstack\"].get = mock.Mock(\n return_value={consts.Service.KEYSTONE: \"service_type\"})\n\n clients = self.context[\"admin\"].get(\"credential\").clients()\n\n clients.services().values.return_value = [\n consts.Service.KEYSTONE, consts.Service.NOVA,\n consts.Service.NOVA_NET]\n fake_service = mock.Mock(binary=\"nova-network\", status=\"enabled\")\n clients.nova.services.list.return_value = [fake_service]\n result = self.validator.validate(self.context, self.config,\n None, None)\n self.assertIsNone(result)\n\n fake_service = mock.Mock(binary=\"keystone\", 
status=\"enabled\")\n clients.nova.services.list.return_value = [fake_service]\n result = self.validator.validate(self.context, self.config,\n None, None)\n self.assertIsNone(result)\n\n fake_service = mock.Mock(binary=\"nova-network\", status=\"disabled\")\n clients.nova.services.list.return_value = [fake_service]\n result = self.validator.validate(self.context, self.config,\n None, None)\n self.assertIsNone(result)\n\n def test_validator_wrong_service(self):\n\n self.config[\"context\"][\"api_versions@openstack\"].get = mock.Mock(\n return_value={consts.Service.KEYSTONE: \"service_type\",\n consts.Service.NOVA: \"service_name\"})\n\n clients = self.context[\"admin\"].get(\"credential\").clients()\n clients.services().values.return_value = [\n consts.Service.KEYSTONE, consts.Service.NOVA]\n\n validator = validators.RequiredServicesValidator([\n consts.Service.KEYSTONE,\n consts.Service.NOVA, \"lol\"])\n\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, {}, None, None)\n expected_msg = (\"'{0}' service is not available. Hint: If '{0}'\"\n \" service has non-default service_type, try to setup\"\n \" it via 'api_versions@openstack' context.\"\n ).format(\"lol\")\n self.assertEqual(expected_msg, e.message)\n\n\[email protected]\nclass ValidateHeatTemplateValidatorTestCase(test.TestCase):\n\n def setUp(self):\n super(ValidateHeatTemplateValidatorTestCase, self).setUp()\n self.validator = validators.ValidateHeatTemplateValidator(\n \"template_path1\", \"template_path2\")\n self.config = copy.deepcopy(config)\n self.context = copy.deepcopy(context)\n\n @ddt.data(\n {\"exception_msg\": \"Heat template validation failed on fake_path1. 
\"\n \"Original error message: fake_msg.\"},\n {\"exception_msg\": None}\n )\n @ddt.unpack\n @mock.patch(\"%s.os.path.exists\" % PATH,\n return_value=True)\n @mock.patch(\"rally_openstack.common.validators.open\",\n side_effect=mock.mock_open(), create=True)\n def test_validate(self, mock_open, mock_exists, exception_msg):\n clients = self.context[\"users\"][0][\"credential\"].clients()\n mock_open().__enter__().read.side_effect = [\"fake_template1\",\n \"fake_template2\"]\n heat_validator = mock.MagicMock()\n if exception_msg:\n heat_validator.side_effect = Exception(\"fake_msg\")\n clients.heat().stacks.validate = heat_validator\n context = {\"args\": {\"template_path1\": \"fake_path1\",\n \"template_path2\": \"fake_path2\"}}\n if not exception_msg:\n result = self.validator.validate(self.context, context, None, None)\n\n heat_validator.assert_has_calls([\n mock.call(template=\"fake_template1\"),\n mock.call(template=\"fake_template2\")\n ])\n mock_open.assert_has_calls([\n mock.call(\"fake_path1\", \"r\"),\n mock.call(\"fake_path2\", \"r\")\n ], any_order=True)\n self.assertIsNone(result)\n else:\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator.validate, self.context, context, None, None)\n heat_validator.assert_called_once_with(\n template=\"fake_template1\")\n self.assertEqual(\n \"Heat template validation failed on fake_path1.\"\n \" Original error message: fake_msg.\", e.message)\n\n def test_validate_missed_params(self):\n validator = validators.ValidateHeatTemplateValidator(\n params=\"fake_param\")\n\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, self.config, None, None)\n\n expected_msg = (\"Path to heat template is not specified. Its needed \"\n \"for heat template validation. 
Please check the \"\n \"content of `fake_param` scenario argument.\")\n self.assertEqual(expected_msg, e.message)\n\n @mock.patch(\"%s.os.path.exists\" % PATH,\n return_value=False)\n def test_validate_file_not_found(self, mock_exists):\n config = {\"args\": {\"template_path1\": \"fake_path1\",\n \"template_path2\": \"fake_path2\"}}\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator.validate, self.context, config, None, None)\n expected_msg = \"No file found by the given path fake_path1\"\n self.assertEqual(expected_msg, e.message)\n\n\nclass RequiredCinderServicesValidatorTestCase(test.TestCase):\n\n def setUp(self):\n super(RequiredCinderServicesValidatorTestCase, self).setUp()\n self.context = copy.deepcopy(context)\n self.config = copy.deepcopy(config)\n\n def test_validate(self):\n validator = validators.RequiredCinderServicesValidator(\n \"cinder_service\")\n\n fake_service = mock.Mock(binary=\"cinder_service\", state=\"up\")\n clients = self.context[\"admin\"][\"credential\"].clients()\n clients.cinder().services.list.return_value = [fake_service]\n result = validator.validate(self.context, self.config, None, None)\n self.assertIsNone(result)\n\n fake_service.state = \"down\"\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, self.config, None, None)\n self.assertEqual(\"cinder_service service is not available\",\n e.message)\n\n\[email protected]\nclass RequiredAPIVersionsValidatorTestCase(test.TestCase):\n\n def setUp(self):\n super(RequiredAPIVersionsValidatorTestCase, self).setUp()\n self.config = copy.deepcopy(config)\n self.context = copy.deepcopy(context)\n\n def _get_keystone_v2_mock_client(self):\n keystone = mock.Mock()\n del keystone.projects\n keystone.tenants = mock.Mock()\n return keystone\n\n def _get_keystone_v3_mock_client(self):\n keystone = mock.Mock()\n del keystone.tenants\n keystone.projects = mock.Mock()\n return keystone\n\n def test_validate(self):\n 
validator = validators.RequiredAPIVersionsValidator(\"keystone\",\n [2.0, 3])\n\n clients = self.context[\"users\"][0][\"credential\"].clients()\n\n clients.keystone.return_value = self._get_keystone_v3_mock_client()\n validator.validate(self.context, self.config, None, None)\n\n clients.keystone.return_value = self._get_keystone_v2_mock_client()\n validator.validate(self.context, self.config, None, None)\n\n def test_validate_with_keystone_v2(self):\n validator = validators.RequiredAPIVersionsValidator(\"keystone\",\n [2.0])\n\n clients = self.context[\"users\"][0][\"credential\"].clients()\n clients.keystone.return_value = self._get_keystone_v2_mock_client()\n validator.validate(self.context, self.config, None, None)\n\n clients.keystone.return_value = self._get_keystone_v3_mock_client()\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, self.config, None, None)\n self.assertEqual(\"Task was designed to be used with keystone V2.0, \"\n \"but V3 is selected.\", e.message)\n\n def test_validate_with_keystone_v3(self):\n validator = validators.RequiredAPIVersionsValidator(\"keystone\",\n [3])\n\n clients = self.context[\"users\"][0][\"credential\"].clients()\n clients.keystone.return_value = self._get_keystone_v3_mock_client()\n validator.validate(self.context, self.config, None, None)\n\n clients.keystone.return_value = self._get_keystone_v2_mock_client()\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, self.config, None, None)\n self.assertEqual(\"Task was designed to be used with keystone V3, \"\n \"but V2.0 is selected.\", e.message)\n\n @ddt.unpack\n @ddt.data(\n {\"nova\": 2, \"versions\": [2], \"err_msg\": None},\n {\"nova\": 3, \"versions\": [2],\n \"err_msg\": \"Task was designed to be used with nova V2, \"\n \"but V3 is selected.\"},\n {\"nova\": None, \"versions\": [2],\n \"err_msg\": \"Unable to determine the API version.\"},\n {\"nova\": 2, 
\"versions\": [2, 3], \"err_msg\": None},\n {\"nova\": 4, \"versions\": [2, 3],\n \"err_msg\": \"Task was designed to be used with nova V2, 3, \"\n \"but V4 is selected.\"}\n )\n def test_validate_nova(self, nova, versions, err_msg):\n validator = validators.RequiredAPIVersionsValidator(\"nova\",\n versions)\n\n clients = self.context[\"users\"][0][\"credential\"].clients()\n\n clients.nova.choose_version.return_value = nova\n config = {\"contexts\": {\"api_versions@openstack\": {}}}\n\n if err_msg:\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, config, None, None)\n self.assertEqual(err_msg, e.message)\n else:\n result = validator.validate(self.context, config, None, None)\n self.assertIsNone(result)\n\n @ddt.unpack\n @ddt.data({\"version\": 2, \"err_msg\": None},\n {\"version\": 3, \"err_msg\": \"Task was designed to be used with \"\n \"nova V3, but V2 is selected.\"})\n def test_validate_context(self, version, err_msg):\n validator = validators.RequiredAPIVersionsValidator(\"nova\",\n [version])\n\n config = {\n \"contexts\": {\"api_versions@openstack\": {\"nova\": {\"version\": 2}}}}\n\n if err_msg:\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, config, None, None)\n self.assertEqual(err_msg, e.message)\n else:\n result = validator.validate(self.context, config, None, None)\n self.assertIsNone(result)\n\n\nclass VolumeTypeExistsValidatorTestCase(test.TestCase):\n\n def setUp(self):\n super(VolumeTypeExistsValidatorTestCase, self).setUp()\n self.validator = validators.VolumeTypeExistsValidator(\"volume_type\",\n True)\n self.config = copy.deepcopy(config)\n self.context = copy.deepcopy(context)\n\n def test_validator_without_ctx(self):\n validator = validators.VolumeTypeExistsValidator(\"fake_param\",\n nullable=True)\n\n clients = self.context[\"users\"][0][\"credential\"].clients()\n\n clients.cinder().volume_types.list.return_value = 
[mock.MagicMock()]\n\n result = validator.validate(self.context, self.config, None, None)\n self.assertIsNone(result, \"Unexpected result\")\n\n def test_validator_without_ctx_failed(self):\n validator = validators.VolumeTypeExistsValidator(\"fake_param\",\n nullable=False)\n\n clients = self.context[\"users\"][0][\"credential\"].clients()\n\n clients.cinder().volume_types.list.return_value = [mock.MagicMock()]\n\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, self.context, self.config, None, None)\n self.assertEqual(\n \"The parameter 'fake_param' is required and should not be empty.\",\n e.message)\n\n def test_validate_with_ctx(self):\n clients = self.context[\"users\"][0][\"credential\"].clients()\n clients.cinder().volume_types.list.return_value = []\n ctx = {\"args\": {\"volume_type\": \"fake_type\"},\n \"contexts\": {\"volume_types\": [\"fake_type\"]}}\n result = self.validator.validate(self.context, ctx, None, None)\n\n self.assertIsNone(result)\n\n def test_validate_with_ctx_failed(self):\n clients = self.context[\"users\"][0][\"credential\"].clients()\n clients.cinder().volume_types.list.return_value = []\n config = {\"args\": {\"volume_type\": \"fake_type\"},\n \"contexts\": {\"volume_types\": [\"fake_type_2\"]}}\n e = self.assertRaises(\n validators.validation.ValidationError,\n self.validator.validate, self.context, config, None, None)\n\n err_msg = (\"Specified volume type fake_type not found for user {}. 
\"\n \"List of available types: ['fake_type_2']\")\n fake_user = self.context[\"users\"][0]\n self.assertEqual(err_msg.format(fake_user), e.message)\n\n\[email protected]\nclass WorkbookContainsWorkflowValidatorTestCase(test.TestCase):\n\n @mock.patch(\"%s.yaml.safe_load\" % PATH)\n @mock.patch(\"%s.os.access\" % PATH)\n @mock.patch(\"%s.open\" % PATH)\n def test_validator(self, mock_open, mock_access, mock_safe_load):\n mock_safe_load.return_value = {\n \"version\": \"2.0\",\n \"name\": \"wb\",\n \"workflows\": {\n \"wf1\": {\n \"type\": \"direct\",\n \"tasks\": {\n \"t1\": {\n \"action\": \"std.noop\"\n }\n }\n }\n }\n }\n validator = validators.WorkbookContainsWorkflowValidator(\n workbook_param=\"definition\", workflow_param=\"workflow_name\")\n\n config = {\n \"args\": {\n \"definition\": \"fake_path1\",\n \"workflow_name\": \"wf1\"\n }\n }\n\n result = validator.validate(None, config, None, None)\n self.assertIsNone(result)\n\n self.assertEqual(1, mock_open.called)\n self.assertEqual(1, mock_access.called)\n self.assertEqual(1, mock_safe_load.called)\n\n\[email protected]\nclass RequiredContextConfigValidatorTestCase(test.TestCase):\n\n def test_validator(self):\n validator = validators.RequiredContextConfigValidator(\n context_name=\"zones\",\n context_config={\"set_zone_in_network\": True})\n cfg = {\n \"contexts\": {\n \"users\": {\n \"tenants\": 1, \"users_per_tenant\": 1\n },\n \"network\": {\n \"dns_nameservers\": [\"8.8.8.8\", \"192.168.210.45\"]\n },\n \"zones\": {\"set_zone_in_network\": True}\n },\n }\n validator.validate({}, cfg, None, None)\n\n def test_validator_context_not_in_contexts(self):\n validator = validators.RequiredContextConfigValidator(\n context_name=\"zones\",\n context_config={\"set_zone_in_network\": True})\n cfg = {\n \"contexts\": {\n \"users\": {\n \"tenants\": 1, \"users_per_tenant\": 1\n },\n \"network\": {\n \"dns_nameservers\": [\"8.8.8.8\", \"192.168.210.45\"]\n },\n },\n }\n validator.validate({}, cfg, None, None)\n\n def 
test_validator_failed(self):\n validator = validators.RequiredContextConfigValidator(\n context_name=\"zones\",\n context_config={\"set_zone_in_network\": True})\n cfg = {\n \"contexts\": {\n \"users\": {\n \"tenants\": 1, \"users_per_tenant\": 1\n },\n \"network\": {\n \"dns_nameservers\": [\"8.8.8.8\", \"192.168.210.45\"]\n },\n \"zones\": {\"set_zone_in_network\": False}\n },\n }\n\n e = self.assertRaises(\n validators.validation.ValidationError,\n validator.validate, {}, cfg, None, None)\n self.assertEqual(\n \"The 'zones' context expects '{'set_zone_in_network': True}'\",\n e.message)\n" }, { "alpha_fraction": 0.6014235019683838, "alphanum_fraction": 0.6045867800712585, "avg_line_length": 39.790321350097656, "blob_id": "4b5d1217ef9575c9284e2ecb04f7bf484c7b9b85", "content_id": "fac02b4e929ef150e8d777c0b6d5d889ef8418d6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5058, "license_type": "permissive", "max_line_length": 78, "num_lines": 124, "path": "/tests/functional/test_cli_deployment.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport json\nimport re\n\nimport testtools\n\nfrom tests.functional import utils\n\n\nTEST_ENV = {\n \"OS_USERNAME\": \"admin\",\n \"OS_PASSWORD\": \"admin\",\n \"OS_TENANT_NAME\": \"admin\",\n \"OS_AUTH_URL\": \"http://fake/\",\n}\n\n\nclass DeploymentTestCase(testtools.TestCase):\n\n def test_create_fromenv_list_show(self):\n # NOTE(andreykurilin): `rally deployment create --fromenv` is\n # hardcoded to OpenStack. Should be fixed as soon as the platforms\n # will be introduced.\n rally = utils.Rally()\n rally.env.update(TEST_ENV)\n\n rally(\"deployment create --name t_create_env --fromenv\")\n self.assertIn(\"t_create_env\", rally(\"deployment list\"))\n self.assertIn(TEST_ENV[\"OS_AUTH_URL\"],\n rally(\"deployment show\"))\n\n def test_create_fromfile(self):\n rally = utils.Rally()\n rally.env.update(TEST_ENV)\n rally(\"deployment create --name t_create_env --fromenv\")\n existing_conf = rally(\"deployment config\", getjson=True)\n with open(\"/tmp/.tmp.deployment\", \"w\") as f:\n f.write(json.dumps(existing_conf))\n rally(\"deployment create --name t_create_file \"\n \"--filename /tmp/.tmp.deployment\")\n self.assertIn(\"t_create_file\", rally(\"deployment list\"))\n\n def test_destroy(self):\n rally = utils.Rally()\n rally.env.update(TEST_ENV)\n rally(\"deployment create --name t_create_env --fromenv\")\n self.assertIn(\"t_create_env\", rally(\"deployment list\"))\n rally(\"deployment destroy\")\n self.assertNotIn(\"t_create_env\", rally(\"deployment list\"))\n\n def test_check_success(self):\n rally = utils.Rally()\n rally(\"deployment check\")\n\n def test_check_fail(self):\n rally = utils.Rally()\n rally.env.update(TEST_ENV)\n rally(\"deployment create --name t_create_env --fromenv\")\n self.assertRaises(utils.RallyCliError, rally, \"deployment check\")\n\n def test_check_debug(self):\n rally = utils.Rally()\n rally.env.update(TEST_ENV)\n 
rally(\"deployment create --name t_create_env --fromenv\")\n config = rally(\"deployment config\", getjson=True)\n config[\"openstack\"][\"admin\"][\"password\"] = \"fakepassword\"\n file = utils.JsonTempFile(config)\n rally(\"deployment create --name t_create_file_debug \"\n \"--filename %s\" % file.filename)\n self.assertIn(\"t_create_file_debug\", rally(\"deployment list\"))\n self.assertEqual(config, rally(\"deployment config\", getjson=True))\n e = self.assertRaises(utils.RallyCliError, rally,\n \"--debug deployment check\")\n self.assertIn(\n \"AuthenticationFailed: Could not find versioned identity \"\n \"endpoints when attempting to authenticate.\",\n e.output)\n\n def test_use(self):\n rally = utils.Rally()\n rally.env.update(TEST_ENV)\n output = rally(\n \"deployment create --name t_create_env1 --fromenv\")\n uuid = re.search(r\"Using deployment: (?P<uuid>[0-9a-f\\-]{36})\",\n output).group(\"uuid\")\n rally(\"deployment create --name t_create_env2 --fromenv\")\n rally(\"deployment use --deployment %s\" % uuid)\n current_deployment = utils.get_global(\"RALLY_DEPLOYMENT\",\n rally.env)\n self.assertEqual(uuid, current_deployment)\n\n def test_create_from_env_openstack_deployment(self):\n rally = utils.Rally()\n rally.env.update(TEST_ENV)\n rally(\"deployment create --name t_create_env --fromenv\")\n config = rally(\"deployment config\", getjson=True)\n self.assertIn(\"openstack\", config)\n self.assertEqual(TEST_ENV[\"OS_USERNAME\"],\n config[\"openstack\"][\"admin\"][\"username\"])\n self.assertEqual(TEST_ENV[\"OS_PASSWORD\"],\n config[\"openstack\"][\"admin\"][\"password\"])\n if \"project_name\" in config[\"openstack\"][\"admin\"]:\n # keystone v3\n self.assertEqual(TEST_ENV[\"OS_TENANT_NAME\"],\n config[\"openstack\"][\"admin\"][\"project_name\"])\n else:\n # keystone v2\n self.assertEqual(TEST_ENV[\"OS_TENANT_NAME\"],\n config[\"openstack\"][\"admin\"][\"tenant_name\"])\n self.assertEqual(TEST_ENV[\"OS_AUTH_URL\"],\n 
config[\"openstack\"][\"auth_url\"])\n" }, { "alpha_fraction": 0.6066572666168213, "alphanum_fraction": 0.6087669730186462, "avg_line_length": 28.832168579101562, "blob_id": "0606da49806c5920320e566d594ea63acf4c0a48", "content_id": "a0981d40523a7fd09a0b5d502deb9747104bfb52", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4266, "license_type": "permissive", "max_line_length": 79, "num_lines": 143, "path": "/tests/ci/playbooks/roles/prepare-for-rally-task/library/make_env_spec_with_existing_users.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport json\nimport uuid\n\nfrom ansible.module_utils.basic import AnsibleModule\n\nfrom rally import api\nfrom rally.env import env_mgr\nfrom rally import plugins\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.common import credential\n\n\ndef fetch_parent_env_and_admin_creds(env_name):\n \"\"\"Fetch parent environment spec and openstack admin creds from it.\"\"\"\n\n env_data = env_mgr.EnvManager.get(env_name).data\n\n openstack_platform = env_data[\"platforms\"][\"openstack\"]\n admin_creds = credential.OpenStackCredential(\n permission=consts.EndpointPermission.ADMIN,\n **openstack_platform[\"platform_data\"][\"admin\"])\n\n return env_data[\"spec\"], admin_creds\n\n\ndef create_projects_and_users(admin_creds, projects_count, users_per_project):\n \"\"\"Create new projects and users via 'users@openstack' context.\n\n :param admin_creds: admin credentials to use for creating new entities\n :param projects_count: The number of keystone projects to create.\n :param users_per_project: The number of keystone users to create per one\n keystone project.\n \"\"\"\n\n # it should be imported after calling rally.api.API that setups oslo_config\n from rally_openstack.task.contexts.keystone import users as users_ctx\n\n ctx = {\n \"env\": {\n \"platforms\": {\n \"openstack\": {\n \"admin\": admin_creds.to_dict(),\n \"users\": []\n }\n }\n },\n \"task\": {\n \"uuid\": str(uuid.uuid4())\n },\n \"config\": {\n \"users@openstack\": {\n \"tenants\": projects_count,\n \"users_per_tenant\": users_per_project\n }\n }\n }\n\n users_ctx.UserGenerator(ctx).setup()\n\n users = []\n for user in ctx[\"users\"]:\n users.append({\n \"username\": user[\"credential\"][\"username\"],\n \"password\": user[\"credential\"][\"password\"],\n \"project_name\": user[\"credential\"][\"tenant_name\"]\n })\n\n for optional in (\"domain_name\",\n 
\"user_domain_name\",\n \"project_domain_name\"):\n if user[\"credential\"][optional]:\n users[-1][optional] = user[\"credential\"][optional]\n\n return users\n\n\ndef store_a_new_spec(original_spec, users, path_for_new_spec):\n new_spec = copy.deepcopy(original_spec)\n del new_spec[\"existing@openstack\"][\"admin\"]\n new_spec[\"existing@openstack\"][\"users\"] = users\n with open(path_for_new_spec, \"w\") as f:\n f.write(json.dumps(new_spec, indent=4))\n\n\[email protected]_plugins_are_loaded\ndef ansible_main():\n module = AnsibleModule(argument_spec=dict(\n projects_count=dict(\n type=\"int\",\n default=1,\n required=False\n ),\n users_per_project=dict(\n type=\"int\",\n default=1,\n required=False\n ),\n parent_env_name=dict(\n type=\"str\",\n required=True\n ),\n path_for_new_spec=dict(\n type=\"str\",\n required=True\n )\n ))\n\n # init Rally API as it makes all work for logging and config initialization\n api.API()\n\n original_spec, admin_creds = fetch_parent_env_and_admin_creds(\n module.params[\"parent_env_name\"]\n )\n\n users = create_projects_and_users(\n admin_creds,\n projects_count=module.params[\"projects_count\"],\n users_per_project=module.params[\"users_per_project\"]\n )\n\n store_a_new_spec(original_spec, users, module.params[\"path_for_new_spec\"])\n\n module.exit_json(changed=True)\n\n\nif __name__ == \"__main__\":\n ansible_main()\n" }, { "alpha_fraction": 0.4864831864833832, "alphanum_fraction": 0.48911911249160767, "avg_line_length": 38.21394348144531, "blob_id": "1d090917ea0969a0adef65d13e466dbb837156dd", "content_id": "3b4bd74a7527eedaee4d276b01fd92728cba34a7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16313, "license_type": "permissive", "max_line_length": 79, "num_lines": 416, "path": "/tests/unit/environment/platforms/test_existing.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under 
the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nfrom unittest import mock\n\nimport jsonschema\n\nfrom rally.env import env_mgr\nfrom rally.env import platform\nfrom rally import exceptions\n\nfrom rally_openstack.environment.platforms import existing\nfrom tests.unit import test\n\n\nclass PlatformBaseTestCase(test.TestCase):\n\n def _check_schema(self, schema, obj):\n jsonschema.validate(obj, schema)\n\n def _check_health_schema(self, obj):\n self._check_schema(env_mgr.EnvManager._HEALTH_FORMAT, obj)\n\n def _check_cleanup_schema(self, obj):\n self._check_schema(env_mgr.EnvManager._CLEANUP_FORMAT, obj)\n\n def _check_info_schema(self, obj):\n self._check_schema(env_mgr.EnvManager._INFO_FORMAT, obj)\n\n\nclass ExistingPlatformTestCase(PlatformBaseTestCase):\n\n def test_validate_spec_schema(self):\n spec = {\n \"existing@openstack\": {\n \"auth_url\": \"url\",\n \"admin\": {\n \"username\": \"admin\",\n \"password\": \"password123\",\n \"tenant_name\": \"admin\"\n },\n \"users\": [{\n \"username\": \"admin\",\n \"password\": \"password123\",\n \"tenant_name\": \"admin\"\n }]\n }\n }\n result = platform.Platform.validate(\"existing@openstack\", {},\n spec, spec[\"existing@openstack\"])\n self.assertEqual([], result)\n\n def test_validate_invalid_spec(self):\n spec = {\n \"existing@openstack\": {\n \"something_wrong\": {\n \"username\": \"not_an_admin\",\n \"password\": \"password123\",\n \"project_name\": \"not_an_admin\"\n }\n }\n }\n result = 
platform.Platform.validate(\"existing@openstack\", {},\n spec, spec[\"existing@openstack\"])\n self.assertNotEqual([], result)\n\n def test_validate_spec_schema_with_api_info(self):\n spec = {\n \"existing@openstack\": {\n \"auth_url\": \"url\",\n \"admin\": {\n \"username\": \"admin\",\n \"password\": \"password123\",\n \"tenant_name\": \"admin\"\n },\n \"api_info\": {\n \"nova\": {\"version\": 1},\n \"cinder\": {\"version\": 2, \"service_type\": \"volumev2\"}\n }\n }\n }\n result = platform.Platform.validate(\"existing@openstack\", {},\n spec, spec[\"existing@openstack\"])\n self.assertEqual([], result)\n\n def test_create_users_only(self):\n\n spec = {\n \"auth_url\": \"https://best\",\n \"endpoint\": \"check_that_its_poped\",\n \"users\": [\n {\"project_name\": \"a\", \"username\": \"a\", \"password\": \"a\"},\n {\"project_name\": \"b\", \"username\": \"b\", \"password\": \"b\"}\n ]\n }\n\n self.assertEqual(\n ({\n \"admin\": None,\n \"users\": [\n {\n \"auth_url\": \"https://best\", \"endpoint_type\": None,\n \"region_name\": None,\n \"domain_name\": None,\n \"user_domain_name\": \"default\",\n \"project_domain_name\": \"default\",\n \"https_insecure\": False, \"https_cacert\": None,\n \"tenant_name\": \"a\", \"username\": \"a\", \"password\": \"a\"\n },\n {\n \"auth_url\": \"https://best\", \"endpoint_type\": None,\n \"region_name\": None,\n \"domain_name\": None,\n \"user_domain_name\": \"default\",\n \"project_domain_name\": \"default\",\n \"https_insecure\": False, \"https_cacert\": None,\n \"tenant_name\": \"b\", \"username\": \"b\", \"password\": \"b\"\n }\n ]\n }, {}),\n existing.OpenStack(spec).create())\n\n def test_create_admin_only(self):\n spec = {\n \"auth_url\": \"https://best\",\n \"endpoint_type\": \"public\",\n \"https_insecure\": True,\n \"https_cacert\": \"/my.ca\",\n \"profiler_hmac_key\": \"key\",\n \"profiler_conn_str\": \"http://prof\",\n \"admin\": {\n \"domain_name\": \"d\", \"user_domain_name\": \"d\",\n \"project_domain_name\": 
\"d\", \"project_name\": \"d\",\n \"username\": \"d\", \"password\": \"d\"\n }\n }\n self.assertEqual(\n (\n {\n \"admin\": {\n \"auth_url\": \"https://best\",\n \"endpoint_type\": \"public\",\n \"https_insecure\": True, \"https_cacert\": \"/my.ca\",\n \"profiler_hmac_key\": \"key\",\n \"profiler_conn_str\": \"http://prof\",\n \"region_name\": None, \"domain_name\": \"d\",\n \"user_domain_name\": \"d\", \"project_domain_name\": \"d\",\n \"tenant_name\": \"d\", \"username\": \"d\", \"password\": \"d\"\n },\n \"users\": []\n },\n {}\n ),\n existing.OpenStack(spec).create())\n\n def test_create_spec_from_sys_environ(self):\n # keystone v2\n sys_env = {\n \"OS_AUTH_URL\": \"https://example.com\",\n \"OS_USERNAME\": \"user\",\n \"OS_PASSWORD\": \"pass\",\n \"OS_TENANT_NAME\": \"projectX\",\n \"OS_INTERFACE\": \"publicURL\",\n \"OS_REGION_NAME\": \"Region1\",\n \"OS_CACERT\": \"Cacert\",\n \"OS_CERT\": \"cert\",\n \"OS_KEY\": \"key\",\n \"OS_INSECURE\": True,\n \"OSPROFILER_HMAC_KEY\": \"hmackey\",\n \"OSPROFILER_CONN_STR\": \"https://example2.com\",\n }\n\n result = existing.OpenStack.create_spec_from_sys_environ(sys_env)\n self.assertTrue(result[\"available\"])\n self.assertEqual(\n {\n \"admin\": {\n \"username\": \"user\",\n \"tenant_name\": \"projectX\",\n \"password\": \"pass\"\n },\n \"auth_url\": \"https://example.com\",\n \"endpoint_type\": \"public\",\n \"region_name\": \"Region1\",\n \"https_cacert\": \"Cacert\",\n \"https_cert\": \"cert\",\n \"https_key\": \"key\",\n \"https_insecure\": True,\n \"profiler_hmac_key\": \"hmackey\",\n \"profiler_conn_str\": \"https://example2.com\",\n \"api_info\": {\n \"keystone\": {\n \"version\": 2,\n \"service_type\": \"identity\"\n }\n }\n }, result[\"spec\"])\n\n # keystone v3\n sys_env[\"OS_IDENTITY_API_VERSION\"] = \"3\"\n\n result = existing.OpenStack.create_spec_from_sys_environ(sys_env)\n\n self.assertEqual(\n {\n \"admin\": {\n \"username\": \"user\",\n \"project_name\": \"projectX\",\n \"user_domain_name\": 
\"Default\",\n \"password\": \"pass\",\n \"project_domain_name\": \"Default\"\n },\n \"endpoint_type\": \"public\",\n \"auth_url\": \"https://example.com\",\n \"region_name\": \"Region1\",\n \"https_cacert\": \"Cacert\",\n \"https_cert\": \"cert\",\n \"https_key\": \"key\",\n \"https_insecure\": True,\n \"profiler_hmac_key\": \"hmackey\",\n \"profiler_conn_str\": \"https://example2.com\",\n \"api_info\": {\n \"keystone\": {\n \"version\": 3,\n \"service_type\": \"identityv3\"\n }\n }\n }, result[\"spec\"])\n\n def test_create_spec_from_sys_environ_fails_with_missing_vars(self):\n sys_env = {\"OS_AUTH_URL\": \"https://example.com\"}\n result = existing.OpenStack.create_spec_from_sys_environ(sys_env)\n self.assertFalse(result[\"available\"])\n self.assertIn(\"OS_USERNAME\", result[\"message\"])\n self.assertIn(\"OS_PASSWORD\", result[\"message\"])\n self.assertNotIn(\"OS_AUTH_URL\", result[\"message\"])\n\n sys_env = {\"OS_AUTH_URL\": \"https://example.com\",\n \"OS_USERNAME\": \"user\",\n \"OS_PASSWORD\": \"pass\"}\n result = existing.OpenStack.create_spec_from_sys_environ(sys_env)\n self.assertFalse(result[\"available\"])\n self.assertIn(\"OS_PROJECT_NAME or OS_TENANT_NAME\", result[\"message\"])\n\n def test_destroy(self):\n self.assertIsNone(existing.OpenStack({}).destroy())\n\n def test_cleanup(self):\n result1 = existing.OpenStack({}).cleanup()\n result2 = existing.OpenStack({}).cleanup(task_uuid=\"any\")\n self.assertEqual(result1, result2)\n self.assertEqual(\n {\n \"message\": \"Coming soon!\",\n \"discovered\": 0,\n \"deleted\": 0,\n \"failed\": 0,\n \"resources\": {},\n \"errors\": []\n },\n result1\n )\n self._check_cleanup_schema(result1)\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_check_health(self, mock_clients):\n pdata = {\n \"admin\": mock.MagicMock(),\n \"users\": [mock.MagicMock(), mock.MagicMock()]\n }\n result = existing.OpenStack({}, platform_data=pdata).check_health()\n self._check_health_schema(result)\n 
self.assertEqual({\"available\": True}, result)\n mock_clients.assert_has_calls(\n [mock.call(pdata[\"users\"][0]), mock.call().keystone(),\n mock.call(pdata[\"users\"][1]), mock.call().keystone(),\n mock.call(pdata[\"admin\"]), mock.call().verified_keystone()])\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_check_failed_with_native_rally_exc(self, mock_clients):\n e = exceptions.RallyException(\"foo\")\n mock_clients.return_value.keystone.side_effect = e\n pdata = {\"admin\": None,\n \"users\": [{\"username\": \"balbab\", \"password\": \"12345\"}]}\n result = existing.OpenStack({}, platform_data=pdata).check_health()\n self._check_health_schema(result)\n self.assertEqual(\n {\n \"available\": False,\n \"message\": e.format_message(),\n \"traceback\": mock.ANY\n },\n result)\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_check_failed_admin(self, mock_clients):\n mock_clients.return_value.verified_keystone.side_effect = Exception\n pdata = {\"admin\": {\"username\": \"balbab\", \"password\": \"12345\"},\n \"users\": []}\n result = existing.OpenStack({}, platform_data=pdata).check_health()\n self._check_health_schema(result)\n self.assertEqual(\n {\"available\": False,\n \"message\":\n \"Bad admin creds: \\n%s\"\n % json.dumps({\"username\": \"balbab\", \"password\": \"***\",\n \"api_info\": {}},\n indent=2, sort_keys=True),\n \"traceback\": mock.ANY},\n result)\n self.assertIn(\"Traceback (most recent call last)\", result[\"traceback\"])\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_check_failed_users(self, mock_clients):\n mock_clients.return_value.keystone.side_effect = Exception\n pdata = {\"admin\": None,\n \"users\": [{\"username\": \"balbab\", \"password\": \"12345\"}]}\n result = existing.OpenStack({}, platform_data=pdata).check_health()\n self._check_health_schema(result)\n self.assertEqual(\n {\"available\": False,\n \"message\":\n \"Bad user creds: \\n%s\"\n % 
json.dumps({\"username\": \"balbab\", \"password\": \"***\",\n \"api_info\": {}},\n indent=2, sort_keys=True),\n \"traceback\": mock.ANY},\n result)\n self.assertIn(\"Traceback (most recent call last)\", result[\"traceback\"])\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_check_health_with_api_info(self, mock_clients):\n pdata = {\"admin\": mock.MagicMock(),\n \"users\": [],\n \"api_info\": {\"fakeclient\": \"version\"}}\n result = existing.OpenStack({}, platform_data=pdata).check_health()\n self._check_health_schema(result)\n self.assertEqual({\"available\": True}, result)\n mock_clients.assert_has_calls(\n [mock.call(pdata[\"admin\"]), mock.call().verified_keystone(),\n mock.call().fakeclient.choose_version(),\n mock.call().fakeclient.validate_version(\n mock_clients.return_value.fakeclient.choose_version\n .return_value),\n mock.call().fakeclient.create_client()])\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_check_version_failed_with_api_info(self, mock_clients):\n pdata = {\"admin\": mock.MagicMock(),\n \"users\": [],\n \"api_info\": {\"fakeclient\": \"version\"}}\n\n def validate_version(version):\n raise exceptions.RallyException(\"Version is not supported.\")\n (mock_clients.return_value.fakeclient\n .validate_version) = validate_version\n result = existing.OpenStack({}, platform_data=pdata).check_health()\n self._check_health_schema(result)\n self.assertEqual({\"available\": False,\n \"message\": (\"Invalid setting for 'fakeclient':\"\n \" Version is not supported.\")},\n result)\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_check_unexpected_failed_with_api_info(self, mock_clients):\n pdata = {\"admin\": mock.MagicMock(),\n \"users\": [],\n \"api_info\": {\"fakeclient\": \"version\"}}\n\n def create_client():\n raise Exception(\"Invalid client.\")\n\n (mock_clients.return_value.fakeclient\n .choose_version.return_value) = \"1.0\"\n 
mock_clients.return_value.fakeclient.create_client = create_client\n result = existing.OpenStack({}, platform_data=pdata).check_health()\n self._check_health_schema(result)\n self.assertEqual({\"available\": False,\n \"message\": (\"Can not create 'fakeclient' with\"\n \" 1.0 version.\"),\n \"traceback\": mock.ANY},\n result)\n\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_info(self, mock_clients):\n mock_clients.return_value.services.return_value = {\n \"foo\": \"bar\",\n \"volumev4\": \"__unknown__\"}\n platform_data = {\n \"admin\": None,\n \"users\": [{\"username\": \"u1\", \"password\": \"123\"}]\n }\n p = existing.OpenStack({}, platform_data=platform_data)\n\n result = p.info()\n mock_clients.assert_called_once_with(platform_data[\"users\"][0])\n mock_clients.return_value.services.assert_called_once_with()\n self.assertEqual(\n {\n \"info\": {\n \"services\": [{\"type\": \"foo\", \"name\": \"bar\"},\n {\"type\": \"volumev4\"}]}},\n result)\n self._check_info_schema(result)\n" }, { "alpha_fraction": 0.5399770736694336, "alphanum_fraction": 0.5450171828269958, "avg_line_length": 35.375, "blob_id": "4c84fe6e36090cd78d0b6bafd7fdd953e13109b8", "content_id": "ace704cfcc46d5152741a5620b141377148266a6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4365, "license_type": "permissive", "max_line_length": 78, "num_lines": 120, "path": "/tests/unit/task/contexts/network/test_allow_ssh.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.network import allow_ssh\nfrom tests.unit import test\n\n\nCTX = \"rally_openstack.task.contexts.network.allow_ssh\"\n\n\nclass AllowSSHContextTestCase(test.TestCase):\n\n def setUp(self):\n super(AllowSSHContextTestCase, self).setUp()\n self.users_count = 3\n\n self.ctx = test.get_test_context()\n self.ctx.update(\n users=[\n {\n \"tenant_id\": f\"uuid{i // 3}\",\n \"credential\": mock.MagicMock()\n }\n for i in range(1, self.users_count + 1)\n ],\n admin={\n \"tenant_id\": \"uuid2\",\n \"credential\": mock.MagicMock()},\n tenants={\n \"uuid1\": {\"id\": \"uuid1\", \"name\": \"uuid1\"},\n \"uuid2\": {\"id\": \"uuid2\", \"name\": \"uuid1\"}\n }\n )\n\n def test_setup(self):\n for i, user in enumerate(self.ctx[\"users\"]):\n clients = user[\"credential\"].clients.return_value\n nc = clients.neutron.return_value\n nc.list_extensions.return_value = {\n \"extensions\": [{\"alias\": \"security-group\"}]\n }\n nc.create_security_group.return_value = {\n \"security_group\": {\n \"name\": \"xxx\",\n \"id\": f\"security-group-{i}\",\n \"security_group_rules\": []\n }\n }\n\n allow_ssh.AllowSSH(self.ctx).setup()\n\n # admin user should not be used\n self.assertFalse(self.ctx[\"admin\"][\"credential\"].clients.called)\n\n processed_tenants = {}\n for i, user in enumerate(self.ctx[\"users\"]):\n clients = user[\"credential\"].clients.return_value\n nc = clients.neutron.return_value\n if i == 0:\n nc.list_extensions.assert_called_once_with()\n else:\n 
self.assertFalse(nc.list_extensions.called)\n\n if user[\"tenant_id\"] in processed_tenants:\n self.assertFalse(nc.create_security_group.called)\n self.assertFalse(nc.create_security_group_rule.called)\n else:\n nc.create_security_group.assert_called_once_with({\n \"security_group\": {\n \"name\": mock.ANY,\n \"description\": mock.ANY\n }\n })\n secgroup = nc.create_security_group.return_value\n secgroup = secgroup[\"security_group\"]\n\n rules = copy.deepcopy(allow_ssh._RULES_TO_ADD)\n for rule in rules:\n rule[\"security_group_id\"] = secgroup[\"id\"]\n self.assertEqual(\n [mock.call({\"security_group_rule\": rule})\n for rule in rules],\n nc.create_security_group_rule.call_args_list\n )\n\n processed_tenants[user[\"tenant_id\"]] = secgroup\n\n self.assertEqual(processed_tenants[user[\"tenant_id\"]][\"id\"],\n user[\"secgroup\"][\"id\"])\n\n def test_setup_no_security_group_extension(self):\n clients = self.ctx[\"users\"][0][\"credential\"].clients.return_value\n nc = clients.neutron.return_value\n nc.list_extensions.return_value = {\"extensions\": []}\n\n allow_ssh.AllowSSH(self.ctx).setup()\n\n # admin user should not be used\n self.assertFalse(self.ctx[\"admin\"][\"credential\"].clients.called)\n\n nc.list_extensions.assert_called_once_with()\n for i, user in enumerate(self.ctx[\"users\"]):\n if i == 0:\n continue\n self.assertFalse(user[\"credential\"].clients.called)\n" }, { "alpha_fraction": 0.6308107972145081, "alphanum_fraction": 0.6367567777633667, "avg_line_length": 33.25925827026367, "blob_id": "dbdbcd556fdc90a338ce5cf0a706cdad56367857", "content_id": "03bce8efe69f712ea266f87cb34de058b111d92b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1850, "license_type": "permissive", "max_line_length": 78, "num_lines": 54, "path": "/rally_openstack/task/scenarios/monasca/utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# 
Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\nimport time\nimport uuid\n\nfrom rally.common import cfg\nfrom rally.task import atomic\n\nfrom rally_openstack.task import scenario\n\n\nCONF = cfg.CONF\n\n\nclass MonascaScenario(scenario.OpenStackScenario):\n \"\"\"Base class for Monasca scenarios with basic atomic actions.\"\"\"\n\n @atomic.action_timer(\"monasca.list_metrics\")\n def _list_metrics(self, **kwargs):\n \"\"\"Get list of user's metrics.\n\n :param kwargs: optional arguments for list query:\n name, dimensions, start_time, etc\n :returns list of monasca metrics\n \"\"\"\n return self.clients(\"monasca\").metrics.list(**kwargs)\n\n @atomic.action_timer(\"monasca.create_metrics\")\n def _create_metrics(self, **kwargs):\n \"\"\"Create user metrics.\n\n :param kwargs: attributes for metric creation:\n name, dimension, timestamp, value, etc\n \"\"\"\n timestamp = int(time.time() * 1000)\n kwargs.update({\"name\": self.generate_random_name(),\n \"timestamp\": timestamp,\n \"value\": random.random(),\n \"value_meta\": {\n \"key\": str(uuid.uuid4())[:10]}})\n self.clients(\"monasca\").metrics.create(**kwargs)\n" }, { "alpha_fraction": 0.6551111340522766, "alphanum_fraction": 0.6598095297813416, "avg_line_length": 47.91304397583008, "blob_id": "4c8f1b3c8256e9129ed710b697c0ca96458c9ee2", "content_id": "629cd5b5a3ccb66970f805070c6d51f4401dd46a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 7875, "license_type": "permissive", "max_line_length": 79, "num_lines": 161, "path": "/tests/unit/task/scenarios/nova/test_aggregates.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2016 IBM Corp.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\n\nfrom rally_openstack.task.scenarios.nova import aggregates\nfrom tests.unit import test\n\n\nclass NovaAggregatesTestCase(test.ScenarioTestCase):\n\n def test_list_aggregates(self):\n scenario = aggregates.ListAggregates()\n scenario._list_aggregates = mock.Mock()\n scenario.run()\n scenario._list_aggregates.assert_called_once_with()\n\n def test_create_and_list_aggregates(self):\n # Positive case\n scenario = aggregates.CreateAndListAggregates()\n scenario._create_aggregate = mock.Mock(return_value=\"agg1\")\n scenario._list_aggregates = mock.Mock(return_value=(\"agg1\", \"agg2\"))\n scenario.run(availability_zone=\"nova\")\n scenario._create_aggregate.assert_called_once_with(\"nova\")\n scenario._list_aggregates.assert_called_once_with()\n\n # Negative case 1: aggregate isn't created\n scenario._create_aggregate.return_value = None\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, availability_zone=\"nova\")\n scenario._create_aggregate.assert_called_with(\"nova\")\n\n # Negative case 2: aggregate was created but not included into list\n 
scenario._create_aggregate.return_value = \"agg3\"\n self.assertRaises(exceptions.RallyAssertionError,\n scenario.run, availability_zone=\"nova\")\n scenario._create_aggregate.assert_called_with(\"nova\")\n scenario._list_aggregates.assert_called_with()\n\n def test_create_and_delete_aggregate(self):\n scenario = aggregates.CreateAndDeleteAggregate()\n scenario._create_aggregate = mock.Mock()\n scenario._delete_aggregate = mock.Mock()\n scenario.run(availability_zone=\"nova\")\n scenario._create_aggregate.assert_called_once_with(\"nova\")\n aggregate = scenario._create_aggregate.return_value\n scenario._delete_aggregate.assert_called_once_with(aggregate)\n\n def test_create_and_update_aggregate(self):\n scenario = aggregates.CreateAndUpdateAggregate()\n scenario._create_aggregate = mock.Mock()\n scenario._update_aggregate = mock.Mock()\n scenario.run(availability_zone=\"nova\")\n scenario._create_aggregate.assert_called_once_with(\"nova\")\n aggregate = scenario._create_aggregate.return_value\n scenario._update_aggregate.assert_called_once_with(aggregate)\n\n def test_create_aggregate_add_and_remove_host(self):\n fake_aggregate = \"fake_aggregate\"\n fake_hosts = [mock.Mock(service={\"host\": \"fake_host_name\"})]\n scenario = aggregates.CreateAggregateAddAndRemoveHost()\n scenario._create_aggregate = mock.MagicMock(\n return_value=fake_aggregate)\n scenario._list_hypervisors = mock.MagicMock(return_value=fake_hosts)\n scenario._aggregate_add_host = mock.MagicMock()\n scenario._aggregate_remove_host = mock.MagicMock()\n scenario.run(availability_zone=\"nova\")\n scenario._create_aggregate.assert_called_once_with(\n \"nova\")\n scenario._list_hypervisors.assert_called_once_with()\n scenario._aggregate_add_host.assert_called_once_with(\n \"fake_aggregate\", \"fake_host_name\")\n scenario._aggregate_remove_host.assert_called_once_with(\n \"fake_aggregate\", \"fake_host_name\")\n\n def test_create_and_get_aggregate_details(self):\n scenario = 
aggregates.CreateAndGetAggregateDetails()\n scenario._create_aggregate = mock.Mock()\n scenario._get_aggregate_details = mock.Mock()\n scenario.run(availability_zone=\"nova\")\n scenario._create_aggregate.assert_called_once_with(\"nova\")\n aggregate = scenario._create_aggregate.return_value\n scenario._get_aggregate_details.assert_called_once_with(aggregate)\n\n def test_create_aggregate_add_host_and_boot_server(self):\n fake_aggregate = mock.Mock()\n fake_hosts = [mock.Mock(service={\"host\": \"fake_host_name\"}, state=\"up\",\n status=\"enabled\")]\n fake_flavor = mock.MagicMock(id=\"flavor-id-0\", ram=512, disk=1,\n vcpus=1)\n fake_metadata = {\"test_metadata\": \"true\"}\n fake_server = mock.MagicMock(id=\"server-id-0\")\n setattr(fake_server, \"OS-EXT-SRV-ATTR:hypervisor_hostname\",\n \"fake_host_name\")\n fake_aggregate_kwargs = {\"fake_arg1\": \"f\"}\n\n scenario = aggregates.CreateAggregateAddHostAndBootServer()\n scenario._create_aggregate = mock.MagicMock(\n return_value=fake_aggregate)\n scenario._list_hypervisors = mock.MagicMock(return_value=fake_hosts)\n scenario._aggregate_add_host = mock.MagicMock()\n scenario._aggregate_set_metadata = mock.MagicMock()\n scenario._create_flavor = mock.MagicMock(return_value=fake_flavor)\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n self.admin_clients(\"nova\").servers.get.return_value = fake_server\n\n scenario.run(\"img\", fake_metadata, availability_zone=\"nova\",\n boot_server_kwargs=fake_aggregate_kwargs)\n scenario._create_aggregate.assert_called_once_with(\"nova\")\n scenario._list_hypervisors.assert_called_once_with()\n scenario._aggregate_set_metadata.assert_called_once_with(\n fake_aggregate, fake_metadata)\n scenario._aggregate_add_host(fake_aggregate, \"fake_host_name\")\n scenario._create_flavor.assert_called_once_with(512, 1, 1)\n fake_flavor.set_keys.assert_called_once_with(fake_metadata)\n scenario._boot_server.assert_called_once_with(\"img\", \"flavor-id-0\",\n 
**fake_aggregate_kwargs)\n self.admin_clients(\"nova\").servers.get.assert_called_once_with(\n \"server-id-0\")\n\n self.assertEqual(getattr(\n fake_server, \"OS-EXT-SRV-ATTR:hypervisor_hostname\"),\n \"fake_host_name\")\n\n def test_create_aggregate_add_host_and_boot_server_failure(self):\n fake_aggregate = mock.Mock()\n fake_hosts = [mock.Mock(service={\"host\": \"fake_host_name\"})]\n fake_flavor = mock.MagicMock(id=\"flavor-id-0\", ram=512, disk=1,\n vcpus=1)\n fake_metadata = {\"test_metadata\": \"true\"}\n fake_server = mock.MagicMock(id=\"server-id-0\")\n setattr(fake_server, \"OS-EXT-SRV-ATTR:hypervisor_hostname\",\n \"wrong_host_name\")\n fake_boot_server_kwargs = {\"fake_arg1\": \"f\"}\n\n scenario = aggregates.CreateAggregateAddHostAndBootServer()\n scenario._create_aggregate = mock.MagicMock(\n return_value=fake_aggregate)\n scenario._list_hypervisors = mock.MagicMock(return_value=fake_hosts)\n scenario._aggregate_add_host = mock.MagicMock()\n scenario._aggregate_set_metadata = mock.MagicMock()\n scenario._create_flavor = mock.MagicMock(return_value=fake_flavor)\n scenario._boot_server = mock.MagicMock(return_value=fake_server)\n self.admin_clients(\"nova\").servers.get.return_value = fake_server\n\n self.assertRaises(exceptions.RallyException, scenario.run, \"img\",\n fake_metadata, \"nova\", fake_boot_server_kwargs)\n" }, { "alpha_fraction": 0.6570733785629272, "alphanum_fraction": 0.6598930358886719, "avg_line_length": 41.15163803100586, "blob_id": "aaa1a33fc73e16ef17ff00bd8844107fc7376b64", "content_id": "85935cb30d29788f650493d3c376b58ecdd5b0a8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10285, "license_type": "permissive", "max_line_length": 79, "num_lines": 244, "path": "/rally_openstack/task/scenarios/neutron/security_groups.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the 
\"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.neutron import utils\n\n\n\"\"\"Scenarios for Neutron Security Groups.\"\"\"\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronSecurityGroup.create_and_list_security_groups\",\n platform=\"openstack\")\nclass CreateAndListSecurityGroups(utils.NeutronScenario):\n\n def run(self, security_group_create_args=None):\n \"\"\"Create and list Neutron security-groups.\n\n Measure the \"neutron security-group-create\" and \"neutron\n security-group-list\" command performance.\n\n :param security_group_create_args: dict, POST /v2.0/security-groups\n request options\n \"\"\"\n security_group_create_args = security_group_create_args or {}\n self._create_security_group(**security_group_create_args)\n self._list_security_groups()\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronSecurityGroup.create_and_show_security_group\",\n platform=\"openstack\")\nclass CreateAndShowSecurityGroup(utils.NeutronScenario):\n\n def run(self, 
security_group_create_args=None):\n \"\"\"Create and show Neutron security-group.\n\n Measure the \"neutron security-group-create\" and \"neutron\n security-group-show\" command performance.\n\n :param security_group_create_args: dict, POST /v2.0/security-groups\n request options\n \"\"\"\n security_group_create_args = security_group_create_args or {}\n security_group = self._create_security_group(\n **security_group_create_args)\n msg = \"security_group isn't created\"\n self.assertTrue(security_group, err_msg=msg)\n\n self._show_security_group(security_group)\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronSecurityGroup.create_and_delete_security_groups\",\n platform=\"openstack\")\nclass CreateAndDeleteSecurityGroups(utils.NeutronScenario):\n\n def run(self, security_group_create_args=None):\n \"\"\"Create and delete Neutron security-groups.\n\n Measure the \"neutron security-group-create\" and \"neutron\n security-group-delete\" command performance.\n\n :param security_group_create_args: dict, POST /v2.0/security-groups\n request options\n \"\"\"\n security_group_create_args = security_group_create_args or {}\n security_group = self._create_security_group(\n **security_group_create_args)\n self._delete_security_group(security_group)\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronSecurityGroup.create_and_update_security_groups\",\n platform=\"openstack\")\nclass CreateAndUpdateSecurityGroups(utils.NeutronScenario):\n\n def run(self, security_group_create_args=None,\n security_group_update_args=None):\n \"\"\"Create and update Neutron security-groups.\n\n Measure the \"neutron 
security-group-create\" and \"neutron\n security-group-update\" command performance.\n\n :param security_group_create_args: dict, POST /v2.0/security-groups\n request options\n :param security_group_update_args: dict, PUT /v2.0/security-groups\n update options\n \"\"\"\n security_group_create_args = security_group_create_args or {}\n security_group_update_args = security_group_update_args or {}\n security_group = self._create_security_group(\n **security_group_create_args)\n self._update_security_group(security_group,\n **security_group_update_args)\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronSecurityGroup.create_and_list_security_group_rules\",\n platform=\"openstack\")\nclass CreateAndListSecurityGroupRules(utils.NeutronScenario):\n\n def run(self, security_group_rules_count=1,\n security_group_args=None,\n security_group_rule_args=None):\n \"\"\"Create and list Neutron security-group-rules.\n\n Measure the \"neutron security-group-rule-create\" and \"neutron\n security-group-rule-list\" command performance.\n\n :param security_group_rules_count: int, number of rules per\n security group\n :param security_group_args: dict, POST /v2.0/security-groups\n request options\n :param security_group_rule_args: dict,\n POST /v2.0/security-group-rules request options\n \"\"\"\n security_group_args = security_group_args or {}\n security_group = self._create_security_group(**security_group_args)\n msg = \"security_group isn't created\"\n self.assertTrue(security_group, err_msg=msg)\n rules = []\n for rule in range(security_group_rules_count):\n security_group_rule_args = security_group_rule_args or {}\n security_group_rule_args[\"port_range_min\"] = rule + 1\n security_group_rule_args[\"port_range_max\"] = rule + 1\n security_group_rule = self._create_security_group_rule(\n 
security_group[\"security_group\"][\"id\"],\n **security_group_rule_args)\n rules.append(security_group_rule)\n msg = \"security_group_rule isn't created\"\n self.assertTrue(security_group_rule, err_msg=msg)\n security_group_rules = self._list_security_group_rules()\n for rule in rules:\n self.assertIn(rule[\"security_group_rule\"][\"id\"],\n [sgr[\"id\"] for sgr in security_group_rules[\n \"security_group_rules\"]])\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronSecurityGroup.create_and_show_security_group_rule\",\n platform=\"openstack\")\nclass CreateAndShowSecurityGroupRule(utils.NeutronScenario):\n\n def run(self, security_group_args=None,\n security_group_rule_args=None):\n \"\"\"Create and show Neutron security-group-rule.\n\n Measure the \"neutron security-group-rule-create\" and \"neutron\n security-group-rule-show\" command performance.\n\n :param security_group_args: dict, POST /v2.0/security-groups\n request options\n :param security_group_rule_args: dict,\n POST /v2.0/security-group-rules request options\n \"\"\"\n security_group_args = security_group_args or {}\n security_group_rule_args = security_group_rule_args or {}\n\n security_group = self._create_security_group(**security_group_args)\n msg = \"security_group isn't created\"\n self.assertTrue(security_group, err_msg=msg)\n\n security_group_rule = self._create_security_group_rule(\n security_group[\"security_group\"][\"id\"], **security_group_rule_args)\n msg = \"security_group_rule isn't created\"\n self.assertTrue(security_group_rule, err_msg=msg)\n\n self._show_security_group_rule(\n security_group_rule[\"security_group_rule\"][\"id\"])\n\n\[email protected](\"required_services\",\n services=[consts.Service.NEUTRON])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email 
protected](\n context={\"cleanup@openstack\": [\"neutron\"]},\n name=\"NeutronSecurityGroup.create_and_delete_security_group_rule\",\n platform=\"openstack\")\nclass CreateAndDeleteSecurityGroupRule(utils.NeutronScenario):\n\n def run(self, security_group_args=None,\n security_group_rule_args=None):\n \"\"\"Create and delete Neutron security-group-rule.\n\n Measure the \"neutron security-group-rule-create\" and \"neutron\n security-group-rule-delete\" command performance.\n\n :param security_group_args: dict, POST /v2.0/security-groups\n request options\n :param security_group_rule_args: dict,\n POST /v2.0/security-group-rules request options\n \"\"\"\n security_group_args = security_group_args or {}\n security_group_rule_args = security_group_rule_args or {}\n\n security_group = self._create_security_group(**security_group_args)\n msg = \"security_group isn't created\"\n self.assertTrue(security_group, err_msg=msg)\n\n security_group_rule = self._create_security_group_rule(\n security_group[\"security_group\"][\"id\"], **security_group_rule_args)\n msg = \"security_group_rule isn't created\"\n self.assertTrue(security_group_rule, err_msg=msg)\n\n self._delete_security_group_rule(\n security_group_rule[\"security_group_rule\"][\"id\"])\n self._delete_security_group(security_group)\n" }, { "alpha_fraction": 0.6439613699913025, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 36.6363639831543, "blob_id": "96c29c57bcfb6ff54ddefdddf980fbbd4c411bdb", "content_id": "ffb00346d6ee8356884e2f1d57a793f00d49c45e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2070, "license_type": "permissive", "max_line_length": 78, "num_lines": 55, "path": "/tests/unit/task/contexts/quotas/test_designate_quotas.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this 
file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.quotas import designate_quotas\nfrom tests.unit import test\n\n\nclass DesignateQuotasTestCase(test.TestCase):\n\n def test_update(self):\n clients = mock.MagicMock()\n quotas = designate_quotas.DesignateQuotas(clients)\n tenant_id = mock.MagicMock()\n quotas_values = {\n \"api_export_size\": 5,\n \"zones\": 5,\n \"zone_recordsets\": 20,\n \"zone_records\": 20,\n \"recordset_records\": 20,\n }\n quotas.update(tenant_id, **quotas_values)\n clients.designate().quotas.update.assert_called_once_with(\n tenant_id, quotas_values)\n\n def test_delete(self):\n clients = mock.MagicMock()\n quotas = designate_quotas.DesignateQuotas(clients)\n tenant_id = mock.MagicMock()\n quotas.delete(tenant_id)\n clients.designate().quotas.reset.assert_called_once_with(tenant_id)\n\n def test_get(self):\n tenant_id = \"tenant_id\"\n quotas = {\"api_export_size\": -1, \"zones\": -1,\n \"zone_recordsets\": 2, \"zone_records\": 3,\n \"recordset_records\": 3}\n clients = mock.MagicMock()\n clients.designate.return_value.quotas.get.return_value = quotas\n designate_quo = designate_quotas.DesignateQuotas(clients)\n\n self.assertEqual(quotas, designate_quo.get(tenant_id))\n clients.designate().quotas.get.assert_called_once_with(tenant_id)\n" }, { "alpha_fraction": 0.6045063734054565, "alphanum_fraction": 0.6060489416122437, "avg_line_length": 27.4514102935791, "blob_id": "0b9d6fc60140a56a33d4cbd2fbd059b8ad1b69e9", "content_id": "d52d48ffed4b1689e9cb70ad54781aadbf5a80cb", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18152, "license_type": "permissive", "max_line_length": 78, "num_lines": 638, "path": "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\n\"\"\"List and compare most used OpenStack cloud resources.\"\"\"\n\nimport io\nimport json\nimport subprocess\n\nfrom ansible.module_utils.basic import AnsibleModule\n\nfrom rally.cli import cliutils\nfrom rally.common.plugin import discover\nfrom rally import plugins\n\ntry:\n from rally_openstack.common import consts\n from rally_openstack.common import credential\nexcept ImportError:\n # backward compatibility for stable branches\n from rally_openstack import consts\n from rally_openstack import credential\n\n\ndef skip_if_service(service):\n def wrapper(func):\n def inner(self):\n if service in self.clients.services().values():\n return []\n return func(self)\n return inner\n return wrapper\n\n\nclass ResourceManager(object):\n\n REQUIRED_SERVICE = None\n STR_ATTRS = (\"id\", \"name\")\n\n def __init__(self, clients):\n self.clients = clients\n\n def is_available(self):\n if self.REQUIRED_SERVICE:\n return self.REQUIRED_SERVICE in self.clients.services().values()\n return True\n\n @property\n def client(self):\n return getattr(self.clients, 
self.__class__.__name__.lower())()\n\n def get_resources(self):\n all_resources = []\n cls = self.__class__.__name__.lower()\n for prop in dir(self):\n if not prop.startswith(\"list_\"):\n continue\n f = getattr(self, prop)\n resources = f() or []\n resource_name = prop[5:][:-1]\n for raw_res in resources:\n res = {\"cls\": cls, \"resource_name\": resource_name,\n \"id\": {}, \"props\": {}}\n if not isinstance(raw_res, dict):\n raw_res = {k: getattr(raw_res, k) for k in dir(raw_res)\n if not k.startswith(\"_\")\n if not callable(getattr(raw_res, k))}\n for key, value in raw_res.items():\n if key.startswith(\"_\"):\n continue\n if key in self.STR_ATTRS:\n res[\"id\"][key] = value\n else:\n try:\n res[\"props\"][key] = json.dumps(value, indent=2)\n except TypeError:\n res[\"props\"][key] = str(value)\n if not res[\"id\"] and not res[\"props\"]:\n print(\"1: %s\" % raw_res)\n print(\"2: %s\" % cls)\n print(\"3: %s\" % resource_name)\n raise ValueError(\"Failed to represent resource %r\" %\n raw_res)\n all_resources.append(res)\n return all_resources\n\n\nclass Keystone(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.KEYSTONE\n\n def list_users(self):\n return self.client.users.list()\n\n def list_tenants(self):\n if hasattr(self.client, \"projects\"):\n return self.client.projects.list() # V3\n return self.client.tenants.list() # V2\n\n def list_roles(self):\n return self.client.roles.list()\n\n\nclass Magnum(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.MAGNUM\n\n def list_cluster_templates(self):\n result = []\n marker = None\n while True:\n ct_list = self.client.cluster_templates.list(marker=marker)\n if not ct_list:\n break\n result.extend(ct_list)\n marker = ct_list[-1].uuid\n return result\n\n def list_clusters(self):\n result = []\n marker = None\n while True:\n clusters = self.client.clusters.list(marker=marker)\n if not clusters:\n break\n result.extend(clusters)\n marker = clusters[-1].uuid\n return result\n\n\nclass 
Mistral(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.MISTRAL\n\n def list_workbooks(self):\n return self.client.workbooks.list()\n\n def list_workflows(self):\n return self.client.workflows.list()\n\n def list_executions(self):\n return self.client.executions.list()\n\n\nclass Nova(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.NOVA\n\n def list_flavors(self):\n return self.client.flavors.list()\n\n def list_aggregates(self):\n return self.client.aggregates.list()\n\n def list_hypervisors(self):\n return self.client.hypervisors.list()\n\n def list_keypairs(self):\n return self.client.keypairs.list()\n\n def list_servers(self):\n return self.client.servers.list(\n search_opts={\"all_tenants\": True})\n\n def list_server_groups(self):\n return self.client.server_groups.list(all_projects=True)\n\n def list_services(self):\n return self.client.services.list()\n\n def list_availability_zones(self):\n return self.client.availability_zones.list()\n\n\nclass Neutron(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.NEUTRON\n\n def has_extension(self, name):\n extensions = self.client.list_extensions().get(\"extensions\", [])\n return any(ext.get(\"alias\") == name for ext in extensions)\n\n def list_networks(self):\n return self.client.list_networks()[\"networks\"]\n\n def list_subnets(self):\n return self.client.list_subnets()[\"subnets\"]\n\n def list_routers(self):\n return self.client.list_routers()[\"routers\"]\n\n def list_ports(self):\n return self.client.list_ports()[\"ports\"]\n\n def list_floatingips(self):\n return self.client.list_floatingips()[\"floatingips\"]\n\n def list_security_groups(self):\n return self.client.list_security_groups()[\"security_groups\"]\n\n def list_trunks(self):\n if self.has_extension(\"trunks\"):\n return self.client.list_trunks()[\"trunks\"]\n\n def list_health_monitors(self):\n if self.has_extension(\"lbaas\"):\n return self.client.list_health_monitors()[\"health_monitors\"]\n\n def list_pools(self):\n if 
self.has_extension(\"lbaas\"):\n return self.client.list_pools()[\"pools\"]\n\n def list_vips(self):\n if self.has_extension(\"lbaas\"):\n return self.client.list_vips()[\"vips\"]\n\n def list_bgpvpns(self):\n if self.has_extension(\"bgpvpn\"):\n return self.client.list_bgpvpns()[\"bgpvpns\"]\n\n\nclass Glance(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.GLANCE\n\n def list_images(self):\n return self.client.images.list()\n\n\nclass Heat(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.HEAT\n\n def list_resource_types(self):\n return self.client.resource_types.list()\n\n def list_stacks(self):\n return self.client.stacks.list()\n\n\nclass Cinder(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.CINDER\n\n def list_availability_zones(self):\n return self.client.availability_zones.list()\n\n def list_backups(self):\n return self.client.backups.list()\n\n def list_volume_snapshots(self):\n return self.client.volume_snapshots.list()\n\n def list_volume_types(self):\n return self.client.volume_types.list()\n\n def list_encryption_types(self):\n return self.client.volume_encryption_types.list()\n\n def list_transfers(self):\n return self.client.transfers.list()\n\n def list_volumes(self):\n return self.client.volumes.list(search_opts={\"all_tenants\": True})\n\n def list_qos(self):\n return self.client.qos_specs.list()\n\n\nclass Senlin(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.SENLIN\n\n def list_clusters(self):\n return self.client.clusters()\n\n def list_profiles(self):\n return self.client.profiles()\n\n\nclass Manila(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.MANILA\n\n def list_shares(self):\n return self.client.shares.list(detailed=False,\n search_opts={\"all_tenants\": True})\n\n def list_share_networks(self):\n return self.client.share_networks.list(\n detailed=False, search_opts={\"all_tenants\": True})\n\n def list_share_servers(self):\n return self.client.share_servers.list(\n search_opts={\"all_tenants\": 
True})\n\n\nclass Gnocchi(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.GNOCCHI\n\n def list_resources(self):\n result = []\n marker = None\n while True:\n resources = self.client.resource.list(marker=marker)\n if not resources:\n break\n result.extend(resources)\n marker = resources[-1][\"id\"]\n return result\n\n def list_archive_policy_rules(self):\n return self.client.archive_policy_rule.list()\n\n def list_archive_policys(self):\n return self.client.archive_policy.list()\n\n def list_resource_types(self):\n return self.client.resource_type.list()\n\n def list_metrics(self):\n result = []\n marker = None\n while True:\n metrics = self.client.metric.list(marker=marker)\n if not metrics:\n break\n result.extend(metrics)\n marker = metrics[-1][\"id\"]\n return result\n\n\nclass Ironic(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.IRONIC\n\n def list_nodes(self):\n return self.client.node.list()\n\n\nclass Sahara(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.SAHARA\n\n def list_node_group_templates(self):\n return self.client.node_group_templates.list()\n\n\nclass Murano(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.MURANO\n\n def list_environments(self):\n return self.client.environments.list()\n\n def list_packages(self):\n return self.client.packages.list(include_disabled=True)\n\n\nclass Designate(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.DESIGNATE\n\n def list_zones(self):\n return self.clients.designate(\"2\").zones.list()\n\n def list_recordset(self):\n client = self.clients.designate(\"2\")\n results = []\n results.extend(client.recordsets.list(zone_id)\n for zone_id in client.zones.list())\n return results\n\n\nclass Trove(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.TROVE\n\n def list_backups(self):\n return self.client.backup.list()\n\n def list_clusters(self):\n return self.client.cluster.list()\n\n def list_configurations(self):\n return self.client.configuration.list()\n\n def 
list_databases(self):\n return self.client.database.list()\n\n def list_datastore(self):\n return self.client.datastore.list()\n\n def list_instances(self):\n return self.client.list(include_clustered=True)\n\n def list_modules(self):\n return self.client.module.list(datastore=\"all\")\n\n\nclass Monasca(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.MONASCA\n\n def list_metrics(self):\n return self.client.metrics.list()\n\n\nclass Watcher(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.WATCHER\n\n REPR_KEYS = (\"uuid\", \"name\")\n\n def list_audits(self):\n return self.client.audit.list()\n\n def list_audit_templates(self):\n return self.client.audit_template.list()\n\n def list_goals(self):\n return self.client.goal.list()\n\n def list_strategies(self):\n return self.client.strategy.list()\n\n def list_action_plans(self):\n return self.client.action_plan.list()\n\n\nclass Octavia(ResourceManager):\n\n REQUIRED_SERVICE = consts.Service.OCTAVIA\n\n def list_load_balancers(self):\n return self.client.load_balancer_list()[\"loadbalancers\"]\n\n def list_listeners(self):\n return self.client.listener_list()[\"listeners\"]\n\n def list_pools(self):\n return self.client.pool_list()[\"pools\"]\n\n def list_l7policies(self):\n return self.client.l7policy_list()[\"l7policies\"]\n\n def list_health_monitors(self):\n return self.client.health_monitor_list()[\"healthmonitors\"]\n\n def list_amphoras(self):\n return self.client.amphora_list()[\"amphorae\"]\n\n\nclass CloudResources(object):\n \"\"\"List and compare cloud resources.\n\n resources = CloudResources(auth_url=..., ...)\n saved_list = resources.list()\n\n # Do something with the cloud ...\n\n changes = resources.compare(saved_list)\n has_changed = any(changes)\n removed, added = changes\n \"\"\"\n\n def __init__(self, **kwargs):\n self.clients = credential.OpenStackCredential(**kwargs).clients()\n\n def list(self):\n managers_classes = discover.itersubclasses(ResourceManager)\n resources = []\n for 
cls in managers_classes:\n manager = cls(self.clients)\n if manager.is_available():\n resources.extend(manager.get_resources())\n return resources\n\n def compare(self, with_list):\n def make_uuid(res):\n return \"%s.%s:%s\" % (\n res[\"cls\"], res[\"resource_name\"],\n \";\".join([\"%s=%s\" % (k, v)\n for k, v in sorted(res[\"id\"].items())]))\n\n current_resources = dict((make_uuid(r), r) for r in self.list())\n saved_resources = dict((make_uuid(r), r) for r in with_list)\n\n removed = set(saved_resources.keys()) - set(current_resources.keys())\n removed = [saved_resources[k] for k in sorted(removed)]\n added = set(current_resources.keys()) - set(saved_resources.keys())\n added = [current_resources[k] for k in sorted(added)]\n\n return removed, added\n\n\ndef _print_tabular_resources(resources, table_label):\n def dict_formatter(d):\n return \"\\n\".join(\"%s:%s\" % (k, v) for k, v in d.items())\n\n out = io.StringIO()\n\n cliutils.print_list(\n objs=[dict(r) for r in resources],\n fields=(\"cls\", \"resource_name\", \"id\", \"fields\"),\n field_labels=(\"service\", \"resource type\", \"id\", \"fields\"),\n table_label=table_label,\n formatters={\"id\": lambda d: dict_formatter(d[\"id\"]),\n \"fields\": lambda d: dict_formatter(d[\"props\"])},\n out=out\n )\n out.write(\"\\n\")\n print(out.getvalue())\n\n\ndef dump_resources(resources_mgr, json_output):\n resources_list = resources_mgr.list()\n _print_tabular_resources(resources_list, \"Available resources.\")\n\n if json_output:\n with open(json_output, \"w\") as f:\n f.write(json.dumps(resources_list))\n return 0, resources_list\n\n\ndef check_resource(resources_mgs, compare_with, json_output):\n with open(compare_with) as f:\n compare_to = f.read()\n compare_to = json.loads(compare_to)\n changes = resources_mgs.compare(with_list=compare_to)\n removed, added = changes\n\n # Cinder has a feature - cache images for speeding-up time of creating\n # volumes from images. 
let's put such cache-volumes into expected list\n volume_names = [\n \"image-%s\" % i[\"id\"][\"id\"] for i in compare_to\n if i[\"cls\"] == \"glance\" and i[\"resource_name\"] == \"image\"]\n\n # filter out expected additions\n expected = []\n for resource in added:\n if (False # <- makes indent of other cases similar\n or (resource[\"cls\"] == \"keystone\"\n and resource[\"resource_name\"] == \"role\"\n and resource[\"id\"].get(\"name\") == \"_member_\")\n or (resource[\"cls\"] == \"neutron\"\n and resource[\"resource_name\"] == \"security_group\"\n and resource[\"id\"].get(\"name\") == \"default\")\n or (resource[\"cls\"] == \"cinder\"\n and resource[\"resource_name\"] == \"volume\"\n and resource[\"id\"].get(\"name\") in volume_names)\n\n or resource[\"cls\"] == \"murano\"\n\n # Glance has issues with uWSGI integration...\n # or resource[\"cls\"] == \"glance\"\n\n or resource[\"cls\"] == \"gnocchi\"):\n\n expected.append(resource)\n\n for resource in expected:\n added.remove(resource)\n\n if removed:\n _print_tabular_resources(removed, \"Removed resources\")\n\n if added:\n _print_tabular_resources(added, \"Added resources (unexpected)\")\n\n if expected:\n _print_tabular_resources(expected, \"Added resources (expected)\")\n\n result = {\"removed\": removed, \"added\": added, \"expected\": expected}\n if json_output:\n with open(json_output, \"w\") as f:\n f.write(json.dumps(result, indent=4))\n\n rc = 1 if any(changes) else 0\n return rc, result\n\n\[email protected]_plugins_are_loaded\ndef do_it(json_output, compare_with):\n\n out = subprocess.check_output(\n [\"rally\", \"env\", \"show\", \"--only-spec\", \"--env\", \"devstack\"])\n config = json.loads(out.decode(\"utf-8\"))\n config = config[\"existing@openstack\"]\n config.update(config.pop(\"admin\"))\n if \"users\" in config:\n del config[\"users\"]\n\n resources = CloudResources(**config)\n\n if compare_with:\n return check_resource(resources, compare_with, json_output)\n else:\n return 
dump_resources(resources, json_output)\n\n\ndef ansible_main():\n module = AnsibleModule(\n argument_spec=dict(\n json_output=dict(required=False, type=\"str\"),\n compare_with=dict(required=False, type=\"path\")\n )\n )\n\n rc, json_result = do_it(\n json_output=module.params.get(\"json_output\"),\n compare_with=module.params.get(\"compare_with\")\n )\n if rc:\n module.fail_json(\n msg=\"Unexpected changes of resources are detected.\",\n rc=1,\n resources=json_result\n )\n\n module.exit_json(rc=0, changed=True, resources=json_result)\n\n\nif __name__ == \"__main__\":\n ansible_main()\n" }, { "alpha_fraction": 0.5610524415969849, "alphanum_fraction": 0.564709484577179, "avg_line_length": 38.69355010986328, "blob_id": "497fadf8b53911053498bbecf3241330cfdda6ee", "content_id": "ec0fa12df617bb75ad3ddec56fff57eab46fb554", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9844, "license_type": "permissive", "max_line_length": 78, "num_lines": 248, "path": "/tests/unit/task/contexts/magnum/test_ca_certs.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.task.contexts.magnum import ca_certs\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.magnum\"\nSCN = \"rally_openstack.task.scenarios\"\n\n\nclass CaCertsGeneratorTestCase(test.ScenarioTestCase):\n\n def _gen_tenants(self, count):\n tenants = {}\n for id_ in range(count):\n tenants[str(id_)] = {\"name\": str(id_)}\n tenants[str(id_)][\"cluster\"] = \"rally_cluster_uuid\"\n return tenants\n\n def test__generate_csr_and_key(self):\n\n ca_cert_ctx = ca_certs.CaCertGenerator(self.context)\n result = ca_cert_ctx._generate_csr_and_key()\n\n assert result[\"csr\"] is not None\n assert result[\"key\"] is not None\n\n @mock.patch(\"%s.magnum.utils.MagnumScenario._create_ca_certificate\" % SCN)\n @mock.patch(\"%s.magnum.utils.MagnumScenario._get_ca_certificate\" % SCN)\n @mock.patch(\"%s.ca_certs.open\" % CTX, side_effect=mock.mock_open(),\n create=True)\n @mock.patch(\"%s.ca_certs.CaCertGenerator._generate_csr_and_key\"\n % CTX)\n @mock.patch(\"%s.magnum.utils.MagnumScenario._get_cluster_template\" % SCN)\n @mock.patch(\"%s.magnum.utils.MagnumScenario._get_cluster\" % SCN,\n return_value=mock.Mock())\n def test_setup(self, mock_magnum_scenario__get_cluster,\n mock_magnum_scenario__get_cluster_template,\n mock_ca_cert_generator__generate_csr_and_key,\n mock_open,\n mock_magnum_scenario__get_ca_certificate,\n mock_magnum_scenario__create_ca_certificate):\n tenants_count = 2\n users_per_tenant = 5\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for ten_id in tenants:\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": ten_id,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": tenants_count,\n \"users_per_tenant\": users_per_tenant,\n \"concurrent\": 10,\n },\n \"clusters\": {\n \"cluster_template_uuid\": 
\"123456789\",\n \"node_count\": 2\n },\n \"ca_certs\": {\n \"directory\": \"\"\n }\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n fake_ct = mock.Mock()\n fake_ct.tls_disabled = False\n mock_magnum_scenario__get_cluster_template.return_value = fake_ct\n fake_tls = {\"csr\": \"fake_csr\", \"key\": \"fake_key\"}\n mock_ca_cert_generator__generate_csr_and_key.return_value = fake_tls\n fake_ca_cert = mock.Mock()\n fake_ca_cert.pem = \"fake_ca_cert\"\n mock_magnum_scenario__get_ca_certificate.return_value = fake_ca_cert\n fake_cert = mock.Mock()\n fake_cert.pem = \"fake_cert\"\n mock_magnum_scenario__create_ca_certificate.return_value = fake_cert\n\n ca_cert_ctx = ca_certs.CaCertGenerator(self.context)\n ca_cert_ctx.setup()\n\n mock_cluster = mock_magnum_scenario__get_cluster.return_value\n mock_calls = [mock.call(mock_cluster.cluster_template_id)\n for i in range(tenants_count)]\n mock_magnum_scenario__get_cluster_template.assert_has_calls(\n mock_calls)\n mock_calls = [mock.call(\"rally_cluster_uuid\")\n for i in range(tenants_count)]\n mock_magnum_scenario__get_cluster.assert_has_calls(mock_calls)\n mock_magnum_scenario__get_ca_certificate.assert_has_calls(mock_calls)\n fake_csr_req = {\"cluster_uuid\": \"rally_cluster_uuid\",\n \"csr\": fake_tls[\"csr\"]}\n mock_calls = [mock.call(fake_csr_req)\n for i in range(tenants_count)]\n mock_magnum_scenario__create_ca_certificate.assert_has_calls(\n mock_calls)\n\n @mock.patch(\"%s.magnum.utils.MagnumScenario._create_ca_certificate\" % SCN)\n @mock.patch(\"%s.magnum.utils.MagnumScenario._get_ca_certificate\" % SCN)\n @mock.patch(\"%s.magnum.utils.MagnumScenario._get_cluster_template\" % SCN)\n @mock.patch(\"%s.magnum.utils.MagnumScenario._get_cluster\" % SCN,\n return_value=mock.Mock())\n def test_tls_disabled_setup(self, mock_magnum_scenario__get_cluster,\n mock_magnum_scenario__get_cluster_template,\n mock_magnum_scenario__get_ca_certificate,\n mock_magnum_scenario__create_ca_certificate):\n tenants_count = 2\n 
users_per_tenant = 5\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for ten_id in tenants:\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": ten_id,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": tenants_count,\n \"users_per_tenant\": users_per_tenant,\n \"concurrent\": 10,\n },\n \"clusters\": {\n \"cluster_template_uuid\": \"123456789\",\n \"node_count\": 2\n },\n \"ca_certs\": {\n \"directory\": \"\"\n }\n },\n \"users\": users,\n \"tenants\": tenants\n })\n\n fake_ct = mock.Mock()\n fake_ct.tls_disabled = True\n mock_magnum_scenario__get_cluster_template.return_value = fake_ct\n\n ca_cert_ctx = ca_certs.CaCertGenerator(self.context)\n ca_cert_ctx.setup()\n\n mock_cluster = mock_magnum_scenario__get_cluster.return_value\n mock_calls = [mock.call(mock_cluster.cluster_template_id)\n for i in range(tenants_count)]\n mock_magnum_scenario__get_cluster_template.assert_has_calls(\n mock_calls)\n mock_calls = [mock.call(\"rally_cluster_uuid\")\n for i in range(tenants_count)]\n mock_magnum_scenario__get_cluster.assert_has_calls(mock_calls)\n mock_magnum_scenario__get_ca_certificate.assert_not_called()\n mock_magnum_scenario__create_ca_certificate.assert_not_called()\n\n @mock.patch(\"os.remove\", return_value=mock.Mock())\n @mock.patch(\"os.path.join\", return_value=mock.Mock())\n @mock.patch(\"%s.magnum.utils.MagnumScenario._get_cluster_template\" % SCN)\n @mock.patch(\"%s.magnum.utils.MagnumScenario._get_cluster\" % SCN,\n return_value=mock.Mock())\n def test_cleanup(self, mock_magnum_scenario__get_cluster,\n mock_magnum_scenario__get_cluster_template,\n mock_os_path_join, mock_os_remove):\n\n tenants_count = 2\n users_per_tenant = 5\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for ten_id in tenants:\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": ten_id,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n 
\"config\": {\n },\n \"ca_certs_directory\": \"\",\n \"users\": users,\n \"tenants\": tenants\n })\n\n fake_ct = mock.Mock()\n fake_ct.tls_disabled = False\n mock_magnum_scenario__get_cluster_template.return_value = fake_ct\n\n ca_cert_ctx = ca_certs.CaCertGenerator(self.context)\n ca_cert_ctx.cleanup()\n\n cluster_uuid = \"rally_cluster_uuid\"\n dir = self.context[\"ca_certs_directory\"]\n mock_os_path_join.assert_has_calls(dir, cluster_uuid.__add__(\".key\"))\n mock_os_path_join.assert_has_calls(\n dir, cluster_uuid.__add__(\"_ca.crt\"))\n mock_os_path_join.assert_has_calls(dir, cluster_uuid.__add__(\".crt\"))\n\n @mock.patch(\"os.remove\", return_value=mock.Mock())\n @mock.patch(\"os.path.join\", return_value=mock.Mock())\n @mock.patch(\"%s.magnum.utils.MagnumScenario._get_cluster_template\" % SCN)\n @mock.patch(\"%s.magnum.utils.MagnumScenario._get_cluster\" % SCN,\n return_value=mock.Mock())\n def test_tls_disabled_cleanup(self, mock_magnum_scenario__get_cluster,\n mock_magnum_scenario__get_cluster_template,\n mock_os_path_join, mock_os_remove):\n\n tenants_count = 2\n users_per_tenant = 5\n\n tenants = self._gen_tenants(tenants_count)\n users = []\n for ten_id in tenants:\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": ten_id,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n \"config\": {\n },\n \"ca_certs_directory\": \"\",\n \"users\": users,\n \"tenants\": tenants\n })\n\n fake_ct = mock.Mock()\n fake_ct.tls_disabled = True\n mock_magnum_scenario__get_cluster_template.return_value = fake_ct\n\n ca_cert_ctx = ca_certs.CaCertGenerator(self.context)\n ca_cert_ctx.cleanup()\n\n mock_os_path_join.assert_not_called()\n mock_os_remove.assert_not_called()\n" }, { "alpha_fraction": 0.6346599459648132, "alphanum_fraction": 0.6385239362716675, "avg_line_length": 43.620689392089844, "blob_id": "5198f8dd88cfd215febb9858b47c019232bd6dbf", "content_id": "fc014921bf7fc3365e98d8a1e40b003a84281c41", "detected_licenses": [ 
"Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5176, "license_type": "permissive", "max_line_length": 78, "num_lines": 116, "path": "/rally_openstack/task/scenarios/sahara/node_group_templates.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import types\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.sahara import utils\n\n\"\"\"Scenarios for Sahara node group templates.\"\"\"\n\n\[email protected](flavor={\"type\": \"nova_flavor\"})\[email protected](\"flavor_exists\", param_name=\"flavor\")\[email protected](\"required_services\", services=[consts.Service.SAHARA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"sahara\"]},\n name=\"SaharaNodeGroupTemplates.create_and_list_node_group_templates\",\n platform=\"openstack\")\nclass CreateAndListNodeGroupTemplates(utils.SaharaScenario):\n\n def run(self, flavor, plugin_name=\"vanilla\",\n hadoop_version=\"1.2.1\", use_autoconfig=True):\n \"\"\"Create and list Sahara Node Group Templates.\n\n This scenario creates two Node Group Templates with different set of\n node processes. 
The master Node Group Template contains Hadoop's\n management processes. The worker Node Group Template contains\n Hadoop's worker processes.\n\n By default the templates are created for the vanilla Hadoop\n provisioning plugin using the version 1.2.1\n\n After the templates are created the list operation is called.\n\n :param flavor: Nova flavor that will be for nodes in the\n created node groups\n :param plugin_name: name of a provisioning plugin\n :param hadoop_version: version of Hadoop distribution supported by\n the specified plugin.\n :param use_autoconfig: If True, instances of the node group will be\n automatically configured during cluster\n creation. If False, the configuration values\n should be specify manually\n \"\"\"\n\n self._create_master_node_group_template(flavor_id=flavor,\n plugin_name=plugin_name,\n hadoop_version=hadoop_version,\n use_autoconfig=use_autoconfig)\n self._create_worker_node_group_template(flavor_id=flavor,\n plugin_name=plugin_name,\n hadoop_version=hadoop_version,\n use_autoconfig=use_autoconfig)\n self._list_node_group_templates()\n\n\[email protected](flavor={\"type\": \"nova_flavor\"})\[email protected](\"flavor_exists\", param_name=\"flavor\")\[email protected](\"required_services\", services=[consts.Service.SAHARA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\n context={\"cleanup@openstack\": [\"sahara\"]},\n name=\"SaharaNodeGroupTemplates.create_delete_node_group_templates\",\n platform=\"openstack\")\nclass CreateDeleteNodeGroupTemplates(utils.SaharaScenario):\n\n def run(self, flavor, plugin_name=\"vanilla\",\n hadoop_version=\"1.2.1\", use_autoconfig=True):\n \"\"\"Create and delete Sahara Node Group Templates.\n\n This scenario creates and deletes two most common types of\n Node Group Templates.\n\n By default the templates are created for the vanilla Hadoop\n provisioning plugin using the version 1.2.1\n\n :param flavor: Nova flavor that will be for nodes in the\n 
created node groups\n :param plugin_name: name of a provisioning plugin\n :param hadoop_version: version of Hadoop distribution supported by\n the specified plugin.\n :param use_autoconfig: If True, instances of the node group will be\n automatically configured during cluster\n creation. If False, the configuration values\n should be specify manually\n \"\"\"\n\n master_ngt = self._create_master_node_group_template(\n flavor_id=flavor,\n plugin_name=plugin_name,\n hadoop_version=hadoop_version,\n use_autoconfig=use_autoconfig)\n\n worker_ngt = self._create_worker_node_group_template(\n flavor_id=flavor,\n plugin_name=plugin_name,\n hadoop_version=hadoop_version,\n use_autoconfig=use_autoconfig)\n\n self._delete_node_group_template(master_ngt)\n self._delete_node_group_template(worker_ngt)\n" }, { "alpha_fraction": 0.6289448142051697, "alphanum_fraction": 0.6337850689888, "avg_line_length": 35.63120651245117, "blob_id": "8563fd71ff69cd46a161eef7b95a4c2c29ba85c8", "content_id": "549a2fba2d8b2c6c72f4459c9d530199082feee0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10330, "license_type": "permissive", "max_line_length": 79, "num_lines": 282, "path": "/tests/unit/common/services/identity/test_keystone_common.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally_openstack.common import osclients\nfrom rally_openstack.common import service\nfrom rally_openstack.common.services.identity import identity\nfrom rally_openstack.common.services.identity import keystone_common\nfrom tests.unit import test\n\n\nclass FullUnifiedKeystone(keystone_common.UnifiedKeystoneMixin,\n service.Service):\n \"\"\"Implementation of UnifiedKeystoneMixin with Service base class.\"\"\"\n pass\n\n\nclass UnifiedKeystoneMixinTestCase(test.TestCase):\n def setUp(self):\n super(UnifiedKeystoneMixinTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.name_generator = mock.MagicMock()\n self.impl = mock.MagicMock()\n self.version = \"some\"\n self.service = FullUnifiedKeystone(\n clients=self.clients, name_generator=self.name_generator)\n self.service._impl = self.impl\n self.service.version = self.version\n\n def test__unify_service(self):\n class SomeFakeService(object):\n id = 123123123123123\n name = \"asdfasdfasdfasdfadf\"\n other_var = \"asdfasdfasdfasdfasdfasdfasdf\"\n\n service = self.service._unify_service(SomeFakeService())\n self.assertIsInstance(service, identity.Service)\n self.assertEqual(SomeFakeService.id, service.id)\n self.assertEqual(SomeFakeService.name, service.name)\n\n def test__unify_role(self):\n class SomeFakeRole(object):\n id = 123123123123123\n name = \"asdfasdfasdfasdfadf\"\n other_var = \"asdfasdfasdfasdfasdfasdfasdf\"\n\n role = self.service._unify_role(SomeFakeRole())\n self.assertIsInstance(role, identity.Role)\n self.assertEqual(SomeFakeRole.id, role.id)\n self.assertEqual(SomeFakeRole.name, role.name)\n\n def test_delete_user(self):\n user_id = \"id\"\n\n self.service.delete_user(user_id)\n self.impl.delete_user.assert_called_once_with(user_id)\n\n def test_get_user(self):\n user_id = \"id\"\n\n self.service._unify_user = mock.MagicMock()\n\n 
self.assertEqual(self.service._unify_user.return_value,\n self.service.get_user(user_id))\n\n self.impl.get_user.assert_called_once_with(user_id)\n self.service._unify_user.assert_called_once_with(\n self.impl.get_user.return_value)\n\n def test_create_service(self):\n self.service._unify_service = mock.MagicMock()\n\n name = \"some_Service\"\n service_type = \"computeNextGen\"\n description = \"we will Rock you!\"\n\n self.assertEqual(self.service._unify_service.return_value,\n self.service.create_service(\n name=name, service_type=service_type,\n description=description))\n\n self.service._unify_service.assert_called_once_with(\n self.service._impl.create_service.return_value)\n self.service._impl.create_service.assert_called_once_with(\n name=name, service_type=service_type, description=description)\n\n def test_delete_service(self):\n service_id = \"id\"\n\n self.service.delete_service(service_id)\n self.impl.delete_service.assert_called_once_with(service_id)\n\n def test_get_service(self):\n service_id = \"id\"\n\n self.service._unify_service = mock.MagicMock()\n\n self.assertEqual(self.service._unify_service.return_value,\n self.service.get_service(service_id))\n\n self.impl.get_service.assert_called_once_with(service_id)\n self.service._unify_service.assert_called_once_with(\n self.impl.get_service.return_value)\n\n def test_get_service_by_name(self):\n service_id = \"id\"\n\n self.service._unify_service = mock.MagicMock()\n\n self.assertEqual(self.service._unify_service.return_value,\n self.service.get_service_by_name(service_id))\n\n self.impl.get_service_by_name.assert_called_once_with(service_id)\n self.service._unify_service.assert_called_once_with(\n self.impl.get_service_by_name.return_value)\n\n def test_delete_role(self):\n role_id = \"id\"\n\n self.service.delete_role(role_id)\n self.impl.delete_role.assert_called_once_with(role_id)\n\n def test_get_role(self):\n role_id = \"id\"\n\n self.service._unify_role = mock.MagicMock()\n\n 
self.assertEqual(self.service._unify_role.return_value,\n self.service.get_role(role_id))\n\n self.impl.get_role.assert_called_once_with(role_id)\n self.service._unify_role.assert_called_once_with(\n self.impl.get_role.return_value)\n\n def test_list_ec2credentials(self):\n user_id = \"id\"\n self.assertEqual(self.impl.list_ec2credentials.return_value,\n self.service.list_ec2credentials(user_id))\n\n self.impl.list_ec2credentials.assert_called_once_with(user_id)\n\n def test_delete_ec2credential(self):\n user_id = \"id\"\n access = mock.MagicMock()\n\n self.assertEqual(self.impl.delete_ec2credential.return_value,\n self.service.delete_ec2credential(user_id,\n access=access))\n\n self.impl.delete_ec2credential.assert_called_once_with(user_id=user_id,\n access=access)\n\n def test_fetch_token(self):\n\n self.assertEqual(self.impl.fetch_token.return_value,\n self.service.fetch_token())\n\n self.impl.fetch_token.assert_called_once_with()\n\n def test_validate_token(self):\n token = \"id\"\n\n self.assertEqual(self.impl.validate_token.return_value,\n self.service.validate_token(token))\n\n self.impl.validate_token.assert_called_once_with(token)\n\n\nclass FullKeystone(service.Service, keystone_common.KeystoneMixin):\n \"\"\"Implementation of KeystoneMixin with Service base class.\"\"\"\n pass\n\n\nclass KeystoneMixinTestCase(test.TestCase):\n def setUp(self):\n super(KeystoneMixinTestCase, self).setUp()\n self.clients = mock.MagicMock()\n self.kc = self.clients.keystone.return_value\n self.name_generator = mock.MagicMock()\n self.version = \"some\"\n self.service = FullKeystone(\n clients=self.clients, name_generator=self.name_generator)\n self.service.version = self.version\n\n def test_list_users(self):\n self.assertEqual(self.kc.users.list.return_value,\n self.service.list_users())\n self.kc.users.list.assert_called_once_with()\n\n def test_delete_user(self):\n user_id = \"fake_id\"\n self.service.delete_user(user_id)\n 
self.kc.users.delete.assert_called_once_with(user_id)\n\n def test_get_user(self):\n user_id = \"fake_id\"\n self.service.get_user(user_id)\n self.kc.users.get.assert_called_once_with(user_id)\n\n def test_delete_service(self):\n service_id = \"fake_id\"\n self.service.delete_service(service_id)\n self.kc.services.delete.assert_called_once_with(service_id)\n\n def test_list_services(self):\n self.assertEqual(self.kc.services.list.return_value,\n self.service.list_services())\n self.kc.services.list.assert_called_once_with()\n\n def test_get_service(self):\n service_id = \"fake_id\"\n self.service.get_service(service_id)\n self.kc.services.get.assert_called_once_with(service_id)\n\n def test_get_service_by_name(self):\n class FakeService(object):\n def __init__(self, name):\n self.name = name\n service_name = \"fake_name\"\n services = [FakeService(name=\"foo\"), FakeService(name=service_name),\n FakeService(name=\"bar\")]\n self.service.list_services = mock.MagicMock(return_value=services)\n\n self.assertEqual(services[1],\n self.service.get_service_by_name(service_name))\n\n def test_delete_role(self):\n role_id = \"fake_id\"\n self.service.delete_role(role_id)\n self.kc.roles.delete.assert_called_once_with(role_id)\n\n def test_list_roles(self):\n self.assertEqual(self.kc.roles.list.return_value,\n self.service.list_roles())\n self.kc.roles.list.assert_called_once_with()\n\n def test_get_role(self):\n role_id = \"fake_id\"\n self.service.get_role(role_id)\n self.kc.roles.get.assert_called_once_with(role_id)\n\n def test_list_ec2credentials(self):\n user_id = \"fake_id\"\n\n self.assertEqual(self.kc.ec2.list.return_value,\n self.service.list_ec2credentials(user_id))\n self.kc.ec2.list.assert_called_once_with(user_id)\n\n def test_delete_ec2credentials(self):\n user_id = \"fake_id\"\n access = mock.MagicMock()\n\n self.service.delete_ec2credential(user_id, access=access)\n self.kc.ec2.delete.assert_called_once_with(user_id=user_id,\n access=access)\n\n 
@mock.patch(\"rally_openstack.common.osclients.Clients\",\n spec=osclients.Clients)\n def test_fetch_token(self, mock_clients):\n mock_clients.return_value = mock.Mock(keystone=mock.Mock())\n expected_token = mock_clients.return_value.keystone.auth_ref.auth_token\n self.assertEqual(expected_token, self.service.fetch_token())\n mock_clients.assert_called_once_with(\n credential=self.clients.credential)\n\n def test_validate_token(self):\n token = \"some_token\"\n\n self.service.validate_token(token)\n self.kc.tokens.validate.assert_called_once_with(token)\n" }, { "alpha_fraction": 0.632964015007019, "alphanum_fraction": 0.6412742137908936, "avg_line_length": 34.219512939453125, "blob_id": "a7ca4ebe2eaee5fcded47b64911c24f2d8d1ab6b", "content_id": "0f83bdf6fe3ead9744747730a65bb61ddef20194", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1444, "license_type": "permissive", "max_line_length": 78, "num_lines": 41, "path": "/tests/functional/test_certification_task.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# Copyright 2014: Catalyst IT Ltd.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport traceback\nimport unittest\n\nimport rally_openstack\nfrom tests.functional import utils\n\n\nclass TestPreCreatedTasks(unittest.TestCase):\n\n def test_task_samples_is_valid(self):\n\n rally = utils.Rally()\n full_path = os.path.join(\n os.path.dirname(rally_openstack.__file__), os.pardir,\n \"tasks\", \"openstack\")\n task_path = os.path.join(full_path, \"task.yaml\")\n args_path = os.path.join(full_path, \"task_arguments.yaml\")\n\n try:\n rally(\"task validate --task %s --task-args-file %s\" % (task_path,\n args_path))\n except Exception:\n print(traceback.format_exc())\n self.fail(\"Wrong task config %s\" % full_path)\n" }, { "alpha_fraction": 0.6089887619018555, "alphanum_fraction": 0.6139326095581055, "avg_line_length": 36.08333206176758, "blob_id": "f63fd1434a2d4c52d53d7334a40ce2961fb826f1", "content_id": "15ce843ff51b748f1c96f4462b101cf3294ff658", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2225, "license_type": "permissive", "max_line_length": 78, "num_lines": 60, "path": "/tests/unit/task/scenarios/neutron/test_loadbalancer_v2.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.scenarios.neutron import loadbalancer_v2\nfrom tests.unit import test\n\n\[email protected]\nclass NeutronLoadbalancerv2TestCase(test.TestCase):\n\n def _get_context(self):\n context = test.get_test_context()\n context.update({\n \"user\": {\n \"id\": \"fake_user\",\n \"tenant_id\": \"fake_tenant\",\n \"credential\": mock.MagicMock()\n },\n \"tenant\": {\"id\": \"fake_tenant\",\n \"networks\": [{\"id\": \"fake_net\",\n \"subnets\": [\"fake_subnet\"]}]}})\n return context\n\n @ddt.data(\n {},\n {\"lb_create_args\": None},\n {\"lb_create_args\": {}},\n {\"lb_create_args\": {\"name\": \"given-name\"}},\n )\n @ddt.unpack\n def test_create_and_list_load_balancers(self, lb_create_args=None):\n context = self._get_context()\n scenario = loadbalancer_v2.CreateAndListLoadbalancers(context)\n lb_create_args = lb_create_args or {}\n networks = context[\"tenant\"][\"networks\"]\n scenario._create_lbaasv2_loadbalancer = mock.Mock()\n scenario._list_lbaasv2_loadbalancers = mock.Mock()\n scenario.run(lb_create_args=lb_create_args)\n\n subnets = []\n mock_has_calls = []\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet in subnets:\n mock_has_calls.append(mock.call(subnet, **lb_create_args))\n scenario._create_lbaasv2_loadbalancer.assert_has_calls(mock_has_calls)\n scenario._list_lbaasv2_loadbalancers.assert_called_once_with()\n" }, { "alpha_fraction": 0.5593057870864868, "alphanum_fraction": 0.5613405108451843, "avg_line_length": 40.56716537475586, "blob_id": "de29eec8b9fabdd18e789f381b800b99fff6a42c", "content_id": "81b3b6930316ceee4b0e0856498ca72969771776", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8355, "license_type": "permissive", "max_line_length": 79, "num_lines": 201, "path": 
"/rally_openstack/task/contexts/manila/manila_share_networks.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\nfrom rally.common import logging\nfrom rally.common import validation\nfrom rally import exceptions\n\nfrom rally_openstack.common import consts as rally_consts\nfrom rally_openstack.task.cleanup import manager as resource_manager\nfrom rally_openstack.task import context\nfrom rally_openstack.task.contexts.manila import consts\nfrom rally_openstack.task.scenarios.manila import utils as manila_utils\n\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\nCONTEXT_NAME = consts.SHARE_NETWORKS_CONTEXT_NAME\n\nSHARE_NETWORKS_ARG_DESCR = \"\"\"\nThis context arg will be used only when context arg \"use_share_networks\" is\nset to True.\n\nIf context arg 'share_networks' has values then they will be used else share\nnetworks will be autocreated - one for each tenant network. If networks do not\nexist then will be created one share network for each tenant without network\ndata.\n\nExpected value is dict of lists where tenant Name or ID is key and list of\nshare_network Names or IDs is value. Example:\n\n .. 
code-block:: json\n\n \"context\": {\n \"manila_share_networks\": {\n \"use_share_networks\": true,\n \"share_networks\": {\n \"tenant_1_name_or_id\": [\"share_network_1_name_or_id\",\n \"share_network_2_name_or_id\"],\n \"tenant_2_name_or_id\": [\"share_network_3_name_or_id\"]}\n }\n }\n\nAlso, make sure that all 'existing users' in appropriate registered deployment\nhave share networks if its usage is enabled, else Rally will randomly take\nusers that does not satisfy criteria.\n\"\"\"\n\n\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](name=CONTEXT_NAME, platform=\"openstack\", order=450)\nclass ShareNetworks(context.OpenStackContext):\n \"\"\"This context creates share networks for Manila project.\"\"\"\n CONFIG_SCHEMA = {\n \"type\": \"object\",\n \"$schema\": rally_consts.JSON_SCHEMA,\n \"properties\": {\n \"use_share_networks\": {\n \"type\": \"boolean\",\n \"description\": \"Specifies whether manila should use share \"\n \"networks for share creation or not.\"},\n\n \"share_networks\": {\n \"type\": \"object\",\n \"description\": SHARE_NETWORKS_ARG_DESCR,\n \"additionalProperties\": True\n },\n },\n \"additionalProperties\": False\n }\n DEFAULT_CONFIG = {\n \"use_share_networks\": False,\n \"share_networks\": {},\n }\n\n def _setup_for_existing_users(self):\n if (self.config[\"use_share_networks\"]\n and not self.config[\"share_networks\"]):\n msg = (\"Usage of share networks was enabled but for deployment \"\n \"with existing users share networks also should be \"\n \"specified via arg 'share_networks'\")\n raise exceptions.ContextSetupFailure(\n ctx_name=self.get_name(), msg=msg)\n\n for tenant_name_or_id, share_networks in self.config[\n \"share_networks\"].items():\n # Verify project existence\n for tenant in self.context[\"tenants\"].values():\n if tenant_name_or_id in (tenant[\"id\"], tenant[\"name\"]):\n tenant_id = tenant[\"id\"]\n existing_user = None\n for user in self.context[\"users\"]:\n if 
user[\"tenant_id\"] == tenant_id:\n existing_user = user\n break\n break\n else:\n msg = (\"Provided tenant Name or ID '%s' was not found in \"\n \"existing tenants.\") % tenant_name_or_id\n raise exceptions.ContextSetupFailure(\n ctx_name=self.get_name(), msg=msg)\n self.context[\"tenants\"][tenant_id][CONTEXT_NAME] = {}\n self.context[\"tenants\"][tenant_id][CONTEXT_NAME][\n \"share_networks\"] = []\n\n manila_scenario = manila_utils.ManilaScenario({\n \"user\": existing_user\n })\n existing_sns = manila_scenario._list_share_networks(\n detailed=False, search_opts={\"project_id\": tenant_id})\n\n for sn_name_or_id in share_networks:\n # Verify share network existence\n for sn in existing_sns:\n if sn_name_or_id in (sn.id, sn.name):\n break\n else:\n msg = (\"Specified share network '%(sn)s' does not \"\n \"exist for tenant '%(tenant_id)s'\"\n % {\"sn\": sn_name_or_id, \"tenant_id\": tenant_id})\n raise exceptions.ContextSetupFailure(\n ctx_name=self.get_name(), msg=msg)\n\n # Set share network for project\n self.context[\"tenants\"][tenant_id][CONTEXT_NAME][\n \"share_networks\"].append(sn.to_dict())\n\n def _setup_for_autocreated_users(self):\n # Create share network for each network of tenant\n for user, tenant_id in (self._iterate_per_tenants(\n self.context.get(\"users\", []))):\n networks = self.context[\"tenants\"][tenant_id].get(\"networks\")\n manila_scenario = manila_utils.ManilaScenario({\n \"task\": self.task,\n \"owner_id\": self.get_owner_id(),\n \"user\": user\n })\n manila_scenario.RESOURCE_NAME_FORMAT = self.RESOURCE_NAME_FORMAT\n self.context[\"tenants\"][tenant_id][CONTEXT_NAME] = {\n \"share_networks\": []}\n data = {}\n\n def _setup_share_network(tenant_id, data):\n share_network = manila_scenario._create_share_network(\n **data).to_dict()\n self.context[\"tenants\"][tenant_id][CONTEXT_NAME][\n \"share_networks\"].append(share_network)\n for ss in self.context[\"tenants\"][tenant_id].get(\n consts.SECURITY_SERVICES_CONTEXT_NAME, {}).get(\n 
\"security_services\", []):\n manila_scenario._add_security_service_to_share_network(\n share_network[\"id\"], ss[\"id\"])\n\n if networks:\n for network in networks:\n if network.get(\"cidr\"):\n data[\"nova_net_id\"] = network[\"id\"]\n elif network.get(\"subnets\"):\n data[\"neutron_net_id\"] = network[\"id\"]\n data[\"neutron_subnet_id\"] = network[\"subnets\"][0]\n else:\n LOG.warning(\"Can't determine network service provider.\"\n \" Share network will have no data.\")\n _setup_share_network(tenant_id, data)\n else:\n _setup_share_network(tenant_id, data)\n\n def setup(self):\n self.context[CONTEXT_NAME] = {}\n if not self.config[\"use_share_networks\"]:\n pass\n elif self.context[\"config\"].get(\"existing_users\"):\n self._setup_for_existing_users()\n else:\n self._setup_for_autocreated_users()\n\n def cleanup(self):\n if (not self.context[\"config\"].get(\"existing_users\")\n or self.config[\"use_share_networks\"]):\n resource_manager.cleanup(\n names=[\"manila.share_networks\"],\n users=self.context.get(\"users\", []),\n superclass=self.__class__,\n task_id=self.get_owner_id())\n else:\n # NOTE(vponomaryov): assume that share networks were not created\n # by test run.\n return\n" }, { "alpha_fraction": 0.575679361820221, "alphanum_fraction": 0.5799301862716675, "avg_line_length": 37.7470588684082, "blob_id": "34d20c7ac1febead2617678e337f6f8b832d92e0", "content_id": "3ecd880ae3f2c82a7922297a2de4cd12f3c9b406", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6587, "license_type": "permissive", "max_line_length": 79, "num_lines": 170, "path": "/tests/unit/task/contexts/manila/test_manila_security_services.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2015 Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.contexts.manila import consts\nfrom rally_openstack.task.contexts.manila import manila_security_services\nfrom rally_openstack.task.scenarios.manila import utils as manila_utils\nfrom tests.unit import test\n\nCONTEXT_NAME = consts.SECURITY_SERVICES_CONTEXT_NAME\n\n\[email protected]\nclass SecurityServicesTestCase(test.ScenarioTestCase):\n TENANTS_AMOUNT = 3\n USERS_PER_TENANT = 4\n SECURITY_SERVICES = [\n {\"security_service_type\": ss_type,\n \"dns_ip\": \"fake_dns_ip_%s\" % ss_type,\n \"server\": \"fake_server_%s\" % ss_type,\n \"domain\": \"fake_domain_%s\" % ss_type,\n \"user\": \"fake_user_%s\" % ss_type,\n \"password\": \"fake_password_%s\" % ss_type}\n for ss_type in (\"ldap\", \"kerberos\", \"active_directory\")\n ]\n\n def _get_context(self, security_services=None, networks_per_tenant=2,\n neutron_network_provider=True):\n if security_services is None:\n security_services = self.SECURITY_SERVICES\n tenants = {}\n for t_id in range(self.TENANTS_AMOUNT):\n tenants[str(t_id)] = {\"name\": str(t_id)}\n tenants[str(t_id)][\"networks\"] = []\n for i in range(networks_per_tenant):\n network = {\"id\": \"fake_net_id_%s\" % i}\n if neutron_network_provider:\n network[\"subnets\"] = [\"fake_subnet_id_of_net_%s\" % i]\n else:\n network[\"cidr\"] = \"101.0.5.0/24\"\n tenants[str(t_id)][\"networks\"].append(network)\n users = []\n for t_id in tenants.keys():\n for i in range(self.USERS_PER_TENANT):\n users.append({\"id\": i, \"tenant_id\": t_id, \"endpoint\": \"fake\"})\n context = {\n 
\"config\": {\n \"users\": {\n \"tenants\": self.TENANTS_AMOUNT,\n \"users_per_tenant\": self.USERS_PER_TENANT,\n },\n CONTEXT_NAME: {\n \"security_services\": security_services,\n },\n },\n \"admin\": {\n \"endpoint\": mock.MagicMock(),\n },\n \"task\": mock.MagicMock(),\n \"owner_id\": \"foo_uuid\",\n \"users\": users,\n \"tenants\": tenants,\n }\n return context\n\n def test_init(self):\n context = {\n \"task\": mock.MagicMock(),\n \"config\": {\n CONTEXT_NAME: {\"foo\": \"bar\"},\n \"not_manila\": {\"not_manila_key\": \"not_manila_value\"},\n }\n }\n\n inst = manila_security_services.SecurityServices(context)\n\n self.assertEqual(\"bar\", inst.config.get(\"foo\"))\n self.assertFalse(inst.config.get(\"security_services\"))\n self.assertEqual(445, inst.get_order())\n self.assertEqual(CONTEXT_NAME, inst.get_name())\n\n @mock.patch.object(manila_security_services.manila_utils, \"ManilaScenario\")\n @ddt.data(True, False)\n def test_setup_security_services_set(self, neutron_network_provider,\n mock_manila_scenario):\n ctxt = self._get_context(\n neutron_network_provider=neutron_network_provider)\n inst = manila_security_services.SecurityServices(ctxt)\n\n inst.setup()\n\n self.assertEqual(\n self.TENANTS_AMOUNT, mock_manila_scenario.call_count)\n self.assertEqual(\n mock_manila_scenario.call_args_list,\n [mock.call({\n \"task\": inst.task,\n \"owner_id\": \"foo_uuid\",\n \"user\": user})\n for user in inst.context[\"users\"] if user[\"id\"] == 0]\n )\n mock_create_security_service = (\n mock_manila_scenario.return_value._create_security_service)\n expected_calls = []\n for ss in self.SECURITY_SERVICES:\n expected_calls.extend([mock.call(**ss), mock.call().to_dict()])\n mock_create_security_service.assert_has_calls(expected_calls)\n self.assertEqual(\n self.TENANTS_AMOUNT * len(self.SECURITY_SERVICES),\n mock_create_security_service.call_count)\n self.assertEqual(\n self.TENANTS_AMOUNT,\n len(inst.context[\"config\"][CONTEXT_NAME][\"security_services\"]))\n for 
tenant in inst.context[\"tenants\"]:\n self.assertEqual(\n self.TENANTS_AMOUNT,\n len(inst.context[\"tenants\"][tenant][CONTEXT_NAME][\n \"security_services\"])\n )\n\n @mock.patch.object(manila_security_services.manila_utils, \"ManilaScenario\")\n def test_setup_security_services_not_set(self, mock_manila_scenario):\n ctxt = self._get_context(security_services=[])\n inst = manila_security_services.SecurityServices(ctxt)\n\n inst.setup()\n\n self.assertFalse(mock_manila_scenario.called)\n self.assertFalse(\n mock_manila_scenario.return_value._create_security_service.called)\n self.assertIn(CONTEXT_NAME, inst.context[\"config\"])\n self.assertIn(\n \"security_services\", inst.context[\"config\"][CONTEXT_NAME])\n self.assertEqual(\n 0,\n len(inst.context[\"config\"][CONTEXT_NAME][\"security_services\"]))\n for tenant in inst.context[\"tenants\"]:\n self.assertEqual(\n 0,\n len(inst.context[\"tenants\"][tenant][CONTEXT_NAME][\n \"security_services\"])\n )\n\n @mock.patch.object(manila_security_services, \"resource_manager\")\n def test_cleanup_security_services_enabled(self, mock_resource_manager):\n ctxt = self._get_context()\n inst = manila_security_services.SecurityServices(ctxt)\n\n inst.cleanup()\n\n mock_resource_manager.cleanup.assert_called_once_with(\n names=[\"manila.security_services\"],\n users=ctxt[\"users\"],\n superclass=manila_utils.ManilaScenario,\n task_id=\"foo_uuid\")\n" }, { "alpha_fraction": 0.530219554901123, "alphanum_fraction": 0.5507474541664124, "avg_line_length": 44.39285659790039, "blob_id": "47efb6dcb51fa83aec048d967ed97a6047068f33", "content_id": "5c07dbf78d544b8e58909c201c5bce04b8ca86da", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13981, "license_type": "permissive", "max_line_length": 79, "num_lines": 308, "path": "/rally_openstack/common/cfg/nova.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2013: Mirantis 
Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.common import cfg\n\nOPTS = {\"openstack\": [\n # prepoll delay, timeout, poll interval\n # \"start\": (0, 300, 1)\n cfg.FloatOpt(\"nova_server_start_prepoll_delay\",\n default=0.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after start before polling for status\"),\n cfg.FloatOpt(\"nova_server_start_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server start timeout\"),\n cfg.FloatOpt(\"nova_server_start_poll_interval\",\n deprecated_group=\"benchmark\",\n default=1.0,\n help=\"Server start poll interval\"),\n # \"stop\": (0, 300, 2)\n cfg.FloatOpt(\"nova_server_stop_prepoll_delay\",\n default=0.0,\n help=\"Time to sleep after stop before polling for status\"),\n cfg.FloatOpt(\"nova_server_stop_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server stop timeout\"),\n cfg.FloatOpt(\"nova_server_stop_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server stop poll interval\"),\n # \"boot\": (1, 300, 1)\n cfg.FloatOpt(\"nova_server_boot_prepoll_delay\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after boot before polling for status\"),\n cfg.FloatOpt(\"nova_server_boot_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server boot timeout\"),\n cfg.FloatOpt(\"nova_server_boot_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n 
help=\"Server boot poll interval\"),\n # \"delete\": (2, 300, 2)\n cfg.FloatOpt(\"nova_server_delete_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after delete before polling for status\"),\n cfg.FloatOpt(\"nova_server_delete_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server delete timeout\"),\n cfg.FloatOpt(\"nova_server_delete_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server delete poll interval\"),\n # \"reboot\": (2, 300, 2)\n cfg.FloatOpt(\"nova_server_reboot_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after reboot before polling for status\"),\n cfg.FloatOpt(\"nova_server_reboot_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server reboot timeout\"),\n cfg.FloatOpt(\"nova_server_reboot_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server reboot poll interval\"),\n # \"rebuild\": (1, 300, 1)\n cfg.FloatOpt(\"nova_server_rebuild_prepoll_delay\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after rebuild before polling for status\"),\n cfg.FloatOpt(\"nova_server_rebuild_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server rebuild timeout\"),\n cfg.FloatOpt(\"nova_server_rebuild_poll_interval\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Server rebuild poll interval\"),\n # \"rescue\": (2, 300, 2)\n cfg.FloatOpt(\"nova_server_rescue_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after rescue before polling for status\"),\n cfg.FloatOpt(\"nova_server_rescue_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server rescue timeout\"),\n cfg.FloatOpt(\"nova_server_rescue_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server rescue poll interval\"),\n # \"unrescue\": (2, 300, 2)\n 
cfg.FloatOpt(\"nova_server_unrescue_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after unrescue \"\n \"before polling for status\"),\n cfg.FloatOpt(\"nova_server_unrescue_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server unrescue timeout\"),\n cfg.FloatOpt(\"nova_server_unrescue_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server unrescue poll interval\"),\n # \"suspend\": (2, 300, 2)\n cfg.FloatOpt(\"nova_server_suspend_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after suspend before polling for status\"),\n cfg.FloatOpt(\"nova_server_suspend_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server suspend timeout\"),\n cfg.FloatOpt(\"nova_server_suspend_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server suspend poll interval\"),\n # \"resume\": (2, 300, 2)\n cfg.FloatOpt(\"nova_server_resume_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after resume before polling for status\"),\n cfg.FloatOpt(\"nova_server_resume_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server resume timeout\"),\n cfg.FloatOpt(\"nova_server_resume_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server resume poll interval\"),\n # \"pause\": (2, 300, 2)\n cfg.FloatOpt(\"nova_server_pause_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after pause before polling for status\"),\n cfg.FloatOpt(\"nova_server_pause_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server pause timeout\"),\n cfg.FloatOpt(\"nova_server_pause_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server pause poll interval\"),\n # \"unpause\": (2, 300, 2)\n cfg.FloatOpt(\"nova_server_unpause_prepoll_delay\",\n default=2.0,\n 
deprecated_group=\"benchmark\",\n help=\"Time to sleep after unpause before polling for status\"),\n cfg.FloatOpt(\"nova_server_unpause_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server unpause timeout\"),\n cfg.FloatOpt(\"nova_server_unpause_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server unpause poll interval\"),\n # \"shelve\": (2, 300, 2)\n cfg.FloatOpt(\"nova_server_shelve_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after shelve before polling for status\"),\n cfg.FloatOpt(\"nova_server_shelve_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server shelve timeout\"),\n cfg.FloatOpt(\"nova_server_shelve_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server shelve poll interval\"),\n # \"unshelve\": (2, 300, 2)\n cfg.FloatOpt(\"nova_server_unshelve_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after unshelve before \"\n \"polling for status\"),\n cfg.FloatOpt(\"nova_server_unshelve_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server unshelve timeout\"),\n cfg.FloatOpt(\"nova_server_unshelve_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server unshelve poll interval\"),\n # \"image_create\": (0, 300, 2)\n cfg.FloatOpt(\"nova_server_image_create_prepoll_delay\",\n default=0.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after image_create before polling\"\n \" for status\"),\n cfg.FloatOpt(\"nova_server_image_create_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server image_create timeout\"),\n cfg.FloatOpt(\"nova_server_image_create_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server image_create poll interval\"),\n # \"image_delete\": (0, 300, 2)\n cfg.FloatOpt(\"nova_server_image_delete_prepoll_delay\",\n default=0.0,\n 
deprecated_group=\"benchmark\",\n help=\"Time to sleep after image_delete before polling\"\n \" for status\"),\n cfg.FloatOpt(\"nova_server_image_delete_timeout\",\n default=300.0,\n deprecated_group=\"benchmark\",\n help=\"Server image_delete timeout\"),\n cfg.FloatOpt(\"nova_server_image_delete_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server image_delete poll interval\"),\n # \"resize\": (2, 400, 5)\n cfg.FloatOpt(\"nova_server_resize_prepoll_delay\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after resize before polling for status\"),\n cfg.FloatOpt(\"nova_server_resize_timeout\",\n default=400.0,\n deprecated_group=\"benchmark\",\n help=\"Server resize timeout\"),\n cfg.FloatOpt(\"nova_server_resize_poll_interval\",\n default=4.0,\n deprecated_group=\"benchmark\",\n help=\"Server resize poll interval\"),\n # \"resize_confirm\": (0, 200, 2)\n cfg.FloatOpt(\"nova_server_resize_confirm_prepoll_delay\",\n default=0.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after resize_confirm before polling\"\n \" for status\"),\n cfg.FloatOpt(\"nova_server_resize_confirm_timeout\",\n default=200.0,\n deprecated_group=\"benchmark\",\n help=\"Server resize_confirm timeout\"),\n cfg.FloatOpt(\"nova_server_resize_confirm_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server resize_confirm poll interval\"),\n # \"resize_revert\": (0, 200, 2)\n cfg.FloatOpt(\"nova_server_resize_revert_prepoll_delay\",\n default=0.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after resize_revert before polling\"\n \" for status\"),\n cfg.FloatOpt(\"nova_server_resize_revert_timeout\",\n default=200.0,\n deprecated_group=\"benchmark\",\n help=\"Server resize_revert timeout\"),\n cfg.FloatOpt(\"nova_server_resize_revert_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server resize_revert poll interval\"),\n # \"live_migrate\": (1, 400, 2)\n 
cfg.FloatOpt(\"nova_server_live_migrate_prepoll_delay\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after live_migrate before polling\"\n \" for status\"),\n cfg.FloatOpt(\"nova_server_live_migrate_timeout\",\n default=400.0,\n deprecated_group=\"benchmark\",\n help=\"Server live_migrate timeout\"),\n cfg.FloatOpt(\"nova_server_live_migrate_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server live_migrate poll interval\"),\n # \"migrate\": (1, 400, 2)\n cfg.FloatOpt(\"nova_server_migrate_prepoll_delay\",\n default=1.0,\n deprecated_group=\"benchmark\",\n help=\"Time to sleep after migrate before polling for status\"),\n cfg.FloatOpt(\"nova_server_migrate_timeout\",\n default=400.0,\n deprecated_group=\"benchmark\",\n help=\"Server migrate timeout\"),\n cfg.FloatOpt(\"nova_server_migrate_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Server migrate poll interval\"),\n # \"detach\":\n cfg.FloatOpt(\"nova_detach_volume_timeout\",\n default=200.0,\n deprecated_group=\"benchmark\",\n help=\"Nova volume detach timeout\"),\n cfg.FloatOpt(\"nova_detach_volume_poll_interval\",\n default=2.0,\n deprecated_group=\"benchmark\",\n help=\"Nova volume detach poll interval\")\n]}\n" }, { "alpha_fraction": 0.7110389471054077, "alphanum_fraction": 0.7110389471054077, "avg_line_length": 27.090909957885742, "blob_id": "055309b837efda24fc8c4b044d2be9266c471eb7", "content_id": "bd150f846d73e0550fa5e6c18ce23542569db020", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 308, "license_type": "permissive", "max_line_length": 78, "num_lines": 11, "path": "/rally-jobs/extra/README.rst", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "Extra files\n===========\n\nAll files from this directory will be copy pasted to gates, so you are able to\nuse absolute path in rally tasks. 
Files will be in ~/.rally/extra/*\n\nmurano/ directory\n-----------------\n\nHere we have Murano applications that is used to prepare Murano context and\nto deploy environment." }, { "alpha_fraction": 0.5994409918785095, "alphanum_fraction": 0.6024223566055298, "avg_line_length": 46.07602310180664, "blob_id": "b18f90d33978a2144e2bd90d6a1e3b6e3d09eb66", "content_id": "df2bb3c74ee95f6258d173db2f59becf862f62b7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16100, "license_type": "permissive", "max_line_length": 78, "num_lines": 342, "path": "/tests/unit/task/scenarios/heat/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally import exceptions\n\nfrom rally_openstack.task.scenarios.heat import utils\nfrom tests.unit import test\n\nHEAT_UTILS = \"rally_openstack.task.scenarios.heat.utils\"\n\nCONF = utils.CONF\n\n\nclass HeatScenarioTestCase(test.ScenarioTestCase):\n def setUp(self):\n super(HeatScenarioTestCase, self).setUp()\n self.stack = mock.Mock()\n self.scenario = utils.HeatScenario(self.context)\n self.default_template = \"heat_template_version: 2013-05-23\"\n self.dummy_parameters = {\"dummy_param\": \"dummy_key\"}\n self.dummy_files = [\"dummy_file.yaml\"]\n self.dummy_environment = {\"dummy_env\": \"dummy_env_value\"}\n self.default_output_key = \"dummy_output_key\"\n\n def test_list_stacks(self):\n scenario = utils.HeatScenario(self.context)\n return_stacks_list = scenario._list_stacks()\n self.clients(\"heat\").stacks.list.assert_called_once_with()\n self.assertEqual(list(self.clients(\"heat\").stacks.list.return_value),\n return_stacks_list)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.list_stacks\")\n\n def test_create_stack(self):\n self.clients(\"heat\").stacks.create.return_value = {\n \"stack\": {\"id\": \"test_id\"}\n }\n self.clients(\"heat\").stacks.get.return_value = self.stack\n return_stack = self.scenario._create_stack(self.default_template,\n self.dummy_parameters,\n self.dummy_files,\n self.dummy_environment)\n args, kwargs = self.clients(\"heat\").stacks.create.call_args\n self.assertIn(self.dummy_parameters, kwargs.values())\n self.assertIn(self.default_template, kwargs.values())\n self.assertIn(self.dummy_files, kwargs.values())\n self.assertIn(self.dummy_environment, kwargs.values())\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.stack,\n update_resource=self.mock_get_from_manager.mock.return_value,\n ready_statuses=[\"CREATE_COMPLETE\"],\n 
failure_statuses=[\"CREATE_FAILED\", \"ERROR\"],\n check_interval=CONF.openstack.heat_stack_create_poll_interval,\n timeout=CONF.openstack.heat_stack_create_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self.assertEqual(self.mock_wait_for_status.mock.return_value,\n return_stack)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"heat.create_stack\")\n\n def test_update_stack(self):\n self.clients(\"heat\").stacks.update.return_value = None\n scenario = utils.HeatScenario(self.context)\n scenario._update_stack(self.stack, self.default_template,\n self.dummy_parameters, self.dummy_files,\n self.dummy_environment)\n args, kwargs = self.clients(\"heat\").stacks.update.call_args\n self.assertIn(self.dummy_parameters, kwargs.values())\n self.assertIn(self.default_template, kwargs.values())\n self.assertIn(self.dummy_files, kwargs.values())\n self.assertIn(self.dummy_environment, kwargs.values())\n self.assertIn(self.stack.id, args)\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.stack,\n update_resource=self.mock_get_from_manager.mock.return_value,\n ready_statuses=[\"UPDATE_COMPLETE\"],\n failure_statuses=[\"UPDATE_FAILED\", \"ERROR\"],\n check_interval=CONF.openstack.heat_stack_update_poll_interval,\n timeout=CONF.openstack.heat_stack_update_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.update_stack\")\n\n def test_check_stack(self):\n scenario = utils.HeatScenario(self.context)\n scenario._check_stack(self.stack)\n self.clients(\"heat\").actions.check.assert_called_once_with(\n self.stack.id)\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.stack,\n update_resource=self.mock_get_from_manager.mock.return_value,\n ready_statuses=[\"CHECK_COMPLETE\"],\n failure_statuses=[\"CHECK_FAILED\", \"ERROR\"],\n check_interval=CONF.openstack.heat_stack_check_poll_interval,\n 
timeout=CONF.openstack.heat_stack_check_timeout)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.check_stack\")\n\n def test_delete_stack(self):\n scenario = utils.HeatScenario(self.context)\n scenario._delete_stack(self.stack)\n self.stack.delete.assert_called_once_with()\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.stack,\n ready_statuses=[\"DELETE_COMPLETE\"],\n failure_statuses=[\"DELETE_FAILED\", \"ERROR\"],\n check_deletion=True,\n update_resource=self.mock_get_from_manager.mock.return_value,\n check_interval=CONF.openstack.heat_stack_delete_poll_interval,\n timeout=CONF.openstack.heat_stack_delete_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.delete_stack\")\n\n def test_suspend_stack(self):\n scenario = utils.HeatScenario(self.context)\n scenario._suspend_stack(self.stack)\n self.clients(\"heat\").actions.suspend.assert_called_once_with(\n self.stack.id)\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.stack,\n update_resource=self.mock_get_from_manager.mock.return_value,\n ready_statuses=[\"SUSPEND_COMPLETE\"],\n failure_statuses=[\"SUSPEND_FAILED\", \"ERROR\"],\n check_interval=CONF.openstack.heat_stack_suspend_poll_interval,\n timeout=CONF.openstack.heat_stack_suspend_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.suspend_stack\")\n\n def test_resume_stack(self):\n scenario = utils.HeatScenario(self.context)\n scenario._resume_stack(self.stack)\n self.clients(\"heat\").actions.resume.assert_called_once_with(\n self.stack.id)\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.stack,\n update_resource=self.mock_get_from_manager.mock.return_value,\n ready_statuses=[\"RESUME_COMPLETE\"],\n failure_statuses=[\"RESUME_FAILED\", \"ERROR\"],\n check_interval=CONF.openstack.heat_stack_resume_poll_interval,\n 
timeout=CONF.openstack.heat_stack_resume_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.resume_stack\")\n\n def test_snapshot_stack(self):\n scenario = utils.HeatScenario(self.context)\n scenario._snapshot_stack(self.stack)\n self.clients(\"heat\").stacks.snapshot.assert_called_once_with(\n self.stack.id)\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.stack,\n update_resource=self.mock_get_from_manager.mock.return_value,\n ready_statuses=[\"SNAPSHOT_COMPLETE\"],\n failure_statuses=[\"SNAPSHOT_FAILED\", \"ERROR\"],\n check_interval=CONF.openstack.heat_stack_snapshot_poll_interval,\n timeout=CONF.openstack.heat_stack_snapshot_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.snapshot_stack\")\n\n def test_restore_stack(self):\n scenario = utils.HeatScenario(self.context)\n scenario._restore_stack(self.stack, \"dummy_id\")\n self.clients(\"heat\").stacks.restore.assert_called_once_with(\n self.stack.id, \"dummy_id\")\n self.mock_wait_for_status.mock.assert_called_once_with(\n self.stack,\n update_resource=self.mock_get_from_manager.mock.return_value,\n ready_statuses=[\"RESTORE_COMPLETE\"],\n failure_statuses=[\"RESTORE_FAILED\", \"ERROR\"],\n check_interval=CONF.openstack.heat_stack_restore_poll_interval,\n timeout=CONF.openstack.heat_stack_restore_timeout)\n self.mock_get_from_manager.mock.assert_called_once_with()\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.restore_stack\")\n\n def test__count_instances(self):\n self.clients(\"heat\").resources.list.return_value = [\n mock.Mock(resource_type=\"OS::Nova::Server\"),\n mock.Mock(resource_type=\"OS::Nova::Server\"),\n mock.Mock(resource_type=\"OS::Heat::AutoScalingGroup\")]\n scenario = utils.HeatScenario(self.context)\n self.assertEqual(scenario._count_instances(self.stack), 2)\n 
self.clients(\"heat\").resources.list.assert_called_once_with(\n self.stack.id,\n nested_depth=1)\n\n def test__scale_stack(self):\n scenario = utils.HeatScenario(self.context)\n scenario._count_instances = mock.Mock(side_effect=[3, 3, 2])\n scenario._stack_webhook = mock.Mock()\n\n scenario._scale_stack(self.stack, \"test_output_key\", -1)\n\n scenario._stack_webhook.assert_called_once_with(self.stack,\n \"test_output_key\")\n self.mock_wait_for.mock.assert_called_once_with(\n self.stack,\n is_ready=mock.ANY,\n failure_statuses=[\"UPDATE_FAILED\", \"ERROR\"],\n update_resource=self.mock_get_from_manager.mock.return_value,\n timeout=CONF.openstack.heat_stack_scale_timeout,\n check_interval=CONF.openstack.heat_stack_scale_poll_interval)\n self.mock_get_from_manager.mock.assert_called_once_with()\n\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.scale_with_test_output_key\")\n\n @mock.patch(\"requests.post\")\n def test_stack_webhook(self, mock_post):\n env_context = {\n \"env\": {\n \"spec\": {\n \"existing@openstack\": {\n \"https_cacert\": \"cacert.crt\",\n \"https_insecure\": False\n }\n }\n }\n }\n env_context.update(self.context)\n scenario = utils.HeatScenario(env_context)\n stack = mock.Mock(outputs=[\n {\"output_key\": \"output1\", \"output_value\": \"url1\"},\n {\"output_key\": \"output2\", \"output_value\": \"url2\"}])\n\n scenario._stack_webhook(stack, \"output1\")\n mock_post.assert_called_with(\"url1\", verify=\"cacert.crt\")\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.output1_webhook\")\n\n @mock.patch(\"requests.post\")\n def test_stack_webhook_insecure(self, mock_post):\n env_context = {\n \"env\": {\n \"spec\": {\n \"existing@openstack\": {\n \"https_cacert\": \"cacert.crt\",\n \"https_insecure\": True\n }\n }\n }\n }\n env_context.update(self.context)\n scenario = utils.HeatScenario(env_context)\n stack = mock.Mock(outputs=[\n {\"output_key\": \"output1\", \"output_value\": \"url1\"},\n {\"output_key\": 
\"output2\", \"output_value\": \"url2\"}])\n\n scenario._stack_webhook(stack, \"output1\")\n mock_post.assert_called_with(\"url1\", verify=False)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.output1_webhook\")\n\n @mock.patch(\"requests.post\")\n def test_stack_webhook_invalid_output_key(self, mock_post):\n scenario = utils.HeatScenario(self.context)\n stack = mock.Mock()\n stack.outputs = [{\"output_key\": \"output1\", \"output_value\": \"url1\"},\n {\"output_key\": \"output2\", \"output_value\": \"url2\"}]\n\n self.assertRaises(exceptions.InvalidConfigException,\n scenario._stack_webhook, stack, \"bogus\")\n\n def test_stack_show_output(self):\n scenario = utils.HeatScenario(self.context)\n scenario._stack_show_output(self.stack, self.default_output_key)\n self.clients(\"heat\").stacks.output_show.assert_called_once_with(\n self.stack.id, self.default_output_key)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.show_output\")\n\n def test_stack_show_output_via_API(self):\n scenario = utils.HeatScenario(self.context)\n scenario._stack_show_output_via_API(\n self.stack, self.default_output_key)\n self.clients(\"heat\").stacks.get.assert_called_once_with(\n stack_id=self.stack.id)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.show_output_via_API\")\n\n def test_stack_list_output(self):\n scenario = utils.HeatScenario(self.context)\n scenario._stack_list_output(self.stack)\n self.clients(\"heat\").stacks.output_list.assert_called_once_with(\n self.stack.id)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.list_output\")\n\n def test_stack_list_output_via_API(self):\n scenario = utils.HeatScenario(self.context)\n scenario._stack_list_output_via_API(self.stack)\n self.clients(\"heat\").stacks.get.assert_called_once_with(\n stack_id=self.stack.id)\n self._test_atomic_action_timer(scenario.atomic_actions(),\n \"heat.list_output_via_API\")\n\n\nclass 
HeatScenarioNegativeTestCase(test.ScenarioTestCase):\n patch_task_utils = False\n\n def test_failed_create_stack(self):\n self.clients(\"heat\").stacks.create.return_value = {\n \"stack\": {\"id\": \"test_id\"}\n }\n stack = mock.Mock()\n resource = mock.Mock()\n resource.stack_status = \"CREATE_FAILED\"\n stack.manager.get.return_value = resource\n self.clients(\"heat\").stacks.get.return_value = stack\n scenario = utils.HeatScenario(context=self.context)\n ex = self.assertRaises(exceptions.GetResourceErrorStatus,\n scenario._create_stack, \"stack_name\")\n self.assertIn(\"has CREATE_FAILED status\", str(ex))\n\n def test_failed_update_stack(self):\n stack = mock.Mock()\n resource = mock.Mock()\n resource.stack_status = \"UPDATE_FAILED\"\n stack.manager.get.return_value = resource\n self.clients(\"heat\").stacks.get.return_value = stack\n scenario = utils.HeatScenario(context=self.context)\n ex = self.assertRaises(exceptions.GetResourceErrorStatus,\n scenario._update_stack, stack,\n \"heat_template_version: 2013-05-23\")\n self.assertIn(\"has UPDATE_FAILED status\", str(ex))\n" }, { "alpha_fraction": 0.6175073981285095, "alphanum_fraction": 0.6186943650245667, "avg_line_length": 35.2365608215332, "blob_id": "caebf41d78ca342608e21fbecac93c2e74ef36de", "content_id": "8449c8e7aaa1a987fcfbb5fdb63d6b0fa92faac5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3370, "license_type": "permissive", "max_line_length": 78, "num_lines": 93, "path": "/rally_openstack/common/services/image/glance_common.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally import exceptions\nfrom rally.task import atomic\n\nfrom rally_openstack.common.services.image import image as image_service\n\n\nclass GlanceMixin(object):\n\n def _get_client(self):\n return self._clients.glance(self.version)\n\n def get_image(self, image):\n \"\"\"Get specified image.\n\n :param image: ID or object with ID of image to obtain.\n \"\"\"\n from glanceclient import exc as glance_exc\n\n image_id = getattr(image, \"id\", image)\n try:\n aname = \"glance_v%s.get_image\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().images.get(image_id)\n except glance_exc.HTTPNotFound:\n raise exceptions.GetResourceNotFound(resource=image)\n\n def delete_image(self, image_id):\n \"\"\"Delete image.\"\"\"\n aname = \"glance_v%s.delete_image\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().images.delete(image_id)\n\n def download_image(self, image_id, do_checksum=True):\n \"\"\"Retrieve data of an image.\n\n :param image_id: ID of the image to download.\n :param do_checksum: Enable/disable checksum validation.\n :returns: An iterable body or None\n \"\"\"\n aname = \"glance_v%s.download_image\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().images.data(image_id,\n do_checksum=do_checksum)\n\n\nclass UnifiedGlanceMixin(object):\n\n @staticmethod\n def _unify_image(image):\n if hasattr(image, \"visibility\"):\n return image_service.UnifiedImage(id=image.id, name=image.name,\n status=image.status,\n visibility=image.visibility)\n else:\n return 
image_service.UnifiedImage(\n id=image.id, name=image.name,\n status=image.status,\n visibility=(\"public\" if image.is_public else \"private\"))\n\n def get_image(self, image):\n \"\"\"Get specified image.\n\n :param image: ID or object with ID of image to obtain.\n \"\"\"\n image_obj = self._impl.get_image(image=image)\n return self._unify_image(image_obj)\n\n def delete_image(self, image_id):\n \"\"\"Delete image.\"\"\"\n self._impl.delete_image(image_id=image_id)\n\n def download_image(self, image_id, do_checksum=True):\n \"\"\"Download data for an image.\n\n :param image_id: image id to look up\n :param do_checksum: Enable/disable checksum validation\n :rtype: iterable containing image data or None\n \"\"\"\n return self._impl.download_image(image_id, do_checksum=do_checksum)\n" }, { "alpha_fraction": 0.5332034826278687, "alphanum_fraction": 0.5365141034126282, "avg_line_length": 35.810035705566406, "blob_id": "00130596e0485956298776bfc17005eac549a3be", "content_id": "85b3d81e8050f4dc28fe5e495270c56e4d92d48f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10270, "license_type": "permissive", "max_line_length": 77, "num_lines": 279, "path": "/tests/unit/task/contexts/glance/test_images.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport copy\nfrom unittest import mock\n\nimport ddt\n\nfrom rally_openstack.task.contexts.glance import images\nfrom tests.unit import test\n\nCTX = \"rally_openstack.task.contexts.glance.images\"\nSCN = \"rally_openstack.task.scenarios.glance\"\n\n\[email protected]\nclass ImageGeneratorTestCase(test.ScenarioTestCase):\n\n tenants_num = 1\n users_per_tenant = 5\n users_num = tenants_num * users_per_tenant\n threads = 10\n\n def setUp(self):\n super(ImageGeneratorTestCase, self).setUp()\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": self.tenants_num,\n \"users_per_tenant\": self.users_per_tenant,\n \"resource_management_workers\": self.threads,\n }\n },\n \"admin\": {\"credential\": mock.MagicMock()},\n \"users\": [],\n \"task\": {\"uuid\": \"task_id\"}\n })\n patch = mock.patch(\n \"rally_openstack.common.services.image.image.Image\")\n self.addCleanup(patch.stop)\n self.mock_image = patch.start()\n\n def _gen_tenants(self, count):\n tenants = {}\n for id_ in range(count):\n tenants[str(id_)] = {\"name\": str(id_)}\n return tenants\n\n @ddt.data(\n {},\n {\"min_disk\": 1, \"min_ram\": 2},\n {\"image_name\": \"foo\"},\n {\"tenants\": 3, \"users_per_tenant\": 2, \"images_per_tenant\": 5})\n @ddt.unpack\n @mock.patch(\"rally_openstack.common.osclients.Clients\")\n def test_setup(self, mock_clients,\n container_format=\"bare\", disk_format=\"qcow2\",\n image_url=\"http://example.com/fake/url\",\n tenants=1, users_per_tenant=1, images_per_tenant=1,\n image_name=None, min_ram=None, min_disk=None,\n visibility=\"public\"):\n image_service = self.mock_image.return_value\n\n tenant_data = self._gen_tenants(tenants)\n users = []\n for tenant_id in tenant_data:\n for i in range(users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": tenant_id,\n \"credential\": mock.MagicMock()})\n\n self.context.update({\n \"config\": {\n \"users\": {\n 
\"tenants\": tenants,\n \"users_per_tenant\": users_per_tenant,\n \"concurrent\": 10,\n },\n \"images\": {\n \"image_url\": image_url,\n \"disk_format\": disk_format,\n \"container_format\": container_format,\n \"images_per_tenant\": images_per_tenant,\n \"visibility\": visibility,\n }\n },\n \"admin\": {\n \"credential\": mock.MagicMock()\n },\n \"users\": users,\n \"tenants\": tenant_data\n })\n\n expected_image_args = {}\n if image_name is not None:\n self.context[\"config\"][\"images\"][\"image_name\"] = image_name\n if min_ram is not None:\n self.context[\"config\"][\"images\"][\"min_ram\"] = min_ram\n expected_image_args[\"min_ram\"] = min_ram\n if min_disk is not None:\n self.context[\"config\"][\"images\"][\"min_disk\"] = min_disk\n expected_image_args[\"min_disk\"] = min_disk\n\n new_context = copy.deepcopy(self.context)\n for tenant_id in new_context[\"tenants\"].keys():\n new_context[\"tenants\"][tenant_id][\"images\"] = [\n image_service.create_image.return_value.id\n ] * images_per_tenant\n\n images_ctx = images.ImageGenerator(self.context)\n images_ctx.setup()\n self.assertEqual(new_context, self.context)\n\n wrapper_calls = []\n wrapper_calls.extend([mock.call(mock_clients.return_value.glance,\n images_ctx)] * tenants)\n wrapper_calls.extend(\n [mock.call().create_image(\n container_format, image_url, disk_format,\n name=mock.ANY, **expected_image_args)]\n * tenants * images_per_tenant)\n\n mock_clients.assert_has_calls([mock.call(mock.ANY)] * tenants)\n\n @mock.patch(\"%s.image.Image\" % CTX)\n @mock.patch(\"%s.LOG\" % CTX)\n def test_setup_with_deprecated_args(self, mock_log, mock_image):\n image_type = \"itype\"\n image_container = \"icontainer\"\n is_public = True\n d_min_ram = mock.Mock()\n d_min_disk = mock.Mock()\n self.context.update({\n \"config\": {\n \"images\": {\"image_type\": image_type,\n \"image_container\": image_container,\n \"image_args\": {\"is_public\": is_public,\n \"min_ram\": d_min_ram,\n \"min_disk\": d_min_disk}}\n },\n 
\"users\": [{\"tenant_id\": \"foo-tenant\",\n \"credential\": mock.MagicMock()}],\n \"tenants\": {\"foo-tenant\": {}}\n })\n images_ctx = images.ImageGenerator(self.context)\n images_ctx.setup()\n\n mock_image.return_value.create_image.assert_called_once_with(\n image_name=None,\n container_format=image_container,\n image_location=None,\n disk_format=image_type,\n visibility=\"public\",\n min_disk=d_min_disk,\n min_ram=d_min_ram\n )\n expected_warns = [\n mock.call(\"The 'image_type' argument is deprecated since \"\n \"Rally 0.10.0, use disk_format argument instead\"),\n mock.call(\"The 'image_container' argument is deprecated since \"\n \"Rally 0.10.0; use container_format argument instead\"),\n mock.call(\"The 'image_args' argument is deprecated since \"\n \"Rally 0.10.0; specify arguments in a root \"\n \"section of context instead\")]\n\n self.assertEqual(expected_warns, mock_log.warning.call_args_list)\n\n mock_image.return_value.create_image.reset_mock()\n mock_log.warning.reset_mock()\n\n min_ram = mock.Mock()\n min_disk = mock.Mock()\n visibility = \"foo\"\n disk_format = \"dformat\"\n container_format = \"cformat\"\n\n self.context[\"config\"][\"images\"].update({\n \"min_ram\": min_ram,\n \"min_disk\": min_disk,\n \"visibility\": visibility,\n \"disk_format\": disk_format,\n \"container_format\": container_format\n })\n\n images_ctx = images.ImageGenerator(self.context)\n images_ctx.setup()\n\n # check that deprecated arguments are not used\n mock_image.return_value.create_image.assert_called_once_with(\n image_name=None,\n container_format=container_format,\n image_location=None,\n disk_format=disk_format,\n visibility=visibility,\n min_disk=min_disk,\n min_ram=min_ram\n )\n # No matter will be deprecated arguments used or not, if they are\n # specified, warning message should be printed.\n self.assertEqual(expected_warns, mock_log.warning.call_args_list)\n\n @ddt.data({\"admin\": True})\n @ddt.unpack\n @mock.patch(\"%s.resource_manager.cleanup\" % 
CTX)\n def test_cleanup(self, mock_cleanup, admin=None):\n images_per_tenant = 5\n\n tenants = self._gen_tenants(self.tenants_num)\n users = []\n created_images = []\n for tenant_id in tenants:\n for i in range(self.users_per_tenant):\n users.append({\"id\": i, \"tenant_id\": tenant_id,\n \"credential\": mock.MagicMock()})\n tenants[tenant_id].setdefault(\"images\", [])\n for j in range(images_per_tenant):\n image = mock.Mock()\n created_images.append(image)\n tenants[tenant_id][\"images\"].append(image)\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": self.tenants_num,\n \"users_per_tenant\": self.users_per_tenant,\n \"concurrent\": 10,\n },\n \"images\": {}\n },\n \"users\": mock.Mock()\n })\n\n if admin:\n self.context[\"admin\"] = {\"credential\": mock.MagicMock()}\n else:\n # ensure that there is no admin\n self.context.pop(\"admin\")\n\n images_ctx = images.ImageGenerator(self.context)\n images_ctx.cleanup()\n mock_cleanup.assert_called_once_with(\n names=[\"glance.images\", \"cinder.image_volumes_cache\"],\n admin=self.context.get(\"admin\"),\n admin_required=None if admin else False,\n users=self.context[\"users\"],\n superclass=images_ctx.__class__,\n task_id=self.context[\"owner_id\"])\n\n @mock.patch(\"%s.rutils.make_name_matcher\" % CTX)\n @mock.patch(\"%s.resource_manager.cleanup\" % CTX)\n def test_cleanup_for_predefined_name(self, mock_cleanup,\n mock_make_name_matcher):\n self.context.update({\n \"config\": {\n \"images\": {\"image_name\": \"foo\"}\n },\n \"users\": mock.Mock()\n })\n\n images_ctx = images.ImageGenerator(self.context)\n images_ctx.cleanup()\n mock_cleanup.assert_called_once_with(\n names=[\"glance.images\", \"cinder.image_volumes_cache\"],\n admin=self.context.get(\"admin\"),\n admin_required=None,\n users=self.context[\"users\"],\n superclass=mock_make_name_matcher.return_value,\n task_id=self.context[\"owner_id\"])\n" }, { "alpha_fraction": 0.5509025454521179, "alphanum_fraction": 0.5541516542434692, 
"avg_line_length": 36.43243408203125, "blob_id": "9f0fd72ba0b57c3932d75705bcd241d04a29df3c", "content_id": "833ce31db8c494c130ed95616f1247a190e52447", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5540, "license_type": "permissive", "max_line_length": 79, "num_lines": 148, "path": "/tests/unit/task/contexts/sahara/test_sahara_cluster.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom rally.common import cfg\nfrom rally import exceptions\nfrom rally_openstack.task.contexts.sahara import sahara_cluster\nfrom rally_openstack.task.scenarios.sahara import utils as sahara_utils\nfrom tests.unit import test\n\nCONF = cfg.CONF\n\nCTX = \"rally_openstack.task.contexts.sahara\"\n\n\nclass SaharaClusterTestCase(test.ScenarioTestCase):\n\n patch_task_utils = False\n\n def setUp(self):\n super(SaharaClusterTestCase, self).setUp()\n self.tenants_num = 2\n self.users_per_tenant = 2\n self.users = self.tenants_num * self.users_per_tenant\n\n self.tenants = {}\n self.users_key = []\n\n for i in range(self.tenants_num):\n self.tenants[str(i)] = {\"id\": str(i), \"name\": str(i),\n \"sahara\": {\"image\": \"42\"}}\n for j in range(self.users_per_tenant):\n self.users_key.append({\"id\": \"%s_%s\" % (str(i), str(j)),\n \"tenant_id\": str(i),\n \"credential\": 
mock.MagicMock()})\n\n CONF.set_override(\"sahara_cluster_check_interval\", 0, \"openstack\")\n\n self.context.update({\n \"config\": {\n \"users\": {\n \"tenants\": self.tenants_num,\n \"users_per_tenant\": self.users_per_tenant\n },\n \"sahara_cluster\": {\n \"master_flavor_id\": \"test_flavor_m\",\n \"worker_flavor_id\": \"test_flavor_w\",\n \"workers_count\": 2,\n \"plugin_name\": \"test_plugin\",\n \"hadoop_version\": \"test_version\"\n }\n },\n \"admin\": {\"credential\": mock.MagicMock()},\n \"users\": self.users_key,\n \"tenants\": self.tenants\n })\n\n @mock.patch(\"%s.sahara_cluster.resource_manager.cleanup\" % CTX)\n @mock.patch(\"%s.sahara_cluster.utils.SaharaScenario._launch_cluster\" % CTX,\n return_value=mock.MagicMock(id=42))\n def test_setup_and_cleanup(self, mock_sahara_scenario__launch_cluster,\n mock_cleanup):\n sahara_ctx = sahara_cluster.SaharaCluster(self.context)\n\n launch_cluster_calls = []\n\n for i in self.tenants:\n launch_cluster_calls.append(mock.call(\n flavor_id=None,\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n master_flavor_id=\"test_flavor_m\",\n worker_flavor_id=\"test_flavor_w\",\n workers_count=2,\n image_id=self.context[\"tenants\"][i][\"sahara\"][\"image\"],\n floating_ip_pool=None,\n volumes_per_node=None,\n volumes_size=1,\n auto_security_group=True,\n security_groups=None,\n node_configs=None,\n cluster_configs=None,\n enable_anti_affinity=False,\n enable_proxy=False,\n wait_active=False,\n use_autoconfig=True\n ))\n\n self.clients(\"sahara\").clusters.get.side_effect = [\n mock.MagicMock(status=\"not-active\"),\n mock.MagicMock(status=\"active\")]\n sahara_ctx.setup()\n\n mock_sahara_scenario__launch_cluster.assert_has_calls(\n launch_cluster_calls)\n sahara_ctx.cleanup()\n mock_cleanup.assert_called_once_with(\n names=[\"sahara.clusters\"],\n users=self.context[\"users\"],\n superclass=sahara_utils.SaharaScenario,\n task_id=self.context[\"owner_id\"])\n\n 
@mock.patch(\"%s.sahara_cluster.utils.SaharaScenario._launch_cluster\" % CTX,\n return_value=mock.MagicMock(id=42))\n def test_setup_and_cleanup_error(self,\n mock_sahara_scenario__launch_cluster):\n sahara_ctx = sahara_cluster.SaharaCluster(self.context)\n\n launch_cluster_calls = []\n\n for i in self.tenants:\n launch_cluster_calls.append(mock.call(\n flavor_id=None,\n plugin_name=\"test_plugin\",\n hadoop_version=\"test_version\",\n master_flavor_id=\"test_flavor_m\",\n worker_flavor_id=\"test_flavor_w\",\n workers_count=2,\n image_id=self.context[\"tenants\"][i][\"sahara\"][\"image\"],\n floating_ip_pool=None,\n volumes_per_node=None,\n volumes_size=1,\n auto_security_groups=True,\n security_groups=None,\n node_configs=None,\n cluster_configs=None,\n wait_active=False,\n use_autoconfig=True\n ))\n\n self.clients(\"sahara\").clusters.get.side_effect = [\n mock.MagicMock(status=\"not-active\"),\n mock.MagicMock(status=\"error\")\n ]\n\n self.assertRaises(exceptions.ContextSetupFailure, sahara_ctx.setup)\n" }, { "alpha_fraction": 0.626541256904602, "alphanum_fraction": 0.6272718906402588, "avg_line_length": 42.62151336669922, "blob_id": "b8450780156c66b263884badeb999c36965dd2a9", "content_id": "c80a4788ea57e21e0fbcf51165847e93c635fe58", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10949, "license_type": "permissive", "max_line_length": 78, "num_lines": 251, "path": "/rally_openstack/task/scenarios/octavia/loadbalancers.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# Copyright 2018: Red Hat Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rally.task import validation\n\nfrom rally_openstack.common import consts\nfrom rally_openstack.task import scenario\nfrom rally_openstack.task.scenarios.octavia import utils as octavia_utils\n\n\"\"\"Scenarios for Octavia Loadbalancer.\"\"\"\n\n\[email protected](\"required_services\", services=[consts.Service.OCTAVIA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](context={\"cleanup@openstack\": [\"octavia\"]},\n name=\"Octavia.create_and_list_loadbalancers\",\n platform=\"openstack\")\nclass CreateAndListLoadbalancers(octavia_utils.OctaviaBase):\n\n def run(self, description=None, admin_state=True,\n listeners=None, flavor_id=None, provider=None,\n vip_qos_policy_id=None):\n \"\"\"Create a loadbalancer per each subnet and then list loadbalancers.\n\n :param description: Human-readable description of the loadbalancer\n :param admin_state: The administrative state of the loadbalancer,\n which is up(true) or down(false)\n :param listeners: The associated listener id, if any\n :param flavor_id: The ID of the flavor\n :param provider: Provider name for the loadbalancer\n :param vip_qos_policy_id: The ID of the QoS policy\n \"\"\"\n subnets = []\n loadbalancers = []\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n project_id = self.context[\"tenant\"][\"id\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet_id in subnets:\n lb = self.octavia.load_balancer_create(\n 
subnet_id=subnet_id,\n description=description,\n admin_state=admin_state,\n project_id=project_id,\n listeners=listeners,\n flavor_id=flavor_id,\n provider=provider,\n vip_qos_policy_id=vip_qos_policy_id)\n loadbalancers.append(lb)\n\n for loadbalancer in loadbalancers:\n self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)\n self.octavia.load_balancer_list()\n\n\[email protected](\"required_services\", services=[consts.Service.OCTAVIA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](context={\"cleanup@openstack\": [\"octavia\"]},\n name=\"Octavia.create_and_delete_loadbalancers\",\n platform=\"openstack\")\nclass CreateAndDeleteLoadbalancers(octavia_utils.OctaviaBase):\n\n def run(self, description=None, admin_state=True,\n listeners=None, flavor_id=None, provider=None,\n vip_qos_policy_id=None):\n \"\"\"Create a loadbalancer per each subnet and then delete loadbalancer\n\n :param description: Human-readable description of the loadbalancer\n :param admin_state: The administrative state of the loadbalancer,\n which is up(true) or down(false)\n :param listeners: The associated listener id, if any\n :param flavor_id: The ID of the flavor\n :param provider: Provider name for the loadbalancer\n :param vip_qos_policy_id: The ID of the QoS policy\n \"\"\"\n subnets = []\n loadbalancers = []\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n project_id = self.context[\"tenant\"][\"id\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet_id in subnets:\n lb = self.octavia.load_balancer_create(\n subnet_id=subnet_id,\n description=description,\n admin_state=admin_state,\n project_id=project_id,\n listeners=listeners,\n flavor_id=flavor_id,\n provider=provider,\n vip_qos_policy_id=vip_qos_policy_id)\n loadbalancers.append(lb)\n\n for loadbalancer in loadbalancers:\n 
self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)\n self.octavia.load_balancer_delete(\n loadbalancer[\"id\"])\n\n\[email protected](\"required_services\", services=[consts.Service.OCTAVIA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](context={\"cleanup@openstack\": [\"octavia\"]},\n name=\"Octavia.create_and_update_loadbalancers\",\n platform=\"openstack\")\nclass CreateAndUpdateLoadBalancers(octavia_utils.OctaviaBase):\n\n def run(self, description=None, admin_state=True,\n listeners=None, flavor_id=None, provider=None,\n vip_qos_policy_id=None):\n \"\"\"Create a loadbalancer per each subnet and then update\n\n :param description: Human-readable description of the loadbalancer\n :param admin_state: The administrative state of the loadbalancer,\n which is up(true) or down(false)\n :param listeners: The associated listener id, if any\n :param flavor_id: The ID of the flavor\n :param provider: Provider name for the loadbalancer\n :param vip_qos_policy_id: The ID of the QoS policy\n \"\"\"\n subnets = []\n loadbalancers = []\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n project_id = self.context[\"tenant\"][\"id\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet_id in subnets:\n lb = self.octavia.load_balancer_create(\n subnet_id=subnet_id,\n description=description,\n admin_state=admin_state,\n project_id=project_id,\n listeners=listeners,\n flavor_id=flavor_id,\n provider=provider,\n vip_qos_policy_id=vip_qos_policy_id)\n loadbalancers.append(lb)\n\n update_loadbalancer = {\n \"name\": self.generate_random_name()\n }\n\n for loadbalancer in loadbalancers:\n self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)\n self.octavia.load_balancer_set(\n lb_id=loadbalancer[\"id\"],\n lb_update_args=update_loadbalancer)\n\n\[email protected](\"required_services\", 
services=[consts.Service.OCTAVIA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](context={\"cleanup@openstack\": [\"octavia\"]},\n name=\"Octavia.create_and_stats_loadbalancers\",\n platform=\"openstack\")\nclass CreateAndShowStatsLoadBalancers(octavia_utils.OctaviaBase):\n\n def run(self, description=None, admin_state=True,\n listeners=None, flavor_id=None, provider=None,\n vip_qos_policy_id=None):\n \"\"\"Create a loadbalancer per each subnet and stats\n\n :param description: Human-readable description of the loadbalancer\n :param admin_state: The administrative state of the loadbalancer,\n which is up(true) or down(false)\n :param listeners: The associated listener id, if any\n :param flavor_id: The ID of the flavor\n :param provider: Provider name for the loadbalancer\n :param vip_qos_policy_id: The ID of the QoS policy\n \"\"\"\n subnets = []\n loadbalancers = []\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n project_id = self.context[\"tenant\"][\"id\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet_id in subnets:\n lb = self.octavia.load_balancer_create(\n subnet_id=subnet_id,\n description=description,\n admin_state=admin_state,\n project_id=project_id,\n listeners=listeners,\n flavor_id=flavor_id,\n provider=provider,\n vip_qos_policy_id=vip_qos_policy_id)\n loadbalancers.append(lb)\n\n for loadbalancer in loadbalancers:\n self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)\n self.octavia.load_balancer_stats_show(\n loadbalancer[\"id\"])\n\n\[email protected](\"required_services\", services=[consts.Service.OCTAVIA])\[email protected](\"required_platform\", platform=\"openstack\", users=True)\[email protected](\"required_contexts\", contexts=[\"network\"])\[email protected](context={\"cleanup@openstack\": [\"octavia\"]},\n name=\"Octavia.create_and_show_loadbalancers\",\n 
platform=\"openstack\")\nclass CreateAndShowLoadBalancers(octavia_utils.OctaviaBase):\n\n def run(self, description=None, admin_state=True,\n listeners=None, flavor_id=None, provider=None,\n vip_qos_policy_id=None):\n \"\"\"Create a loadbalancer per each subnet and then compare\n\n :param description: Human-readable description of the loadbalancer\n :param admin_state: The administrative state of the loadbalancer,\n which is up(true) or down(false)\n :param listeners: The associated listener id, if any\n :param flavor_id: The ID of the flavor\n :param provider: Provider name for the loadbalancer\n :param vip_qos_policy_id: The ID of the QoS policy\n \"\"\"\n subnets = []\n loadbalancers = []\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n project_id = self.context[\"tenant\"][\"id\"]\n for network in networks:\n subnets.extend(network.get(\"subnets\", []))\n for subnet_id in subnets:\n lb = self.octavia.load_balancer_create(\n subnet_id=subnet_id,\n description=description,\n admin_state=admin_state,\n project_id=project_id,\n listeners=listeners,\n flavor_id=flavor_id,\n provider=provider,\n vip_qos_policy_id=vip_qos_policy_id)\n loadbalancers.append(lb)\n\n for loadbalancer in loadbalancers:\n self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)\n self.octavia.load_balancer_show(\n loadbalancer[\"id\"])\n" }, { "alpha_fraction": 0.6372881531715393, "alphanum_fraction": 0.6401129961013794, "avg_line_length": 33.70588302612305, "blob_id": "cf135aa8ae8a219d9d2d3ea06dbbbdc43e7be536", "content_id": "e66601a1dbb8e5342022121e4edde53c36deeaa5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1770, "license_type": "permissive", "max_line_length": 78, "num_lines": 51, "path": "/tests/unit/task/scenarios/monasca/test_utils.py", "repo_name": "openstack/rally-openstack", "src_encoding": "UTF-8", "text": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 
2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ddt\n\nfrom rally_openstack.task.scenarios.monasca import utils\nfrom tests.unit import test\n\n\[email protected]\nclass MonascaScenarioTestCase(test.ScenarioTestCase):\n\n def setUp(self):\n super(MonascaScenarioTestCase, self).setUp()\n self.scenario = utils.MonascaScenario(self.context)\n self.kwargs = {\n \"dimensions\": {\n \"region\": \"fake_region\",\n \"hostname\": \"fake_host_name\",\n \"service\": \"fake_service\",\n \"url\": \"fake_url\"\n }\n }\n\n def test_list_metrics(self):\n return_metric_value = self.scenario._list_metrics()\n self.assertEqual(return_metric_value,\n self.clients(\"monasca\").metrics.list.return_value)\n self._test_atomic_action_timer(self.scenario.atomic_actions(),\n \"monasca.list_metrics\")\n\n @ddt.data(\n {\"name\": \"\"},\n {\"name\": \"fake_metric\"},\n )\n @ddt.unpack\n def test_create_metrics(self, name=None):\n self.name = name\n self.scenario._create_metrics(name=self.name, kwargs=self.kwargs)\n self.assertEqual(1, self.clients(\"monasca\").metrics.create.call_count)\n" } ]
335
PANIC-Project/pastegrep
https://github.com/PANIC-Project/pastegrep
6c3ad3312986281ba06048ceec321475146b266d
d42d5ab90d016dd23b37886460f8eebf58cc0d0a
92a4ecad53633bf16aee189bd192f0fd5fc56e70
refs/heads/master
2021-01-16T20:35:20.435238
2012-03-23T01:51:55
2012-03-23T01:51:55
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6267874240875244, "alphanum_fraction": 0.6334604620933533, "avg_line_length": 35.8070182800293, "blob_id": "97a3e022d5c6bc4f3671045f405c91f0d09cbcba", "content_id": "fcd75011684da423ea104a6f59128ff0162c0528", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2098, "license_type": "no_license", "max_line_length": 109, "num_lines": 57, "path": "/pastegrep.py", "repo_name": "PANIC-Project/pastegrep", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom time import sleep\nfrom urllib2 import urlopen\nfrom re import search, I, M\nfrom os import mkdir\nfrom os.path import isdir, exists\noutdir = 'pastes'\n\ndef save_paste(identifier, data):\n fname = \"{0}/paste_{1}.txt\".format(outdir, identifier)\n with open(fname, 'w') as out:\n out.write(data)\n print \"Saved {0}\".format(fname)\n\ndef pastegrep(expression, cb=save_paste, interval=20, endpoint='http://pastebin.com/ajax/realtime_data.php'):\n \"\"\"Loops endlessly and grabs the index of recent pastes from pastebin.\n Whenever a new paste is discovered, it is fetched and evaluated against\n `expression`. If `expression` is found inside the paste, the callback `cb`\n is called with the paste identifier and body data. 
The `interval` is a\n time in seconds, and the endpoint is a pastebin.com URL with a listing\n recent pastes to scrape.\"\"\"\n while True:\n fetch(expression, endpoint)\n sleep(interval)\n\nseen = set()\ndef fetch(expression, url):\n if not isdir(outdir) and not exists(outdir):\n mkdir(outdir)\n\n feed = urlopen(url)\n for line in feed:\n if \"created a new\" in line:\n match = search(r'\\[<a href=\"/(.*?)\">.*?</a>\\]', line)\n if match:\n identifier = match.groups()[0]\n if identifier not in seen:\n handle_paste(identifier, expression)\n seen.add(identifier)\n\ndef handle_paste(identifier, expression, save_func=save_paste):\n url = \"http://pastebin.com/raw.php?i={0}\".format(identifier)\n try:\n data = urlopen(url).read()\n if search(expression, data, I|M):\n save_func(identifier, data)\n except Exception as e:\n print \"Caught an exception: {0}\".format(e)\n print \"URL was {0}\".format(url)\n\nif __name__ == \"__main__\":\n from sys import argv\n if len(argv) <= 1: expression = 'password'\n else: expression = '|'.join(('({0})'.format(p) for p in argv[1:]))\n\n print \"Fetching all pastes that match the regular expression '{0}'\".format(expression)\n pastegrep(expression)\n" }, { "alpha_fraction": 0.7693727016448975, "alphanum_fraction": 0.7693727016448975, "avg_line_length": 40.69230651855469, "blob_id": "f679eceffd63cd5608fddfa8d5f9661ac78c1474", "content_id": "8c3c4ed57d8dc0394b77d9c81f3defb1ded3f7ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 542, "license_type": "no_license", "max_line_length": 79, "num_lines": 13, "path": "/README.md", "repo_name": "PANIC-Project/pastegrep", "src_encoding": "UTF-8", "text": "## Pastegrep ##\n\nThis project was created of an evening with the intent to make it easy to watch\npastebin for a given keyword and cache the result. It's written in python and\ndesigned to be easily used as a module. 
With very little work you should be\nable to import a function from pastegrep, pass it an expression you want to\nwatch for, and optionally give it a function to call whenever it finds a paste\nthat matches your regular expression. \n\nExamples:\n\n ./pastegrep.py password\n ./pastegrep.py @gmail.com @yahoo.com anon password\n" } ]
2
gilmoore/VAE_Tacotron2
https://github.com/gilmoore/VAE_Tacotron2
6b35f2a2c802a3e74a86b9c81820b64e3675d721
e12b83558ecd97b5ea98a3bcc7ed1c5140af451e
9d492631aa251e1bc8a6a00a8c0b1584975b6595
refs/heads/master
2023-07-24T06:04:39.665138
2020-01-20T12:30:49
2020-01-20T12:30:49
232,853,341
4
2
MIT
2020-01-09T16:27:28
2021-11-26T01:19:46
2023-07-06T21:20:34
Python
[ { "alpha_fraction": 0.696825385093689, "alphanum_fraction": 0.7023809552192688, "avg_line_length": 24.200000762939453, "blob_id": "c616c5b1ab49d8cc32b98b8184460d775a32fbb5", "content_id": "b9a9c09e793880a96c03595813dbaed581d66942", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1260, "license_type": "permissive", "max_line_length": 90, "num_lines": 50, "path": "/tacotron/utils/plot.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np \n\n\ndef split_title_line(title_text, max_words=5):\n\t\"\"\"\n\tA function that splits any string based on specific character\n\t(returning it with the string), with maximum number of words on it\n\t\"\"\"\n\tseq = title_text.split()\n\treturn '\\n'.join([' '.join(seq[i:i + max_words]) for i in range(0, len(seq), max_words)])\n\ndef plot_alignment(alignment, path, info=None, split_title=False):\n\tfig, ax = plt.subplots()\n\tim = ax.imshow(\n\t\talignment,\n\t\taspect='auto',\n\t\torigin='lower',\n\t\tinterpolation='none')\n\tfig.colorbar(im, ax=ax)\n\txlabel = 'Decoder timestep'\n\tif info is not None:\n\t\tif split_title:\n\t\t\ttitle = split_title_line(info)\n\t\telse:\n\t\t\ttitle = info\n\tplt.xlabel(xlabel)\n\tplt.title(title)\n\tplt.ylabel('Encoder timestep')\n\tplt.tight_layout()\n\tplt.savefig(path, format='png')\n\n\ndef plot_spectrogram(spectrogram, path, info=None, split_title=False):\n\tplt.figure()\n\tplt.imshow(np.rot90(spectrogram))\n\tplt.colorbar(shrink=0.65, orientation='horizontal')\n\tplt.ylabel('mels')\n\txlabel = 'frames'\n\tif info is not None:\n\t\tif split_title:\n\t\t\ttitle = split_title_line(info)\n\t\telse:\n\t\t\ttitle = info\n\tplt.xlabel(xlabel)\n\tplt.title(title)\n\tplt.tight_layout()\n\tplt.savefig(path, format='png')\n" }, { "alpha_fraction": 0.7158135771751404, "alphanum_fraction": 0.7211611866950989, 
"avg_line_length": 38.69696807861328, "blob_id": "06645dc980635b4b89e3a46f57ccd8a7a0d67eb3", "content_id": "78b31a71d340920094462ed60ef1150b87427eb0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1309, "license_type": "permissive", "max_line_length": 107, "num_lines": 33, "path": "/train.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import argparse\nfrom tacotron.train import tacotron_train\n\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--base_dir', default='.')\n\tparser.add_argument('--hparams', default='',\n\t\thelp='Hyperparameter overrides as a comma-separated list of name=value pairs')\n\tparser.add_argument('--input', default='training_data/train.txt')\n\tparser.add_argument('--name', help='Name of logging directory.')\n\tparser.add_argument('--model', default='Tacotron')\n\tparser.add_argument('--restore', type=bool, default=True, help='Set this to False to do a fresh training')\n\tparser.add_argument('--summary_interval', type=int, default=100,\n\t\thelp='Steps between running summary ops')\n\tparser.add_argument('--checkpoint_interval', type=int, default=500,\n\t\thelp='Steps between writing checkpoints')\n\tparser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')\n\targs = parser.parse_args()\n\n\taccepted_models = ['Tacotron', 'Wavenet']\n\n\tif args.model not in accepted_models:\n\t\traise ValueError('please enter a valid model to train: {}'.format(accepted_models))\n\n\tif args.model == 'Tacotron':\n\t\ttacotron_train(args)\n\telif args.model == 'Wavenet':\n\t\traise NotImplementedError('Wavenet is still a work in progress, thank you for your patience!')\n\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.7214375734329224, "alphanum_fraction": 0.7459016442298889, "avg_line_length": 43.80226135253906, "blob_id": "befe7d6b398187910dcc745c04d64b37fd6643f0", "content_id": 
"4c6ea40f8b07a241d2e95aea05bdec30029d551d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7930, "license_type": "permissive", "max_line_length": 160, "num_lines": 177, "path": "/hparams.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import tensorflow as tf \nimport numpy as np \n\n\n# Default hyperparameters\nhparams = tf.contrib.training.HParams(\n\t# Comma-separated list of cleaners to run on text prior to training and eval. For non-English\n\t# text, you may want to use \"basic_cleaners\" or \"transliteration_cleaners\".\n\tcleaners='english_cleaners',\n\n\n\t#Audio\n\tnum_mels = 80, \n\tnum_freq = 513, #only used when adding linear spectrograms post processing network\n\trescale = True, \n\trescaling_max = 0.999,\n\ttrim_silence = True,\n\n\t#Mel spectrogram\n\tfft_size = 1024,\n\thop_size = 256,\n\tsample_rate = 22050, #22050 Hz (corresponding to ljspeech dataset)\n\tframe_shift_ms = None,\n\n\t#Mel and Linear spectrograms normalization/scaling and clipping\n\tmel_normalization = False,\n\tsignal_normalization = True,\n\tallow_clipping_in_normalization = True, #Only relevant if mel_normalization = True\n\tsymmetric_mels = True, #Whether to scale the data to be symmetric around 0\n\tmax_abs_value = 4., #max absolute value of data. 
If symmetric, data will be [-max, max] else [0, max] \n\n\t#Limits\n\tmin_level_db =- 100,\n\tref_level_db = 20,\n\tfmin = 125,\n\tfmax = 7600,\n\n\t#Griffin Lim\n\tpower = 1.55,\n\tgriffin_lim_iters = 60,\n\n\t# VAE:\n\tuse_vae=True,\n\tvae_dim=32,\n\tvae_warming_up=15000,\n\tinit_vae_weights=0.001,\n\tvae_weight_multiler=0.002,\n\tfilters=[32, 32, 64, 64, 128, 128],\n\n\t#Tacotron\n\toutputs_per_step = 1, #number of frames to generate at each decoding step (speeds up computation and allows for higher batch size)\n\tstop_at_any = True, #Determines whether the decoder should stop when predicting <stop> to any frame or to all of them\n\n\tembedding_dim = 512, #dimension of embedding space\n\n\tenc_conv_num_layers = 3, #number of encoder convolutional layers\n\tenc_conv_kernel_size = (5, ), #size of encoder convolution filters for each layer\n\tenc_conv_channels = 512, #number of encoder convolutions filters for each layer\n\tencoder_lstm_units = 256, #number of lstm units for each direction (forward and backward)\n\tencoder_depth=512,\n\tsmoothing = False, #Whether to smooth the attention normalization function \n\tattention_dim = 128, #dimension of attention space\n\tattention_filters = 32, #number of attention convolution filters\n\tattention_kernel = (31, ), #kernel size of attention convolution\n\tcumulative_weights = True, #Whether to cumulate (sum) all previous attention weights or simply feed previous weights (Recommended: True)\n\n\tprenet_layers = [256, 256], #number of layers and number of units of prenet\n\tdecoder_layers = 2, #number of decoder lstm layers\n\tdecoder_lstm_units = 1024, #number of decoder lstm units on each layer\n\tmax_iters = 2500, #Max decoder steps during inference (Just for safety from infinite loop cases)\n\n\tpostnet_num_layers = 5, #number of postnet convolutional layers\n\tpostnet_kernel_size = (5, ), #size of postnet convolution filters for each layer\n\tpostnet_channels = 512, #number of postnet convolution filters for each 
layer\n\n\tmask_encoder = False, #whether to mask encoder padding while computing attention\n\timpute_finished = False, #Whether to use loss mask for padded sequences\n\tmask_finished = False, #Whether to mask alignments beyond the <stop_token> (False for debug, True for style)\n\n\tpredict_linear = False, #Whether to add a post-processing network to the Tacotron to predict linear spectrograms (True mode Not tested!!)\n\n\n\t#Wavenet\n\t# Input type:\n\t# 1. raw [-1, 1]\n\t# 2. mulaw [-1, 1]\n\t# 3. mulaw-quantize [0, mu]\n\t# If input_type is raw or mulaw, network assumes scalar input and\n\t# discretized mixture of logistic distributions output, otherwise one-hot\n\t# input and softmax output are assumed.\n\t# **NOTE**: if you change the one of the two parameters below, you need to\n\t# re-run preprocessing before training.\n\t# **NOTE**: scaler input (raw or mulaw) is experimental. Use it your own risk.\n\tinput_type=\"mulaw-quantize\",\n\tquantize_channels=256, # 65536 or 256\n\n\tsilence_threshold=2,\n\n\t# Mixture of logistic distributions:\n\tlog_scale_min=float(np.log(1e-14)),\n\n\t#TODO model params\n\n\n\t#Tacotron Training\n\ttacotron_batch_size = 32, #number of training samples on each training steps\n\ttacotron_reg_weight = 1e-6, #regularization weight (for l2 regularization)\n\ttacotron_scale_regularization = True, #Whether to rescale regularization weight to adapt for outputs range (used when reg_weight is high and biasing the model)\n\n\ttacotron_decay_learning_rate = True, #boolean, determines if the learning rate will follow an exponential decay\n\ttacotron_start_decay = 50000, #Step at which learning decay starts\n\ttacotron_decay_steps = 50000, #starting point for learning rate decay (and determines the decay slope) (UNDER TEST)\n\ttacotron_decay_rate = 0.4, #learning rate decay rate (UNDER TEST)\n\ttacotron_initial_learning_rate = 1e-3, #starting learning rate\n\ttacotron_final_learning_rate = 1e-5, #minimal learning 
rate\n\n\ttacotron_adam_beta1 = 0.9, #AdamOptimizer beta1 parameter\n\ttacotron_adam_beta2 = 0.999, #AdamOptimizer beta2 parameter\n\ttacotron_adam_epsilon = 1e-6, #AdamOptimizer beta3 parameter\n\n\ttacotron_zoneout_rate = 0.1, #zoneout rate for all LSTM cells in the network\n\ttacotron_dropout_rate = 0.5, #dropout rate for all convolutional layers + prenet\n\n\ttacotron_teacher_forcing_ratio = 1., #Value from [0., 1.], 0.=0%, 1.=100%, determines the % of times we force next decoder inputs\n\t\n\n\t#Wavenet Training TODO\n\n\n\n\t#Eval sentences\n\tsentences = [\n\t# From July 8, 2017 New York Times:\n\t'Scientists at the CERN laboratory say they have discovered a new particle.',\n\t'There\\'s a way to measure the acute emotional intelligence that has never gone out of style.',\n\t'President Trump met with other leaders at the Group of 20 conference.',\n\t'The Senate\\'s bill to repeal and replace the Affordable Care Act is now imperiled.',\n\t# From Google's Tacotron example page:\n\t'Generative adversarial network or variational auto-encoder.',\n\t'Basilar membrane and otolaryngology are not auto-correlations.',\n\t'He has read the whole thing.',\n\t'He reads books.',\n\t\"Don't desert me here in the desert!\",\n\t'He thought it was time to present the present.',\n\t'Thisss isrealy awhsome.',\n\t'Punctuation sensitivity, is working.',\n\t'Punctuation sensitivity is working.',\n\t\"The buses aren't the problem, they actually provide a solution.\",\n\t\"The buses aren't the PROBLEM, they actually provide a SOLUTION.\",\n\t\"The quick brown fox jumps over the lazy dog.\",\n\t\"Does the quick brown fox jump over the lazy dog?\",\n\t\"Peter Piper picked a peck of pickled peppers. How many pickled peppers did Peter Piper pick?\",\n\t\"She sells sea-shells on the sea-shore. 
The shells she sells are sea-shells I'm sure.\",\n\t\"The blue lagoon is a nineteen eighty American romance adventure film.\",\n\t\"Tajima Airport serves Toyooka.\",\n\t'Talib Kweli confirmed to AllHipHop that he will be releasing an album in the next year.',\n\t#From Training data:\n\t'the rest being provided with barrack beds, and in dimensions varying from thirty feet by fifteen to fifteen feet by ten.',\n\t'in giltspur street compter, where he was first lodged.',\n\t'a man named burnett came with his wife and took up his residence at whitchurch, hampshire, at no great distance from laverstock,',\n\t'it appears that oswald had only one caller in response to all of his fpcc activities,',\n\t'he relied on the absence of the strychnia.',\n\t'scoggins thought it was lighter.',\n\t'''would, it is probable, have eventually overcome the reluctance of some of the prisoners at least, \n\tand would have possessed so much moral dignity''',\n\t'''the only purpose of this whole sentence is to evaluate the scalability of the model for very long sentences. \n\tThis is not even a long sentence anymore, it has become an entire paragraph. \n\tShould I stop now? 
Let\\'s add this last sentence in which we talk about nothing special.''',\n\t'Thank you so much for your support!!'\n\t]\n\n\t)\n\ndef hparams_debug_string():\n\tvalues = hparams.values()\n\thp = [' %s: %s' % (name, values[name]) for name in sorted(values) if name != 'sentences']\n\treturn 'Hyperparameters:\\n' + '\\n'.join(hp)\n" }, { "alpha_fraction": 0.6993696689605713, "alphanum_fraction": 0.7100575566291809, "avg_line_length": 37.819149017333984, "blob_id": "3efe3d58eb94bb6b6a18058a9892b3f2222a3fa0", "content_id": "8ed428dd1dc08a78c69f2511d8f177981495c896", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3649, "license_type": "permissive", "max_line_length": 123, "num_lines": 94, "path": "/tacotron/synthesizer.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport tensorflow as tf\nfrom hparams import hparams\nfrom librosa import effects\nfrom tacotron.models import create_model\nfrom tacotron.utils.text import text_to_sequence\nfrom tacotron.utils import plot\nfrom datasets import audio\nfrom datetime import datetime\n\n\nclass Synthesizer:\n\tdef load(self, checkpoint_path, gta=False, model_name='Tacotron'):\n\t\tprint('Constructing model: %s' % model_name)\n\t\tinputs = tf.placeholder(tf.int32, [1, None], 'inputs')\n\t\tinput_lengths = tf.placeholder(tf.int32, [1], 'input_lengths')\n\n\t\twith tf.variable_scope('model') as scope:\n\t\t\tself.model = create_model(model_name, hparams)\n\t\t\tif hparams.use_vae:\n\t\t\t\tref_targets = tf.placeholder(tf.float32, [1, None, hparams.num_mels], 'ref_targets')\n\t\t\tif gta:\n\t\t\t\ttargets = tf.placeholder(tf.float32, [1, None, hparams.num_mels], 'mel_targets')\n\t\t\t\t\n\t\t\t\tif hparams.use_vae:\n\t\t\t\t\tself.model.initialize(inputs, input_lengths, targets, gta=gta, reference_mel=ref_targets)\n\t\t\t\telse:\n\t\t\t\t\tself.model.initialize(inputs, input_lengths, targets, 
gta=gta)\n\t\t\telse:\n\t\t\t\tif hparams.use_vae:\n\t\t\t\t\tself.model.initialize(inputs, input_lengths, reference_mel=ref_targets)\n\t\t\t\telse:\n\t\t\t\t\tself.model.initialize(inputs, input_lengths)\n\t\t\tself.mel_outputs = self.model.mel_outputs\n\t\t\tself.alignment = self.model.alignments[0]\n\n\t\tself.gta = gta\n\t\tprint('Loading checkpoint: %s' % checkpoint_path)\n\t\tself.session = tf.Session()\n\t\tself.session.run(tf.global_variables_initializer())\n\t\tsaver = tf.train.Saver()\n\t\tsaver.restore(self.session, checkpoint_path)\n\n\n\tdef synthesize(self, text, index, out_dir, log_dir, mel_filename, reference_mel):\n\t\tcleaner_names = [x.strip() for x in hparams.cleaners.split(',')]\n\t\tseq = text_to_sequence(text, cleaner_names)\n\t\tfeed_dict = {\n\t\t\tself.model.inputs: [np.asarray(seq, dtype=np.int32)],\n\t\t\tself.model.input_lengths: np.asarray([len(seq)], dtype=np.int32)\n\t\t}\n\n\t\tif self.gta:\n\t\t\tfeed_dict[self.model.mel_targets] = np.load(mel_filename).reshape(1, -1, 80)\n\t\t\tfeed_dict[self.model.reference_mel] = np.load(mel_filename).reshape(1, -1, 80)\n\t\telif hparams.use_vae:\n\t\t\treference_mel = [np.asarray(reference_mel, dtype=np.float32)]\n\t\t\tfeed_dict[self.model.reference_mel] = reference_mel\n\n\n\t\tif self.gta or not hparams.predict_linear:\n\t\t\tmels, alignment = self.session.run([self.mel_outputs, self.alignment], feed_dict=feed_dict)\n\n\t\telse:\n\t\t\tlinear, mels, alignment = self.session.run([self.linear_outputs, self.mel_outputs, self.alignment], feed_dict=feed_dict)\n\t\t\tlinear = linear.reshape(-1, hparams.num_freq)\n\n\t\tmels = mels.reshape(-1, hparams.num_mels) #Thanks to @imdatsolak for pointing this out\n\n\t\t# Write the spectrogram to disk\n\t\t# Note: outputs mel-spectrogram files and target ones have same names, just different folders\n\t\tmel_filename = os.path.join(out_dir, 'speech-mel-{:05d}.npy'.format(index))\n\t\tnp.save(mel_filename, mels, allow_pickle=False)\n\n\t\tif log_dir is not 
None:\n\t\t\t#save wav (mel -> wav)\n\t\t\twav = audio.inv_mel_spectrogram(mels.T)\n\t\t\taudio.save_wav(wav, os.path.join(log_dir, 'wavs/speech-wav-{:05d}-mel.wav'.format(index)))\n\n\t\t\tif hparams.predict_linear:\n\t\t\t\t#save wav (linear -> wav)\n\t\t\t\twav = audio.inv_linear_spectrogram(linear.T)\n\t\t\t\taudio.save_wav(wav, os.path.join(log_dir, 'wavs/speech-wav-{:05d}-linear.wav'.format(index)))\n\n\t\t\t#save alignments\n\t\t\tplot.plot_alignment(alignment, os.path.join(log_dir, 'plots/speech-alignment-{:05d}.png'.format(index)),\n\t\t\t\tinfo='{}'.format(text), split_title=True)\n\n\t\t\t#save mel spectrogram plot\n\t\t\tplot.plot_spectrogram(mels, os.path.join(log_dir, 'plots/speech-mel-{:05d}.png'.format(index)),\n\t\t\t\tinfo='{}'.format(text), split_title=True)\n\n\t\treturn mel_filename\n" }, { "alpha_fraction": 0.7028352618217468, "alphanum_fraction": 0.7104214429855347, "avg_line_length": 41.78688430786133, "blob_id": "82c34292ad9ccd1ec5ad51c1cf54720333e13770", "content_id": "d7fddcbb3e702836f9a32cc32f3adece3e4d6ba5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13050, "license_type": "permissive", "max_line_length": 158, "num_lines": 305, "path": "/tacotron/models/tacotron.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nfrom tacotron.utils.symbols import symbols\nfrom tacotron.utils.infolog import log\nfrom tacotron.models.helpers import TacoTrainingHelper, TacoTestHelper\nfrom tacotron.models.modules import *\nfrom tacotron.models.zoneout_LSTM import ZoneoutLSTMCell\nfrom tensorflow.contrib.seq2seq import dynamic_decode\nfrom tacotron.models.Architecture_wrappers import TacotronEncoderCell, TacotronDecoderCell\nfrom tacotron.models.custom_decoder import CustomDecoder\nfrom tacotron.models.attention import LocationSensitiveAttention\nfrom tacotron.utils.util import shape_list, vae_weight\n\n\nclass 
Tacotron():\n\t\"\"\"vae_tacotron2 Feature prediction Model.\n\t\"\"\"\n\tdef __init__(self, hparams):\n\t\tself._hparams = hparams\n\n\tdef initialize(self, inputs, input_lengths, mel_targets=None, mel_lengths=None, stop_token_targets=None, linear_targets=None, gta=False, reference_mel=None):\n\t\t\"\"\"\n\t\tInitializes the model for inference\n\n\t\tsets \"mel_outputs\" and \"alignments\" fields.\n\n\t\tArgs:\n\t\t\t- inputs: int32 Tensor with shape [N, T_in] where N is batch size, T_in is number of\n\t\t\t steps in the input time series, and values are character IDs\n\t\t\t- input_lengths: int32 Tensor with shape [N] where N is batch size and values are the lengths\n\t\t\tof each sequence in inputs.\n\t\t\t- mel_targets: float32 Tensor with shape [N, T_out, M] where N is batch size, T_out is number\n\t\t\tof steps in the output time series, M is num_mels, and values are entries in the mel\n\t\t\tspectrogram. Only needed for training.\n\t\t\"\"\"\n\t\tif mel_targets is None and stop_token_targets is not None:\n\t\t\traise ValueError('no mel targets were provided but token_targets were given')\n\t\tif mel_targets is not None and stop_token_targets is None and not gta:\n\t\t\traise ValueError('Mel targets are provided without corresponding token_targets')\n\t\tif gta==False and self._hparams.predict_linear==True and linear_targets is None:\n\t\t\traise ValueError('Model is set to use post processing to predict linear spectrograms in training but no linear targets given!')\n\t\tif gta and linear_targets is not None:\n\t\t\traise ValueError('Linear spectrogram prediction is not supported in GTA mode!')\n\n\t\twith tf.variable_scope('inference') as scope:\n\t\t\tis_training = mel_targets is not None and not gta\n\t\t\tbatch_size = tf.shape(inputs)[0]\n\t\t\thp = self._hparams\n\t\t\t#GTA is only used for predicting mels to train Wavenet vocoder, so we ommit post processing when doing GTA synthesis\n\t\t\tpost_condition = hp.predict_linear and not gta\n\n\t\t\t# 
Embeddings ==> [batch_size, sequence_length, embedding_dim]\n\t\t\tembedding_table = tf.get_variable(\n\t\t\t\t'inputs_embedding', [len(symbols), hp.embedding_dim], dtype=tf.float32)\n\t\t\tembedded_inputs = tf.nn.embedding_lookup(embedding_table, inputs)\n\n\n\t\t\t#Encoder Cell ==> [batch_size, encoder_steps, encoder_lstm_units]\n\t\t\tencoder_cell = TacotronEncoderCell(\n\t\t\t\tEncoderConvolutions(is_training, kernel_size=hp.enc_conv_kernel_size,\n\t\t\t\t\tchannels=hp.enc_conv_channels, scope='encoder_convolutions'),\n\t\t\t\tEncoderRNN(is_training, size=hp.encoder_lstm_units,\n\t\t\t\t\tzoneout=hp.tacotron_zoneout_rate, scope='encoder_LSTM'))\n\n\t\t\tencoder_outputs = encoder_cell(embedded_inputs, input_lengths)\n\t\t\tif hp.use_vae:\n\t\t\t\tif is_training:\n\t\t\t\t\treference_mel = mel_targets\n\t\t\t\t\n\t\t\t\tstyle_embeddings, mu, log_var = VAE(\n\t\t\t\t\tinputs=reference_mel,\n\t\t\t\t\tinput_lengths=mel_lengths,\n\t\t\t\t\tfilters=hp.filters,\n\t\t\t\t\tkernel_size=(3, 3),\n\t\t\t\t\tstrides=(2, 2),\n\t\t\t\t\tnum_units=hp.vae_dim,\n\t\t\t\t\tis_training=is_training,\n\t\t\t\t\tscope='vae')\n\n\t\t\t\tself.mu = mu\n\t\t\t\tself.log_var = log_var\n\t\t\t\tstyle_embeddings = tf.layers.dense(style_embeddings, hp.encoder_depth)\n\t\t\t\tstyle_embeddings = tf.expand_dims(style_embeddings, axis=1)\n\t\t\t\tstyle_embeddings = tf.tile(style_embeddings, [1, shape_list(encoder_outputs)[1], 1]) # [N, T_in, 256]\n\t\t\t\tencoder_outputs = encoder_outputs + style_embeddings\n\n\t\t\t#For shape visualization purpose\n\t\t\tenc_conv_output_shape = encoder_cell.conv_output_shape\n\n\n\t\t\t#Decoder Parts\n\t\t\t#Attention Decoder Prenet\n\t\t\tprenet = Prenet(is_training, layer_sizes=hp.prenet_layers, scope='decoder_prenet')\n\t\t\t#Attention Mechanism\n\t\t\tattention_mechanism = LocationSensitiveAttention(hp.attention_dim, encoder_outputs,\n\t\t\t\tmask_encoder=hp.mask_encoder, memory_sequence_length=input_lengths, 
smoothing=hp.smoothing,\n\t\t\t\tcumulate_weights=hp.cumulative_weights)\n\t\t\t#Decoder LSTM Cells\n\t\t\tdecoder_lstm = DecoderRNN(is_training, layers=hp.decoder_layers,\n\t\t\t\tsize=hp.decoder_lstm_units, zoneout=hp.tacotron_zoneout_rate, scope='decoder_lstm')\n\t\t\t#Frames Projection layer\n\t\t\tframe_projection = FrameProjection(hp.num_mels * hp.outputs_per_step, scope='linear_transform')\n\t\t\t#<stop_token> projection layer\n\t\t\tstop_projection = StopProjection(is_training, scope='stop_token_projection')\n\n\n\t\t\t#Decoder Cell ==> [batch_size, decoder_steps, num_mels * r] (after decoding)\n\t\t\tdecoder_cell = TacotronDecoderCell(\n\t\t\t\tprenet,\n\t\t\t\tattention_mechanism,\n\t\t\t\tdecoder_lstm,\n\t\t\t\tframe_projection,\n\t\t\t\tstop_projection,\n\t\t\t\tmask_finished=hp.mask_finished)\n\n\n\t\t\t#Define the helper for our decoder\n\t\t\tif (is_training or gta) == True:\n\t\t\t\tself.helper = TacoTrainingHelper(batch_size, mel_targets, stop_token_targets,\n\t\t\t\t\thp.num_mels, hp.outputs_per_step, hp.tacotron_teacher_forcing_ratio, gta)\n\t\t\telse:\n\t\t\t\tself.helper = TacoTestHelper(batch_size, hp.num_mels, hp.outputs_per_step)\n\n\n\t\t\t#initial decoder state\n\t\t\tdecoder_init_state = decoder_cell.zero_state(batch_size=batch_size, dtype=tf.float32)\n\n\t\t\t#Only use max iterations at synthesis time\n\t\t\tmax_iters = hp.max_iters if not is_training else None\n\n\t\t\t#Decode\n\t\t\t(frames_prediction, stop_token_prediction, _), final_decoder_state, _ = dynamic_decode(\n\t\t\t\tCustomDecoder(decoder_cell, self.helper, decoder_init_state),\n\t\t\t\timpute_finished=hp.impute_finished,\n\t\t\t\tmaximum_iterations=max_iters)\n\n\n\t\t\t# Reshape outputs to be one output per entry\n\t\t\t#==> [batch_size, non_reduced_decoder_steps (decoder_steps * r), num_mels]\n\t\t\tdecoder_output = tf.reshape(frames_prediction, [batch_size, -1, hp.num_mels])\n\t\t\tstop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, 
-1])\n\n\n\t\t\t#Postnet\n\t\t\tpostnet = Postnet(is_training, kernel_size=hp.postnet_kernel_size,\n\t\t\t\tchannels=hp.postnet_channels, scope='postnet_convolutions')\n\n\t\t\t#Compute residual using post-net ==> [batch_size, decoder_steps * r, postnet_channels]\n\t\t\tresidual = postnet(decoder_output)\n\n\t\t\t#Project residual to same dimension as mel spectrogram\n\t\t\t#==> [batch_size, decoder_steps * r, num_mels]\n\t\t\tresidual_projection = FrameProjection(hp.num_mels, scope='postnet_projection')\n\t\t\tprojected_residual = residual_projection(residual)\n\n\n\t\t\t#Compute the mel spectrogram\n\t\t\tmel_outputs = decoder_output + projected_residual\n\n\n\t\t\tif post_condition:\n\t\t\t\t#Based on https://github.com/keithito/tacotron/blob/tacotron2-work-in-progress/models/tacotron.py\n\t\t\t\t#Post-processing Network to map mels to linear spectrograms using same architecture as the encoder\n\t\t\t\tpost_processing_cell = TacotronEncoderCell(\n\t\t\t\tEncoderConvolutions(is_training, kernel_size=hp.enc_conv_kernel_size,\n\t\t\t\t\tchannels=hp.enc_conv_channels, scope='post_processing_convolutions'),\n\t\t\t\tEncoderRNN(is_training, size=hp.encoder_lstm_units,\n\t\t\t\t\tzoneout=hp.tacotron_zoneout_rate, scope='post_processing_LSTM'))\n\n\t\t\t\texpand_outputs = post_processing_cell(mel_outputs)\n\t\t\t\tlinear_outputs = FrameProjection(hp.num_freq, scope='post_processing_projection')(expand_outputs)\n\n\t\t\t#Grab alignments from the final decoder state\n\t\t\talignments = tf.transpose(final_decoder_state.alignment_history.stack(), [1, 2, 0])\n\n\t\t\tself.inputs = inputs\n\t\t\tself.input_lengths = input_lengths\n\t\t\tself.decoder_output = decoder_output\n\t\t\tself.alignments = alignments\n\t\t\tself.stop_token_prediction = stop_token_prediction\n\t\t\tself.stop_token_targets = stop_token_targets\n\t\t\tself.mel_outputs = mel_outputs\n\t\t\tself.reference_mel = reference_mel\n\t\t\tif post_condition:\n\t\t\t\tself.linear_outputs = 
linear_outputs\n\t\t\t\tself.linear_targets = linear_targets\n\t\t\tself.mel_targets = mel_targets\n\t\t\tself.mel_lengths = mel_lengths\n\t\t\tlog('Initialized Tacotron model. Dimensions (? = dynamic shape): ')\n\t\t\tlog(' embedding: {}'.format(embedded_inputs.shape))\n\t\t\tlog(' enc conv out: {}'.format(enc_conv_output_shape))\n\t\t\tlog(' encoder out: {}'.format(encoder_outputs.shape))\n\t\t\tlog(' decoder out: {}'.format(decoder_output.shape))\n\t\t\tlog(' residual out: {}'.format(residual.shape))\n\t\t\tlog(' projected residual out: {}'.format(projected_residual.shape))\n\t\t\tlog(' mel out: {}'.format(mel_outputs.shape))\n\t\t\tif post_condition:\n\t\t\t\tlog(' linear out: {}'.format(linear_outputs.shape))\n\t\t\tlog(' <stop_token> out: {}'.format(stop_token_prediction.shape))\n\n\n\tdef add_loss(self, global_step):\n\t\t'''Adds loss to the model. Sets \"loss\" field. initialize must have been called.'''\n\t\twith tf.variable_scope('loss') as scope:\n\t\t\thp = self._hparams\n\n\t\t\t# Compute loss of predictions before postnet\n\t\t\tbefore = tf.losses.mean_squared_error(self.mel_targets, self.decoder_output)\n\t\t\t# Compute loss after postnet\n\t\t\tafter = tf.losses.mean_squared_error(self.mel_targets, self.mel_outputs)\n\t\t\t#Compute <stop_token> loss (for learning dynamic generation stop)\n\t\t\tstop_token_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n\t\t\t\tlabels=self.stop_token_targets,\n\t\t\t\tlogits=self.stop_token_prediction))\n\n\t\t\tif hp.predict_linear:\n\t\t\t\t#Compute linear loss\n\t\t\t\t#From https://github.com/keithito/tacotron/blob/tacotron2-work-in-progress/models/tacotron.py\n\t\t\t\t#Prioritize loss for frequencies under 2000 Hz.\n\t\t\t\tl1 = tf.abs(self.linear_targets - self.linear_outputs)\n\t\t\t\tn_priority_freq = int(2000 / (hp.sample_rate * 0.5) * hp.num_mels)\n\t\t\t\tlinear_loss = 0.5 * tf.reduce_mean(l1) + 0.5 * tf.reduce_mean(l1[:,:,0:n_priority_freq])\n\t\t\telse:\n\t\t\t\tlinear_loss = 
0.\n\n\t\t\t# Compute the regularization weight\n\t\t\tif hp.tacotron_scale_regularization:\n\t\t\t\treg_weight_scaler = 1. / (2 * hp.max_abs_value) if hp.symmetric_mels else 1. / (hp.max_abs_value)\n\t\t\t\treg_weight = hp.tacotron_reg_weight * reg_weight_scaler\n\t\t\telse:\n\t\t\t\treg_weight = hp.tacotron_reg_weight\n\n\t\t\t# Get all trainable variables\n\t\t\tall_vars = tf.trainable_variables()\n\t\t\tregularization = tf.add_n([tf.nn.l2_loss(v) for v in all_vars\n\t\t\t\tif not('bias' in v.name or 'Bias' in v.name)]) * reg_weight\n\n\t\t\t# Compute final loss term\n\t\t\tself.before_loss = before\n\t\t\tself.after_loss = after\n\t\t\tself.stop_token_loss = stop_token_loss\n\t\t\tself.regularization_loss = regularization\n\t\t\tself.linear_loss = linear_loss\n\n\t\t\tself.loss = self.before_loss + self.after_loss + self.stop_token_loss + self.regularization_loss + self.linear_loss\n\n\t\t\tif hp.use_vae:\n\t\t\t\tself.ki_loss = -0.5 * tf.reduce_sum(1 + self.log_var - tf.pow(self.mu, 2) - tf.exp(self.log_var))\n\t\t\t\tvae_loss_weight = vae_weight(global_step)\n\t\t\t\tself.loss += self.ki_loss * vae_loss_weight\n\n\n\tdef add_optimizer(self, global_step):\n\t\t'''Adds optimizer. Sets \"gradients\" and \"optimize\" fields. 
add_loss must have been called.\n\n\t\tArgs:\n\t\t\tglobal_step: int32 scalar Tensor representing current global step in training\n\t\t'''\n\t\twith tf.variable_scope('optimizer') as scope:\n\t\t\thp = self._hparams\n\t\t\tif hp.tacotron_decay_learning_rate:\n\t\t\t\tself.decay_steps = hp.tacotron_decay_steps\n\t\t\t\tself.decay_rate = hp.tacotron_decay_rate\n\t\t\t\tself.learning_rate = self._learning_rate_decay(hp.tacotron_initial_learning_rate, global_step)\n\t\t\telse:\n\t\t\t\tself.learning_rate = tf.convert_to_tensor(hp.tacotron_initial_learning_rate)\n\n\t\t\toptimizer = tf.train.AdamOptimizer(self.learning_rate, hp.tacotron_adam_beta1,\n\t\t\t\thp.tacotron_adam_beta2, hp.tacotron_adam_epsilon)\n\t\t\tgradients, variables = zip(*optimizer.compute_gradients(self.loss))\n\t\t\tself.gradients = gradients\n\t\t\t#Just for causion\n\t\t\t#https://github.com/Rayhane-mamah/Tacotron-2/issues/11\n\t\t\tclipped_gradients, _ = tf.clip_by_global_norm(gradients, 0.5)\n\n\t\t\t# Add dependency on UPDATE_OPS; otherwise batchnorm won't work correctly. 
See:\n\t\t\t# https://github.com/tensorflow/tensorflow/issues/1122\n\t\t\twith tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n\t\t\t\tself.optimize = optimizer.apply_gradients(zip(clipped_gradients, variables),\n\t\t\t\t\tglobal_step=global_step)\n\n\tdef _learning_rate_decay(self, init_lr, global_step):\n\t\t#################################################################\n\t\t# Narrow Exponential Decay:\n\n\t\t# Phase 1: lr = 1e-3\n\t\t# We only start learning rate decay after 50k steps\n\n\t\t# Phase 2: lr in ]1e-3, 1e-5[\n\t\t# decay reach minimal value at step 300k\n\n\t\t# Phase 3: lr = 1e-5\n\t\t# clip by minimal learning rate value (step > 300k)\n\t\t#################################################################\n\t\thp = self._hparams\n\n\t\t#Compute natural exponential decay\n\t\tlr = tf.train.exponential_decay(init_lr,\n\t\t\tglobal_step - hp.tacotron_start_decay, #lr = 1e-3 at step 50k\n\t\t\tself.decay_steps,\n\t\t\tself.decay_rate, #lr = 1e-5 around step 300k\n\t\t\tname='exponential_decay')\n\n\n\t\t#clip learning rate by max and min values (initial and final values)\n\t\treturn tf.minimum(tf.maximum(lr, hp.tacotron_final_learning_rate), init_lr)\n" }, { "alpha_fraction": 0.6836611032485962, "alphanum_fraction": 0.6965282559394836, "avg_line_length": 30.930233001708984, "blob_id": "38183d59d4046ede0a16b587bd366066494286a6", "content_id": "bb7346d3428baf1e0223dd94d9b73b2e85d9fd91", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4119, "license_type": "permissive", "max_line_length": 127, "num_lines": 129, "path": "/tacotron/utils/audio.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import librosa\nimport librosa.filters\nimport numpy as np\nfrom scipy import signal\nfrom hparams import hparams\nimport tensorflow as tf\n\n\ndef load_wav(path):\n\treturn librosa.core.load(path, sr=hparams.sample_rate)[0]\n\ndef save_wav(wav, 
path):\n\twav *= 32767 / max(0.01, np.max(np.abs(wav)))\n\tlibrosa.output.write_wav(path, wav.astype(np.int16), hparams.sample_rate)\n\ndef trim_silence(wav):\n\t'''Trim leading and trailing silence\n\n\tUseful for M-AILABS dataset if we choose to trim the extra 0.5 silences.\n\t'''\n\treturn librosa.effects.trim(wav)[0]\n\ndef preemphasis(x):\n\treturn signal.lfilter([1, -hparams.preemphasis], [1], x)\n\ndef inv_preemphasis(x):\n\treturn signal.lfilter([1], [1, -hparams.preemphasis], x)\n\ndef get_hop_size():\n\thop_size = hparams.hop_size\n\tif hop_size is None:\n\t\tassert hparams.frame_shift_ms is not None\n\t\thop_size = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)\n\treturn hop_size\n\ndef melspectrogram(wav):\n\tD = _stft(wav)\n\tS = _amp_to_db(_linear_to_mel(np.abs(D))) - hparams.ref_level_db\n\n\tif hparams.mel_normalization:\n\t\treturn _normalize(S)\n\treturn S\n\n\ndef inv_mel_spectrogram(mel_spectrogram):\n\t'''Converts mel spectrogram to waveform using librosa'''\n\tif hparams.mel_normalization:\n\t\tD = _denormalize(mel_spectrogram)\n\telse:\n\t\tD = mel_spectrogram\n\n\tS = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db)) # Convert back to linear\n\n\treturn _griffin_lim(S ** hparams.power)\n\ndef _griffin_lim(S):\n\t'''librosa implementation of Griffin-Lim\n\tBased on https://github.com/librosa/librosa/issues/434\n\t'''\n\tangles = np.exp(2j * np.pi * np.random.rand(*S.shape))\n\tS_complex = np.abs(S).astype(np.complex)\n\ty = _istft(S_complex * angles)\n\tfor i in range(hparams.griffin_lim_iters):\n\t\tangles = np.exp(1j * np.angle(_stft(y)))\n\t\ty = _istft(S_complex * angles)\n\treturn y\n\ndef _stft(y):\n\treturn librosa.stft(y=y, n_fft=hparams.fft_size, hop_length=get_hop_size())\n\ndef _istft(y):\n\treturn librosa.istft(y, hop_length=get_hop_size())\n\n\n# Conversions\n_mel_basis = None\n_inv_mel_basis = None\n\ndef _linear_to_mel(spectogram):\n\tglobal _mel_basis\n\tif _mel_basis is None:\n\t\t_mel_basis = 
_build_mel_basis()\n\treturn np.dot(_mel_basis, spectogram)\n\ndef _mel_to_linear(mel_spectrogram):\n\tglobal _inv_mel_basis\n\tif _inv_mel_basis is None:\n\t\t_inv_mel_basis = np.linalg.pinv(_build_mel_basis())\n\treturn np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))\n\ndef _build_mel_basis():\n\tassert hparams.fmax <= hparams.sample_rate // 2\n\treturn librosa.filters.mel(hparams.sample_rate, hparams.fft_size, n_mels=hparams.num_mels,\n\t\t\t\t\t\t\t fmin=hparams.fmin, fmax=hparams.fmax)\n\ndef _amp_to_db(x):\n\tmin_level = np.exp(hparams.min_level_db / 20 * np.log(10))\n\treturn 20 * np.log10(np.maximum(min_level, x))\n\ndef _db_to_amp(x):\n\treturn np.power(10.0, (x) * 0.05)\n\ndef _normalize(S):\n\tif hparams.allow_clipping_in_normalization:\n\t\tif hparams.symmetric_mels:\n\t\t\treturn np.clip((2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value,\n\t\t\t -hparams.max_abs_value, hparams.max_abs_value)\n\t\telse:\n\t\t\treturn np.clip(hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)), 0, hparams.max_abs_value)\n\n\tassert S.max() <= 0 and S.min() - hparams.min_level_db >= 0\n\tif hparams.symmetric_mels:\n\t\treturn (2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value\n\telse:\n\t\treturn hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db))\n\ndef _denormalize(D):\n\tif hparams.allow_clipping_in_normalization:\n\t\tif hparams.symmetric_mels:\n\t\t\treturn (((np.clip(D, -hparams.max_abs_value,\n\t\t\t\thparams.max_abs_value) + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value))\n\t\t\t\t+ hparams.min_level_db)\n\t\telse:\n\t\t\treturn ((np.clip(D, 0, hparams.max_abs_value) * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)\n\n\tif hparams.symmetric_mels:\n\t\treturn (((D + hparams.max_abs_value) * -hparams.min_level_db / (2 * 
hparams.max_abs_value)) + hparams.min_level_db)\n\telse:\n\t\treturn ((D * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)\n" }, { "alpha_fraction": 0.70783531665802, "alphanum_fraction": 0.7121514081954956, "avg_line_length": 37.126583099365234, "blob_id": "901380b0efaef24926f1c4a049b281fbd694fc88", "content_id": "4db6e3e60ada28389f5303a382ad2ea214b574d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3012, "license_type": "permissive", "max_line_length": 103, "num_lines": 79, "path": "/tacotron/synthesize.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport re\nfrom hparams import hparams, hparams_debug_string\nfrom tacotron.synthesizer import Synthesizer\nimport tensorflow as tf\nimport time\nfrom tqdm import tqdm\nfrom tacotron.utils.audio import load_wav, melspectrogram\n\ndef run_eval(args, checkpoint_path, output_dir):\n\tprint(hparams_debug_string())\n\tsynth = Synthesizer()\n\tsynth.load(checkpoint_path)\n\teval_dir = os.path.join(output_dir, 'eval')\n\tlog_dir = os.path.join(output_dir, 'logs-eval')\n\twav = load_wav(args.reference_audio)\n\treference_mel = melspectrogram(wav).transpose()\n\t#Create output path if it doesn't exist\n\tos.makedirs(eval_dir, exist_ok=True)\n\tos.makedirs(log_dir, exist_ok=True)\n\tos.makedirs(os.path.join(log_dir, 'wavs'), exist_ok=True)\n\tos.makedirs(os.path.join(log_dir, 'plots'), exist_ok=True)\n\n\twith open(os.path.join(eval_dir, 'map.txt'), 'w') as file:\n\t\tfor i, text in enumerate(tqdm(hparams.sentences)):\n\t\t\tstart = time.time()\n\t\t\tmel_filename = synth.synthesize(text, i+1, eval_dir, log_dir, None, reference_mel)\n\n\t\t\tfile.write('{}|{}\\n'.format(text, mel_filename))\n\tprint('synthesized mel spectrograms at {}'.format(eval_dir))\n\ndef run_synthesis(args, checkpoint_path, output_dir):\n\tmetadata_filename = os.path.join(args.input_dir, 
'train.txt')\n\tprint(hparams_debug_string())\n\tsynth = Synthesizer()\n\tsynth.load(checkpoint_path, gta=args.GTA)\n\twith open(metadata_filename, encoding='utf-8') as f:\n\t\tmetadata = [line.strip().split('|') for line in f]\n\t\tframe_shift_ms = hparams.hop_size / hparams.sample_rate\n\t\thours = sum([int(x[4]) for x in metadata]) * frame_shift_ms / (3600)\n\t\tprint('Loaded metadata for {} examples ({:.2f} hours)'.format(len(metadata), hours))\n\n\tif args.GTA==True:\n\t\tsynth_dir = os.path.join(output_dir, 'gta')\n\telse:\n\t\tsynth_dir = os.path.join(output_dir, 'natural')\n\n\t#Create output path if it doesn't exist\n\tos.makedirs(synth_dir, exist_ok=True)\n\n\tprint('starting synthesis')\n\tmel_dir = os.path.join(args.input_dir, 'mels')\n\twav_dir = os.path.join(args.input_dir, 'audio')\n\twith open(os.path.join(synth_dir, 'map.txt'), 'w') as file:\n\t\tfor i, meta in enumerate(tqdm(metadata)):\n\t\t\ttext = meta[5]\n\t\t\tmel_filename = os.path.join(mel_dir, meta[1])\n\t\t\twav_filename = os.path.join(wav_dir, meta[0])\n\t\t\tmel_output_filename = synth.synthesize(text, None, i+1, synth_dir, None, mel_filename)\n\n\t\t\tfile.write('{}|{}|{}|{}\\n'.format(text, mel_filename, mel_output_filename, wav_filename))\n\tprint('synthesized mel spectrograms at {}'.format(synth_dir))\n\ndef tacotron_synthesize(args):\n\thparams.parse(args.hparams)\n\tos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\toutput_dir = 'tacotron_' + args.output_dir\n\n\ttry:\n\t\tcheckpoint_path = tf.train.get_checkpoint_state(args.checkpoint).model_checkpoint_path\n\t\tprint('loaded model at {}'.format(checkpoint_path))\n\texcept:\n\t\traise AssertionError('Cannot restore checkpoint: {}, did you train a model?'.format(args.checkpoint))\n\n\tif args.mode == 'eval':\n\t\trun_eval(args, checkpoint_path, output_dir)\n\telse:\n\t\trun_synthesis(args, checkpoint_path, output_dir)\n" }, { "alpha_fraction": 0.5688329935073853, "alphanum_fraction": 0.5946890115737915, "avg_line_length": 
30.755556106567383, "blob_id": "fe89329d8790d4f76d5d02ca2312e10481622d6b", "content_id": "c5e1c7c85f260eb41e5477bdfb8e5f867ee829ae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1431, "license_type": "permissive", "max_line_length": 192, "num_lines": 45, "path": "/tacotron/utils/util.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\nfrom hparams import hparams as hp\n\ndef shape_list(x):\n \"\"\"Return list of dims, statically where possible.\"\"\"\n x = tf.convert_to_tensor(x)\n\n # If unknown rank, return dynamic shape\n if x.get_shape().dims is None:\n return tf.shape(x)\n\n static = x.get_shape().as_list()\n shape = tf.shape(x)\n\n ret = []\n for i in range(len(static)):\n dim = static[i]\n if dim is None:\n dim = shape[i]\n ret.append(dim)\n return ret\n\ndef vae_weight(global_step):\n warm_up_step = hp.vae_warming_up\n w1 = tf.cond(\n global_step < warm_up_step,\n lambda: tf.cond(\n global_step % 100 < 1,\n lambda: tf.convert_to_tensor(hp.init_vae_weights) + tf.cast(global_step / 100 * hp.vae_weight_multiler, tf.float32),\n lambda: tf.cast(tf.convert_to_tensor(0), tf.float32)\n ),\n lambda: tf.cast(tf.convert_to_tensor(0), tf.float32)\n )\n \n w2 = tf.cond(\n global_step > warm_up_step,\n lambda: tf.cond(\n global_step % 400 < 1,\n lambda: tf.convert_to_tensor(hp.init_vae_weights) + tf.cast((global_step - warm_up_step) / 400 * hp.vae_weight_multiler + warm_up_step / 100 * hp.vae_weight_multiler, tf.float32),\n lambda: tf.cast(tf.convert_to_tensor(0), tf.float32)\n ),\n lambda: tf.cast(tf.convert_to_tensor(0), tf.float32)\n ) \n return tf.maximum(w1, w2)\n\n\n" }, { "alpha_fraction": 0.7010720372200012, "alphanum_fraction": 0.727748692035675, "avg_line_length": 34.495574951171875, "blob_id": "29570ae1c320a6c9c38c10dabba7ef27d7467783", "content_id": "e8eb0f764924092ca89c714dc05964318ca1d42e", "detected_licenses": [ "MIT" 
], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4207, "license_type": "permissive", "max_line_length": 435, "num_lines": 113, "path": "/README.md", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/gilmoore/VAE_Tacotron2/blob/master/VAE_Tacotron2.ipynb)\n\n\n# VAE Tacotron-2:\nUnofficial Implementation of [Learning latent representations for style control and transfer in end-to-end speech synthesis](https://arxiv.org/pdf/1812.04342.pdf)\n\n\n# Repository Structure:\n\tTacotron-2\n\t├── datasets\n\t├── LJSpeech-1.1\t(0)\n\t│   └── wavs\n\t├── logs-Tacotron\t(2)\n\t│   ├── mel-spectrograms\n\t│   ├── plots\n\t│   ├── pretrained\n\t│   └── wavs\n\t├── papers\n\t├── tacotron\n\t│   ├── models\n\t│   └── utils\n\t├── tacotron_output\t(3)\n\t│   ├── eval\n\t│   ├── gta\n\t│   ├── logs-eval\n\t│   │   ├── plots\n\t│   │   └── wavs\n\t│   └── natural\n\t└── training_data\t(1)\n\t    ├── audio\n\t    └── mels\n\n\n\n\n\nThe previous tree shows what the current state of the repository.\n\n- Step **(0)**: Get your dataset, here I have set the examples of **Ljspeech**.\n- Step **(1)**: Preprocess your data. This will give you the **training_data** folder.\n- Step **(2)**: Train your Tacotron model. Yields the **logs-Tacotron** folder.\n- Step **(3)**: Synthesize/Evaluate the Tacotron model. 
Gives the **tacotron_output** folder.\n\n\n# Requirements\nfirst, you need to have python 3.5 installed along with [Tensorflow v1.6](https://www.tensorflow.org/install/).\n\nnext you can install the requirements :\n\n> pip install -r requirements.txt\n\nelse:\n\n> pip3 install -r requirements.txt\n\n# Dataset:\nThis repo tested on the [ljspeech dataset](https://keithito.com/LJ-Speech-Dataset/), which has almost 24 hours of labeled single actress voice recording.\n\n# Preprocessing\nBefore running the following steps, please make sure you are inside **Tacotron-2 folder**\n\n> cd Tacotron-2\n\nPreprocessing can then be started using:\n\n> python preprocess.py\n\nor\n\n> python3 preprocess.py\n\ndataset can be chosen using the **--dataset** argument. Default is **Ljspeech**.\n\n# Training:\nFeature prediction model can be **trained** using:\n\n> python train.py --model='Tacotron'\n\nor\n\n> python3 train.py --model='Tacotron'\n\n# Synthesis\nThere are **three types** of mel spectrograms synthesis for the Spectrogram prediction network (Tacotron):\n\n- **Evaluation** (synthesis on custom sentences). 
This is what we'll usually use after having a full end to end model.\n\n> python synthesize.py --model='Tacotron' --mode='eval' --reference_audio='ref_1.wav'\n\nor\n\n> python3 synthesize.py --model='Tacotron' --mode='eval' --reference_audio='ref_1.wav'\n\n**Note:**\n- This implementation not completly tested for all scenarios but training and synthesis with reference audio working.\n- Though it only tested on synthesize without GTA and with `eval` mode.\n- After training 250k step with 32 batch size on LJSpeech, KL error settled down near to zero (around 0.001) still not get good style transfer and control, may be because this model trained on LJSpeech which is not quite expressive datasets and only have 24 hrs of data, it might be produce good result on expressive dataset like `Blizzard 2013 voice dataset` though author of the paper used 105 hrs of Blizzard Challenge 2013 dataset.\n- In my testing, I havn't get good results so far on style transfer side may be some more tweaking required, this implementation easily integrated with `wavenet` as well as `WaveRNN`.\n- Feel free to suggest some changes or even better raise PR.\n\n# Pretrained model and Samples:\nTODO\nClaimed Samples from research paper : http://home.ustc.edu.cn/~zyj008/ICASSP2019\n\n# References and Resources:\n- [Tensorflow original tacotron implementation](https://github.com/keithito/tacotron)\n- [Original tacotron paper](https://arxiv.org/pdf/1703.10135.pdf)\n- [Attention-Based Models for Speech Recognition](https://arxiv.org/pdf/1506.07503.pdf)\n- [Natural TTS synthesis by conditioning Wavenet on MEL spectogram predictions](https://arxiv.org/pdf/1712.05884.pdf)\n- [r9y9/Tacotron-2](https://github.com/r9y9/Tacotron-2)\n- [yanggeng1995/vae_tacotron](https://github.com/yanggeng1995/vae_tacotron)\n\n**Work in progress**\n" }, { "alpha_fraction": 0.7316272854804993, "alphanum_fraction": 0.7316272854804993, "avg_line_length": 43.85293960571289, "blob_id": "467b5b49215f768bbce5e8e970b7804e11d7b6d5", 
"content_id": "62ed5f1a8ae0cd95611c4e476a73e6f7a394474f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1524, "license_type": "permissive", "max_line_length": 136, "num_lines": 34, "path": "/synthesize.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import argparse\nfrom tacotron.synthesize import tacotron_synthesize\n\n\ndef main():\n\taccepted_modes = ['eval', 'synthesis']\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--checkpoint', default='logs-Tacotron/pretrained/', help='Path to model checkpoint')\n\tparser.add_argument('--hparams', default='',\n\t\thelp='Hyperparameter overrides as a comma-separated list of name=value pairs')\n\tparser.add_argument('--reference_audio', required=True)\n\tparser.add_argument('--model', default='Tacotron')\n\tparser.add_argument('--input_dir', default='training_data/', help='folder to contain inputs sentences/targets')\n\tparser.add_argument('--output_dir', default='output/', help='folder to contain synthesized mel spectrograms')\n\tparser.add_argument('--mode', default='synthesis', help='mode of run: can be one of {}'.format(accepted_modes))\n\tparser.add_argument('--GTA', default=False, help='Ground truth aligned synthesis, defaults to True, only considered in synthesis mode')\n\targs = parser.parse_args()\n\t\n\taccepted_models = ['Tacotron', 'Wavenet']\n\n\tif args.model not in accepted_models:\n\t\traise ValueError('please enter a valid model to train: {}'.format(accepted_models))\n\n\tif args.mode not in accepted_modes:\n\t\traise ValueError('accepted modes are: {}, found {}'.format(accepted_modes, args.mode))\n\n\tif args.model == 'Tacotron':\n\t\ttacotron_synthesize(args)\n\telif args.model == 'Wavenet':\n\t\traise NotImplementedError('Wavenet is still a work in progress, thank you for your patience!')\n\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.6921824216842651, "alphanum_fraction": 
0.6959408521652222, "avg_line_length": 38.32019805908203, "blob_id": "d6be61aad2a8c39cea8aa3ef32b5a17710d71923", "content_id": "5e861a7c500851c81c6685fa92e1370c5c19562a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7982, "license_type": "permissive", "max_line_length": 141, "num_lines": 203, "path": "/tacotron/train.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import numpy as np \nfrom datetime import datetime\nimport os\nimport subprocess\nimport time\nimport tensorflow as tf \nimport traceback\nimport argparse\n\nfrom tacotron.feeder import Feeder\nfrom hparams import hparams, hparams_debug_string\nfrom tacotron.models import create_model\nfrom tacotron.utils.text import sequence_to_text\nfrom tacotron.utils import infolog, plot, ValueWindow\nfrom datasets import audio\nlog = infolog.log\n\n\ndef add_stats(model):\n\twith tf.variable_scope('stats') as scope:\n\t\ttf.summary.histogram('mel_outputs', model.mel_outputs)\n\t\ttf.summary.histogram('mel_targets', model.mel_targets)\n\t\ttf.summary.scalar('before_loss', model.before_loss)\n\t\ttf.summary.scalar('after_loss', model.after_loss)\n\t\tif hparams.predict_linear:\n\t\t\ttf.summary.scalar('linear loss', model.linear_loss)\n\t\ttf.summary.scalar('regularization_loss', model.regularization_loss)\n\t\ttf.summary.scalar('stop_token_loss', model.stop_token_loss)\n\t\ttf.summary.scalar('loss', model.loss)\n\t\ttf.summary.scalar('learning_rate', model.learning_rate) #control learning rate decay speed\n\t\t# gradient_norms = [tf.norm(grad) for grad in model.gradients]\n\t\t# tf.summary.histogram('gradient_norm', gradient_norms)\n\t\t# tf.summary.scalar('max_gradient_norm', tf.reduce_max(gradient_norms)) #visualize gradients (in case of explosion)\n\t\tif hparams.use_vae:\n\t\t\ttf.summary.scalar('ki_loss', model.ki_loss)\n\t\treturn tf.summary.merge_all()\n\ndef time_string():\n\treturn 
datetime.now().strftime('%Y-%m-%d %H:%M')\n\ndef train(log_dir, args):\n\tsave_dir = os.path.join(log_dir, 'pretrained/')\n\tcheckpoint_path = os.path.join(save_dir, 'model.ckpt')\n\tinput_path = os.path.join(args.base_dir, args.input)\n\tplot_dir = os.path.join(log_dir, 'plots')\n\twav_dir = os.path.join(log_dir, 'wavs')\n\tmel_dir = os.path.join(log_dir, 'mel-spectrograms')\n\tos.makedirs(plot_dir, exist_ok=True)\n\tos.makedirs(wav_dir, exist_ok=True)\n\tos.makedirs(mel_dir, exist_ok=True)\n\n\tif hparams.predict_linear:\n\t\tlinear_dir = os.path.join(log_dir, 'linear-spectrograms')\n\t\tos.makedirs(linear_dir, exist_ok=True)\n\n\tlog('Checkpoint path: {}'.format(checkpoint_path))\n\tlog('Loading training data from: {}'.format(input_path))\n\tlog('Using model: {}'.format(args.model))\n\tlog(hparams_debug_string())\n\n\t#Set up data feeder\n\tcoord = tf.train.Coordinator()\n\twith tf.variable_scope('datafeeder') as scope:\n\t\tfeeder = Feeder(coord, input_path, hparams)\n\n\t#Set up model:\n\tstep_count = 0\n\ttry:\n\t\t#simple text file to keep count of global step\n\t\twith open(os.path.join(log_dir, 'step_counter.txt'), 'r') as file:\n\t\t\tstep_count = int(file.read())\n\texcept:\n\t\tprint('no step_counter file found, assuming there is no saved checkpoint')\n\n\tglobal_step = tf.Variable(step_count, name='global_step', trainable=False)\n\twith tf.variable_scope('model') as scope:\n\t\tmodel = create_model(args.model, hparams)\n\t\tif hparams.predict_linear:\n\t\t\tmodel.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.mel_lengths, feeder.token_targets, feeder.linear_targets)\n\t\telse:\n\t\t\tmodel.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.mel_lengths, feeder.token_targets)\n\t\tmodel.add_loss(global_step)\n\t\tmodel.add_optimizer(global_step)\n\t\tstats = add_stats(model)\n\n\t#Book keeping\n\tstep = 0\n\ttime_window = ValueWindow(100)\n\tloss_window = ValueWindow(100)\n\tsaver = 
tf.train.Saver(max_to_keep=5)\n\n\t#Memory allocation on the GPU as needed\n\tconfig = tf.ConfigProto()\n\tconfig.gpu_options.allow_growth = True\n\n\t#Train\n\twith tf.Session(config=config) as sess:\n\t\ttry:\n\t\t\tsummary_writer = tf.summary.FileWriter(log_dir, sess.graph)\n\t\t\tsess.run(tf.global_variables_initializer())\n\n\t\t\t#saved model restoring\n\t\t\tif args.restore:\n\t\t\t\t#Restore saved model if the user requested it, Default = True.\n\t\t\t\ttry:\n\t\t\t\t\tcheckpoint_state = tf.train.get_checkpoint_state(save_dir)\n\t\t\t\texcept tf.errors.OutOfRangeError as e:\n\t\t\t\t\tlog('Cannot restore checkpoint: {}'.format(e))\n\n\t\t\tif (checkpoint_state and checkpoint_state.model_checkpoint_path):\n\t\t\t\tlog('Loading checkpoint {}'.format(checkpoint_state.model_checkpoint_path))\n\t\t\t\tsaver.restore(sess, checkpoint_state.model_checkpoint_path)\n\n\t\t\telse:\n\t\t\t\tif not args.restore:\n\t\t\t\t\tlog('Starting new training!')\n\t\t\t\telse:\n\t\t\t\t\tlog('No model to load at {}'.format(save_dir))\n\n\t\t\t#initializing feeder\n\t\t\tfeeder.start_in_session(sess)\n\n\t\t\t#Training loop\n\t\t\twhile not coord.should_stop():\n\t\t\t\tstart_time = time.time()\n\t\t\t\tstep, loss, opt = sess.run([global_step, model.loss, model.optimize])\n\t\t\t\ttime_window.append(time.time() - start_time)\n\t\t\t\tloss_window.append(loss)\n\t\t\t\tmessage = 'Step {:7d} [{:.3f} sec/step, loss={:.5f}, avg_loss={:.5f}]'.format(\n\t\t\t\t\tstep, time_window.average, loss, loss_window.average)\n\t\t\t\tlog(message, end='\\r')\n\n\t\t\t\tif loss > 100 or np.isnan(loss):\n\t\t\t\t\tlog('Loss exploded to {:.5f} at step {}'.format(loss, step))\n\t\t\t\t\traise Exception('Loss exploded')\n\n\t\t\t\tif step % args.summary_interval == 0:\n\t\t\t\t\tlog('\\nWriting summary at step: {}'.format(step))\n\t\t\t\t\tsummary_writer.add_summary(sess.run(stats), step)\n\t\t\t\t\n\t\t\t\tif step % args.checkpoint_interval == 0:\n\t\t\t\t\twith 
open(os.path.join(log_dir,'step_counter.txt'), 'w') as file:\n\t\t\t\t\t\tfile.write(str(step))\n\t\t\t\t\tlog('Saving checkpoint to: {}-{}'.format(checkpoint_path, step))\n\t\t\t\t\tsaver.save(sess, checkpoint_path, global_step=step)\n\t\t\t\t\t\n\t\t\t\t\tlog('Saving alignment, Mel-Spectrograms and griffin-lim inverted waveform..')\n\t\t\t\t\tif hparams.predict_linear:\n\t\t\t\t\t\tinput_seq, mel_prediction, linear_prediction, alignment, target = sess.run([\n\t\t\t\t\t\t\tmodel.inputs[0],\n\t\t\t\t\t\t\tmodel.mel_outputs[0],\n\t\t\t\t\t\t\tmodel.linear_outputs[0],\n\t\t\t\t\t\t\tmodel.alignments[0],\n\t\t\t\t\t\t\tmodel.mel_targets[0],\n\t\t\t\t\t\t\t])\n\n\t\t\t\t\t\t#save predicted linear spectrogram to disk (debug)\n\t\t\t\t\t\tlinear_filename = 'linear-prediction-step-{}.npy'.format(step)\n\t\t\t\t\t\tnp.save(os.path.join(linear_dir, linear_filename), linear_prediction.T, allow_pickle=False)\n\n\t\t\t\t\t\t#save griffin lim inverted wav for debug (linear -> wav)\n\t\t\t\t\t\twav = audio.inv_linear_spectrogram(linear_prediction.T)\n\t\t\t\t\t\taudio.save_wav(wav, os.path.join(wav_dir, 'step-{}-waveform-linear.wav'.format(step)))\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tinput_seq, mel_prediction, alignment, target = sess.run([model.inputs[0],\n\t\t\t\t\t\t\tmodel.mel_outputs[0],\n\t\t\t\t\t\t\tmodel.alignments[0],\n\t\t\t\t\t\t\tmodel.mel_targets[0],\n\t\t\t\t\t\t\t])\n\n\t\t\t\t\t#save predicted mel spectrogram to disk (debug)\n\t\t\t\t\tmel_filename = 'mel-prediction-step-{}.npy'.format(step)\n\t\t\t\t\tnp.save(os.path.join(mel_dir, mel_filename), mel_prediction.T, allow_pickle=False)\n\n\t\t\t\t\t#save griffin lim inverted wav for debug (mel -> wav)\n\t\t\t\t\twav = audio.inv_mel_spectrogram(mel_prediction.T)\n\t\t\t\t\taudio.save_wav(wav, os.path.join(wav_dir, 'step-{}-waveform-mel.wav'.format(step)))\n\n\t\t\t\t\t#save alignment plot to disk (control purposes)\n\t\t\t\t\tplot.plot_alignment(alignment, os.path.join(plot_dir, 
'step-{}-align.png'.format(step)),\n\t\t\t\t\t\tinfo='{}, {}, step={}, loss={:.5f}'.format(args.model, time_string(), step, loss))\n\t\t\t\t\t#save real mel-spectrogram plot to disk (control purposes)\n\t\t\t\t\tplot.plot_spectrogram(target, os.path.join(plot_dir, 'step-{}-real-mel-spectrogram.png'.format(step)),\n\t\t\t\t\t\tinfo='{}, {}, step={}, Real'.format(args.model, time_string(), step, loss))\n\t\t\t\t\t#save predicted mel-spectrogram plot to disk (control purposes)\n\t\t\t\t\tplot.plot_spectrogram(mel_prediction, os.path.join(plot_dir, 'step-{}-pred-mel-spectrogram.png'.format(step)),\n\t\t\t\t\t\tinfo='{}, {}, step={}, loss={:.5}'.format(args.model, time_string(), step, loss))\n\t\t\t\t\tlog('Input at step {}: {}'.format(step, sequence_to_text(input_seq)))\n\n\t\texcept Exception as e:\n\t\t\tlog('Exiting due to exception: {}'.format(e), slack=True)\n\t\t\ttraceback.print_exc()\n\t\t\tcoord.request_stop(e)\n\ndef tacotron_train(args):\n\thparams.parse(args.hparams)\n\tos.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)\n\trun_name = args.name or args.model\n\tlog_dir = os.path.join(args.base_dir, 'logs-{}'.format(run_name))\n\tos.makedirs(log_dir, exist_ok=True)\n\tinfolog.init(os.path.join(log_dir, 'Terminal_train_log'), run_name)\n\ttrain(log_dir, args)\n" }, { "alpha_fraction": 0.5352370738983154, "alphanum_fraction": 0.5421929359436035, "avg_line_length": 40.23396301269531, "blob_id": "411e917f38dc1f155fcc0b5d11ee96f66cad4f3f", "content_id": "5de4fbbd05ce0533a5e6e9c1ba2364c1bcf6cb12", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10926, "license_type": "permissive", "max_line_length": 115, "num_lines": 265, "path": "/tacotron/models/zoneout_LSTM.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops.rnn_cell import RNNCell\n\n\n# Thanks to 'initializers_enhanced.py' of Project RNN 
Enhancement:\n# https://github.com/nicolas-ivanov/Seq2Seq_Upgrade_TensorFlow/blob/master/rnn_enhancement/initializers_enhanced.py\ndef orthogonal_initializer(scale=1.0):\n def _initializer(shape, dtype=tf.float32):\n flat_shape = (shape[0], np.prod(shape[1:]))\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v\n q = q.reshape(shape)\n return tf.constant(scale * q[:shape[0], :shape[1]], dtype=tf.float32)\n return _initializer\n\n\nclass ZoneoutLSTMCell(RNNCell):\n \"\"\"Zoneout Regularization for LSTM-RNN.\n \"\"\"\n\n def __init__(self, num_units, is_training, input_size=None,\n use_peepholes=False, cell_clip=None,\n #initializer=orthogonal_initializer(),\n initializer=tf.contrib.layers.xavier_initializer(),\n num_proj=None, proj_clip=None, ext_proj=None,\n forget_bias=1.0,\n state_is_tuple=True,\n activation=tf.tanh,\n zoneout_factor_cell=0.0,\n zoneout_factor_output=0.0,\n reuse=None):\n \"\"\"Initialize the parameters for an LSTM cell.\n Args:\n num_units: int, The number of units in the LSTM cell.\n is_training: bool, set True when training.\n use_peepholes: bool, set True to enable diagonal/peephole\n connections.\n cell_clip: (optional) A float value, if provided the cell state\n is clipped by this value prior to the cell output activation.\n initializer: (optional) The initializer to use for the weight\n matrices.\n num_proj: (optional) int, The output dimensionality for\n the projection matrices. If None, no projection is performed.\n forget_bias: Biases of the forget gate are initialized by default\n to 1 in order to reduce the scale of forgetting at the beginning of\n the training.\n activation: Activation function of the inner states.\n \"\"\"\n if not state_is_tuple:\n tf.logging.warn(\n \"%s: Using a concatenated state is slower and will soon be \"\n \"deprecated. 
Use state_is_tuple=True.\", self)\n if input_size is not None:\n tf.logging.warn(\n \"%s: The input_size parameter is deprecated.\", self)\n\n if not (zoneout_factor_cell >= 0.0 and zoneout_factor_cell <= 1.0):\n raise ValueError(\n \"Parameter zoneout_factor_cell must be in [0 1]\")\n\n if not (zoneout_factor_output >= 0.0 and zoneout_factor_output <= 1.0):\n raise ValueError(\n \"Parameter zoneout_factor_cell must be in [0 1]\")\n\n self.num_units = num_units\n self.is_training = is_training\n self.use_peepholes = use_peepholes\n self.cell_clip = cell_clip\n self.num_proj = num_proj\n self.proj_clip = proj_clip\n self.initializer = initializer\n self.forget_bias = forget_bias\n self.state_is_tuple = state_is_tuple\n self.activation = activation\n self.zoneout_factor_cell = zoneout_factor_cell\n self.zoneout_factor_output = zoneout_factor_output\n\n if num_proj:\n self._state_size = (\n tf.nn.rnn_cell.LSTMStateTuple(num_units, num_proj)\n if state_is_tuple else num_units + num_proj)\n self._output_size = num_proj\n else:\n self._state_size = (\n tf.nn.rnn_cell.LSTMStateTuple(num_units, num_units)\n if state_is_tuple else 2 * num_units)\n self._output_size = num_units\n\n self._ext_proj = ext_proj\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n if self._ext_proj is None:\n return self._output_size\n return self._ext_proj\n\n def __call__(self, inputs, state, scope=None):\n\n num_proj = self.num_units if self.num_proj is None else self.num_proj\n\n if self.state_is_tuple:\n (c_prev, h_prev) = state\n else:\n c_prev = tf.slice(state, [0, 0], [-1, self.num_units])\n h_prev = tf.slice(state, [0, self.num_units], [-1, num_proj])\n\n # c_prev : Tensor with the size of [batch_size, state_size]\n # h_prev : Tensor with the size of [batch_size, state_size/2]\n\n dtype = inputs.dtype\n input_size = inputs.get_shape().with_rank(2)[1]\n\n with tf.variable_scope(scope or type(self).__name__):\n if input_size.value is 
None:\n raise ValueError(\n \"Could not infer input size from inputs.get_shape()[-1]\")\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n lstm_matrix = _linear([inputs, h_prev], 4 * self.num_units, True)\n i, j, f, o = tf.split(lstm_matrix, 4, 1)\n\n # diagonal connections\n if self.use_peepholes:\n w_f_diag = tf.get_variable(\n \"W_F_diag\", shape=[self.num_units], dtype=dtype)\n w_i_diag = tf.get_variable(\n \"W_I_diag\", shape=[self.num_units], dtype=dtype)\n w_o_diag = tf.get_variable(\n \"W_O_diag\", shape=[self.num_units], dtype=dtype)\n\n with tf.name_scope(None, \"zoneout\"):\n # make binary mask tensor for cell\n keep_prob_cell = tf.convert_to_tensor(\n self.zoneout_factor_cell,\n dtype=c_prev.dtype\n )\n random_tensor_cell = keep_prob_cell\n random_tensor_cell += \\\n tf.random_uniform(tf.shape(c_prev),\n seed=None, dtype=c_prev.dtype)\n binary_mask_cell = tf.floor(random_tensor_cell)\n # 0 <-> 1 swap\n binary_mask_cell_complement = tf.ones(tf.shape(c_prev)) \\\n - binary_mask_cell\n\n # make binary mask tensor for output\n keep_prob_output = tf.convert_to_tensor(\n self.zoneout_factor_output,\n dtype=h_prev.dtype\n )\n random_tensor_output = keep_prob_output\n random_tensor_output += \\\n tf.random_uniform(tf.shape(h_prev),\n seed=None, dtype=h_prev.dtype)\n binary_mask_output = tf.floor(random_tensor_output)\n # 0 <-> 1 swap\n binary_mask_output_complement = tf.ones(tf.shape(h_prev)) \\\n - binary_mask_output\n\n # apply zoneout for cell\n if self.use_peepholes:\n c_temp = c_prev * \\\n tf.sigmoid(f + self.forget_bias +\n w_f_diag * c_prev) + \\\n tf.sigmoid(i + w_i_diag * c_prev) * \\\n self.activation(j)\n if self.is_training and self.zoneout_factor_cell > 0.0:\n c = binary_mask_cell * c_prev + \\\n binary_mask_cell_complement * c_temp\n else:\n c = c_temp\n else:\n c_temp = c_prev * tf.sigmoid(f + self.forget_bias) + \\\n tf.sigmoid(i) * self.activation(j)\n if self.is_training and self.zoneout_factor_cell > 0.0:\n c = 
binary_mask_cell * c_prev + \\\n binary_mask_cell_complement * c_temp\n else:\n c = c_temp\n\n if self.cell_clip is not None:\n c = tf.clip_by_value(c, -self.cell_clip, self.cell_clip)\n\n # apply zoneout for output\n if self.use_peepholes:\n h_temp = tf.sigmoid(o + w_o_diag * c) * self.activation(c)\n if self.is_training and self.zoneout_factor_output > 0.0:\n h = binary_mask_output * h_prev + \\\n binary_mask_output_complement * h_temp\n else:\n h = h_temp\n else:\n h_temp = tf.sigmoid(o) * self.activation(c)\n if self.is_training and self.zoneout_factor_output > 0.0:\n h = binary_mask_output * h_prev + \\\n binary_mask_output_complement * h_temp\n else:\n h = h_temp\n\n # apply prejection\n if self.num_proj is not None:\n w_proj = tf.get_variable(\n \"W_P\", [self.num_units, num_proj], dtype=dtype)\n\n h = tf.matmul(h, w_proj)\n if self.proj_clip is not None:\n h = tf.clip_by_value(h, -self.proj_clip, self.proj_clip)\n\n new_state = (tf.nn.rnn_cell.LSTMStateTuple(c, h)\n if self.state_is_tuple else tf.concat(1, [c, h]))\n\n return h, new_state\n\n\ndef _linear(args, output_size, bias, bias_start=0.0, scope=None):\n \"\"\"Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.\n Args:\n args: a 2D Tensor or a list of 2D, batch x n, Tensors.\n output_size: int, second dimension of W[i].\n bias: boolean, whether to add a bias term or not.\n bias_start: starting value to initialize the bias; 0 by default.\n scope: VariableScope for the created subgraph; defaults to \"Linear\".\n Returns:\n A 2D Tensor with shape [batch x output_size] equal to\n sum_i(args[i] * W[i]), where W[i]s are newly created matrices.\n Raises:\n ValueError: if some of the arguments has unspecified or wrong shape.\n \"\"\"\n if args is None or (isinstance(args, (list, tuple)) and not args):\n raise ValueError(\"`args` must be specified\")\n if not isinstance(args, (list, tuple)):\n args = [args]\n\n # Calculate the total size of arguments on dimension 1.\n total_arg_size = 0\n shapes = 
[a.get_shape().as_list() for a in args]\n for shape in shapes:\n if len(shape) != 2:\n raise ValueError(\n \"Linear is expecting 2D arguments: %s\" % str(shapes))\n if not shape[1]:\n raise ValueError(\n \"Linear expects shape[1] of arguments: %s\" % str(shapes))\n else:\n total_arg_size += shape[1]\n\n # Now the computation.\n with tf.variable_scope(scope or \"Linear\"):\n matrix = tf.get_variable(\"Matrix\", [total_arg_size, output_size])\n if len(args) == 1:\n res = tf.matmul(args[0], matrix)\n else:\n res = tf.matmul(tf.concat(args, 1), matrix)\n if not bias:\n return res\n bias_term = tf.get_variable(\n \"Bias\", [output_size],\n initializer=tf.constant_initializer(bias_start))\n return res + bias_term" }, { "alpha_fraction": 0.6937512159347534, "alphanum_fraction": 0.7004256248474121, "avg_line_length": 35.024391174316406, "blob_id": "4846da7ec37ca2ab6a7c1887d77b256b53fe6014", "content_id": "f62699df3f97104d5c9f3688f1f6c3f9d5254762", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10338, "license_type": "permissive", "max_line_length": 119, "num_lines": 287, "path": "/tacotron/models/modules.py", "repo_name": "gilmoore/VAE_Tacotron2", "src_encoding": "UTF-8", "text": "import tensorflow as tf \nfrom tacotron.models.zoneout_LSTM import ZoneoutLSTMCell\nfrom tensorflow.contrib.rnn import LSTMBlockCell\nfrom hparams import hparams\nfrom tensorflow.contrib.rnn import GRUCell\nfrom tacotron.utils.util import shape_list\n\ndef VAE(inputs, input_lengths, filters, kernel_size, strides, num_units, is_training, scope):\n with tf.variable_scope(scope):\n outputs = ReferenceEncoder(\n inputs=inputs,\n input_lengths=input_lengths,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n is_training=is_training)\n\n mu = tf.layers.dense(outputs, num_units, name='mean')\n log_var = tf.layers.dense(outputs, num_units, name='vari')\n std = tf.exp(log_var)\n z = tf.random_normal(shape=[tf.shape(mu)[0], 
num_units], mean=0.0, stddev=1.0)\n output = mu + z * std\n return output, mu, log_var\n\ndef ReferenceEncoder(inputs, input_lengths, filters, kernel_size, strides, is_training, scope='reference_encoder'):\n with tf.variable_scope(scope):\n reference_output = tf.expand_dims(inputs, axis=-1)\n for i, channel in enumerate(filters):\n reference_output = conv2d(reference_output, channel, kernel_size,\n strides, tf.nn.relu, is_training, 'conv2d_{}'.format(i))\n\n shape = shape_list(reference_output)\n reference_output = tf.reshape(reference_output, shape[:-2] + [shape[2] * shape[3]])\n\n #GRU\n encoder_outputs, encoder_state = tf.nn.dynamic_rnn(\n cell=GRUCell(128),\n inputs=reference_output,\n sequence_length=input_lengths,\n dtype=tf.float32\n )\n return encoder_state\n\n\ndef conv1d(inputs, kernel_size, channels, activation, is_training, scope):\n\tdrop_rate = hparams.tacotron_dropout_rate\n\n\twith tf.variable_scope(scope):\n\t\tconv1d_output = tf.layers.conv1d(\n\t\t\tinputs,\n\t\t\tfilters=channels,\n\t\t\tkernel_size=kernel_size,\n\t\t\tactivation=None,\n\t\t\tpadding='same')\n\t\tbatched = tf.layers.batch_normalization(conv1d_output, training=is_training)\n\t\tactivated = activation(batched)\n\t\treturn tf.layers.dropout(activated, rate=drop_rate, training=is_training,\n\t\t\t\t\t\t\t\tname='dropout_{}'.format(scope))\n\n\ndef conv2d(inputs, filters, kernel_size, strides, activation, is_training, scope):\n with tf.variable_scope(scope):\n conv2d_output = tf.layers.conv2d(\n inputs, filters=filters, kernel_size=kernel_size, strides=strides, padding='same')\n\n batch_norm_output = tf.layers.batch_normalization(\n conv2d_output, training=is_training, name='batch_norm')\n if activation is not None:\n conv2d_output = activation(batch_norm_output)\n\n return conv2d_output\n\nclass EncoderConvolutions:\n\t\"\"\"Encoder convolutional layers used to find local dependencies in inputs characters.\n\t\"\"\"\n\tdef __init__(self, is_training, kernel_size=(5, ), channels=512, 
activation=tf.nn.relu, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: Boolean, determines if the model is training or in inference to control dropout\n\t\t\tkernel_size: tuple or integer, The size of convolution kernels\n\t\t\tchannels: integer, number of convolutional kernels\n\t\t\tactivation: callable, postnet activation function for each convolutional layer\n\t\t\tscope: Postnet scope.\n\t\t\"\"\"\n\t\tsuper(EncoderConvolutions, self).__init__()\n\t\tself.is_training = is_training\n\n\t\tself.kernel_size = kernel_size\n\t\tself.channels = channels\n\t\tself.activation = activation\n\t\tself.scope = 'enc_conv_layers' if scope is None else scope\n\n\tdef __call__(self, inputs):\n\t\twith tf.variable_scope(self.scope):\n\t\t\tx = inputs\n\t\t\tfor i in range(hparams.enc_conv_num_layers):\n\t\t\t\tx = conv1d(x, self.kernel_size, self.channels, self.activation,\n\t\t\t\t\tself.is_training, 'conv_layer_{}_'.format(i + 1)+self.scope)\n\t\treturn x\n\n\nclass EncoderRNN:\n\t\"\"\"Encoder bidirectional one layer LSTM\n\t\"\"\"\n\tdef __init__(self, is_training, size=256, zoneout=0.1, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: Boolean, determines if the model is training or in inference to control zoneout\n\t\t\tsize: integer, the number of LSTM units for each direction\n\t\t\tzoneout: the zoneout factor\n\t\t\tscope: EncoderRNN scope.\n\t\t\"\"\"\n\t\tsuper(EncoderRNN, self).__init__()\n\t\tself.is_training = is_training\n\n\t\tself.size = size\n\t\tself.zoneout = zoneout\n\t\tself.scope = 'encoder_LSTM' if scope is None else scope\n\n\t\t#Create LSTM Cell\n\t\tself._cell = ZoneoutLSTMCell(size, is_training,\n\t\t\tzoneout_factor_cell=zoneout,\n\t\t\tzoneout_factor_output=zoneout)\n\n\tdef __call__(self, inputs, input_lengths):\n\t\twith tf.variable_scope(self.scope):\n\t\t\toutputs, (fw_state, bw_state) = 
tf.nn.bidirectional_dynamic_rnn(\n\t\t\t\tself._cell,\n\t\t\t\tself._cell,\n\t\t\t\tinputs,\n\t\t\t\tsequence_length=input_lengths,\n\t\t\t\tdtype=tf.float32)\n\n\t\t\treturn tf.concat(outputs, axis=2) # Concat and return forward + backward outputs\n\n\nclass Prenet:\n\t\"\"\"Two fully connected layers used as an information bottleneck for the attention.\n\t\"\"\"\n\tdef __init__(self, is_training, layer_sizes=[256, 256], activation=tf.nn.relu, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: Boolean, determines if the model is in training or inference to control dropout\n\t\t\tlayer_sizes: list of integers, the length of the list represents the number of pre-net\n\t\t\t\tlayers and the list values represent the layers number of units\n\t\t\tactivation: callable, activation functions of the prenet layers.\n\t\t\tscope: Prenet scope.\n\t\t\"\"\"\n\t\tsuper(Prenet, self).__init__()\n\t\tself.drop_rate = hparams.tacotron_dropout_rate\n\n\t\tself.layer_sizes = layer_sizes\n\t\tself.is_training = is_training\n\t\tself.activation = activation\n\t\t\n\t\tself.scope = 'prenet' if scope is None else scope\n\n\tdef __call__(self, inputs):\n\t\tx = inputs\n\n\t\twith tf.variable_scope(self.scope):\n\t\t\tfor i, size in enumerate(self.layer_sizes):\n\t\t\t\tdense = tf.layers.dense(x, units=size, activation=self.activation,\n\t\t\t\t\tname='dense_{}'.format(i + 1))\n\t\t\t\t#The paper discussed introducing diversity in generation at inference time\n\t\t\t\t#by using a dropout of 0.5 only in prenet layers (in both training and inference).\n\t\t\t\tx = tf.layers.dropout(dense, rate=self.drop_rate, training=True,\n\t\t\t\t\tname='dropout_{}'.format(i + 1) + self.scope)\n\t\treturn x\n\n\nclass DecoderRNN:\n\t\"\"\"Decoder two uni directional LSTM Cells\n\t\"\"\"\n\tdef __init__(self, is_training, layers=2, size=1024, zoneout=0.1, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: Boolean, determines if the model is in training or inference to control 
zoneout\n\t\t\tlayers: integer, the number of LSTM layers in the decoder\n\t\t\tsize: integer, the number of LSTM units in each layer\n\t\t\tzoneout: the zoneout factor\n\t\t\"\"\"\n\t\tsuper(DecoderRNN, self).__init__()\n\t\tself.is_training = is_training\n\n\t\tself.layers = layers\n\t\tself.size = size\n\t\tself.zoneout = zoneout\n\t\tself.scope = 'decoder_rnn' if scope is None else scope\n\n\t\t#Create a set of LSTM layers\n\t\tself.rnn_layers = [ZoneoutLSTMCell(size, is_training, \n\t\t\tzoneout_factor_cell=zoneout,\n\t\t\tzoneout_factor_output=zoneout) for i in range(layers)]\n\n\t\tself._cell = tf.contrib.rnn.MultiRNNCell(self.rnn_layers, state_is_tuple=True)\n\n\tdef __call__(self, inputs, states):\n\t\twith tf.variable_scope(self.scope):\n\t\t\treturn self._cell(inputs, states)\n\n\nclass FrameProjection:\n\t\"\"\"Projection layer to r * num_mels dimensions or num_mels dimensions\n\t\"\"\"\n\tdef __init__(self, shape=80, activation=None, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tshape: integer, dimensionality of output space (r*n_mels for decoder or n_mels for postnet)\n\t\t\tactivation: callable, activation function\n\t\t\tscope: FrameProjection scope.\n\t\t\"\"\"\n\t\tsuper(FrameProjection, self).__init__()\n\n\t\tself.shape = shape\n\t\tself.activation = activation\n\t\t\n\t\tself.scope = 'Linear_projection' if scope is None else scope\n\n\tdef __call__(self, inputs):\n\t\twith tf.variable_scope(self.scope):\n\t\t\t#If activation==None, this returns a simple Linear projection\n\t\t\t#else the projection will be passed through an activation function\n\t\t\toutput = tf.layers.dense(inputs, units=self.shape, activation=self.activation,\n\t\t\t\tname='projection_{}'.format(self.scope))\n\n\t\t\treturn output\n\n\nclass StopProjection:\n\t\"\"\"Projection to a scalar and through a sigmoid activation\n\t\"\"\"\n\tdef __init__(self, is_training, shape=hparams.outputs_per_step, activation=tf.nn.sigmoid, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: 
Boolean, to control the use of sigmoid function as it is useless to use it\n\t\t\t\tduring training since it is integrate inside the sigmoid_crossentropy loss\n\t\t\tshape: integer, dimensionality of output space. Defaults to 1 (scalar)\n\t\t\tactivation: callable, activation function. only used during inference\n\t\t\tscope: StopProjection scope.\n\t\t\"\"\"\n\t\tsuper(StopProjection, self).__init__()\n\t\tself.is_training = is_training\n\t\t\n\t\tself.shape = shape\n\t\tself.activation = activation\n\t\tself.scope = 'stop_token_projection' if scope is None else scope\n\n\tdef __call__(self, inputs):\n\t\twith tf.variable_scope(self.scope):\n\t\t\toutput = tf.layers.dense(inputs, units=self.shape,\n\t\t\t\tactivation=None, name='projection_{}'.format(self.scope))\n\n\t\t\t#During training, don't use activation as it is integrated inside the sigmoid_cross_entropy loss function\n\t\t\tif self.is_training:\n\t\t\t\treturn output\n\t\t\treturn self.activation(output)\n\n\nclass Postnet:\n\t\"\"\"Postnet that takes final decoder output and fine tunes it (using vision on past and future frames)\n\t\"\"\"\n\tdef __init__(self, is_training, kernel_size=(5, ), channels=512, activation=tf.nn.tanh, scope=None):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tis_training: Boolean, determines if the model is training or in inference to control dropout\n\t\t\tkernel_size: tuple or integer, The size of convolution kernels\n\t\t\tchannels: integer, number of convolutional kernels\n\t\t\tactivation: callable, postnet activation function for each convolutional layer\n\t\t\tscope: Postnet scope.\n\t\t\"\"\"\n\t\tsuper(Postnet, self).__init__()\n\t\tself.is_training = is_training\n\n\t\tself.kernel_size = kernel_size\n\t\tself.channels = channels\n\t\tself.activation = activation\n\t\tself.scope = 'postnet_convolutions' if scope is None else scope\n\n\tdef __call__(self, inputs):\n\t\twith tf.variable_scope(self.scope):\n\t\t\tx = inputs\n\t\t\tfor i in range(hparams.postnet_num_layers - 
1):\n\t\t\t\tx = conv1d(x, self.kernel_size, self.channels, self.activation,\n\t\t\t\t\tself.is_training, 'conv_layer_{}_'.format(i + 1)+self.scope)\n\t\t\tx = conv1d(x, self.kernel_size, self.channels, lambda _: _, self.is_training, 'conv_layer_{}_'.format(5)+self.scope)\n\t\treturn x" } ]
13
regondi96/oneyearofnothing
https://github.com/regondi96/oneyearofnothing
f03485be3811639090071e0a06e787f676e0f715
e96f3c06ce9c6aa4d5cea491e822466944c27068
0a7cc52e86448cf1a34bcd2e26459f2e6a990127
refs/heads/master
2021-02-11T13:37:51.277046
2020-04-22T23:51:41
2020-04-22T23:51:41
244,495,956
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.33114755153656006, "alphanum_fraction": 0.3967213034629822, "avg_line_length": 15.88888931274414, "blob_id": "4e42b953d9cc87ac2441d51b6a7a4ec36dfdb2e1", "content_id": "b78b10bdcb6986c22de022220bf727614d6d7920", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 37, "num_lines": 18, "path": "/Chapter_two/SumaDeMatrices.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#SumaDeMatrices\n\nA = [[3, 0, -2], [2, -1, 4]]\nB = [[5, -3, 6], [1, 2, -5]]\n\nc = [[0, 0, 0], [0, 0, 0]]\n\n\n#for i in range(len(A)):\n #for j in range(len(B[1])):\n #c[i][j] += A[i][j] + B[i][j]\n\n\nfor i in range(len(A)):\n for j in range(len(A[0])):\n c[i][j] += A[i][j] + B[i][j]\n\nprint(c)\n\n" }, { "alpha_fraction": 0.597744345664978, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 18, "blob_id": "6892d6247572241c5d3efeaf3c393c6ecb9c84f5", "content_id": "beed81c06108bed0316f1edd43c96e3dde995312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 67, "num_lines": 14, "path": "/Chapter_one/Exercise 9.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 9\n\n#Input\n\nname = input(\"Introduce tu nombre: \\n\", )\n\nprint(\"Hola\", name, \"eres un pendeko\")\n\nday = input(\"Del 1 al 10, ¿cómo va tu día?: \\n\")\n\nprint(\"Tu día es una verdadera mierda :)\", day, \"es un mal número\")\n\nprint(name[0], name[1:3])\nprint(name[3:5])\n" }, { "alpha_fraction": 0.440559446811676, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 12.090909004211426, "blob_id": "e016dbeb3ce4d33806dd85f3d5d90feed0464d03", "content_id": "db981eb373e1be78a12014274d609d61c9d9d537", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, 
"license_type": "no_license", "max_line_length": 43, "num_lines": 11, "path": "/Chapter_three/Exercise 6.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 6\n\nmaximum = 0\n\nnumber = [4, 2, 7, 3, 5, 8, 11, 2, 6, 0, 4]\n\nfor i in number:\n if i > maximum:\n maximum = i\n\nprint(maximum)" }, { "alpha_fraction": 0.6734693646430969, "alphanum_fraction": 0.6938775777816772, "avg_line_length": 18.799999237060547, "blob_id": "4b528ddff85e5c24188814ab109b35d4b15979a4", "content_id": "852c78fb76afd48673a9c4a19dbbb25cfcf313b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 46, "num_lines": 5, "path": "/Chapter_three/Exercise 4.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 4 if __name__ = '__main__' statement\n\nfrom mainfunction import result\n\nprint(result * 2)" }, { "alpha_fraction": 0.4351145029067993, "alphanum_fraction": 0.49618321657180786, "avg_line_length": 19.076923370361328, "blob_id": "14d51de0b883fa00c4f88e8778dd40420dcbe88f", "content_id": "b06755ecc0b994154627438d578d1f738b340fe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 262, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/Chapter_three/Exercise 7.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 7\n\nl = [5, 8, 1, 3, 2, 1, 2, 11, 14]\nstill_swapping = True\n\nwhile still_swapping:\n still_swapping = False\n for i in range(len(l) - 1):\n if l[i] > l[i+1]:\n l[i], l[i+1] = l[i+1], l[i]\n still_swapping = True\n\nprint(l)\n\n" }, { "alpha_fraction": 0.6719492673873901, "alphanum_fraction": 0.6957210898399353, "avg_line_length": 20.066667556762695, "blob_id": "840a9be46f6ff7784a1f1051bdf3576493f35947", "content_id": "70aa0f2e8c38d1774965279441f1f2f1bc919182", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 631, "license_type": "no_license", "max_line_length": 51, "num_lines": 30, "path": "/Chapter_two/Exercise 8.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 8\n\nitems = ['apple', 'orange', 'banana']\nquantity = [5, 3, 2]\n\norders = zip(items, quantity)\n#print(list(orders))\n\n#We can also turn a zip() object into a dictionary\n\norders1 = zip(items, quantity)\n#print(dict(orders1))\n\norders2 = {\n 'apple': 5,\n 'orange': 3,\n 'banana': 2\n}\n\n#Obtener los valores del diccionario\nprint(orders2.values())\n#Obtener la lista de de los valores del diccionario\nprint(list(orders2.values()))\n#Obtener las keys del diccionario en forma de lista\nprint(list(orders2.keys()))\n#Convertir en tuples\nprint(list(orders2.items()))\n#Iterar los tuples\nfor i in list(orders2.items()):\n print(i)" }, { "alpha_fraction": 0.6232876777648926, "alphanum_fraction": 0.6671232581138611, "avg_line_length": 20.878787994384766, "blob_id": "53357f29459f4394bcb7f382a20e8c63e3b0c4c4", "content_id": "702f10355359b44fafb78683af6f95f5918c7dd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 730, "license_type": "no_license", "max_line_length": 63, "num_lines": 33, "path": "/Chapter_one/Exercise 13.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 13\n\n\"\"\"\nLoops\n\"\"\"\n\n#There are three key components to most loops:\n#1. The star of the loop\n#2. The end of the loop\n#3. The increment between numbers in the loop\n\n#Python distinguishes between two fundamentals kinds of loops:\n#1. While loops\n#2. 
For loops\n\n#While Loops:\n#In a whle loop, a designated segment of code repeats provided\n#a particular conditions is true, when te condition evaluates\n#to false, the while loops stop running.\n\ni = 1 \nwhile i <= 10:\n print(i)\n i += 1\n\n#Find the first number greater than 100 that is divisible by 17\nx = 100\nwhile x <= 1000:\n x += 1\n if x % 17 == 0:\n print( x, 'is the first number greater than 100 '\n 'that is divisible by 17.')\n break\n " }, { "alpha_fraction": 0.3447204828262329, "alphanum_fraction": 0.44720497727394104, "avg_line_length": 14.380952835083008, "blob_id": "f703cf67ecac33a3f2fc4ad18b38f48c3d0775e8", "content_id": "e48286576f5206a4bd7549759e5aaecb91f612d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 74, "num_lines": 21, "path": "/Chapter_one/Exercise 11.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 11\nage = 20\n\nprint(age < 13)\n\nprint(age >= 20 and age <= 21)\n\nprint(age != 21)\n\nprint(age == 21)\n\nprint(6 == 6.0)\n\nprint((age >= 20 and age < 30) or (age >= 30 and age < 40))\n\nprint(20 <= age < 30) or (30 <= age < 40)\n\nprint(\"------------------------------------------------- \\n \\t \\t \\t Str\")\n\n\nprint('a' < 'c')" }, { "alpha_fraction": 0.6532257795333862, "alphanum_fraction": 0.6693548560142517, "avg_line_length": 23.799999237060547, "blob_id": "4828af62875f16d12b79eed7c35e5409a9d66ed3", "content_id": "ae4678c5c408ce18e3426a887a41b797072879af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 248, "license_type": "no_license", "max_line_length": 64, "num_lines": 10, "path": "/Chapter_three/mainfunction.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "result = 0\n\nfor n in range(0, 11):\n result += n\n\n#Sirve para que en caso de que este script se corra, 
imprima\n#directamente el resulto, y si se llama desde otro lado, obtenga\n#el valor de la variable\nif __name__ == '__main__':\n print(result)\n" }, { "alpha_fraction": 0.5985401272773743, "alphanum_fraction": 0.6532846689224243, "avg_line_length": 14.277777671813965, "blob_id": "381bf33ab3594af261e3e8ad134ea4680a30909d", "content_id": "eb25d35419f331caed1fd04ca61d79b67d7adfdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 274, "license_type": "no_license", "max_line_length": 36, "num_lines": 18, "path": "/Chapter_two/Exercise 5.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 5\n\nshopping = ['bread', 'milk', 'eggs']\n\nprint(shopping)\n\nlist1 = [1, 2, 3]\nlist2 = [4, 5, 6]\nfinal_list = list1 + list2\nprint(final_list)\n\nlist3 = ['oi']\nprint(list3 * 3)\n\nshopping.append('apple')\nshopping.append('roses')\nshopping.insert(0, 'ham')\nprint(shopping)" }, { "alpha_fraction": 0.5234795808792114, "alphanum_fraction": 0.5458044409751892, "avg_line_length": 25, "blob_id": "92d1a8779b96036818ed0509cb532197f44bdf23", "content_id": "de440902fbab3afb40ddc91b50e427fa8efa837c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1303, "license_type": "no_license", "max_line_length": 69, "num_lines": 50, "path": "/Chapter_one/Exercise 17.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 17 \n\n\"\"\"\nFor loops\n\"\"\"\n#for Keyword often goes with the in keyword (for, in) the variable\n#var i is generic. 
The phrase , <<for i in>> means that Python\n#is going to check what comes next and look at its individual \n#components\n\nprint('-------------------------------------------')\nfor i in 'Portland':\n print(i)\n\nprint('-------------------------------------------')\nfor i in range(1, 10, 2):\n print(i)\n\nprint('-------------------------------------------')\nfor i in range(10, 1, -1):\n print(i)\n\nprint('-------------------------------------------')\nname = 'Alfonso'\nfor i in range(1):\n for i in name:\n print(i)\n\n\n\n#continue is another Python keyword designed for loops. When python\n#reaches continue kewyword, it stops the code and goes back to\n#the beginning of the loop. Is similar to break\n#In this example the continue keyword is eliminating numbers\n#that are not prime\n#Comienza con 10, evalúa la condición if, como el resto de 10 es\n#0, entonces aplica el método continue, y reinicia el loop sin contar\n#esa evaluación\nprint('-------------------------------------------')\n\nfor num in range(10,100):\n if num % 2 == 0:\n continue\n if num % 3 == 0:\n continue\n if num % 5 == 0:\n continue\n if num % 7 == 0:\n continue\n print(num)" }, { "alpha_fraction": 0.7288135886192322, "alphanum_fraction": 0.7348668575286865, "avg_line_length": 29.629629135131836, "blob_id": "b0d5be710fe480a7d90d688e20e743efc9fe175d", "content_id": "214c501c10e8d4352b2c571f6ca33465dc3ecf43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "no_license", "max_line_length": 93, "num_lines": 27, "path": "/Chapter_two/Exercise 9.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 9 \n\n#Los objetos tipo tuple son similares a la listas, sin embargo estas\n#no pueden se cambiadas. Son secuencias inmutables, cuyos valores\n#no pueden ser cambiados una vez son iniciados. 
Sirven para representar \n#eventos únicos o valores únicos cmos días de la semana\n\nweekly_tuple = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')\n\n#No se pueden añadir elementos al tuple, pero sí se puede concatenar\n\nprint(weekly_tuple + ('Diciembre', 'Enero'))\nprint(weekly_tuple)\n\n#Se pueden añadir variables de todo tipo\n\nt_mixed = ('apple', True, 3)\nprint(t_mixed)\n\nt_shopping = (('apple', 3), ('orange', 2), ('banana', 5))\nprint(t_shopping)\n\n#Al igual que los diccionarios, soportan tuples anidadas y pueden\n#declararse variables sin usar paréntesis\n\nt_nest = t_shopping + t_mixed\nprint(t_nest)" }, { "alpha_fraction": 0.5451011061668396, "alphanum_fraction": 0.5785381197929382, "avg_line_length": 22.381818771362305, "blob_id": "2fb66d9878d4f12776bb8e975705180ee0ea2a6e", "content_id": "dcb1dd32aa6b02705622276824062537429b90c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1294, "license_type": "no_license", "max_line_length": 68, "num_lines": 55, "path": "/Chapter_two/Matriz_elementos.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Arreglo\n\nm = [[11, 12, 13, 14, 15],\n [7, 8, 9, 0]]\n\n#Loop para imprimir cada elemento \n\nfor i in range(len(m)):\n for j in range(len(m[i])):\n print(m[i][j])\n\nprint(\"\\n------------------------------- \\n\")\n\n#Para contar cada elemento del arreglo es necesario\n#contar la cantidad de elementos que este contiene\n#En este ejemplo contiene 9 elementos.\n\n#Primero se cuentan los elementos que contiene el arreglo\n#(m = [1],[2]) y los ordena por índice (0, 1)\n\nfor i in range(len(m)):\n print(i)\n\nprint(\"\\n------------------------------- \\n\")\n\n#Aquí se cuentan los elementos de cada arreglo individual que\n#está en cada arreglo. 
La lista que está en el índice 0\n#contiene 5 elementos, la siguiente lista en el índice 1 contiene 4\n#Es por eso que en la primera iteración i = 5 y en la segunda, i = 4\n\nfor j in range(len(m[i])):\n print(j)\n\nprint(\"\\n------------------------------- \\n\")\n\n#Ahora se juntan los elemntos de cada loop para ubicar\n# valores correspondientes, de tal modo\n#que, todos los elementos del sean contados por su ubicación\n# en el arreglo, el output es:\n# (0 0)\n# (0 1)\n# (0 2)\n# (0 3)\n# (0 4)\n# (1 0)\n# (1 1)\n# (1 2)\n# (1 3)\n\nfor i in range(len(m)):\n for j in range(len(m[i])):\n print(i, j)\n\n\nprint(\"\\n------------------------------- \\n\")\n" }, { "alpha_fraction": 0.6561086177825928, "alphanum_fraction": 0.6787330508232117, "avg_line_length": 17.5, "blob_id": "666dd24825e8befb77c2453093e4dadda06bec11", "content_id": "e81aa4c0965aa02cedc14b1eef6e828d4dc2312d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 42, "num_lines": 12, "path": "/Chapter_three/Exercise 5.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 5 \n\"\"\" This script print the actual time \"\"\"\n\nimport datetime\nimport time\n\ncurrent_time = datetime\n\nfor i in range(0, 10):\n current_time = datetime.datetime.now()\n time.sleep(1) \n print(current_time)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5612244606018066, "avg_line_length": 8.380952835083008, "blob_id": "ab1bddb33fcefe57d3b98aaacf725ee301210dc9", "content_id": "ad11d9aa2205762d7c0a4dfdef92e58d9fd5ea1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 35, "num_lines": 21, "path": "/Chapter_one/Exercise 4.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "\"\"\"\nidk what im doing with my life \n\n\"\"\"\n\n#Exercise 5\nx 
= 5\ny = 2\n\nprint(x + x - y ** 2)\n\n#Assign in one line\n\nx, y = 8, 5\n\nprint (x // y)\n\n# Set the variable pi equal to 3.14\npi = 3.14\n\nprint(pi)" }, { "alpha_fraction": 0.4864864945411682, "alphanum_fraction": 0.5115830302238464, "avg_line_length": 17.428571701049805, "blob_id": "f0b28ade798f87b5b4847b22438a6ac1c424ff93", "content_id": "9d1fb4ef947fb068683db057076aac697ed460d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 518, "license_type": "no_license", "max_line_length": 38, "num_lines": 28, "path": "/Chapter_two/Exercise 7.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 7\n\nemployees = []\nemployee1 = {\n 'Name': 'John Mckee',\n 'Age': 38,\n 'Deparment': 'Sales'\n}\nemployee2 = {\n 'Name': 'Lisa Crawford',\n 'Age': 29,\n 'Deparment': 'Marketing'\n} \nemployee3 = {\n 'Name': 'Susan Patel',\n 'Age': 33,\n 'Deparment': 'HR'\n}\n\nemployees.append(employee1)\nemployees.append(employee2)\nemployees.append(employee3)\n\nfor i in range(len(employees)):\n print('----------------------')\n for j in employees[i]:\n print(j, ':', employees[i][j])\nprint('----------------------')\n\n\n" }, { "alpha_fraction": 0.6328358054161072, "alphanum_fraction": 0.674626886844635, "avg_line_length": 17.66666603088379, "blob_id": "fc49f2c31d51ece8e183e4f0e1d8dee06fc59525", "content_id": "29c7568b407784fb6c5bf3dd0a0e11aacd080402", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "no_license", "max_line_length": 56, "num_lines": 18, "path": "/Chapter_one/exercise 5.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "\"\"\"\nPythagorean distance between 3 points\n\"\"\"\n\n#Pythagorean distance is the distance between two points\n#The formula for get the distance is: \n#Distance = sqrt((x1-x0)^2)+((y1-y0)^2)\n#For 3 points is the same, but added the third 
variable\n\nimport math\n\nx = 2\ny = 3\nz = 4\n\ndistance = math.sqrt(x ** 2 + y ** 2 + z ** 2)\n\nprint(distance)" }, { "alpha_fraction": 0.41874998807907104, "alphanum_fraction": 0.46041667461395264, "avg_line_length": 24.3157901763916, "blob_id": "040d2822bf1ee7fb1c85c77381c781f222b84536", "content_id": "69676cea60d9f5b45c033b691a733dee99724c01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 480, "license_type": "no_license", "max_line_length": 73, "num_lines": 19, "path": "/Chapter_one/Exercise 14.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 14\n\n\"\"\"\nLeast Common Multiple for integers\n\"\"\"\n\nprint(\"--------------------------------------------\".center(60))\nprint(\"This program calculate the Least Common Multiple\".center(60))\nprint(\"--------------------------------------------\".center(60), \"\\n \\n\")\n\nx = int(input(\"Give a value for x: \"))\ny = int(input(\"Give a value for y: \"))\ni = 1\n\nwhile i <= 10000000:\n i += 1\n if (i % x == 0 and i % y == 0):\n print(\"LCM of\", x, \"and\", y, \"is:\", i)\n break" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.7395833134651184, "avg_line_length": 18.399999618530273, "blob_id": "7528eb2e1e809dc684df737c53ff345b200da7df", "content_id": "9304b6a81667358974be1a73046ebe268eecf33e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/Chapter_three/Exercise 2.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 2. 
Creando mi primer modulo\n\nfrom my_module import compute\n\nprint(compute([5, 7, 11]))" }, { "alpha_fraction": 0.5657708644866943, "alphanum_fraction": 0.608203649520874, "avg_line_length": 17.605262756347656, "blob_id": "a9f4ef8d6defb899d0dc150c182bd6d14cc16c85", "content_id": "87e4ecf686f7ccef0ad8fedc8e968842d9c54b3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 707, "license_type": "no_license", "max_line_length": 49, "num_lines": 38, "path": "/Chapter_two/Exercise 6.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 6 \n\nemployee = {\n 'name': 'Jack Nelson',\n 'age': 32,\n 'deparment': 'sales'\n}\n\nprint(employee)\n\nmovie = {\n 'Title': 'The Godfather',\n 'Director': 'Francis Ford Cappola',\n 'Year': 1972,\n 'Rating': 9.2\n}\n\n#Uptade dictinary value\n\nmovie['Rating'] = (movie['Rating'] + 9.3)/2\n\nprint(movie['Rating'])\n\n#It can be construct from a empty dictionary\n\nmovie2 = {}\nmovie2['Title'] = 'The Grandfather'\nmovie2['Director'] = 'Francis Ford Coppola'\nmovie2['Year'] = 1972\nmovie2['Rating'] = 9.2\nmovie2['Actors'] = ['Marlon Brando', 'Al Pacino', 'James Caan']\nmovie2['other_details'] = {\n 'Runtime': 175,\n 'Lenguage': 'English'\n}\nprint(movie2)\n#Acceder al diccionario dentro del diccionario\nprint(movie2['other_details']['Lenguage'])\n" }, { "alpha_fraction": 0.6607999801635742, "alphanum_fraction": 0.7088000178337097, "avg_line_length": 24.95833396911621, "blob_id": "c6d588a2885fe45f25325b2b3312d30c7988b446", "content_id": "dd994411129ad20d357045803dd9bb6c5b15f7a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 632, "license_type": "no_license", "max_line_length": 99, "num_lines": 24, "path": "/Chapter_two/Exercise 10.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 10\n\n#Además del diccionario, listas y tuples, existe otro tipo de 
\n#estructura de datos: sets(conjuntos). Este tipo de datos son una colección\n#de únicos y mutables objetos que soportan operaciones matemáticas\n#de la teoría de conjuntos.\n\n#Un conjunto o set, es na colección de objetos que son llamados, elementos o miembros del conjunto.\n#Por ejemlo un conjunto de números pares e impares.\nodds = set([9, 3, 1, 6, 6, 9, 1])\nevens = {2, 4, 6 ,8, 10}\n\nprint(evens)\nprint(odds)\nprint(type(evens))\n\ns3 = set([3,4,5,6,6,6,1,1,2])\nprint(s3)\n\ns4 = {'Apple', 'Orange', 'Banana'}\nprint(s4)\ns4.add('Pineapple')\n\nprint(s4)\n\n\n" }, { "alpha_fraction": 0.6462128758430481, "alphanum_fraction": 0.6644295454025269, "avg_line_length": 23.83333396911621, "blob_id": "2c3b28848892f887844720963349f770c5ce5db0", "content_id": "62613948603e9d5b078d1d55ada74577214da829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1043, "license_type": "no_license", "max_line_length": 63, "num_lines": 42, "path": "/Chapter_two/Exercise 1.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 1 Data structures\n\n#List is a type of container in Python that is used to store\n#multiple data at the same time. is like a array.\n#Each element in the list has its own distinct position\n#and index. \ntodo = ['Pick up laundry', 'buy Groceries', 'pay bills']\n\nshopping = [\"bread\", \"milk\", \"eggs\"]\n\n#Dictionary\n\nuser = {\n \"first_name\": \"Alfonso\",\n \"last_name\": \"Aguilar\",\n \"age\": 23,\n \"email\": \"[email protected]\"\n}\n\n#There are four types of data structures in Python\n#List, Tuple, Dictionary and Set\n\nmixed = [365, \"days\", True]\n\n\n#Most of the data we store in the real world is in the form\n#of a tabular data table, that is, rows and columns\n#instead of a one-dimensional flat list.\n#This kind of tables are calle matrices or dimensionals arrays.\n\n#Nested list, this is how we can do matrices. 
[row][column] \n\nm = [[1, 2, 3], \n [4, 5, 6]]\n\n#This print the value of row 2, column 2 which is 5.\n#Remember: 0 is the based index offset.\n#print(m[1][1])\n\nfor i in range(len(m)):\n for j in range(len(m[0])):\n print(m[i][j])\n" }, { "alpha_fraction": 0.5591985583305359, "alphanum_fraction": 0.5901639461517334, "avg_line_length": 25.14285659790039, "blob_id": "440b149484f132afaf386664225d0562b7f8198c", "content_id": "84ccc88861c793c5aabbbf242a427e3580429971", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 549, "license_type": "no_license", "max_line_length": 50, "num_lines": 21, "path": "/Chapter_two/Exercise 2.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 2 \n\nemployees = [\n['John Mckee', 38, 'Sales'], \n['Lisa Crawford', 29, 'Marketing'], \n['Susan Patel', 33, \"HR\"]\n]\n\nnumber_employee = input('Introduce a number: ')\nnumber_employee = int(number_employee) - 1\nif number_employee >= 0:\n print('Name:', employees[number_employee][0], \n '\\nAge:', employees[number_employee][1], \n '\\nDeparment:', employees[number_employee][2])\n\nelse:\n for employee in employees:\n print(\"\\nName:\", employee[0])\n print(\"Age:\", employee[1])\n print(\"Department:\", employee[2])\n print('-' * 20)\n" }, { "alpha_fraction": 0.6512096524238586, "alphanum_fraction": 0.7177419066429138, "avg_line_length": 26.55555534362793, "blob_id": "84e65fdeb2a36cdf14bdd0e4d550131b2943f6f1", "content_id": "de172c4b87b82fae585e9764f454b9459e21e47d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 501, "license_type": "no_license", "max_line_length": 87, "num_lines": 18, "path": "/Chapter_two/Consumo.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Consumo\nConsumo_básico = 0.831\nConsumo_intermedio = 1.006\nConsumo_excedente = 2.941\n\nConsumo_básico_dic = 0.853\nConsumo_intermedio_dic = 
1.031\t \nConsumo_excedente_dic = 3.018\n\nperbasic = ((Consumo_básico_dic - Consumo_básico)/Consumo_básico_dic) * 100\n\nperinter = ((Consumo_intermedio_dic - Consumo_intermedio)/Consumo_intermedio_dic) * 100\n\nperexc= ((Consumo_excedente_dic - Consumo_excedente)/Consumo_excedente_dic) * 100\n\nprint(perbasic, '%')\nprint(perinter, '%')\nprint(perexc, '%')\n" }, { "alpha_fraction": 0.5983379483222961, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 12.923076629638672, "blob_id": "07fecaee5abd07dfa2e92800cfc1561a4404f3c7", "content_id": "ee825fe8ba0c8951fe283b370ad3b102c1e612f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 55, "num_lines": 26, "path": "/Chapter_two/Exercise 11.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 11\n\n#Teoría de conjuntos\n\ns1 = {1, 2, 3, 4}\ns2 = {3, 4, 5, 6}\n\n#Operación unión\nprint(s1.union(s2))\n\n#Operación intersección\nprint(s1.intersection(s2))\n\n#Operación diferencia\nprint(s1.difference(s2))\n\n#Revisar si un conjunto contiene un subconjunto de otro\n\n#False\nprint(s1.issubset(s2))\n\ns3 = {1, 2, 3}\ns4 = {1, 2, 3, 4}\n\n#True\nprint(s3.issubset(s4))" }, { "alpha_fraction": 0.45576706528663635, "alphanum_fraction": 0.4916013479232788, "avg_line_length": 18.866666793823242, "blob_id": "4a15f8be9ad18199bd50d300b7f126d42390a14f", "content_id": "660053139380081e462c768823af6ffbb2661527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 893, "license_type": "no_license", "max_line_length": 58, "num_lines": 45, "path": "/Chapter_one/Exercise 12.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 12\n\n\"\"\"\nConditionals\n\"\"\"\n\nage = 20\n\n#Forma 1\n\nif age < 18:\n print('You aren\\'t old enough to vote.')\nelse:\n print('You can vote :)')\n\n#Forma 
2\nprint(\"-------------------------------------------------\")\n\nif age >= 18 and age < 21:\n print('At least you can vote.')\n print('Poker will have to wait.')\n\nprint(\"-------------------------------------------------\")\n\nif age >= 18:\n print('You can vote.')\n if age >= 21:\n print('Also, you can play poker')\n\nprint(\"-------------------------------------------------\")\n\nage2 = int(input('Introduce your age: ', ))\n\nif age2 <= 10:\n print('Listen, learn, and have fun.')\nelif age2 <= 19:\n print('Go fearlessly forward.')\nelif age2 <= 29:\n print('Seize the day.')\nelif age2 <= 39:\n print('Go for what you want.')\nelif age2 <= 59:\n print('Stay physically and healthy.')\nelse:\n print('Each day is magical.')" }, { "alpha_fraction": 0.7292418479919434, "alphanum_fraction": 0.7545126080513, "avg_line_length": 16.375, "blob_id": "2ab22d322720ec53d6af960225172efac7c1e603", "content_id": "481ee150c8d86e54ef378fc4a10c87ca76808a72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 39, "num_lines": 16, "path": "/Chapter_three/Exercise 3.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 3\n\nimport math\n\nprint(math.exp(2))\n\n#También se puede importar desde inicio\n\nfrom math import exp\n#tamebién se puede importar todo\n#from math import *\n#para renombrar los módulos del import:\n#from math import exp as exponencial\n#exponencial(2): 7.38...\n\nprint(exp(2))" }, { "alpha_fraction": 0.25459039211273193, "alphanum_fraction": 0.25953391194343567, "avg_line_length": 29.12765884399414, "blob_id": "123bcb7c248fcf29beb1780a31d9d79e4d678af0", "content_id": "f7d8f3c9f244e8a006eb8b5fdc05fda54feb797e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2858, "license_type": "no_license", "max_line_length": 84, "num_lines": 94, "path": "/Chapter_one/Exercise 
18.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 18\n\n\"\"\"\nBuilding a simple conversational bot\n\"\"\"\n\nprint(\"\"\"\n-----------------------------------------------------------------\n Hola, bienvenido al mundo acuático de Members Choice :)\n-----------------------------------------------------------------\n\n\n ¿Cuál es tu nombre?\n\"\"\")\n\nname = input()\n\nprint(\"\"\"\n\n-----------------------------------------------------------------\n Bienvenido\"\"\", name.capitalize(), \"\"\"\n-----------------------------------------------------------------\n\"\"\") \n\nprint(\"\"\"\n-----------------------------------------------------------------\n Imagino debes estar muy cansado,\n ¿qué te parece una bebida de fresa? \n\n\n\n\n 1. Sí 2. No \n-----------------------------------------------------------------\n\"\"\")\n\nbebida = int(input())\n\n\ncon_bebida = (\"\"\"\n-----------------------------------------------------------------\n Seguro que sí campeón, ya sabía, esta\n bebida va por la casa.\n----------------------------------------------------------------- \n\"\"\")\n\nsin_bebida = (\"\"\"\n-----------------------------------------------------------------\n No hay problema hermano, seguro por ahí\n habrá quién sí la quiera. 
UwU\n----------------------------------------------------------------- \n\"\"\")\n\nif bebida == 1:\n print(con_bebida)\nelif bebida == 2:\n print(sin_bebida)\n\n\nprint(\"\"\"\n-----------------------------------------------------------------\n En la escala del 1 al 10, ¿qué tan bien te sientes hoy?\n-----------------------------------------------------------------\n\"\"\")\n\nsentiment = int(input())\n\n\nif sentiment <= 3:\n print('''\n-----------------------------------------------------------------\n Ánimo''', name.capitalize(), '''vendran días mejores\n-----------------------------------------------------------------\n ''')\nelif sentiment <= 6:\n print('''\n-----------------------------------------------------------------\n Hay días buenos, y hay días malos, a veces'\n sólo hay días, ¿no es así''', name.capitalize(),'''? \n-----------------------------------------------------------------\n ''')\nelif sentiment <= 9:\n print('''\n-----------------------------------------------------------------\n Eso es todo''', name.capitalize(), '''la actitud es el mejor aliado.\n-----------------------------------------------------------------\n ''')\nelif sentiment == 10:\n print('''\n-----------------------------------------------------------------\n Pensé que era el único con este ánimo''', name.capitalize(), \n '''brincando, saltando...\n-----------------------------------------------------------------\n ''')\n" }, { "alpha_fraction": 0.703041136264801, "alphanum_fraction": 0.7137746214866638, "avg_line_length": 20.461538314819336, "blob_id": "9ab9c1b84c09559057715a67597708088b26d864", "content_id": "fa24c73e401445e9bc32b879c2ff2e36952393a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 73, "num_lines": 26, "path": "/Chapter_one/Exercise 7.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 7\n\n#Comma separators 
\n\nfrom cryptography.x509 import name\n\nitalian_greeting = \"ciao\"\n\nprint(\"should we greet people with\", italian_greeting, \"in North Beach?\")\n\n#Format\n\nowner = \"Aguilar Alfonso\"\nage = \"23\"\n\nprint(\"The founder of City titikaka, {}, is now {} years \\n\" \n\"old.\".format(owner, age))\n\n#The forma worsk as follows. Define variables, next in the\n#string, use {} in place of the variable. At the end of the\n#string, add a dot (.) followed by the format keyword.\n\n\n#Len function for determines the number of charaters\n\nprint(len(italian_greeting))\n\n" }, { "alpha_fraction": 0.8048780560493469, "alphanum_fraction": 0.8048780560493469, "avg_line_length": 12.666666984558105, "blob_id": "ad7e29f5314fce7338bef6a9d6ab1cd8cb0ef4b5", "content_id": "7b96555710d144106da41a22589869ac9ba3f96d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/README.md", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "# oneyearofnothing\n\nThis is my exercises\n" }, { "alpha_fraction": 0.728038489818573, "alphanum_fraction": 0.7292418479919434, "avg_line_length": 23.47058868408203, "blob_id": "ed31570fce0d901286f4c8a4623b5bbfbbf4cae4", "content_id": "00dede452562262e0c6837a86e889c6d264a4c87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 831, "license_type": "no_license", "max_line_length": 69, "num_lines": 34, "path": "/Chapter_one/Exercise 6.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 6\n\n\"\"\"\nStrings\n\"\"\"\n\nprint('Single quoted string with \" is no problem')\n\nprint('Single quoted string containing \\' is OK with backslash')\n\nprint(\"Double quoted string with ' is no problem\")\n\nprint(\"Double quoted string containing \\\" is OK with backslash\")\n\nprint('Backslash before \"n\", as 
in \\n, inserts a new line character')\n\nprint(r'Prefixed by \"r\" the \\n no longer inserts a new line')\n\nprint('''This string literal\nhas more than one\nline''')\n\nprint(\"\"\"This string literal\nalso has more than one\nline\"\"\")\n\nvacation_note = '''\nDuring our vacation to San Francisco, we waited in a long line by \nPowell St. Station to take the cable car. Tap dancers performed on \nwooden boards. By the time our cable car arrived, we started looking \nonline for a good place to eat. We're heading to North Beach.\n'''\n\nprint(vacation_note)" }, { "alpha_fraction": 0.2928176820278168, "alphanum_fraction": 0.37016573548316956, "avg_line_length": 14.863636016845703, "blob_id": "37d454580778b867123d968c8982cd517a95e22a", "content_id": "52e92e592e8e464a49ac485282fc931fab6b8442", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 50, "num_lines": 22, "path": "/Chapter_two/Exercise 4.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 4\n\nx = [[1, 2],\n [3, 4], \n [5, 6]]\n\ny = [[1, 2, 3, 4], \n [5, 6, 7, 8]]\n\nresultado = [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]]\n\n#Multiplicar \n\nfor i in range(len(x)):\n for j in range(len(y[0])):\n for k in range(len(y)):\n resultado[i][j] += (x[i][k] * y[k][j])\n\n\nprint(resultado)\n\n " }, { "alpha_fraction": 0.5524625182151794, "alphanum_fraction": 0.6017130613327026, "avg_line_length": 14.600000381469727, "blob_id": "1db3feb6485ec2706eed007e9ac518e240a92da2", "content_id": "d2c2a7d7164937ffe90d680248d7662cc04548f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 469, "license_type": "no_license", "max_line_length": 53, "num_lines": 30, "path": "/Chapter_one/Exercise 3.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Exercise 3 about types int n float\ny = 10\ny = y - 
5.0\nprint(y)\nprint(type(y))\ny1 = int(y)\nprint(y1)\nprint(type(y1))\n\n#Exercise 3.1\n\ny += 1\ny1 += 1\narr = [y, y1]\nprint(arr)\nprint(type(arr))\n\n#Exercise 3.2\n\n# Recordar que los operedores de asignación aumentada\n# +=, -=, *=, **=, /=, //=, %=; son operadores que\n# realizan el siguiente tipo operación, ejemplo: \n# x += 1, es exactamente lo mismo que x = x + 1\n\nx = 14\nprint(x)\nx += 1\nprint(x)\nx = (x / 5) ** 2\nprint(x)" }, { "alpha_fraction": 0.302238792181015, "alphanum_fraction": 0.3973880708217621, "avg_line_length": 14.285714149475098, "blob_id": "bbd13f5a0997399d798c8cda73dc560a009e5b39", "content_id": "f85aec3fae1beab3831a1c623c13de2febe79d5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 536, "license_type": "no_license", "max_line_length": 41, "num_lines": 35, "path": "/Chapter_two/Excercise 3.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", "text": "#Excercise 3\n\nx = [[1, 2, 3], \n [4, 5, 6], \n [7, 8, 9]]\n\ny = [[10, 11, 12], \n [13, 14, 15], \n [16, 17, 18]]\n\nresult = [[0, 0, 0], \n [0, 0, 0], \n [0, 0, 0]]\n \n\nresult1 = [[0, 0, 0], \n [0, 0, 0], \n [0, 0, 0]]\n\n#Suma de matrices\n\nfor i in range(len(x)):\n for j in range(len(x[0])):\n result[i][j] = x[i][j] + y[i][j]\n\nprint(result)\n\n\n#Resta de matrices \n\nfor i in range(len(x)):\n for j in range(len(x[0])):\n result1[i][j] = y[i][j] - x[i][j]\n\nprint(result1)\n\n" }, { "alpha_fraction": 0.7322899699211121, "alphanum_fraction": 0.7397034764289856, "avg_line_length": 38.19355010986328, "blob_id": "099ed0aad64e51b463772997c30caa27b00f79de", "content_id": "ee91f18adfb88b98d2af96399c3c8e1465f2024f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1235, "license_type": "no_license", "max_line_length": 69, "num_lines": 31, "path": "/Chapter_one/Exercise 8.py", "repo_name": "regondi96/oneyearofnothing", "src_encoding": "UTF-8", 
"text": "#Exercise 8 \n\n#String methods\n\nname = \"AlFoNsO esTÁ apRenDiendo a codear\"\n\n#Desplega el string en mayúsculas\nprint(\"Función upper:\", name.upper())\n#Ordena el string comenzando con maýsucula\nprint(\"Función capitalize:\", name.capitalize())\n#Mueve el string en la terminal\nprint(\"Función center:\", name.center(60))\n#Cuenta la cantidad de que algo ocurra dentro del string\n#el método es x.count(sub, [start_index], [end_index])\nprint(\"Función count:\", name.count(\"o\", 0, 25))\n#Devuelve el string codificado por default en UTF-8\nprint(\"Función encode:\", name.encode())\n#Devuelve un boleano si el string termina con el valor\n#en el método, que es, x.endswith(suffix, [start_index], [end_index])\nprint(\"Función endswith:\", name.endswith(\"sO\", 0, 7))\n#Cambia el número de espacios del método tabulador (\\t) \nprint(\"Función expandtabs:\", name.expandtabs())\n#Busca un valor en el string y devuelve su posición\n#el método es x.find[sub, [start_index], [end_index])\nprint(\"Función find:\", name.find(\"a\"))\n#Despliega el string en minúsculas\nprint(\"Función lower:\", name.lower())\n\n#Existen muchos métodos para trabajar con strings\n#para acceder a ellos solo hay que añadir un punto (.)\n#al string para que se despliguen, str.xxx" } ]
35
kensaku-okada/Greenhouse-with-OPV-film-Model
https://github.com/kensaku-okada/Greenhouse-with-OPV-film-Model
d24413a69337f584a8cf4b92d9412892f8a48334
887bbf22f5fb7003df8ec25a83e47087bba0b97c
a24a3cfe5c74cc16d666d37059d5f53d73bd6c00
refs/heads/master
2021-06-09T00:14:11.961393
2020-01-08T15:57:44
2020-01-08T15:57:44
115,674,295
3
2
null
null
null
null
null
[ { "alpha_fraction": 0.6647738218307495, "alphanum_fraction": 0.7057027220726013, "avg_line_length": 46.985862731933594, "blob_id": "e4a3072a8bba077130650806a060432f4f4a3e38", "content_id": "c0e0fecd253703ee58525128f6c68808e45df267", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37554, "license_type": "permissive", "max_line_length": 316, "num_lines": 778, "path": "/CropElectricityYeildSimulatorConstant.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#######################################################\n# author :Kensaku Okada [[email protected]]\n# create date : 06 Nov 2016\n# last edit date: 19 Apr 2017\n#######################################################\n\n##########import package files##########\nimport numpy as np\nimport math\nimport datetime\n\n# #########################################################################\n# ########### Reinforcement Learning (RL) constants start##################\n# #########################################################################\n# # labels of weights for features\n# w_0 = \"bias(w_0)\"\n# ##### 1 DLI to plants with the state and action\n# w_1 = \"DLIEachDayToPlants(w_1)\"\n# ##### 2 plant weight increase with the state and action\n# w_2 = \"unitDailyFreshWeightIncrease(w_2)\"\n# ##### 3 plant weight at the state\n# w_3 = \"accumulatedUnitDailyFreshWeightIncrease(w_3)\"\n# ##### 4 averageDLITillTheDay\n# w_4 = \"averageDLITillHarvestDay(w_4)\"\n# ##### 5 season effects (winter) representing dates.\n# w_5 = \"isSpring(w_5)\"\n# ##### 6 season effects (spring) representing dates.\n# w_6 = \"isSummer(w_6)\"\n# ##### 7 season effects (summer) representing dates.\n# w_7 = \"isAutumn(w_7)\"\n# ##### 8 season effects (autumn) representing dates.\n# w_8 = \"isWinter(w_8)\"\n#\n# # starts from middle of Feb\n# daysFromJanStartApril = 45\n# # starts from May first\n# 
daysFromJanStartSummer = 121\n# # starts from middle of September\n# daysFromJanStartAutumn = 259\n# # starts from middle of Winter\n# daysFromJanStartWinter = 320\n#\n# fileNameQLearningTrainedWeight = \"qLearningTraintedWeights\"\n#\n# ifRunTraining = True\n# ifSaveCalculatedWeight = True\n# ifLoadWeight = True\n# ##############################################\n# ########### RL constants end##################\n# ##############################################\n\n#####################################################\n############ filepath and file name start ###########\n#####################################################\nenvironmentData = \"20130101-20170101\" + \".csv\"\n\nromaineLettceRetailPriceFileName = \"romaineLettuceRetailPrice.csv\"\nromaineLettceRetailPriceFilePath = \"\"\n\naverageRetailPriceOfElectricityMonthly = \"averageRetailPriceOfElectricityMonthly.csv\"\n\nplantGrowthModelValidationData = \"plantGrowthModelValidationData.csv\"\n\n# source: https://www.eia.gov/dnav/ng/hist/n3010az3m.htm\nArizonaPriceOfNaturalGasDeliveredToResidentialConsumers = \"ArizonaPriceOfNaturalGasDeliveredToResidentialConsumers.csv\"\n\n###################################################\n############ filepath and file name end ###########\n###################################################\n\n\n###############################################################\n####################### If statement flag start ###############\n###############################################################\n# True: use the real data (the imported data whose source is the local weather station \"https://midcdmz.nrel.gov/ua_oasis/),\n# False: use the\nifUseOnlyRealData = False\n# ifUseOnlyRealData = True\n\n#If True, export measured horizontal and estimated data when the simulation day is one day.\n# ifExportMeasuredHorizontalAndExtimatedData = True\n\n#If True, export measured horizontal and estimated data only on 15th day each month.\nifGet15thDayData = True\n# ifGet15thDayData = False\n\n# 
if consider the photo inhibition by too strong sunlight, True, if not, False\n# IfConsiderPhotoInhibition = True\nIfConsiderPhotoInhibition = False\n\n# if consider the price discount by tipburn , True, if not, False\nIfConsiderDiscountByTipburn = False\n\n# make this false when running optimization algorithm\nexportCSVFiles = True\n# exportCSVFiles = False\n\n# if you want to export CVS file and figures, then true\nifExportCSVFile = True\n# ifExportCSVFile = False\n# ifExportFigures = True\nifExportFigures = False\n\n\n# if you want to refer to the price of lettuce grown at greenhouse (sales per head), True, if you sell lettuce at open field farming price (sales per kg), False\nsellLettuceByGreenhouseRetailPrice = True\n# sellLettuceByGreenhouseRetailPrice = False\nprint(\"sellLettuceByGreenhouseRetailPrice:{}\".format(sellLettuceByGreenhouseRetailPrice))\n\n#############################################################\n####################### If statement flag end################\n#############################################################\n\n########################################\n##########other constant start##########\n########################################\n# day on each month: days of Jan, Feb, ...., December\ndayperMonthArray = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\ndayperMonthLepArray = np.array([31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\n\n# keep them int\nsecondperMinute = 60\nminuteperHour = 60\nhourperDay = 24\ndayperYear = 365\nmonthsperYear = 12\ndayperLeapYear = 366\nnoonHour = 12\n\n#the temperature at STC (Standard Test Conditions) unit [Celsius]\nSTCtemperature = 25.0\n# current prepared data range: 20130101-20170101\", 20150101 to 20160815 was the only correctly observed period. some 2014 data work too.\n# do not choose \"20140201 to 20160101\" specifically. 
somehow it does not work.\n# do not include 1/19/2014 as start date because 1/19/2014 partially misses its hourly data\n# do not include after 8/18/2016 because the dates after 8/18/2016 do not correctly log the body temperature.\nSimulationStartDate=\"20150101\"\n# SimulationStartDate=\"20150323\"\n# SimulationStartDate=\"20151215\"\nSimulationEndDate = \"20151231\"\n# SimulationEndDate = \"20150419\"\n# one cultivation cycle\n# SimulationEndDate = \"20151215\"\n\nsunnyDaysList = [\"20150115\", \"20150217\", \"20150316\", \"20150413\", \"20150517\", \"20150615\", \"20150711\", \"20150815\", \"20150918\", \"20151013\", \"20151117\", \"20151215\"]\nprint(\"SimulationStartDate:{}, SimulationEndDate:{}\".format(SimulationStartDate, SimulationEndDate))\n\n# latitude at Tucson == 32.2800408 N\nLatitude = math.radians(32.2800408)\n# longitude at Tucson 110.9422745 W\nLongitude = math.radians(110.9422745)\n# lambda (longitude of the site) = 32.2800408 N: latitude,-110.9422745 W : longitude [degree]\n# lambda_R ( the longitude of the time zone in which the site is situated) = 33.4484, 112.074 [degree]\n# J' (the day angle in the year ) =\n\n# John Page, \"The Role of Solar-Radiation Climatology in the Design of Photovoltaic Systems\", 2nd edition\n# there is a equation calculating LAT.\n\n# p618 (46 in pdf file)\n# The passage of days is described mathematically by numbering the days continuously through the year to produce a Julian day number, J: 1\n# January, J 5 1; 1 February, J 5 32; 1 March, J 5 57 in a nonleap year and 58 in a leap year; and so on. Each day in the year can be then be\n# expressed in an angular form as a day angle, J0, in degrees by multiplying\n# J by 360/365.25. 
The day angle is used in the many of the trigonometric expressions that follow.\n# EOT (equation of time) =\n\n#watt To PPFD Conversion coefficient for sunlight (W m^-2) -> (μmol/m^2/s)\n# the range of wavelength considered is around from 280 to 2800 (shortwave sunlight), not from 400nm to 700nm (visible sunlight).\nwattToPPFDConversionRatio = 2.05\n# wattToPPFDConversionRatio = 4.57 <- this ratio is used when the range of wavelength of PAR [W m^-2] is between 400nm and 700nm (visible sunlight)\n\n# [W/m^2]\nsolarConstant = 1367.0\n# ground reflectance of sunlight. source: https://www2.pvlighthouse.com.au/resources/courses/altermatt/The%20Solar%20Spectrum/The%20reflectance%20of%20the%20ground.aspx\ngroundReflectance = 0.1\n# referred from A. Yano et. al., 2009, \"Electrical energy generated by photovoltaic modules mounted inside the roof of a north–south oriented greenhouse\"\n# this number should be carefully definted according to the weather property of the simulation region (very cloudy: 0.0 ~ 1.0: very sunny)\n# atmosphericTransmissivity = 0.6\n# atmosphericTransmissivity = 0.7\n# the reason why this coefficient was changed into this value is described in my paper\natmosphericTransmissivity = 0.643\nprint(\"atmosphericTransmissivity:{}\".format(atmosphericTransmissivity))\n\n# unit conversion. [cwt] -> [kg] US standard\nkgpercwt = 45.3630\n\n# if this is true, then continue to grow plants during the Summer period. 
the default value is False in the object(instance)\n# ifGrowForSummerPeriod = True\nifGrowForSummerPeriod = False\nprint(\"ifGrowForSummerPeriod:{}\".format(ifGrowForSummerPeriod))\n######################################\n##########other constant end##########\n######################################\n\n#########################################################\n##########Specification of the greenhouse start##########\n#########################################################\n\n# #########################################################\n# # Our real greenhouse specification source:\n# # http://www.gpstructures.com/pdfs/Windjammer.pdf\n# # We used 6' TALL SIDEWALL HEIGHT 30' width, Free Standing Structures in our project.\n# #greenhouse roof type\n# greenhouseRoofType = \"SimplifiedAFlame\"\n# #width of the greenhouse (m)\n# greenhouseWidth = 9.144 # = 30 [feet]\n# #depth of the greenhouse (m)\n# greenhouseDepth = 14.6\n# #area of the greenhouse (m**2)\n# greenhouseFloorArea = greenhouseWidth * greenhouseDepth\n# # print(\"greenhouseFloorArea[m^2]:{}\".format(greenhouseFloorArea))\n# #width of the greenhouse cultivation area (m)\n# greenhouseCultivationFloorWidth = 7.33\n# #depth of the greenhouse cultivation area(m)\n# greenhouseCultivationFloorDepth = 10.89\n# #floor area of greenhouse cultivation area(m**2)\n# greenhouseCultivationFloorArea = greenhouseCultivationFloorWidth * greenhouseCultivationFloorDepth\n# # greenhouseCultivationFloorArea = greenhouseWidth * greenhouseDepth * 0.9\n# # print(\"greenhouseCultivationFloorArea[m^2]:{}\".format(greenhouseCultivationFloorArea))\n# # number of roofs. If this is 1, the greenhouse is a single roof greenhouse. 
If >1, multi-roof greenhouse\n# numOfRoofs = 1\n# # the type of roof direction\n# roofDirectionNotation = \"EastWestDirectionRoof\"\n# #side wall height of greenhouse (m)\n# greenhouseHeightSideWall = 1.8288 # = 6[feet]\n# #center height of greenhouse (m)\n# greenhouseHeightRoofTop = 4.8768 # = 16[feet]\n# #width of the rooftop. calculate from the Pythagorean theorem. assumed that the shape of rooftop is straight, not curved.\n# greenhouseRoofWidth = math.sqrt((greenhouseWidth/2.0)**2.0 + (greenhouseHeightRoofTop-greenhouseHeightSideWall)**2.0)\n# #print (\"greenhouseRoofWidth: {}\".format(greenhouseRoofWidth))\n# #angle of the rooftop (theta θ). [rad]\n#\n# greenhouseRoofAngle = math.acos((greenhouseWidth/2.0) / greenhouseRoofWidth)\n# # print (\"greenhouseRoofAngle (rad) : {}\".format(greenhouseRoofAngle))\n# # the angle of the roof facing north or east [rad]\n# roofAngleNorthOrEast = greenhouseRoofAngle\n# # the angle of the roof facing south or west [rad]. This should be modified if the roof angle is different from the other side.\n# roofAngleWestOrSouth = greenhouseRoofAngle\n# #area of the rooftop [m^2]. summing the left and right side of rooftops from the center.\n# greenhouseTotalRoofArea = greenhouseRoofWidth * greenhouseDepth * 2.0\n# # print (\"greenhouseRoofArea[m^2]: {}\".format(greenhouseTotalRoofArea))1\n# #########################################################\n\n\n#########################################################\n# Virtual greenhouse specification, the multi-roof greenhouse virtually connected 10 of our real greenhouses.\n# You can change these numbers according to your greenhouse design and specification\n#greenhouse roof type\ngreenhouseRoofType = \"SimplifiedAFlame\"\n# #width of the greenhouse (m)\n# greenhouseWidth = 91.44 #\n# #depth of the greenhouse (m)\n# greenhouseDepth = 14.6\n\n# the greenhouse floor area was replaced with the following, referring to a common business size greenhouse\n\n# number of roofs (-). 
If this is 1, the greenhouse is a single roof greenhouse. If >1, multi-roof greenhouse.\nnumOfRoofs = 10.0\n# source: https://www.interempresas.net/FeriaVirtual/Catalogos_y_documentos/1381/Multispan-greenhouse-ULMA-Agricola.pdf,\n# source: https://www.alibaba.com/product-detail/Multi-roof-Poly-Film-Tunnel-Greenhouse_60184626287.html\n# this should be cahnged depending on the type of greenhouse simulated. (m)\nwidthPerRoof = 9.6\n#total width of the greenhouse (m)\n# greenhouseWidth = 91.44\ngreenhouseWidth = numOfRoofs * widthPerRoof\n\n#depth of the greenhouse (m)\ngreenhouseDepth = 14.6\n\n#area of the greenhouse (m**2)\ngreenhouseFloorArea = greenhouseWidth * greenhouseDepth\nprint(\"greenhouseFloorArea[m^2]:{}\".format(greenhouseFloorArea))\n# # the following calculation gives the real cultivation area of our research greenhouse. However, since it has too large vacant space which is unrealistic for business greenhouse, this number was not used.\n# #width of the greenhouse cultivation area (m)\n# greenhouseCultivationFloorWidth = 73.3\n# #depth of the greenhouse cultivation area(m)\n# greenhouseCultivationFloorDepth = 10.89\n# #floor area of greenhouse cultivation area(m**2)\n# greenhouseCultivationFloorArea = greenhouseCultivationFloorWidth * greenhouseCultivationFloorDepth\n# Instead, it was assumed the cultivation area is 0.9 time of the total greenhouse floor area\ngreenhouseCultivationFloorArea = greenhouseFloorArea * 0.9\nprint(\"greenhouseCultivationFloorArea[m^2]:{}\".format(greenhouseCultivationFloorArea))\n# the type of roof direction\nroofDirectionNotation = \"EastWestDirectionRoof\"\n# roofDirectionNotation = \"NorthSouthDirectionRoof\"\n#side wall height of greenhouse (m)\ngreenhouseHeightSideWall = 1.8288 # = 6[feet]\n# the total sidewall area\ngreenhouseSideWallArea = 2.0 * (greenhouseWidth + greenhouseDepth) * greenhouseHeightSideWall\nprint(\"greenhouseSideWallArea[m^2]:{}\".format(greenhouseSideWallArea))\n\n#center height of greenhouse 
(m)\ngreenhouseHeightRoofTop = 4.8768 # = 16[feet]\n\n#width of the rooftop. calculate from the Pythagorean theorem. assumed that the shape of rooftop is straight (not curved), and the top height and roof angels are same at each roof.\ngreenhouseRoofWidth = math.sqrt((greenhouseWidth/(numOfRoofs*2.0))**2.0 + (greenhouseHeightRoofTop-greenhouseHeightSideWall)**2.0)\nprint (\"greenhouseRoofWidth [m]: {}\".format(greenhouseRoofWidth))\n# the length of the roof facing east or north\ngreenhouseRoofWidthEastOrNorth = greenhouseRoofWidth\n# the length of the roof facing west or south\ngreenhouseRoofWidthWestOrSouth = greenhouseRoofWidth\n\n#angle of the rooftop (theta θ). [rad]\ngreenhouseRoofAngle = math.acos(((greenhouseWidth/(numOfRoofs*2.0)) / greenhouseRoofWidth))\n# greenhouseRoofAngle = 0.0\nprint (\"greenhouseRoofAngle (rad) : {}\".format(greenhouseRoofAngle))\n# the angle of the roof facing north or east [rad]\nroofAngleEastOrNorth = greenhouseRoofAngle\n# the angle of the roof facing south or west [rad]. This should be modified if the roof angle is different from the other side.\nroofAngleWestOrSouth = greenhouseRoofAngle\n# area of the rooftop [m^2]. summing the left and right side of rooftops from the center.\ngreenhouseTotalRoofArea = greenhouseRoofWidth * greenhouseDepth * numOfRoofs * 2.0\nprint (\"greenhouseTotalRoofArea[m^2]: {}\".format(greenhouseTotalRoofArea))\n\ngreenhouseRoofTotalAreaEastOrNorth = greenhouseRoofWidthEastOrNorth * greenhouseDepth * numOfRoofs\nprint (\"greenhouseRoofTotalAreaEastOrNorth[m^2]: {}\".format(greenhouseRoofTotalAreaEastOrNorth))\ngreenhouseRoofTotalAreaWestOrSouth = greenhouseRoofWidthWestOrSouth * greenhouseDepth * numOfRoofs\nprint (\"greenhouseRoofTotalAreaWestOrSouth[m^2]: {}\".format(greenhouseRoofTotalAreaWestOrSouth))\n#########################################################\n\n#the proportion of shade made by the greenhouse inner structure, actuator (e.g. 
sensors and fog cooling systems) and farming equipments (e.g. gutters) (-)\n# GreenhouseShadeProportion = 0.1\nGreenhouseShadeProportionByInnerStructures = 0.05\n\n# DLI [mol m^-2 day^-1]\nDLIForButterHeadLettuceWithNoTipburn = 17.0\n# PPFD [umol m^-2 s^-1]\n# the PPFD was divided by 2.0 because it was assumed that the time during the day was the half of a day (12 hours)\n# OptimumPPFDForButterHeadLettuceWithNoTipburn = DLIForButterHeadLettuceWithNoTipburn * 1000000.0 / float(secondperMinute*minuteperHour*hourperDay)\nOptimumPPFDForButterHeadLettuceWithNoTipburn = DLIForButterHeadLettuceWithNoTipburn * 1000000.0 / float(secondperMinute*minuteperHour*hourperDay/2.0)\nprint(\"OptimumPPFDForButterHeadLettuceWithNoTipburn (PPFD):{}\".format(OptimumPPFDForButterHeadLettuceWithNoTipburn))\n\n# 1.5 is an arbitrary number\n# the amount of PPFD to deploy shading curtain\nshadingCurtainDeployPPFD = OptimumPPFDForButterHeadLettuceWithNoTipburn * 1.5\nprint(\"shadingCurtainDeployPPFD:{}\".format(shadingCurtainDeployPPFD))\n\n# The maximum value of m: the number of roofs that incident light penetrating in the model. 
This value is used at SolarIrradianceMultiroofRoof.py.\n# if the angle between the incident light and the horizonta ql axis is too small, the m can be too large, which cause a system error at Util.sigma by iterating too much and make the simulation slow.\n# Thus, the upper limit was set.\nmMax = numOfRoofs\n# defaultIterationLimit = 495\n#######################################################\n##########Specification of the greenhouse end##########\n#######################################################\n\n##################################################################\n##########specification of glazing (covering film) start##########\n##################################################################\ngreenhouseGlazingType = \"polyethylene (PE) DoubleLayer\"\n\n#ratio of visible light (400nm - 750nm) through a glazing material (-)\n#source: Nadia Sabeh, \"TOMATO GREENHOUSE ROADMAP\" https://www.amazon.com/Tomato-Greenhouse-Roadmap-Guide-Production-ebook/dp/B00O4CPO42\n# https://www.goodreads.com/book/show/23878832-tomato-greenhouse-roadmap\n# singlePEPERTransmittance = 0.875\nsinglePERTransmittance = 0.85\ndobulePERTransmittance = singlePERTransmittance ** 2.0\n# reference: https://www.filmetrics.com/refractive-index-database/Polyethylene/PE-Polyethene\nPEFilmRefractiveIndex = 1.5\n# reference: https://en.wikipedia.org/wiki/Refractive_index\nAirRefractiveIndex = 1.000293\n\n# Source of reference https://www.amazon.com/Tomato-Greenhouse-Roadmap-Guide-Production-ebook/dp/B00O4CPO42\nsinglePolycarbonateTransmittance = 0.91\ndoublePolycarbonateTransmittance = singlePolycarbonateTransmittance ** 2.0\n\nroofCoveringTransmittance = singlePERTransmittance\nsideWallTransmittance = singlePERTransmittance\n\n################################################################\n##########specification of glazing (covering film) 
end##########\n################################################################\n\n#####################################################################\n##########specification of OPV module (film or panel) start##########\n#####################################################################\n\n# [rad]. tilt of OPV module = tilt of the greenhouse roof\n# OPVAngle = math.radians(0.0)\nOPVAngle = greenhouseRoofAngle\n\n# the coverage ratio of OPV module on the greenhouse roof [-]\n# OPVAreaCoverageRatio = 0.20\n# OPVAreaCoverageRatio = 0.58\n# OPVAreaCoverageRatio = 0.5\nOPVAreaCoverageRatio = 0.009090909090909\n# print(\"OPVAreaCoverageRatio:{}\".format(OPVAreaCoverageRatio))\n\n# the coverage ratio of OPV module on the greenhouse roof [-]. If you set this value same as OPVAreaCoverageRatio, it assumed that the OPV coverage ratio does not change during the whole period\n# OPVAreaCoverageRatioSummerPeriod = 1.0\n# OPVAreaCoverageRatioSummerPeriod = 0.5\nOPVAreaCoverageRatioSummerPeriod = OPVAreaCoverageRatio\n# OPVAreaCoverageRatioSummerPeriod = 0.0\nprint(\"OPVAreaCoverageRatioSummerPeriod:{}\".format(OPVAreaCoverageRatioSummerPeriod))\n\n\n#the area of OPV on the roofTop.\nOPVArea = OPVAreaCoverageRatio * greenhouseTotalRoofArea\nprint(\"OPVArea:{}\".format(OPVArea))\n\n# the PV module area facing each angle\nOPVAreaFacingEastOrNorthfacingRoof = OPVArea * (greenhouseRoofTotalAreaEastOrNorth/greenhouseTotalRoofArea)\nOPVAreaFacingWestOrSouthfacingRoof = OPVArea * (greenhouseRoofTotalAreaWestOrSouth/greenhouseTotalRoofArea)\n\n#the ratio of degradation per day (/day)\n# TODO: find a paper describing the general degradation ratio of OPV module\n#the specification document of our PV module says that the guaranteed quality period is 1 year.\n# reference (degradation ratio of PV module): https://www.nrel.gov/docs/fy12osti/51664.pdf, https://www.solar-partners.jp/pv-eco-informations-41958.html\n# It was assumed that inorganic PV module expiration date is 20 years and 
its yearly degradation rate is 0.8% (from the first reference page 6), which indicates the OPV film degrades faster by 20 times.\nPVDegradationRatioPerHour = 20.0 * 0.008 / dayperYear / hourperDay\nprint(\"PVDegradationRatioPerHour:{}\".format(PVDegradationRatioPerHour))\n\n\n# the coefficient converting the ideal (given by manufacturers) cell efficiency to the real efficiency under actual conditions\n# degradeCoefficientFromIdealtoReal = 0.85\n# this website (https://franklinaid.com/2013/02/06/solar-power-for-subs-the-panels/) says \"In real-life conditions, the actual values will be somewhat more or less than listed by the manufacturer.\"\n# So it was assumed the manufacture's spec sheet correctly shows the actual power\ndegradeCoefficientFromIdealtoReal = 1.00\n\n\n########################################################################################################################\n# the following information should be taken from the spec sheet provided by a PV module manufacturer\n########################################################################################################################\n#unit[/K]. The proportion of a change of voltage which the OPV film generates under STC condition (25°C),\n# mentioned in # Table 1-2-1\nTempCoeffitientVmpp = -0.0019\n\n#unit[/K]. The proportion of a change of current which the OPV film generates under STC condition (25°C),\n# mentioned in Table 1-2-1\nTempCoeffitientImpp = 0.0008\n\n#unit[/K]. 
The proportion of a change of power which the OPV film generates under STC condition (25°C),\n# mentioned in Table 1-2-\nTempCoeffitientPmpp = 0.0002\n\n#transmission ratio of VISIBLE sunlight through OPV film.\n#OPVPARTransmittance = 0.6\nOPVPARTransmittance = 0.3\n\n# unit: [A]\nshortCircuitCurrent = 0.72\n# unit [V]\nopenCIrcuitVoltage = 24.0\n# unit: [A]\ncurrentAtMaximumPowerPoint = 0.48\n# unit [V]\nvoltageAtMaximumPowerPoint = 16.0\n# unit: [watt]\nmaximumPower = currentAtMaximumPowerPoint * voltageAtMaximumPowerPoint\nprint(\"maximumPower:{}\".format(maximumPower))\n# unit: [m^2]. This is the area per sheet, not roll (having 8 sheets concatenated). This area excludes the margin space of the OPV sheet. THe margin space are made from transparent laminated film with connectors.\nOPVAreaPerSheet = 0.849 * 0.66\n\n#conversion efficiency from ligtht energy to electricity\n#The efficiency of solar panels is based on standard testing conditions (STC),\n#under which all solar panel manufacturers must test their modules. STC specifies a temperature of 25°C (77 F),\n#solar irradiance of 1000 W/m2 and an air mass 1.5 (AM1.5) spectrums.\n#The STC efficiency of a 240-watt module measuring 1.65 square meters is calculated as follows:\n#240 watts ÷ (1.65m2 (module area) x 1000 W/m2) = 14.54%.\n#source: http://www.solartown.com/learning/solar-panels/solar-panel-efficiency-have-you-checked-your-eta-lately/\n# http://www.isu.edu/~rodrrene/Calculating%20the%20Efficiency%20of%20the%20Solar%20Cell.doc\n# unit: -\n# OPVEfficiencyRatioSTC = maximumPower / OPVAreaPerSheet / 1000.0\n\n# OPVEfficiencyRatioSTC = 0.2\n# this value is the cell efficiency of OPV film purchased at Kacira Lab, CEAC at University of Arizona\n# OPVEfficiencyRatioSTC = 0.0137\n# source: 19.\tLucera, L., Machui, F., Schmidt, H. D., Ahmad, T., Kubis, P., St rohm, S., … Brabec, C. J. (2017). Printed semi-transparent large area organic photovoltaic modules with power conversion efficiencies of close to 5 %. 
Organic Electronics: Physics, Materials, Applications, 45, 41–45. https://doi.org/10.1016/j.orgel.2017.03.013\nOPVEfficiencyRatioSTC = 0.043\nprint(\"OPVCellEfficiencyRatioSTC:{}\".format(OPVEfficiencyRatioSTC))\n#what is an air mass??\n#エアマスとは太陽光の分光放射分布を表すパラメーター、標準状態の大気(標準気圧1013hPa)に垂直に入射(太陽高度角90°)した\n# 太陽直達光が通過する路程の長さをAM1.0として、それに対する比で表わされます。\n#source: http://www.solartech.jp/module_char/standard.html\n\n########################################################################################################################\n\n#source: http://energy.gov/sites/prod/files/2014/01/f7/pvmrw13_ps5_3m_nachtigal.pdf (3M Ultra-Barrier Solar Film spec.pdf)\n\n#the price of OPV per area [EUR/m^2]\n# [EUR]\noriginalOPVPriceEUR = 13305.6\nOPVPriceEUR = 13305.6\n# [m^2]\nOPVSizePurchased = 6.0 * 0.925 * 10.0\n\n# [EUR/m^2]\nOPVPriceperAreaEUR = OPVPriceEUR / OPVSizePurchased\n\n#as of 11Nov/2016 [USD/EUR]\nCurrencyConversionRatioUSDEUR= 1/1.0850\n#the price of OPV per area [USD/m^2]\n# OPVPricePerAreaUSD = OPVPriceperAreaEUR*CurrencyConversionRatioUSDEUR\n# OPVPricePerAreaUSD = 50.0\nOPVPricePerAreaUSD = 0.0\n# reference of PV panel purchase cost. 
200 was a reasonable price in the US.\n# https://www.homedepot.com/p/Grape-Solar-265-Watt-Polycrystalline-Solar-Panel-4-Pack-GS-P60-265x4/206365811?cm_mmc=Shopping%7cVF%7cG%7c0%7cG-VF-PLA%7c&gclid=Cj0KCQjw6pLZBRCxARIsALaaY9YIkZf5W4LESs9HA2RgxsYaeXOfzvMuMCUT9iZ7xU65GafQel6FIY8aApLfEALw_wcB&dclid=CLjZj46A2NsCFYQlfwodGQoKsQ\n# https://www.nrel.gov/docs/fy17osti/68925.pdf\n# OPVPricePerAreaUSD = 200.0\nprint(\"OPVPricePerAreaUSD:{}\".format(OPVPricePerAreaUSD))\n\n\n# True == consider the OPV cost, False == ignore the OPV cost\nifConsiderOPVCost = True\n# if you set this 730, you assume the purchase cost of is OPV zero because at the simulator class, this number divides the integer number, which gives zero.\n# OPVDepreciationPeriodDays = 730.0\nOPVDepreciationPeriodDays = 365.0\n\nOPVDepreciationMethod = \"StraightLine\"\n###################################################################\n##########specification of OPV module (film or panel) end##########\n###################################################################\n\n##########################################################\n##########specification of shading curtain start##########\n##########################################################\n#the transmittance ratio of shading curtain\nshadingTransmittanceRatio = 0.45\n\nisShadingCurtainReinforcementLearning = True\n\n#if True, a black net is covered over the roof for shading in summer\n# hasShadingCurtain = False\nhasShadingCurtain = True\n\nopenCurtainString = \"openCurtain\"\ncloseCurtainString = \"closeCurtain\"\n\n# ######## default setting ########\n# ShadingCurtainDeployStartMMSpring = 5\n# ShadingCurtainDeployStartDDSpring = 31\n# ShadingCurtainDeployEndMMSpring = 5\n# ShadingCurtainDeployEndDDSpring = 31\n# ShadingCurtainDeployStartMMFall =9\n# ShadingCurtainDeployStartDDFall =15\n# ShadingCurtainDeployEndMMFall =6\n# ShadingCurtainDeployEndDDFall =29\n\n# # # optimzed period on greenhouse retail price, starting from minimum values\n# 
ShadingCurtainDeployStartMMSpring = 1\n# ShadingCurtainDeployStartDDSpring = 1\n# ShadingCurtainDeployEndMMSpring = 1\n# ShadingCurtainDeployEndDDSpring = 4\n# ShadingCurtainDeployStartMMFall = 1\n# ShadingCurtainDeployStartDDFall = 6\n# ShadingCurtainDeployEndMMFall = 1\n# ShadingCurtainDeployEndDDFall = 16\n\n# # # optimzed period on greenhouse retail price, starting from middle values\n# ShadingCurtainDeployStartMMSpring = 6\n# ShadingCurtainDeployStartDDSpring = 19\n# ShadingCurtainDeployEndMMSpring = 7\n# ShadingCurtainDeployEndDDSpring = 5\n# ShadingCurtainDeployStartMMFall = 7\n# ShadingCurtainDeployStartDDFall = 14\n# ShadingCurtainDeployEndMMFall = 7\n# ShadingCurtainDeployEndDDFall = 15\n\n# # # optimzed period on greenhouse retail price, starting from max values\n# ShadingCurtainDeployStartMMSpring = 12\n# ShadingCurtainDeployStartDDSpring = 24\n# ShadingCurtainDeployEndMMSpring = 12\n# ShadingCurtainDeployEndDDSpring = 28\n# ShadingCurtainDeployStartMMFall = 12\n# ShadingCurtainDeployStartDDFall = 30\n# ShadingCurtainDeployEndMMFall = 12\n# ShadingCurtainDeployEndDDFall = 31\n\n# optimzed period on open field farming retail price, starting from max values\n# ShadingCurtainDeployStartMMSpring = 8\n# ShadingCurtainDeployStartDDSpring = 28\n# ShadingCurtainDeployEndMMSpring = 12\n# ShadingCurtainDeployEndDDSpring = 2\n# ShadingCurtainDeployStartMMFall = 12\n# ShadingCurtainDeployStartDDFall = 12\n# ShadingCurtainDeployEndMMFall = 12\n# ShadingCurtainDeployEndDDFall = 31\n\n# # # initial date s for optimization\nShadingCurtainDeployStartMMSpring = 5\nShadingCurtainDeployStartDDSpring = 17\nShadingCurtainDeployEndMMSpring = 6\nShadingCurtainDeployEndDDSpring = 12\nShadingCurtainDeployStartMMFall = 6\nShadingCurtainDeployStartDDFall = 23\nShadingCurtainDeployEndMMFall = 7\nShadingCurtainDeployEndDDFall = 2\n\n\n# Summer period. 
This should happend soon after ending the shading curtain deployment period.\nSummerPeriodStartDate = datetime.date(int(SimulationStartDate[0:4]), ShadingCurtainDeployEndMMSpring, ShadingCurtainDeployEndDDSpring) + datetime.timedelta(days=1)\nSummerPeriodEndDate = datetime.date(int(SimulationStartDate[0:4]), ShadingCurtainDeployStartMMFall, ShadingCurtainDeployStartDDFall) - datetime.timedelta(days=1)\n\nSummerPeriodStartMM = int(SummerPeriodStartDate.month)\nprint(\"SummerPeriodStartMM:{}\".format(SummerPeriodStartMM))\nSummerPeriodStartDD = int(SummerPeriodStartDate.day)\nprint(\"SummerPeriodStartDD:{}\".format(SummerPeriodStartDD))\nSummerPeriodEndMM = int(SummerPeriodEndDate.month)\nprint(\"SummerPeriodEndMM:{}\".format(SummerPeriodEndMM))\nSummerPeriodEndDD = int(SummerPeriodEndDate.day)\nprint(\"SummerPeriodEndDD:{}\".format(SummerPeriodEndDD))\n\n# this is gonna be True when you want to deploy shading curtains only from ShadigCuratinDeployStartHH to ShadigCuratinDeployEndHH\nIsShadingCurtainDeployOnlyDayTime = True\nShadigCuratinDeployStartHH = 10\nShadigCuratinDeployEndHH = 14\nIsDifferentShadingCurtainDeployTimeEachMonth = True\n\n# this is gonna be true when you want to control shading curtain opening and closing every hour\nIsHourlyShadingCurtainDeploy = False\n########################################################\n##########specification of shading curtain end##########\n########################################################\n\n\n#################################################\n##########Specification of plants start##########\n#################################################\n#Cost of plant production. the unit is USD/m^2\n# the conversion rate was calculated from from University of California Cooperative Extension (UCEC) UC Small farm program (http://sfp.ucdavis.edu/crops/coststudieshtml/lettuce/LettuceTable1/)\nplantProductionCostperSquareMeterPerYear = 1.096405\n\nnumberOfRidge = 5.0\n\n#unit: m. 
plant density can be derived from this.\ndistanceBetweenPlants = 0.2\n\n# plant density (num of heads per area) [head/m^2]\nplantDensity = 1.0/(distanceBetweenPlants**2.0)\nprint(\"plantDensity:{}\".format(plantDensity))\n\n#number of heads\n# numberOFheads = int(greenhouseCultivationFloorDepth/distanceBetweenPlants * numberOfRidge)\nnumberOFheads = int (plantDensity * greenhouseCultivationFloorArea)\nprint(\"numberOFheads:{}\".format(numberOFheads))\n\n# number of head per cultivation area [heads/m^2]\nnumberOFheadsPerArea = float(numberOFheads / greenhouseCultivationFloorArea)\n\n#photoperiod (time of lighting in a day). the unit is hour\n# TODO: this should be revised so that the photo period is calculated by the sum of PPFD each day or the change of direct solar radiation or the diff of sunse and sunrise\nphotoperiod = 14.0\n\n#number of cultivation days (days/harvest)\ncultivationDaysperHarvest = 35\n# cultivationDaysperHarvest = 30\n\n# the constant of each plant growth model\n# Source: https://www.researchgate.net/publication/266453402_TEN_YEARS_OF_HYDROPONIC_LETTUCE_RESEARCH\nA_J_Both_Modified_TaylorExpantionWithFluctuatingDLI = \"A_J_Both_Modified_TaylorExpantionWithFluctuatingDLI\"\n# Source: https://www.researchgate.net/publication/4745082_Validation_of_a_dynamic_lettuce_growth_model_for_greenhouse_climate_control\nE_J_VanHenten1994 = \"E_J_VanHenten1994\"\n# Source: https://www.researchgate.net/publication/286938495_A_validated_model_to_predict_the_effects_of_environment_on_the_growth_of_lettuce_Lactuca_sativa_L_Implications_for_climate_change\nS_Pearson1997 = \"S_Pearson1997\"\nplantGrowthModel = E_J_VanHenten1994\n\n# lettuce base temperature [Celusius]\n# Reference: A validated model to predict the effects of environment on the growth of lettuce (Lactuca sativa L.): Implications for climate change\n# https://www.tandfonline.com/doi/abs/10.1080/14620316.1997.11515538\nlettuceBaseTemperature = 0.0\nDryMassToFreshMass = 1.0/0.045\n\n# the weight to 
harvest [g]\nharvestDryWeight = 200.0 / DryMassToFreshMass\n# harvestDryWeight = 999.0 / DryMassToFreshMass\n\n# operation cost of plants [USD/m^2/year]\nplantcostperSquaremeterperYear = 1.096405\n\n# the DLI upper limitation causing some tipburn\nDLIforTipBurn = DLIForButterHeadLettuceWithNoTipburn\n\n# the discount ratio when there are some tipburn observed\ntipburnDiscountRatio = 0.2\n\n# make this number 1.0 in the end. change this only for simulation experiment\nplantPriceDiscountRatio_justForSimulation = 1.0\n\n# the set point temperature during day time [Celusius]\n# reference: A.J. Both, TEN YEARS OF HYDROPONIC LETTUCE RESEARCH: https://www.researchgate.net/publication/266453402_TEN_YEARS_OF_HYDROPONIC_LETTUCE_RESEARCH\nsetPointTemperatureDayTime = 24.0\n# setPointTemperatureDayTime = 16.8\n\n# the set point temperature during night time [Celusius]\n# reference: A.J. Both, TEN YEARS OF HYDROPONIC LETTUCE RESEARCH: https://www.researchgate.net/publication/266453402_TEN_YEARS_OF_HYDROPONIC_LETTUCE_RESEARCH\nsetPointTemperatureNightTime = 19.0\n# setPointTemperatureNightTime = 16.8\n\nsetPointHumidityDayTime = 0.65\n# setPointHumidityDayTime = 0.7\nsetPointHumidityNightTime = 0.8\n# setPointHumidityNightTime = 0.7\n\n# the flags indicating daytime or nighttime at each time step\ndaytime = \"daytime\"\nnighttime = \"nighttime\"\n\n# sales price of lettuce grown at greenhouses, which is usually higher than that of open field farming grown lettuce\n# source\n# 1.99 USD head-1 for \"Lettuce, Other, Boston-Greenhouse\") cited from USDA (https://www.ams.usda.gov/mnreports/fvwretail.pdf)\n# other source: USDA, Agricultural, Marketing, Service, National Retail Report - Specialty Crops page 9 and others: https://www.ams.usda.gov/mnreports/fvwretail.pdf\n# unit: USD head-1\nromainLettucePriceBasedOnHeadPrice = 1.99\n\n###################################################\n##########Specification of the plants 
end##########\n###################################################\n\n\n\n###################################################\n##########Specification of labor cost start########\n###################################################\n# source: https://onlinelibrary.wiley.com/doi/abs/10.1111/cjag.12161\n# unit: people/10000kg yield\nnecessaryLaborPer10000kgYield = 0.315\n\n# source:https://www.bls.gov/regions/west/news-release/occupationalemploymentandwages_tucson.htm\n# unit:USD/person/hour\nhourlyWagePerPerson = 12.79\n\n# unit: hour/day\nworkingHourPerDay = 8.0\n\n###################################################\n##########Specification of labor cost end########\n###################################################\n\n\n###################################################\n##########Specification of energy cost start#######\n###################################################\n\n# energy efficiency of heating equipment [-]\n# source: https://www.alibaba.com/product-detail/Natural-gas-fired-hot-air-heater_60369835987.html?spm=a2700.7724838.2017115.1.527251bcQ2pojZ\n# source: https://www.aga.org/natural-gas/in-your-home/heating/\nheatingEquipmentEfficiency = 0.9\n\n# unit: USD\nheatingEquipmentQurchaseCost = 0.0\n\n# source: http://www.world-nuclear.org/information-library/facts-and-figures/heat-values-of-various-fuels.aspx\n# source: http://agnatural.pt/documentos/ver/natural-gas-conversion-guide_cb4f0ccd80ccaf88ca5ec336a38600867db5aaf1.pdf\n# unit: MJ m-3\nnaturalGasSpecificEnergy = {\"MJ m-3\" :38.7}\nnaturalGasSpecificEnergy[\"MJ ft-3\"] = naturalGasSpecificEnergy[\"MJ m-3\"] / 35.3147\nheatOfWaterEcaporation = {\"J kg-1\" : 2257}\n\n\n# source: https://www.researchgate.net/publication/265890843_A_Review_of_Evaporative_Cooling_Technologies?enrichId=rgreq-2c40013798cfb3c564cf35844f4947fb-XXX&enrichSource=Y292ZXJQYWdlOzI2NTg5MDg0MztBUzoxNjUxOTgyMjg4OTM2OTdAMTQxNjM5NzczNTk5Nw%3D%3D&el=1_x_3&_esc=publicationCoverPdf\n# COP = coefficient of persormance. 
COP = Q/W\n# Q is the useful heat supplied or removed by the considered system. W is the work required by the considered system.\nPadAndFanCOP = 15.0\n###################################################\n##########Specification of energy cost end#########\n###################################################\n\n\n#########################################################################\n###########################Global variable end###########################\n#########################################################################\n\nclass CropElectricityYeildSimulatorConstant:\n \"\"\"\n a constant class.\n\n \"\"\"\n\n ###########the constractor##################\n def __init__(self):\n print (\"call CropElectricityYeildSimulatorConstant\")\n\n ###########the constractor end##################\n\n ###########the methods##################\n def method(self, val):\n print(\"call CropElectricityYeildSimulatorConstant method\")\n\n ###########the methods end##################\n" }, { "alpha_fraction": 0.729365348815918, "alphanum_fraction": 0.7395694255828857, "avg_line_length": 56.87016296386719, "blob_id": "928c16ef66dd78f51c3150bd1d09841067b27272", "content_id": "c85194d5c19f844e71191ed8638f651c010d2042", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46363, "license_type": "permissive", "max_line_length": 311, "num_lines": 801, "path": "/OPVFilm.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n##########import package files##########\nfrom scipy import stats\nimport sys\nimport datetime\nimport calendar\nimport os as os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util as util\nimport ShadingCurtain\n#######################################################\n\n# command to show all array data\n# np.set_printoptions(threshold=np.inf)\n# 
# ---------------------------------------------------------------------------
# Solar-geometry and irradiance helpers for the OPV-covered greenhouse roof.
# All angles are in radians unless noted otherwise; "hourly" arguments are
# 1-D numpy arrays with one element per simulated hour.
# `constant` is the project module CropElectricityYeildSimulatorConstant,
# imported at the top of this file.
# ---------------------------------------------------------------------------

def getOPVArea(OPVCoverageRatio):
    '''
    Return the OPV module area for a given roof coverage ratio.

    :param OPVCoverageRatio: fraction of the greenhouse roof covered by OPV film [-]
    :return: OPV area [m^2]
    '''
    return OPVCoverageRatio * constant.greenhouseTotalRoofArea


def calcDeclinationAngle(year, month, day):
    '''
    Solar declination angle for each simulated day (Cooper's equation):
    delta = 23.45 deg * sin(360 deg * (284 + n) / 365), n = day of year.

    :param year: 1-D array of years (one entry per record)
    :param month: 1-D array of months
    :param day: 1-D array of days of month
    :return: declination angle [rad], same length as the inputs
    '''
    # day of year n, counted from 1 on January 1st. int() makes the date
    # construction robust to numpy scalar / float inputs.
    dayOfYear = np.array(
        [(datetime.date(int(y), int(m), int(d)) - datetime.date(int(y), 1, 1)).days + 1
         for y, m, d in zip(year, month, day)],
        dtype=float)
    return np.radians(23.45 * np.sin(np.radians(360.0 * (284.0 + dayOfYear) / 365.0)))


def getSolarHourAngleKacira2003(hour):
    '''
    Solar hour angle [rad]: 4 minutes of clock time per degree of hour angle.

    NOTE: Kacira et al. (2003) writes (noon - hour); the sign is flipped here
    on purpose so the convention matches Yano et al. (2009): negative in the
    morning, positive in the afternoon.

    :param hour: local hour(s), scalar or array
    :return: hour angle [rad]
    '''
    return np.radians(constant.minuteperHour * (hour - constant.noonHour) / 4.0)


def getSolarHourAngleYano2009(hour):
    '''
    Solar hour angle per Yano et al. (2009): (pi/12) * (LAT - 12).

    TODO(review): the 0.4 h offset approximates the local-apparent-time (LAT)
    correction for Tucson, AZ; replace with a proper equation-of-time /
    longitude correction.

    :param hour: local hour(s), scalar or array
    :return: hour angle [rad]
    '''
    localApparentTime = hour - 0.4
    return (math.pi / 12.0) * (localApparentTime - constant.noonHour)


def calcSolarAltitudeAngle(hourlyDeclinationAngle, hourlySolarHourAngle):
    '''
    Solar altitude angle alpha [rad]:
    sin(alpha) = cos(phi)cos(delta)cos(omega) + sin(phi)sin(delta),
    with phi = site latitude (constant.Latitude).

    :param hourlyDeclinationAngle: delta [rad]
    :param hourlySolarHourAngle: omega [rad]
    :return: altitude angle [rad]; negative values mean the sun is below the horizon
    '''
    phi = constant.Latitude
    return np.arcsin(np.cos(phi) * np.cos(hourlyDeclinationAngle) * np.cos(hourlySolarHourAngle)
                     + np.sin(phi) * np.sin(hourlyDeclinationAngle))


def calcSolarAzimuthAngle(hourlyDeclinationAngle, hourlySolarAltitudeAngle, hourlySolarHourAngle):
    '''
    Solar azimuth angle [rad]. The sign follows the hour angle: negative in
    the morning, positive in the afternoon.

    :param hourlyDeclinationAngle: delta [rad]
    :param hourlySolarAltitudeAngle: alpha [rad]
    :param hourlySolarHourAngle: omega [rad]
    :return: azimuth angle [rad]
    '''
    phi = constant.Latitude
    cosAzimuth = (np.sin(hourlySolarAltitudeAngle) * np.sin(phi) - np.sin(hourlyDeclinationAngle)) \
                 / (np.cos(hourlySolarAltitudeAngle) * np.cos(phi))
    # Floating-point rounding can push cos(azimuth) slightly outside [-1, 1],
    # which would make arccos return NaN, so clamp first.
    cosAzimuth = np.clip(cosAzimuth, -1.0, 1.0)
    return np.sign(hourlySolarHourAngle) * np.arccos(cosAzimuth)


def calcSolarIncidenceAngleKacira2003(hourlyDeclinationAngle, hourlySolarHourAngle, hourlysurfaceAzimuthAngle):
    '''
    Incidence angle on the tilted OPV surface [rad], ITACA formulation.
    Surface tilt S = constant.OPVAngle, site latitude phi = constant.Latitude.

    NOTE: Kacira et al. (2003) prints the same expression with opposite signs
    on the 2nd, 4th and 5th terms; the ITACA form is used here deliberately.

    :param hourlyDeclinationAngle: delta [rad]
    :param hourlySolarHourAngle: omega [rad]
    :param hourlysurfaceAzimuthAngle: surface azimuth gamma [rad]
    :return: incidence angle [rad]
    '''
    sinDelta = np.sin(hourlyDeclinationAngle)
    cosDelta = np.cos(hourlyDeclinationAngle)
    sinPhi = np.sin(constant.Latitude)
    cosPhi = np.cos(constant.Latitude)
    sinOmega = np.sin(hourlySolarHourAngle)
    cosOmega = np.cos(hourlySolarHourAngle)
    sinGamma = np.sin(hourlysurfaceAzimuthAngle)
    cosGamma = np.cos(hourlysurfaceAzimuthAngle)
    sinS = np.sin(constant.OPVAngle)
    cosS = np.cos(constant.OPVAngle)

    cosIncidence = sinDelta * sinPhi * cosS + sinDelta * cosPhi * sinS * cosGamma \
        + cosDelta * cosPhi * cosS * cosOmega \
        - cosDelta * sinPhi * sinS * cosGamma * cosOmega \
        - cosDelta * sinS * sinGamma * sinOmega
    return np.arccos(cosIncidence)


def calcSolarIncidenceAngleYano2009(hourlySolarAltitudeAngle, hourlySolarAzimuthAngle, hourlyModuleAzimuthAngle, OPVAngle=None):
    '''
    Incidence angle on the PV module [rad], per Yano et al. (2009):
    cos(theta) = sin(alpha)cos(S) + cos(alpha)sin(S)cos(phi_S - phi_P)

    :param hourlySolarAltitudeAngle: alpha [rad]
    :param hourlySolarAzimuthAngle: phi_S [rad]
    :param hourlyModuleAzimuthAngle: phi_P [rad]
    :param OPVAngle: module tilt S [rad]; defaults to constant.OPVAngle
    :return: incidence angle [rad]
    '''
    # Late-bound default: the original default `OPVAngle=constant.OPVAngle`
    # was evaluated once at import time, so later changes to the constant
    # module would never be seen. Resolving it at call time fixes that while
    # keeping the call signature backward-compatible.
    if OPVAngle is None:
        OPVAngle = constant.OPVAngle
    return np.arccos(np.sin(hourlySolarAltitudeAngle) * np.cos(OPVAngle)
                     + np.cos(hourlySolarAltitudeAngle) * np.sin(OPVAngle)
                     * np.cos(hourlySolarAzimuthAngle - hourlyModuleAzimuthAngle))


def getMaxDirectBeamSolarRadiationKacira2003(hourlySolarAltitudeAngle, hourlyHorizontalDirectOuterSolarIrradiance, hourlyZenithAngle):
    '''
    Direct-beam irradiance on a plane normal to the beam [W m^-2]:
    horizontal direct irradiance / cos(zenith angle). Zero at night.

    :param hourlySolarAltitudeAngle: [rad]; negative means night
    :param hourlyHorizontalDirectOuterSolarIrradiance: [W m^-2]
    :param hourlyZenithAngle: [rad]
    :return: beam-normal irradiance [W m^-2]
    '''
    maxDirectBeamSolarRadiation = np.zeros(hourlySolarAltitudeAngle.shape[0])
    # divide only for daytime hours so a night-time cos(zenith) near zero
    # cannot produce spurious infinities/warnings
    daytime = hourlySolarAltitudeAngle >= 0.0
    maxDirectBeamSolarRadiation[daytime] = \
        hourlyHorizontalDirectOuterSolarIrradiance[daytime] / np.cos(hourlyZenithAngle[daytime])
    return maxDirectBeamSolarRadiation


def getDirectHorizontalSolarRadiation(hourlySolarAltitudeAngle, hourlyHorizontalSolarIncidenceAngle):
    '''
    Direct solar radiation on a horizontal surface [W m^-2], Yano 2009 eq. (6):
    I_0 * tau^(1/sin(alpha)) * sin(alpha). Forced to zero at night.

    :param hourlySolarAltitudeAngle: alpha [rad]
    :param hourlyHorizontalSolarIncidenceAngle: [rad]; kept for interface
        compatibility — night hours are detected from the altitude angle
    :return: direct horizontal irradiance [W m^-2]
    '''
    directHorizontalSolarRadiation = constant.solarConstant \
        * (constant.atmosphericTransmissivity ** (1.0 / np.sin(hourlySolarAltitudeAngle))) \
        * np.sin(hourlySolarAltitudeAngle)
    # the sun is below the horizon -> no direct radiation
    directHorizontalSolarRadiation[hourlySolarAltitudeAngle < 0.0] = 0.0
    return directHorizontalSolarRadiation


def getDiffuseHorizontalSolarRadiation(hourlySolarAltitudeAngle, hourlyHorizontalSolarIncidenceAngle):
    '''
    Diffuse solar radiation on a horizontal surface [W m^-2], Yano 2009 eq. (7).
    Forced to zero at night.

    :param hourlySolarAltitudeAngle: alpha [rad]
    :param hourlyHorizontalSolarIncidenceAngle: [rad]; kept for interface
        compatibility — night hours are detected from the altitude angle
    :return: diffuse horizontal irradiance [W m^-2]
    '''
    tau = constant.atmosphericTransmissivity
    diffuseHorizontalSolarRadiation = constant.solarConstant * np.sin(hourlySolarAltitudeAngle) \
        * (1 - tau ** (1.0 / np.sin(hourlySolarAltitudeAngle))) \
        / (2.0 * (1 - 1.4 * np.log(tau)))
    diffuseHorizontalSolarRadiation[hourlySolarAltitudeAngle < 0.0] = 0.0
    return diffuseHorizontalSolarRadiation
hourlyHorizontalDirectOuterSolarIrradiance):\n '''\n calculate the direct solar radiation to tilted surface. referred to Yano 2009 for both cases\n '''\n\n # if we estimate the solar radiation (calculate the solar radiation without real data), get into this statement\n if simulatorClass.getEstimateSolarRadiationMode() == True:\n # if (hourlyHorizontalDirectOuterSolarIrradiance == 0.0).all():\n\n # print(\"at OPVFilm.getDirectTitledSolarRadiation, hourlyHorizontalDirectOuterSolarIrradiance does not have data\")\n # if there is no data about solar radiation, assume the solar radiation from the following equation, which is cited from A. Yano, \"electrical energy generated by photovoltaic modules mounted inside the roof of a north-south oriented greenhouse\", 2009\n directTiltedSolarRadiation = constant.solarConstant * (constant.atmosphericTransmissivity ** (1.0 / np.sin(hourlySolarAltitudeAngle))) * \\\n np.cos(hourlySolarIncidenceAngle)\n\n # direct tilted solar radiation is defined as 0 when the incidence angle is >90 or <-90 (|incidence angle| > 90 degree)\n for i in range (0, hourlySolarIncidenceAngle.shape[0]):\n # if abs(hourlySolarIncidenceAngle[i]) >= (math.pi) / 2.0:\n if hourlySolarAltitudeAngle[i] < 0.0 or directTiltedSolarRadiation[i] < 0.0:\n directTiltedSolarRadiation[i] = 0.0\n\n # np.set_printoptions(threshold=np.inf)\n # print (\"directTiltedSolarRadiation: {}\".format(directTiltedSolarRadiation))\n # np.set_printoptions(threshold=1000)\n\n\n return directTiltedSolarRadiation\n\n # if we calculate the solar radiation with real data, get into this statement\n else:\n directTiltedSolarRadiation = constant.solarConstant * (constant.atmosphericTransmissivity ** (1.0 / np.sin(hourlySolarAltitudeAngle))) * \\\n np.cos(hourlySolarIncidenceAngle)\n\n # direct tilted solar radiation is defined as 0 when the incidence angle is >90 or <-90 (|incidence angle| > 90 degree)\n for i in range (0, hourlySolarIncidenceAngle.shape[0]):\n # if 
abs(hourlySolarIncidenceAngle[i]) >= (math.pi) / 2.0:\n if hourlySolarAltitudeAngle[i] < 0.0 or directTiltedSolarRadiation[i] < 0.0:\n directTiltedSolarRadiation[i] = 0.0\n\n # print (\"directTiltedSolarRadiation:{}\".format(directTiltedSolarRadiation))\n\n\n # TODO: delete the following process if not necessary\n ############ outliers data correction start ############\n # when this value is 2, then average just 1 hour before and after the ith hour\n averageHourRange = 2\n numElBefore = averageHourRange -1\n numElAfter = averageHourRange\n # [W m^-2]\n outlierLimitation = 1100.0\n print (\"max light intensity:{}\".format(np.max(directTiltedSolarRadiation)))\n print (\"mean light intensity:{}\".format(np.mean(directTiltedSolarRadiation)))\n\n #First correction. If the solar radiations binding the outlier are zero, then set the outlier zero.\n for i in range (1, hourlySolarIncidenceAngle.shape[0]-1):\n # if directTiltedSolarRadiation[i] > outlierLimitation and (directTiltedSolarRadiation[i -1] == 0.) and (directTiltedSolarRadiation[i + 1] == 0.):\n if (directTiltedSolarRadiation[i -1] == 0.) and (directTiltedSolarRadiation[i + 1] == 0.):\n directTiltedSolarRadiation[i] = 0.0\n\n # Second correction. 
If the solar radiations binding the outlier are not zero, then set the outlier the average of solar radiations 1 hour after and before the outlier hour.\n # print (\"outlier indices before correction:{}\".format([i for i, x in enumerate(directTiltedSolarRadiation) if x > 1.2 * (np.sum(directTiltedSolarRadiation[i-numElBefore :i+numElAfter]) - directTiltedSolarRadiation[i]) / (numElBefore+numElAfter -1.0)]))\n print (\"outlier indices before correction:{}\".format([i for i, x in enumerate(directTiltedSolarRadiation) if x > outlierLimitation]))\n # this correction was made because some outliers occured by the calculation of directTiltedSolarRadiation (= hourlyHorizontalDirectOuterSolarIrradiance * np.cos(hourlySolarIncidenceAngle) / np.sin(hourlySolarAltitudeAngle))\n # \"np.cos(hourlySolarIncidenceAngle) / np.sin(hourlySolarAltitudeAngle)\" can be very larger number like 2000\n # if a light intensity [W /m^2] at a certain hour is more than the maximum light intensity by 20%, the value is replaced by the average light intensity before/after 3 hours\n # directTiltedSolarRadiationExcludingOutliers = np.array([(np.sum(directTiltedSolarRadiation[i-numElBefore :i+numElAfter]) - directTiltedSolarRadiation[i]) / float(numElBefore+numElAfter -1.0) if\\\n # x > 1.5 * (np.sum(directTiltedSolarRadiation[i-numElBefore :i+numElAfter]) - directTiltedSolarRadiation[i]) / (numElBefore+numElAfter -1.0) \\\n # else x for i, x in enumerate(directTiltedSolarRadiation) ])\n directTiltedSolarRadiationExcludingOutliers = np.array([(np.sum(directTiltedSolarRadiation[i-numElBefore :i+numElAfter]) - directTiltedSolarRadiation[i]) / float(numElBefore+numElAfter -1.0) if\\\n x > outlierLimitation else x for i, x in enumerate(directTiltedSolarRadiation) ])\n\n print (\"outlier indices after correction:{}\".format([i for i, x in enumerate(directTiltedSolarRadiationExcludingOutliers) if x > outlierLimitation]))\n ############ outliers data correction end ############\n\n # print (\"outlier indices 
after correction:{}\".format([i for i, x in enumerate(directTiltedSolarRadiationExcludingOutliers) if x > 1.2 * (np.sum(directTiltedSolarRadiationExcludingOutliers[i-numElBefore :i+numElAfter]) - directTiltedSolarRadiationExcludingOutliers[i]) / (numElBefore+numElAfter -1.0)]))\n # print \"hourlySolarIncidenceAngle:{}\".format(hourlySolarIncidenceAngle)\n\n # print \"directTiltedSolarRadiationExcludingOutliers:{}\".format(directTiltedSolarRadiationExcludingOutliers)\n\n # return directTiltedSolarRadiation\n return directTiltedSolarRadiationExcludingOutliers\n\n\ndef getDiffuseTitledSolarRadiation(simulatorClass, hourlySolarAltitudeAngle, estimatedDiffuseHorizontalSolarRadiation, hourlyHorizontalDiffuseOuterSolarIrradiance = \\\n np.zeros(util.calcSimulationDaysInt() * constant.hourperDay)):\n '''\n calculate the diffuse solar radiation to tilted surface. referred to Yano 2009\n :param hourlySolarAltitudeAngle: [rad]\n :param diffuseHorizontalSolarRadiation: [W m^-2]\n\n :return: maxDirectBeamSolarRadiation [W m^-2]\n '''\n\n # if (hourlyHorizontalDiffuseOuterSolarIrradiance == 0.0).all():\n if simulatorClass.getEstimateSolarRadiationMode() == True:\n\n print(\"at OPVFilm.getDiffuseTitledSolarRadiation, hourlyHorizontalDiffuseOuterSolarIrradiance does not have data\")\n # if there is no data about solar radiation, assume the solar radiation from the following equation, which is cited from A. 
Yano, \"electrical energy generated by photovoltaic modules mounted inside the roof of a north-south oriented greenhouse\", 2009\n diffuseTiltedSolarRadiation = estimatedDiffuseHorizontalSolarRadiation * (1 + np.cos(constant.OPVAngle)) / 2.0\n else:\n print(\"at OPVFilm.getDiffuseTitledSolarRadiation, hourlyHorizontalDiffuseOuterSolarIrradiance has data\")\n diffuseTiltedSolarRadiation = hourlyHorizontalDiffuseOuterSolarIrradiance * (1 + np.cos(constant.OPVAngle)) / 2.0\n\n # diffuse tilted solar radiation is defined as 0 when the elevation/altitude angle is <= 0\n for i in range (0, hourlySolarAltitudeAngle.shape[0]):\n if hourlySolarAltitudeAngle[i] < 0.0:\n diffuseTiltedSolarRadiation[i] = 0.0\n\n return diffuseTiltedSolarRadiation\n\n\ndef getAlbedoTitledSolarRadiation(simulatorClass, hourlySolarAltitudeAngle, estimatedTotalHorizontalSolarRadiation, hourlyHorizontalTotalOuterSolarIrradiance = \\\n np.zeros(util.calcSimulationDaysInt() * constant.hourperDay)):\n '''\n calculate the albedo (reflection) solar radiation to tilted surface. referred to Yano 2009\n :param diffuseHorizontalSolarRadiation: [W m^-2]\n :param hourlySolarAltitudeAngle: [rad]\n :return: totalHorizontalSolarRadiation [W m^-2]\n '''\n\n # if (hourlyHorizontalTotalOuterSolarIrradiance == 0.0).all():\n if simulatorClass.getEstimateSolarRadiationMode() == True:\n\n print(\"at OPVFilm.getAlbedoTitledSolarRadiation, getAlbedoTitledSolarRadiation does not have data\")\n # if there is no data about solar radiation, assume the solar radiation from the following equation, which is cited from A. 
Yano, \"electrical energy generated by photovoltaic modules mounted inside the roof of a north-south oriented greenhouse\", 2009\n albedoTiltedSolarRadiation = constant.groundReflectance * estimatedTotalHorizontalSolarRadiation * (1 - np.cos(constant.OPVAngle)) / 2.0\n else:\n print(\"at OPVFilm.getAlbedoTitledSolarRadiation, getAlbedoTitledSolarRadiation has data\")\n albedoTiltedSolarRadiation = constant.groundReflectance * hourlyHorizontalTotalOuterSolarIrradiance * (1 - np.cos(constant.OPVAngle)) / 2.0\n\n # diffuse tilted solar radiation is defined as 0 when the elevation/altitude angle is <= 0\n for i in range (0, hourlySolarAltitudeAngle.shape[0]):\n # if abs(hourlySolarAltitudeAngle[i]) <= 0.0:\n if hourlySolarAltitudeAngle[i] < 0.0:\n albedoTiltedSolarRadiation[i] = 0.0\n\n return albedoTiltedSolarRadiation\n\n\ndef calcDirectBeamSolarRadiationNormalToTiltedOPVKacira2003(hourlySolarAltitudeAngle,maxDirectBeamSolarRadiation, hourlySolarIncidenceAngle):\n '''\n calculate the max direct beam solar radiation perpendicular to the tilted OPV\n :param hourlySolarAltitudeAngle: [rad]\n :param maxDirectBeamSolarRadiation: [W m^-2]\n :param hourlySolarIncidenceAngle: [rad]\n :return: directBeamSolarRadiationNormalToTiltedOPV [W m^-2]\n '''\n\n directBeamSolarRadiationNormalToTiltedOPV = np.zeros(hourlySolarAltitudeAngle.shape[0])\n # print \"directBeamSolarRadiationNormalToTiltedOPV:{}\".format(directBeamSolarRadiationNormalToTiltedOPV)\n\n # if the altitude angle is minus, it means it is night. 
Thus, the directBeamSolarRadiationNormalToTiltedOPV becomes zero.\n for i in range (0, hourlySolarAltitudeAngle.shape[0]):\n if hourlySolarAltitudeAngle[i] < 0.0:\n directBeamSolarRadiationNormalToTiltedOPV[i] = 0.0\n else:\n directBeamSolarRadiationNormalToTiltedOPV[i] = maxDirectBeamSolarRadiation[i] * np.cos(hourlySolarIncidenceAngle[i])\n\n # print\"maxDirectBeamSolarRadiation:{}\".format(maxDirectBeamSolarRadiation)\n # print \"np.degrees(hourlySolarIncidenceAngle):{}\".format(np.degrees(hourlySolarIncidenceAngle))\n\n\n return directBeamSolarRadiationNormalToTiltedOPV\n\ndef calcOPVElectricEnergyperArea(simulatorClass, hourlyOPVTemperature, solarRadiationToOPV):\n '''\n calculate the electric energy per OPV area during the defined days [J/day/m^2]\n :param hourlyOPVTemperature:\n :param Popvin:\n :return:\n '''\n # print(\"at OPVfilm.py, hourlyOPVTemperature.shape:{}\".format(hourlyOPVTemperature.shape))\n\n dailyJopvout = np.zeros(util.calcSimulationDaysInt())\n\n # make the list of OPV coverage ratio at each hour changing during summer\n # OPVAreaCoverageRatioChangingInSummer = getDifferentOPVCoverageRatioInSummerPeriod(constant.OPVAreaCoverageRatio, simulatorClass)\n\n # the PV module degradation ratio by time\n PVDegradationRatio = np.array([1.0 - constant.PVDegradationRatioPerHour * i for i in range (0, hourlyOPVTemperature.shape[0])])\n\n for day in range (0, util.calcSimulationDaysInt()):\n # print \"day:{}, Popvin[day*constant.hourperDay : (day+1)*constant.hourperDay]:{}\".format(day, Popvin[day*constant.hourperDay : (day+1)*constant.hourperDay])\n # unit: [W/m^2] -> [J/m^2] per day\n dailyJopvout[day] = calcOPVElectricEnergyperAreaperDay(\\\n hourlyOPVTemperature[day*constant.hourperDay : (day+1)*constant.hourperDay], \\\n solarRadiationToOPV[day*constant.hourperDay : (day+1)*constant.hourperDay], PVDegradationRatio[day*constant.hourperDay : (day+1)*constant.hourperDay])\n\n\n return dailyJopvout\n\ndef 
calcOPVElectricEnergyperAreaperDay(hourlyOPVTemperature, Popvin, PVDegradationRatio):\n '''\n calculate the electric energy per OPV area only for a day [J/day/m^2]\n [the electric energy per OPV area per day]\n W_opvout=0.033∫_(t_sunrise)^(t_sunset) (1+C_Voctemp * (T_opv - 25[°C]))(1+ C_Isctemp * (T_opv - 25[°C])) * P_opvin\n param: Popvin: Hourly Outer Light Intensity [Watt/m^2] = [J/second/m^2]\n param: HourlyOuterTemperature: Hourly Outer Temperature [Celsius/hour]\n return: Wopvout, scalar: OPV Electric Energy production per Area per day [J/day/m^2].\n '''\n #unit [W/m^2]\n JopvoutAday = 0\n\n ##change the unit from second to hour [J/second/m^2] -> [J/hour/m^2]\n Popvin = Popvin * 60.0 * 60.0\n\n #print\"int(constant.hourperDay):{}\".format(int(constant.hourperDay))\n #calculate the electric energy per OPV area (watt/m^2)\n for hour in range (0, int(constant.hourperDay)):\n # Jopvout += constant.OPVEfficiencyRatioSTC * constant.degradeCoefficientFromIdealtoReal * \\\n # (1.0 + constant.TempCoeffitientVmpp * (hourlyOPVTemperature[hour] - constant.STCtemperature)) * \\\n # (1.0 + constant.TempCoeffitientImpp * (hourlyOPVTemperature[hour] - constant.STCtemperature)) * \\\n # Popvin [hour]\n JopvoutAday += constant.OPVEfficiencyRatioSTC * constant.degradeCoefficientFromIdealtoReal * PVDegradationRatio[hour] * \\\n (1.0 + constant.TempCoeffitientPmpp * (hourlyOPVTemperature[hour] - constant.STCtemperature)) * Popvin[hour]\n\n #print \"Jopvout:{} (J/m^2)\".format(Wopvout)\n return JopvoutAday\n\ndef getMonthlyElectricityProductionFromDailyData (dailyElectricityPerArea, yearOfeachDay, monthOfeachDay):\n '''\n summing the daily electricity produce to monthly produce\n '''\n # print \"yearOfeachDay:{}\".format(yearOfeachDay)\n # print \"monthOfeachDay:{}\".format(monthOfeachDay)\n\n numOfMonths = util.getSimulationMonthsInt()\n # print \"numOfMonths:{}\".format(numOfMonths)\n monthlyElectricityPerArea = np.zeros(numOfMonths)\n month = 0\n # insert the initial value\n 
monthlyElectricityPerArea[month] += dailyElectricityPerArea[0]\n for day in range (1, util.calcSimulationDaysInt()):\n if monthOfeachDay[day-1] != monthOfeachDay[day]:\n month += 1\n monthlyElectricityPerArea[month] += dailyElectricityPerArea[day]\n\n return monthlyElectricityPerArea\n\ndef getMonthlyElectricitySalesperArea(monthlyKWhopvoutperArea, monthlyResidentialElectricityPrice):\n '''\n\n :param monthlyKWhopvoutperArea:\n :param monthlyResidentialElectricityPrice:\n :return: [USD/month/m^2]\n '''\n # devided by 100 to convert [USCent] into [USD]\n return monthlyKWhopvoutperArea * (monthlyResidentialElectricityPrice / 100.0)\n\n\ndef calcRevenueOfElectricityProductionperMonth(WopvoutperHarvestperMonth, monthlyAverageElectricityPrice):\n '''\n calculate the electric energy wholesale price per month.\n param: WopvoutperHarvestperMonth: electric energy produced per month with given area [J/month]\n param: monthlyTucsonAverageWholesalelPriceOfElectricity: wholesale price of electricity in Tucson [USD/Mega Watt hour (MWh) for each month]\n\n return: revenueOfElectricityProductionperMonth, scalar: the wholesale price revenue of electricity per month (USD/month).\n '''\n #unit conversion [J/month = Watt*sec/month -> MWh/month]\n #WopvoutperHarvestMWhperMonth = WopvoutperHarvestperMonth * 3600.0 / (10.0**6)\n WopvoutperHarvestMWhperMonth = WopvoutperHarvestperMonth / (10.0**6) / 3600.0\n #print \"WopvoutperHarvestMWhperMonth:{}\".format(WopvoutperHarvestMWhperMonth)\n #print \"monthlyTucsonAverageWholesalelPriceOfElectricity:{}\".format(monthlyTucsonAverageWholesalelPriceOfElectricity)\n\n #unit: USD/month\n revenueOfElectricityProductionperMonth = WopvoutperHarvestMWhperMonth * monthlyAverageElectricityPrice\n #print \"revenueOfElectricityProductionperMonth(USD/month):{}\".format(revenueOfElectricityProductionperMonth)\n\n return revenueOfElectricityProductionperMonth\n\n\ndef calcCostofElectricityProduction(OPVArea):\n '''\n calculate the cost to install OPV 
film\n\n param: OPVArea: area of OPV film (m^2)\n\n return: cost for the OPV film (USD)\n\n '''\n return OPVArea * constant.OPVPricePerAreaUSD\n\n\ndef getDirectSolarIrradianceBeforeShadingCurtain(simulatorClass):\n\n # get the direct solar irradiance after penetrating multi span roof [W/m^2]\n hourlyDirectSolarRadiationAfterMultiSpanRoof = simulatorClass.getHourlyDirectSolarRadiationAfterMultiSpanRoof()\n\n # OPVAreaCoverageRatio = simulatorClass.getOPVAreaCoverageRatio()\n OPVAreaCoverageRatio = constant.OPVAreaCoverageRatio\n # ShadingCurtainDeployPPFD = simulatorClass.getShadingCurtainDeployPPFD()\n OPVPARTransmittance = constant.OPVPARTransmittance\n\n # make the list of OPV coverage ratio at each hour changing during summer\n OPVAreaCoverageRatioChangingInSummer = getDifferentOPVCoverageRatioInSummerPeriod(OPVAreaCoverageRatio, simulatorClass)\n # print(\"OPVAreaCoverageRatioChangingInSummer:{}\".format(OPVAreaCoverageRatioChangingInSummer))\n\n # consider the transmission ratio of OPV film\n hourlyDirectSolarRadiationAfterOPVAndRoof = hourlyDirectSolarRadiationAfterMultiSpanRoof * (1 - OPVAreaCoverageRatioChangingInSummer) \\\n + hourlyDirectSolarRadiationAfterMultiSpanRoof * OPVAreaCoverageRatioChangingInSummer * OPVPARTransmittance\n # print \"OPVAreaCoverageRatio:{}, HourlyInnerLightIntensityPPFDThroughOPV:{}\".format(OPVAreaCoverageRatio, HourlyInnerLightIntensityPPFDThroughOPV)\n\n # consider the light reduction by greenhouse inner structures and equipments like pipes, poles and gutters\n hourlyDirectSolarRadiationAfterInnerStructure = (1 - constant.GreenhouseShadeProportionByInnerStructures) * hourlyDirectSolarRadiationAfterOPVAndRoof\n # print \"hourlyInnerLightIntensityPPFDThroughInnerStructure:{}\".format(hourlyInnerLightIntensityPPFDThroughInnerStructure)\n\n return hourlyDirectSolarRadiationAfterInnerStructure\n\n\ndef getDirectSolarIrradianceToPlants(simulatorClass, hourlyDirectSolarRadiationAfterInnerStructure):\n\n hasShadingCurtain = 
simulatorClass.getIfHasShadingCurtain()\n\n # take date and time\n year = simulatorClass.getYear()\n month = simulatorClass.getMonth()\n day = simulatorClass.getDay()\n hour = simulatorClass.getHour()\n\n # array storing the light intensity after penetrating the shading curtain\n hourlyDirectSolarRadiationAfterShadingCurtain= np.zeros(hour.shape[0])\n\n # consider the shading curtain\n if hasShadingCurtain == True:\n # if the date is between the following time and date, discount the irradiance by the shading curtain transmittance.\n # if we assume the shading curtain is deployed all time for the given period,\n if constant.IsShadingCurtainDeployOnlyDayTime == False:\n for i in range(0, hour.shape[0]):\n if (datetime.date(year[i], month[i], day[i]) >= datetime.date(year[i], constant.ShadingCurtainDeployStartMMSpring, constant.ShadingCurtainDeployStartDDSpring ) and \\\n datetime.date(year[i], month[i], day[i]) <= datetime.date(year[i], constant.ShadingCurtainDeployEndMMSpring, constant.ShadingCurtainDeployEndDDSpring)) or\\\n (datetime.date(year[i], month[i], day[i]) >= datetime.date(year[i], constant.ShadingCurtainDeployStartMMFall, constant.ShadingCurtainDeployStartDDFall ) and \\\n datetime.date(year[i], month[i], day[i]) <= datetime.date(year[i], constant.ShadingCurtainDeployEndMMFall, constant.ShadingCurtainDeployEndDDFall)):\n # deploy the shading curtain\n hourlyDirectSolarRadiationAfterShadingCurtain[i] = hourlyDirectSolarRadiationAfterInnerStructure[i] * constant.shadingTransmittanceRatio\n\n else:\n # not deploy the curtain\n hourlyDirectSolarRadiationAfterShadingCurtain[i] = hourlyDirectSolarRadiationAfterInnerStructure[i]\n\n # if we assume the shading curtain is deployed only for a fixed hot time defined at the constant class in a day, use this\n elif constant.IsShadingCurtainDeployOnlyDayTime == True and constant.IsDifferentShadingCurtainDeployTimeEachMonth == False:\n\n for i in range(0, hour.shape[0]):\n if (datetime.date(year[i], month[i], day[i]) 
>= datetime.date(year[i], constant.ShadingCurtainDeployStartMMSpring, constant.ShadingCurtainDeployStartDDSpring ) and \\\n datetime.date(year[i], month[i], day[i]) <= datetime.date(year[i], constant.ShadingCurtainDeployEndMMSpring, constant.ShadingCurtainDeployEndDDSpring) and \\\n hour[i] >= constant.ShadigCuratinDeployStartHH and hour[i] <= constant.ShadigCuratinDeployEndHH) or\\\n (datetime.date(year[i], month[i], day[i]) >= datetime.date(year[i], constant.ShadingCurtainDeployStartMMFall, constant.ShadingCurtainDeployStartDDFall ) and \\\n datetime.date(year[i], month[i], day[i]) <= datetime.date(year[i], constant.ShadingCurtainDeployEndMMFall, constant.ShadingCurtainDeployEndDDFall) and \\\n hour[i] >= constant.ShadigCuratinDeployStartHH and hour[i] <= constant.ShadigCuratinDeployEndHH):\n # deploy the shading curtain\n hourlyDirectSolarRadiationAfterShadingCurtain[i] = hourlyDirectSolarRadiationAfterInnerStructure[i] * constant.shadingTransmittanceRatio\n\n else:\n # not deploy the curtain\n hourlyDirectSolarRadiationAfterShadingCurtain[i] = hourlyDirectSolarRadiationAfterInnerStructure[i]\n\n # if we assume the shading curtain is deployed for the time which dynamically changes each month, use this\n elif constant.IsShadingCurtainDeployOnlyDayTime == True and constant.IsDifferentShadingCurtainDeployTimeEachMonth == True:\n\n # having shading curtain transmittance each hour. 
1 = no shading curatin, the transmittance of shading curtain = deploy curtain\n transmittanceThroughShadingCurtainChangingEachMonth = simulatorClass.transmittanceThroughShadingCurtainChangingEachMonth\n\n hourlyDirectSolarRadiationAfterShadingCurtain = hourlyDirectSolarRadiationAfterInnerStructure * transmittanceThroughShadingCurtainChangingEachMonth\n\n return hourlyDirectSolarRadiationAfterShadingCurtain\n\ndef getDiffuseSolarIrradianceBeforeShadingCurtain(simulatorClass):\n\n # get the diffuse solar irradiance, which has not consider the transmittance of OPV film yet.\n # diffuseHorizontalSolarRadiation = simulatorClass.getDiffuseSolarRadiationToOPV()\n diffuseHorizontalSolarRadiation = simulatorClass.diffuseHorizontalSolarRadiation\n # print(\"diffuseHorizontalSolarRadiation.shape:{}\".format(diffuseHorizontalSolarRadiation.shape))\n\n # consider the influecne of the PV module on the roof\n overallRoofCoveringTrasmittance = constant.roofCoveringTransmittance * (1 - constant.OPVAreaCoverageRatio) + \\\n (constant.roofCoveringTransmittance + constant.OPVPARTransmittance) * constant.OPVAreaCoverageRatio\n # get the average transmittance through roof and sidewall\n transmittanceThroughWallAndRoof = (constant.greenhouseSideWallArea * constant.sideWallTransmittance + constant.greenhouseTotalRoofArea * overallRoofCoveringTrasmittance) \\\n / (constant.greenhouseSideWallArea + constant.greenhouseTotalRoofArea)\n # consider the light reflection by greenhouse inner structures and equipments like pipes, poles and gutters\n transmittanceThroughInnerStructure = (1 - constant.GreenhouseShadeProportionByInnerStructures) * transmittanceThroughWallAndRoof\n\n hourlyDiffuseSolarRadiationAfterShadingCurtain = diffuseHorizontalSolarRadiation * transmittanceThroughInnerStructure\n\n return hourlyDiffuseSolarRadiationAfterShadingCurtain\n\n\ndef getDiffuseSolarIrradianceToPlants(simulatorClass, hourlyDiffuseSolarRadiationAfterShadingCurtain):\n '''\n the diffuse solar radiation was 
calculated by multiplying the average transmittance of all covering materials of the greenhouse (side wall, roof and PV module\n '''\n\n transmittanceThroughShadingCurtainChangingEachMonth = simulatorClass.transmittanceThroughShadingCurtainChangingEachMonth\n # print(\"at OPVFilm, getDiffuseSolarIrradianceToPlants, transmittanceThroughShadingCurtainChangingEachMonth:{}\".format(transmittanceThroughShadingCurtainChangingEachMonth))\n\n # consider the influence of shading curtain\n transmittanceThroughShadingCurtain = (constant.greenhouseSideWallArea + transmittanceThroughShadingCurtainChangingEachMonth * constant.greenhouseFloorArea) / (constant.greenhouseSideWallArea + constant.greenhouseFloorArea)\n\n # get the diffuse solar irradiance after penetrating the greenhouse cover material\n diffuseSolarIrradianceToPlants = hourlyDiffuseSolarRadiationAfterShadingCurtain * transmittanceThroughShadingCurtain\n\n return diffuseSolarIrradianceToPlants\n\n\ndef calcHourlyInnerLightIntensityPPFD(HourlyOuterLightIntensityPPFD, OPVAreaCoverageRatio, OPVPARTransmissionRatio, hasShadingCurtain=False, \\\n shadingCurtainDeployPPFD=constant.shadingCurtainDeployPPFD, cropElectricityYieldSimulator1 = None):\n '''\n calculate the light intensity inside the greenhouse (inner light intensity) each hour\n\n param:HourlyOuterLightIntensityPPFD, vector: [μmol/m^2/s]\n param:OPVAreaCoverageRatio: the ratio that OPV film covers the roof\n param:OPVPARTransmissionRatio: the ratio of OPV film light transmittance\n param:hasShadingCurtain:\n param:shadingCurtainDeployPPFD: the baseline of shading curtain opening/closing\n param:cropElectricityYieldSimulator1: instance\n\n return: vector: [μmol/m^2/s]\n '''\n\n #consider the transmittance ratio of glazing\n InnerLightIntensityPPFDThroughGlazing = constant.dobulePERTransmittance * HourlyOuterLightIntensityPPFD\n # print \"OPVAreaCoverageRatio:{}, HourlyOuterLightIntensityPPFD:{}\".format(OPVAreaCoverageRatio, HourlyOuterLightIntensityPPFD)\n # 
print \"OPVAreaCoverageRatio:{}, InnerLightIntensityPPFDThroughGlazing:{}\".format(OPVAreaCoverageRatio, InnerLightIntensityPPFDThroughGlazing)\n\n # make the list of OPV coverage ratio at each hour fixing the ratio during summer\n oPVAreaCoverageRatioFixingInSummer = getDifferentOPVCoverageRatioInSummerPeriod(OPVAreaCoverageRatio, cropElectricityYieldSimulator1)\n\n # TODO the light intensity decrease by OPV film will be considered in calculating the solar iiradiance to multispan roof. move this calculation to CropElecricityYieldSimulationDetail.getSolarIrradianceToMultiSpanRoof in the future.\n #consider the transmission ratio of OPV film\n HourlyInnerLightIntensityPPFDThroughOPV = InnerLightIntensityPPFDThroughGlazing * (1 - oPVAreaCoverageRatioFixingInSummer) + InnerLightIntensityPPFDThroughGlazing * oPVAreaCoverageRatioFixingInSummer * OPVPARTransmissionRatio\n # print \"OPVAreaCoverageRatio:{}, HourlyInnerLightIntensityPPFDThroughOPV:{}\".format(OPVAreaCoverageRatio, HourlyInnerLightIntensityPPFDThroughOPV)\n\n #consider the light reflection by greenhouse inner structures and equipments like pipes, poles and gutters\n hourlyInnerLightIntensityPPFDThroughInnerStructure = (1 - constant.GreenhouseShadeProportionByInnerStructures) * HourlyInnerLightIntensityPPFDThroughOPV\n # print \"hourlyInnerLightIntensityPPFDThroughInnerStructure:{}\".format(hourlyInnerLightIntensityPPFDThroughInnerStructure)\n # set the value to the instance\n # cropElectricityYieldSimulator1.setHourlyInnerLightIntensityPPFDThroughInnerStructure(hourlyInnerLightIntensityPPFDThroughInnerStructure)\n\n # set the value to the instance\n cropElectricityYieldSimulator1.setHourlyInnerLightIntensityPPFDThroughGlazing(InnerLightIntensityPPFDThroughGlazing)\n cropElectricityYieldSimulator1.setHourlyInnerLightIntensityPPFDThroughInnerStructure(hourlyInnerLightIntensityPPFDThroughInnerStructure)\n\n # take date and time\n year = cropElectricityYieldSimulator1.getYear()\n month = 
cropElectricityYieldSimulator1.getMonth()\n day = cropElectricityYieldSimulator1.getDay()\n hour = cropElectricityYieldSimulator1.getHour()\n\n # array storing the light intensity after penetrating the shading curtain\n hourlyInnerLightIntensityPPFDThroughShadingCurtain = np.zeros(hour.shape[0])\n # consider the shading curtain\n if hasShadingCurtain == True:\n # if te date is between the following time and date, then discount the PPFD by the shading curtain transmittance the information about date is taken from cropElectricityYieldSimulator1 object\n # if we assume the shading curtain is deployed whole day for the given period,\n if constant.IsShadingCurtainDeployOnlyDayTime == False:\n for i in range(0, hour.shape[0]):\n if (datetime.date(year[i], month[i], day[i]) >= datetime.date(year[i], constant.ShadingCurtainDeployStartMMSpring, constant.ShadingCurtainDeployStartDDSpring ) and \\\n datetime.date(year[i], month[i], day[i]) <= datetime.date(year[i], constant.ShadingCurtainDeployEndMMSpring, constant.ShadingCurtainDeployEndDDSpring)) or\\\n (datetime.date(year[i], month[i], day[i]) >= datetime.date(year[i], constant.ShadingCurtainDeployStartMMFall, constant.ShadingCurtainDeployStartDDFall ) and \\\n datetime.date(year[i], month[i], day[i]) <= datetime.date(year[i], constant.ShadingCurtainDeployEndMMFall, constant.ShadingCurtainDeployEndDDFall)):\n # deploy the shading curtain\n hourlyInnerLightIntensityPPFDThroughShadingCurtain[i] = hourlyInnerLightIntensityPPFDThroughInnerStructure[i] * constant.shadingTransmittanceRatio\n\n else:\n # not deploy the curtain\n hourlyInnerLightIntensityPPFDThroughShadingCurtain[i] = hourlyInnerLightIntensityPPFDThroughInnerStructure[i]\n\n return hourlyInnerLightIntensityPPFDThroughShadingCurtain\n\n # if we assume the shading curtain is deployed only for a certain hot time in a day,\n elif constant.IsShadingCurtainDeployOnlyDayTime == True:\n for i in range(0, hour.shape[0]):\n if (datetime.date(year[i], month[i], day[i]) >= 
datetime.date(year[i], constant.ShadingCurtainDeployStartMMSpring, constant.ShadingCurtainDeployStartDDSpring ) and \\\n datetime.date(year[i], month[i], day[i]) <= datetime.date(year[i], constant.ShadingCurtainDeployEndMMSpring, constant.ShadingCurtainDeployEndDDSpring) and \\\n hour[i] >= constant.ShadigCuratinDeployStartHH and hour[i] <= constant.ShadigCuratinDeployEndHH) or\\\n (datetime.date(year[i], month[i], day[i]) >= datetime.date(year[i], constant.ShadingCurtainDeployStartMMFall, constant.ShadingCurtainDeployStartDDFall ) and \\\n datetime.date(year[i], month[i], day[i]) <= datetime.date(year[i], constant.ShadingCurtainDeployEndMMFall, constant.ShadingCurtainDeployEndDDFall) and \\\n hour[i] >= constant.ShadigCuratinDeployStartHH and hour[i] <= constant.ShadigCuratinDeployEndHH):\n # deploy the shading curtain\n hourlyInnerLightIntensityPPFDThroughShadingCurtain[i] = hourlyInnerLightIntensityPPFDThroughInnerStructure[i] * constant.shadingTransmittanceRatio\n\n else:\n # not deploy the curtain\n hourlyInnerLightIntensityPPFDThroughShadingCurtain[i] = hourlyInnerLightIntensityPPFDThroughInnerStructure[i]\n\n return hourlyInnerLightIntensityPPFDThroughShadingCurtain\n\n # the shading curtain algorithm which open/close shading curatin each hour\n # if PPFD is over hourlyInnerLightIntensityPPFD, then deploy the shading curtain, and decrease PPFD. 
otehrwise leave it\n # hourlyInnerLightIntensityPPFDThroughShadingCurtain = np.array([x * constant.shadingTransmittanceRatio\\\n # if x > shadingCurtainDeployPPFD else x for x in hourlyInnerLightIntensityPPFDThroughInnerStructure])\n # print \"InnerLightIntensityPPFDThroughShadingCurtain:{}\".format(InnerLightIntensityPPFDThroughShadingCurtain)\n # return hourlyInnerLightIntensityPPFDThroughShadingCurtain\n\n # come to here when not considering shading curtain\n return hourlyInnerLightIntensityPPFDThroughInnerStructure\n\n\ndef getDifferentOPVCoverageRatioInSummerPeriod(OPVAreaCoverageRatio, simulatorClass):\n '''\n this function changes the opv coverage ratio during the summer period into the constant ratio defined at the constant class.,\n\n :param OPVPARTransmissionRatio:\n :param cropElectricityYieldSimulator1:\n :return:\n '''\n\n # take date and time\n year = simulatorClass.getYear()\n month = simulatorClass.getMonth()\n day = simulatorClass.getDay()\n\n OPVCoverageRatio = np.zeros(year.shape[0])\n\n for i in range(0, year.shape[0]):\n # if it is during the summer period when shading curtain is deployed.\n if datetime.date(year[i], month[i], day[i]) >= datetime.date(year[i], constant.SummerPeriodStartMM, constant.SummerPeriodStartDD) and \\\n datetime.date(year[i], month[i], day[i]) <= datetime.date(year[i], constant.SummerPeriodEndMM, constant.SummerPeriodEndDD):\n OPVCoverageRatio[i] = constant.OPVAreaCoverageRatioSummerPeriod\n else:\n OPVCoverageRatio[i] = OPVAreaCoverageRatio\n\n # set the array to the objec\n simulatorClass.OPVCoverageRatiosConsiderSummerRatio = OPVCoverageRatio\n # print(\"OPVCoverageRatio:{}\".format(OPVCoverageRatio))\n\n return OPVCoverageRatio\n" }, { "alpha_fraction": 0.6936440467834473, "alphanum_fraction": 0.6987959146499634, "avg_line_length": 63.44401550292969, "blob_id": "8e7b652dc567e2e85487458116da8c6ec70a2ff5", "content_id": "fd3bd283dadf4f9af6ba17aab650609e391f6374", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 16693, "license_type": "permissive", "max_line_length": 173, "num_lines": 259, "path": "/CropElectricityYieldProfitMINLP.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "##########import package files##########\nfrom scipy import stats\nimport datetime\nimport sys\nimport os as os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util as util\nimport OPVFilm\n#import Lettuce\nimport CropElectricityYeildSimulatorDetail as simulatorDetail\nimport QlearningAgentShadingCurtain as QRLshadingCurtain\nimport SimulatorClass as SimulatorClass\n#######################################################\n\ndef simulateCropElectricityYieldProfitForMINLP():\n '''\n 1st simulator of crop and electricity yield and their profit\n :return: profitVSOPVCoverageData\n '''\n\n print (\"start modeling: datetime.datetime.now():{}\".format(datetime.datetime.now()))\n\n # declare the class\n simulatorClass = SimulatorClass.SimulatorClass()\n\n ##########file import (TucsonHourlyOuterEinvironmentData) start##########\n fileName = \"20130101-20170101\" + \".csv\"\n year, \\\n month, \\\n day, \\\n hour, \\\n hourlyHorizontalDiffuseOuterSolarIrradiance, \\\n hourlyHorizontalTotalOuterSolarIrradiance, \\\n hourlyHorizontalDirectOuterSolarIrradiance, \\\n hourlyHorizontalTotalBeamMeterBodyTemperature, \\\n hourlyAirTemperature, simulatorClass = util.getArraysFromData(fileName, simulatorClass)\n ##########file import (TucsonHourlyOuterEinvironmentData) end##########\n\n # set the values to the object\n simulatorClass.setYear(year)\n simulatorClass.setMonth(month)\n simulatorClass.setDay(day)\n simulatorClass.setHour(hour)\n ##########file import (TucsonHourlyOuterEinvironmentData) end##########\n\n\n ##########solar irradiance to OPV calculation start##########\n # calculate with real data\n # hourly average [W 
m^-2]\n directSolarRadiationToOPVEastDirection, directSolarRadiationToOPVWestDirection, diffuseSolarRadiationToOPV, albedoSolarRadiationToOPV = \\\n simulatorDetail.calcOPVmoduleSolarIrradianceGHRoof(year, month, day, hour, hourlyHorizontalDiffuseOuterSolarIrradiance, \\\n hourlyHorizontalDirectOuterSolarIrradiance, \"EastWestDirectionRoof\")\n # [W m^-2] per hour\n totalSolarRadiationToOPV = (directSolarRadiationToOPVEastDirection + directSolarRadiationToOPVWestDirection) / 2.0 + diffuseSolarRadiationToOPV + albedoSolarRadiationToOPV\n\n # # calculate without real data.\n # simulatedDirectSolarRadiationToOPVEastDirection, \\\n # simulatedDirectSolarRadiationToOPVWestDirection, \\\n # simulatedDiffuseSolarRadiationToOPV, \\\n # simulatedAlbedoSolarRadiationToOPV = simulatorDetail.calcOPVmoduleSolarIrradianceGHRoof(year, month, day, hour)\n # # [W m^-2] per hour\n # simulatedTotalSolarRadiationToOPV = simulatedDirectSolarRadiationToOPVEastDirection + simulatedDirectSolarRadiationToOPVWestDirection + \\\n # simulatedDiffuseSolarRadiationToOPV + simulatedAlbedoSolarRadiationToOPV\n # print \"directSolarRadiationToOPV:{}\".format(directSolarRadiationToOPV)\n # print \"diffuseSolarRadiationToOPV:{}\".format(diffuseSolarRadiationToOPV)\n # print \"groundReflectedSolarradiationToOPV:{}\".format(groundReflectedSolarradiationToOPV)\n\n # unit change: [W m^-2] -> [umol m^-2 s^-1] == PPFD\n directPPFDToOPVEastDirection = util.convertFromWattperSecSquareMeterToPPFD(directSolarRadiationToOPVEastDirection)\n directPPFDToOPVWestDirection = util.convertFromWattperSecSquareMeterToPPFD(directSolarRadiationToOPVWestDirection)\n diffusePPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(diffuseSolarRadiationToOPV)\n groundReflectedPPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(albedoSolarRadiationToOPV)\n totalPPFDToOPV = directPPFDToOPVEastDirection + directPPFDToOPVWestDirection + diffusePPFDToOPV + groundReflectedPPFDToOPV\n # 
print\"diffusePPFDToOPV.shape:{}\".format(diffusePPFDToOPV.shape)\n # set the matrix to the object\n simulatorClass.setDirectPPFDToOPVEastDirection(directPPFDToOPVEastDirection)\n simulatorClass.setDirectPPFDToOPVWestDirection(directPPFDToOPVWestDirection)\n simulatorClass.setDiffusePPFDToOPV(diffusePPFDToOPV)\n simulatorClass.setGroundReflectedPPFDToOPV(groundReflectedPPFDToOPV)\n\n # unit change: hourly [umol m^-2 s^-1] -> [mol m^-2 day^-1] == DLI :number of photons received in a square meter per day\n directDLIToOPVEastDirection = util.convertFromHourlyPPFDWholeDayToDLI(directPPFDToOPVEastDirection)\n directDLIToOPVWestDirection = util.convertFromHourlyPPFDWholeDayToDLI(directPPFDToOPVWestDirection)\n diffuseDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(diffusePPFDToOPV)\n groundReflectedDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(groundReflectedPPFDToOPV)\n totalDLIToOPV = directDLIToOPVEastDirection + directDLIToOPVWestDirection + diffuseDLIToOPV + groundReflectedDLIToOPV\n # print \"directDLIToOPVEastDirection:{}\".format(directDLIToOPVEastDirection)\n # print \"diffuseDLIToOPV.shape:{}\".format(diffuseDLIToOPV.shape)\n # print \"groundReflectedDLIToOPV:{}\".format(groundReflectedDLIToOPV)\n\n\n # ################## plot the difference of real data and simulated data start######################\n # Title = \"difference of the model output with real data and with no data\"\n # xAxisLabel = \"time [hour]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"total Solar irradiance [W m^-2]\"\n # util.plotTwoData(np.linspace(0, simulationDaysInt * constant.hourperDay, simulationDaysInt * constant.hourperDay), \\\n # totalSolarRadiationToOPV, simulatedTotalSolarRadiationToOPV ,Title, xAxisLabel, yAxisLabel, \"with real data\", \"wth no data\")\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ################## plot the difference of real data and simulated 
data end######################\n\n # ################## plot the distribution of direct and diffuse PPFD start######################\n # Title = \"TOTAL outer PPFD to OPV\"\n # xAxisLabel = \"time [hour]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"PPFD [umol m^-2 s^-1]\"\n # util.plotData(np.linspace(0, simulationDaysInt * constant.hourperDay, simulationDaysInt * constant.hourperDay), \\\n # directPPFDToOPV + diffusePPFDToOPV + groundReflectedPPFDToOPV, Title, xAxisLabel, yAxisLabel)\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ################## plot the distribution of direct and diffuse PPFD end######################\n\n # ################## plot the distribution of direct and diffuse solar DLI start######################\n # Title = \"direct and diffuse outer DLI to OPV\"\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"DLI [mol m^-2 day^-1]\"\n # y1Label = \"(directDLIToOPVEastDirection+directDLIToOPVWestDirection)/2.0\"\n # y2Label = \"diffuseDLIToOPV\"\n # util.plotTwoData(np.linspace(0, simulationDaysInt, simulationDaysInt), (directDLIToOPVEastDirection+directDLIToOPVWestDirection)/2.0, diffuseDLIToOPV, Title,\n # xAxisLabel, yAxisLabel, y1Label, y2Label)\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ################## plot the distribution of direct and diffuse solar DLI end######################\n\n # ################## plot the distribution of various DLI to OPV film start######################\n # Title = \"various DLI to OPV film\"\n # plotDataSet = np.array([directDLIToOPVEastDirection, directDLIToOPVWestDirection, diffuseDLIToOPV,\n # groundReflectedDLIToOPV])\n # labelList = np.array([\"directDLIToOPVEastDirection\", \"directDLIToOPVWestDirection\", \"diffuseDLIToOPV\",\n # \"groundReflectedDLIToOPV\"])\n # 
xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"DLI [mol m^-2 day^-1]\"\n # util.plotMultipleData(np.linspace(0, simulationDaysInt, simulationDaysInt), plotDataSet, labelList, Title,\n # xAxisLabel, yAxisLabel)\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ################## plot the distribution of various DLI to OPV film end######################\n\n ################## calculate the daily electricity yield per area start#####################\n # TODO maybe we need to consider the tilt of OPV and OPV material for the temperature of OPV film. right now, just use the measured temperature\n # get the daily electricity yield per area per day ([J/m^2] per day) based on the given light intensity ([Celsius],[W/m^2]).\n dailyJopvoutperArea = simulatorDetail.calcDailyElectricityYieldSimulationperArea(hourlyHorizontalTotalBeamMeterBodyTemperature, \\\n directSolarRadiationToOPVEastDirection + directSolarRadiationToOPVWestDirection,\n diffuseSolarRadiationToOPV,\n albedoSolarRadiationToOPV)\n\n # unit Exchange [J/m^2] -> [wh / m^2]\n dailyWhopvoutperArea = util.convertFromJouleToWattHour(dailyJopvoutperArea)\n # unit Exchange [Wh/ m^2] -> [kWh/m^2]\n dailykWhopvoutperArea = util.convertWhTokWh(dailyWhopvoutperArea)\n # ################### plot the electricity yield per area with given OPV film\n # title = \"electricity yield per area vs OPV film\"\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"Electricity yield per OPV area [kWh/m^2/day]\"\n # util.plotData(np.linspace(0, simulationDaysInt, simulationDaysInt), dailykWhopvoutperArea, title, xAxisLabel, yAxisLabel)\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ################## calculate the daily electricity yield per area end#####################\n\n 
################## calculate the daily electricity sales start#####################\n # convert the year of each hour to the year to each day\n yearOfeachDay = year[::24]\n # convert the month of each hour to the month to each day\n monthOfeachDay = month[::24]\n # get the monthly electricity sales per area [USD/month/m^2]\n monthlyElectricitySalesperArea = simulatorDetail.getMonthlyElectricitySalesperArea(dailyJopvoutperArea, yearOfeachDay, monthOfeachDay)\n # set the value to the object\n simulatorClass.setMonthlyElectricitySalesperArea(monthlyElectricitySalesperArea)\n # print \"simulatorClass.getMonthlyElectricitySalesperArea():{}\".format(simulatorClass.getMonthlyElectricitySalesperArea())\n ################## calculate the daily electricity sales end#####################\n\n ##################calculate the electricity cost per area start######################################\n # initialOPVCostUSD = constant.OPVPricePerAreaUSD * OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio)\n # # [USD]\n # OPVCostUSDForDepreciation = initialOPVCostUSD * (simulationDaysInt / constant.OPVDepreciationPeriodDays)\n # # set the value to the object\n # simulatorClass.setOPVCostUSDForDepreciationperArea(OPVCostUSDForDepreciation / OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio))\n if constant.ifConsiderOPVCost is True:\n initialOPVCostUSD = constant.OPVPricePerAreaUSD * OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio)\n # [USD]\n OPVCostUSDForDepreciation = initialOPVCostUSD * (util.getSimulationDaysInt() / constant.OPVDepreciationPeriodDays)\n # set the value to the object\n simulatorClass.setOPVCostUSDForDepreciationperArea(\n OPVCostUSDForDepreciation / OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio))\n else:\n # set the value to the object. 
the value is zero if not consider the purchase cost\n simulatorClass.setOPVCostUSDForDepreciationperArea(0.0)\n ##################calculate the electricity cost per area end######################################\n\n ################## calculate the daily plant yield start#####################\n # [String]\n plantGrowthModel = constant.TaylorExpantionWithFluctuatingDLI\n # cultivation days per harvest [days/harvest]\n cultivationDaysperHarvest = constant.cultivationDaysperHarvest\n # OPV coverage ratio [-]\n OPVCoverage = constant.OPVAreaCoverageRatio\n # boolean\n hasShadingCurtain = constant.hasShadingCurtain\n # PPFD [umol m^-2 s^-1]\n ShadingCurtainDeployPPFD = constant.ShadingCurtainDeployPPFD\n\n # calculate plant yield given an OPV coverage and model :daily [g/unit]\n shootFreshMassList, unitDailyFreshWeightIncrease, accumulatedUnitDailyFreshWeightIncrease, unitDailyHarvestedFreshWeight = \\\n simulatorDetail.calcPlantYieldSimulation(plantGrowthModel, cultivationDaysperHarvest, OPVCoverage, \\\n (directPPFDToOPVEastDirection + directPPFDToOPVWestDirection) / 2.0, diffusePPFDToOPV, groundReflectedPPFDToOPV,\n hasShadingCurtain, ShadingCurtainDeployPPFD, simulatorClass)\n\n # the DLI to plants [mol/m^2/day]\n TotalDLItoPlants = simulatorDetail.getTotalDLIToPlants(OPVCoverage, (directPPFDToOPVEastDirection + directPPFDToOPVWestDirection) / 2.0, diffusePPFDToOPV,\n groundReflectedPPFDToOPV, \\\n hasShadingCurtain, ShadingCurtainDeployPPFD, simulatorClass)\n # print \"TotalDLItoPlants:{}\".format(TotalDLItoPlants)\n # print \"TotalDLItoPlants.shape:{}\".format(TotalDLItoPlants.shape)\n # set the value to the instance\n simulatorClass.setTotalDLItoPlantsBaselineShadingCuratin(TotalDLItoPlants)\n\n # ######################### plot a graph showing only shootFreshMassList per unit\n # title = \"plant yield per head vs time (OPV coverage \" + str(int(100 * OPVCoverage)) + \"%)\"\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + 
constant.SimulationEndDate\n # yAxisLabel = \"plant fresh weight[g/head]\"\n # util.plotData(np.linspace(0, simulationDaysInt, simulationDaysInt), shootFreshMassList, title, xAxisLabel, yAxisLabel)\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # #######################################################################\n\n # # unit conversion; get the plant yield per day per area: [g/unit] -> [g/m^2]\n # shootFreshMassListperArea = util.convertUnitShootFreshMassToShootFreshMassperArea(shootFreshMassList)\n # # unit conversion: [g/m^2] -> [kg/m^2]\n # shootFreshMassListperAreaKg = util.convertFromgramTokilogram(shootFreshMassListperArea)\n # ######################## plot a graph showing only shootFreshMassList per square meter\n # title = \"plant yield per area vs time (OPV coverage \" + str(int(100 * OPVCoverage)) + \"%)\"\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"plant fresh weight[kg/m^2]\"\n # util.plotData(np.linspace(0, simulationDaysInt, simulationDaysInt), shootFreshMassListperAreaKg, title, xAxisLabel, yAxisLabel)\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ######################################################################\n\n # ################## plot various unit Plant Yield vs time\n # plotDataSet = np.array([shootFreshMassList, unitDailyFreshWeightIncrease, accumulatedUnitDailyFreshWeightIncrease, unitDailyHarvestedFreshWeight])\n # labelList = np.array([\"shootFreshMassList\", \"unitDailyFreshWeightIncrease\", \"accumulatedUnitDailyFreshWeightIncrease\", \"unitDailyHarvestedFreshWeight\"])\n # title = \"Various unit Plant Yield vs time (OPV coverage \" + str(int(100 * OPVCoverage)) + \"%)\"\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"Unit plant Fresh Weight [g/unit]\"\n # 
util.plotMultipleData(np.linspace(0, simulationDaysInt, simulationDaysInt), plotDataSet, labelList, title, xAxisLabel, yAxisLabel)\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # #######################################################################\n\n ################## calculate the daily plant yield end#####################\n\n\n ################## calculate the daily plant sales start#####################\n\n ################## calculate the daily plant sales end#####################\n\n\n ################## calculate the daily plant cost start#####################\n\n ################## calculate the daily plant cost end#####################\n\n\n print (\"end modeling: datetime.datetime.now():{}\".format(datetime.datetime.now()))\n\n return simulatorClass\n\n\n" }, { "alpha_fraction": 0.6572214365005493, "alphanum_fraction": 0.6823068857192993, "avg_line_length": 46.42292404174805, "blob_id": "43b5d697cba7cb460e432ba83c7f6289980fa323", "content_id": "62f635b697b86bd5b405e200fb5b43c30982c874", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11999, "license_type": "permissive", "max_line_length": 334, "num_lines": 253, "path": "/PlantGrowthModelS_Pearson1997.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n##########import package files##########\nimport sys\nimport os as os\nimport numpy as np\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util as util\nimport PlantGrowthModelS_Pearson1997Constant as PearsonConstant\nimport Lettuce\n#######################################################\n\n# command to show all array data\n# np.set_printoptions(threshold=np.inf)\n# print (\"hourlyHorizontalDirectOuterSolarIrradiance:{}\".format(hourlyHorizontalDirectOuterSolarIrradiance))\n# np.set_printoptions(threshold=1000)\n\n# 
####################################################################################################\n# # Stop execution here...\n# sys.exit()\n# # Move the above line to different parts of the assignment as you implement more of the functionality.\n# ####################################################################################################\n\n\ndef calcUnitDailyFreshWeightS_Pearson1997(simulatorClass):\n\t'''\n\treference: S. Pearson, T. R. Wheeler, P. Hadley & A. E. Wheldon, 1997, \"A validated model to predict the effects of environment on the growth of lettuce (Lactuca sativa L.): Implications for climate change\"\n\thttps://www.researchgate.net/publication/286938495_A_validated_model_to_predict_the_effects_of_environment_on_the_growth_of_lettuce_Lactuca_sativa_L_Implications_for_climate_change\n\t\t\t\t\t\tD. G. SWEENEY, D. W. HAND, G. SLACK AND J. H. M. THORNLEY, 1981, \"Modelling the Growth of Winter Lettuce\": http://agris.fao.org/agris-search/search.do?recordID=US201301972216\n\n\t:return:\n\t'''\n\n\t# cultivationDaysperHarvest = simulatorClass.getCultivationDaysperHarvest()\n\t# OPVAreaCoverageRatio = simulatorClass.getOPVAreaCoverageRatio()\n\t# hasShadingCurtain = simulatorClass.getIfHasShadingCurtain()\n\t# ShadingCurtainDeployPPFD = simulatorClass.getShadingCurtainDeployPPFD()\n\n\tsimulationDaysInt = util.getSimulationDaysInt()\n\n\t##################################\n\t##### input variables start #####\n\t##################################\n\t# Temperature [Clusius]\n\t# it was assumed that the canopy temperature is instantaneously adjusted to the setpoint temperature at each hour.\n\tT_a = Lettuce.getGreenhouseTemperatureEachDay(simulatorClass)\n\n\t# Horizontal irradiance above the canopy (PAR) [W/m^2]\n\tdirectSolarIrradianceToPlants = simulatorClass.directSolarIrradianceToPlants\n\tdiffuseSolarIrradianceToPlants = simulatorClass.diffuseSolarIrradianceToPlants\n\ttotalSolarIrradianceToPlants = directSolarIrradianceToPlants + 
diffuseSolarIrradianceToPlants\n\t# By dividing the irradiance by 2, the shortwave radiation is converted into PAR [W/m^2]\n\ttotalSolarIrradianceToPlantsPAR = totalSolarIrradianceToPlants/2.0\n\n\t# convert the unit from [average W / m^2 each hour] to [J / m^2 each day]\n\tJ = util.convertWattPerSquareMeterEachHourToJoulePerSaureMeterEachDay(totalSolarIrradianceToPlantsPAR)\n\n\t# CO_2 concentration [kg(CO_2) / m^3]\n\t# it is temporarily assumed that all hourly CO2 concentration is 400 [ppm] = 775 [kg / m^3] 355 [ppm] = 688 [kg / m^3]\n\t# You can convert the unit of CO2 concentration at https://www.lenntech.com/calculators/ppm/converter-parts-per-million.htm\n\tC = np.array([775.0] * T_a.shape[0])\n\n\t# Thermal time [Celsius * d]: calculating thermal time by totalling the mean temperatures for each day during each cultivation period.\n\t# theta = getThermalTime(T_a)\n\ttheta = np.zeros(T_a.shape[0])\n\n\tprint(\"T_a:{}\".format(T_a))\n\tprint(\"J:{}\".format(J))\n\tprint(\"C:{}\".format(C))\n\n\t# ####################################################################################################\n\t# # Stop execution here...\n\t# sys.exit()\n\t# # Move the above line to different parts of the assignment as you implement more of the functionality.\n\t# ####################################################################################################\n\n\t##################################\n\t##### input variables end #####\n\t##################################\n\n\t# dependent variables\n\t# Structured dry weight [g]\n\tW_G = np.zeros(T_a.shape[0])\n\t# Storage (non-structural) dry weight [g]\n\tW_S = np.zeros(T_a.shape[0])\n\t# The plant dry weight (excluding roots) [g]\n\tW = np.zeros(T_a.shape[0])\n\n\t# initial values\n\t# reference:\n\t# it was estimated from the reference of SWEENEY et al. 
(1981) saying \"The cropping area was divided into three east-west strips (5.49 m X 1.83 m) with 0.6 m wide pathways on either side.\" and\n\t# its initial values W_G(1) (= 1.9 * 10^(-6) kg) and W_S(1) (= 1.9 * 10^(-6) kg), and Pearson et al (1997) saying \"... the initial dry weight of transplants comprised 80% structural and 20% storage material, since Goudriaan et al. (1985) indicated that storage dry weight rarely exceeds 25% of the total dry matter.\"\n\t# input initial data [g]\n\t# W_G[0] = 1.9*10**(-3.0)*2 * 0.8 * constant.greenhouseCultivationFloorArea / (5.49 * 1.83)\n\tW_G[0] = 1.9*10**(-3.0)*2 * 0.8\n\t# Storage dry weight [g]\n\tW_S[0] = 1.9*10**(-3.0)*2 * 0.2\n\n\t# It was assumed to take 2 days to the next cultivation cycle assuming \"transplanting shock prevented growth during the first 48 h\"\n\t# initial total dry weight\n\tW[0] = W_G[0] + W_S[0]\n\ttheta[0] = T_a[0]\n\n\t# print(\"W_G[0]:{}\".format(W_G[0]))\n\t# print(\"W_S[0]:{}\".format(W_S[0]))\n\n\t# loop counter\n\ti = 1\n\t# reference: Pearson et al. (1997)\n\t# It was assumed that the germination and growth before transplanting were done at the growth chamber. The simulation period starts when the first cycle lettuces were planted in a greenhouse.\n\twhile i < simulationDaysInt:\n\t\t# print(\"i-1:{}, W_G[i-1]:{}, W_S[i-1]:{}\".format(i-1, W_G[i-1],W_S[i-1]))\n\t\t# the sub-optimal equivalent of a supra-optimal temperature) eq 8\n\t\tT_e = PearsonConstant.T_o - abs(PearsonConstant.T_o - T_a[i-1])\n\t\t# print(\"T_e:{}\".format(T_e))\n\n\t\t# the increase in structured dry weight\n\t\t# dW_G = W_G[i-1] * PearsonConstant.k * T_e\n\t\tdW_G = (W_G[i-1]) * PearsonConstant.k * T_e\n\t\tW_G[i] = W_G[i - 1] + dW_G\n\t\t# print(\"dW_G:{}\".format(dW_G))\n\n\t\t# T_ep: effective temperature [Celusius]\n\t\t# According to Pearson, S. Hadley, P. Wheldon, A.E. 
(1993), \"A reanalysis of the effects of temperature and irradiance on time to flowering in chrysanthemum\n\t\t# (Dendranthema grandiflora)\", Effective temperature is the sub-optimum temperature equivalent of a supra-optimum temperature in terms of developmental rate.\n\t\t# Also, since Pearson et al. (1997) says \"The effective temperature (Tep) for photosynthesis was determined with an optimum of Top and the function had a rate constant phi.\"\n\t\t# it was assumed that T_ep can be derived with the same equation and variables as T_e\n\t\tT_ep = PearsonConstant.T_op - abs(PearsonConstant.T_op - T_a[i-1])\n\t\t# print(\"T_ep:{}\".format(T_ep))\n\n\t\t# the decline in photosynthesis with plant age\n\t\t# Pg = PearsonConstant.alpha_m*((1.0-PearsonConstant.beta)/(PearsonConstant.tau*C[i-1]))*J[i-1]*T_ep*PearsonConstant.phi*(PearsonConstant.theta_m-theta[0])/PearsonConstant.theta_m\n\t\tPg = PearsonConstant.alpha_m * ((1.0-PearsonConstant.beta)/ (PearsonConstant.tau * C[i-1]))*J[i-1]*T_ep*PearsonConstant.phi*(PearsonConstant.theta_m-theta[i-1])/PearsonConstant.theta_m\n\t\t# print(\"Pg:{}\".format(Pg))\n\t\t# the relation describing respiration losses\n\t\t# print(\"PearsonConstant.R_G:{}\".format(PearsonConstant.R_G))\n\t\t# print(\"PearsonConstant.theta_m:{}\".format(PearsonConstant.theta_m))\n\t\t# print(\"PearsonConstant.gamma:{}\".format(PearsonConstant.gamma))\n\t\t# print(\"PearsonConstant.epsilon:{}\".format(PearsonConstant.epsilon))\n\n\t\t# Rd = PearsonConstant.R_G * ((PearsonConstant.theta_m - theta[0])/PearsonConstant.gamma) / PearsonConstant.theta_m * T_a[i-1] * PearsonConstant.epsilon * dW_G\n\t\t# this part was changed from the reference's original formula: theta[0] -> theta[i-1]\n\t\tRd = PearsonConstant.R_G * ((PearsonConstant.theta_m - theta[i-1])/PearsonConstant.gamma) / PearsonConstant.theta_m * T_a[i-1] * PearsonConstant.epsilon * dW_G\n\t\t# print(\"Rd:{}\".format(Rd))\n\t\tdW_S = PearsonConstant.psi * (PearsonConstant.h)**2.0 * (1.0 - 
math.exp(-PearsonConstant.F_G * W_G[i-1] / ((PearsonConstant.h)**2.0))) * Pg - Rd\n\t\t# print(\"dW_S:{}\".format(dW_S))\n\n\t\tW_S[i] = W_S[i - 1] + dW_S\n\n\t\t# The plant dry weight (excluding roots) W\n\t\tW[i] = W_G[i] + W_S[i]\n\t\t# print(\"i:{}, W[i]:{}\".format(i, W[i]))\n\n\t\t# accumulate the thermal time\n\t\ttheta[i] = theta[i - 1] + T_a[i - 1]\n\n\t\t# if the dry weight exceeds the weight for cultimvation, then reset the dryweight\n\t\tif W[i] > constant.harvestDryWeight :\n\n\t\t\t# It was assumed to take 3 days to the next cultivation cycle assuming \"transplanting shock prevented growth during the first 48 h\", and it takes one day for preparation.\n\t\t\ti += 3 + 1\n\t\t\tif(i >= simulationDaysInt): break\n\n\t\t\t# reset the weights\n\t\t\tW_S[i-1] = W_G[0]\n\t\t\tW_S[i-2] = W_G[0]\n\t\t\tW_G[i-1] = W_S[0]\n\t\t\tW_G[i-2] = W_S[0]\n\t\t\t# The plant dry weight (excluding roots) W\n\t\t\tW[i-1] = W_S[i-1] + W_G[i-1]\n\t\t\tW[i-2] = W_S[i-2] + W_G[i-2]\n\n\t\t\t# accumulate the thermal time\n\t\t\ttheta[i - 2] = T_a[i-2]\n\t\t\ttheta[i-1] = theta[i - 2] + T_a[i-1]\n\t\t\ttheta[i] = theta[i-1] + T_a[i]\n\n\t\telse:\n\n\t\t\t# increment the counter for one day\n\t\t\ti += 1\n\n\tprint(\"theta:{}\".format(theta))\n\tprint(\"W_G:{}\".format(W_G))\n\tprint(\"W_S:{}\".format(W_S))\n\tprint(\"W:{}\".format(W))\n\t# convert the dry weight into fresh weight\n\tWFreshPerHead = constant.DryMassToFreshMass * W\n\tprint(\"WFresh:{}\".format(WFreshPerHead))\n\n\t# get the fresh weight increase\n\tWFreshPerHeadWeightIncrease = Lettuce.getFreshWeightIncrease(WFreshPerHead)\n\t# get the accumulated fresh weight during the simulation period\n\tWAccumulatedFreshWeightIncreasePerHead = Lettuce.getAccumulatedFreshWeightIncrease(WFreshPerHead)\n\t# get the harvested weight\n\tWHarvestedFreshWeightPerHead = Lettuce.getHarvestedFreshWeight(WFreshPerHead)\n\n\treturn WFreshPerHead, WFreshPerHeadWeightIncrease, WAccumulatedFreshWeightIncreasePerHead, 
WHarvestedFreshWeightPerHead\n\n\n# Moved to Lettuce.py\n# def getGreenhouseTemperatureEachDay(simulatorClass):\n# \t# It was assumed the greenhouse temperature was instantaneously adjusted to the set point temperatures at daytime and night time respectively\n# \thourlyDayOrNightFlag = simulatorClass.hourlyDayOrNightFlag\n# \tgreenhouseTemperature = np.array([constant.setPointTemperatureDayTime if i == constant.daytime else constant.setPointTemperatureNightTime for i in hourlyDayOrNightFlag])\n#\n# \t# calc the mean temperature each day\n# \tdailyAverageTemperature = np.zeros(util.getSimulationDaysInt())\n# \tfor i in range(0, util.getSimulationDaysInt()):\n# \t\tdailyAverageTemperature[i] = np.average(greenhouseTemperature[i * constant.hourperDay: (i + 1) * constant.hourperDay])\n# \treturn dailyAverageTemperature\n\ndef getThermalTime(dailyAverageTemperature):\n\t'''\n\tdefinition of thermal time in plant science reference: http://onlinelibrary.wiley.com/doi/10.1111/j.1744-7348.2005.04088.x/pdf\n\t:param dailyAverageTemperature: average [Celusius] per day\n\t:return:\n\t'''\n\tthermalTime = np.zeros(dailyAverageTemperature.shape[0])\n\tfor i in range(0, thermalTime.shape[0]):\n\t\tthermalTime[i] = sum(dailyAverageTemperature[0:i+1])\n\treturn thermalTime\n\n# Moved to Lettuce.py\n# def getFreshWeightIncrease(WFresh):\n# \t# get the fresh weight increase\n#\n# \tfreshWeightIncrease = np.array([WFresh[i] - WFresh[i-1] if WFresh[i] - WFresh[i-1] > 0 else 0.0 for i in range (1, WFresh.shape[0])])\n# \t# insert the value for i == 0\n# \tfreshWeightIncrease[0] = 0.0\n#\n# \treturn freshWeightIncrease\n#\n# def getAccumulatedFreshWeight(WFresh):\n# \t# get accumulated fresh weight\n#\n# \taccumulatedFreshWeight = np.array([WFresh[i] + WFresh[i-1] if WFresh[i] - WFresh[0] > 0 else WFresh[i-1] for i in range (1, WFresh.shape[0])])\n# \t# insert the value for i == 0\n# \taccumulatedFreshWeight[0] = WFresh[0]\n#\n# \treturn accumulatedFreshWeight\n#\n#\n# def 
getHarvestedFreshWeight(WFresh):\n# \t# get the harvested fresh weight\n#\n# \t# record the fresh weight harvested at each harvest date\n# \tharvestedFreshWeight = np.array([WFresh[i-1] if WFresh[i] - WFresh[i-1] < 0 else 0.0 for i in range (1, WFresh.shape[0])])\n# \t# insert the value for i == 0\n# \tharvestedFreshWeight[0] = 0.0\n#\n# \treturn harvestedFreshWeight\n\n" }, { "alpha_fraction": 0.6265890002250671, "alphanum_fraction": 0.6734640002250671, "avg_line_length": 46.150001525878906, "blob_id": "3fe73774ced53009c5d17ab6201d6aa6122fd473", "content_id": "b1776ff29729cc7d231e0da61b940914fa0513c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3776, "license_type": "permissive", "max_line_length": 180, "num_lines": 80, "path": "/GreenhouseEnergyBalanceConstant.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n#############command to print out all array data\n# np.set_printoptions(threshold=np.inf)\n# print (\"directSolarRadiationToOPVWestDirection:{}\".format(directSolarRadiationToOPVWestDirection))\n# np.set_printoptions(threshold=1000)\n#############\n\n# ####################################################################################################\n# # Stop execution here...\n# sys.exit()\n# # Move the above line to different parts of the assignment as you implement more of the functionality.\n# ####################################################################################################\n\n##########import package files##########\nimport datetime\nimport sys\nimport os\nimport numpy as np\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util as util\n#######################################################\n\n# the heat transfer coefficient [W m-2 Celsius degree] (ASHRAE guide and data book fundamentals, 1981)\n# source: 
https://www.crcpress.com/Greenhouses-Advanced-Technology-for-Protected-Horticulture/Hanan/p/book/9780849316982\n# Hanan, J.J. (1998). Chapter 2 Structures: locations, styles, and covers. Greenhouses: advanced technology for protected horticulture (56). CRC Press. Boca Raton, FL.\n# I directly got this value from a confidential research thesis in Kacira Lab at CEAC in University of Arizona. the paper said this value was taken from Hanan (1998)\nU = 6.3\n# if the roof is made from double PE\n# U = 4.0\n\n# e Stefan-Boltzmann's constant [W m-2 K-4]\ndelta = 5.67 * 10.0**(-8)\n\n# transmissitiy of thermal radiation (not solar radiation) for the single layer high density polyethylene\n# source:Nadia Sabeh, tomato greenhouse roadmap - a guide to greenhouse tomato production, page 24, https://www.amazon.com/Tomato-Greenhouse-Roadmap-Guide-Production-ebook/dp/B00O4CPO42\ntau_tc = 0.8\n# source: S. Zhu, J. Deltour, S. Wang, 1998, Modeling the thermal characteristics of greenhouse pond systems,\n# tau_tc = 0.42\n\n# average emissivity of the interior surface, assuming the high density polyethylene\n# source: S. Zhu, J. Deltour, S. 
Wang, 1998, Modeling the thermal characteristics of greenhouse pond systems,\n# epsilon_i = 0.53\n# source: Tomohiro OKADA, Ryohei ISHIGE, and Shinji ANDO, 2016, Analysis of Thermal Radiation Properties of Polyimide and Polymeric Materials Based on ATR-IR spectroscopy\n# This paper seems to be more reliable at a content of research\nepsilon_i = 0.3\n\n################## constnats for Q_e, latent heat transfer by plant transpiration start #######################\n# another source: http://edis.ifas.ufl.edu/pdffiles/ae/ae45900.pdf\n# specific heat constant pressure [MJ kg-1 Celsius-1]\nc_p = 1.013 * 10.0**(-3)\n\n# atmospheric pressure at 700m elevation [KPa]\n# source: http://www.fao.org/docrep/X0490E/x0490e07.htm\n# source: Water Evaluation And Planning System, user guide, page 17, https://www.sei.org/projects-and-tools/tools/weap/\n# elevation of the model [m]\nelevation = 700\nP = 101.3 * ((293 - 0.0065 * elevation)/293)**5.26\n\n# ratio of molecular weight of water vapor to dry air [0.622]\nepsilon = 0.622\n# latent heat of water vaporization [MJ kg-1]\n# source: https://en.wikipedia.org/wiki/Latent_heat\n# lambda_ = 2.2264705\n# this source gives a different number 2.45, :http://www.fao.org/docrep/X0490E/x0490e07.htm\n# source: Water Evaluation And Planning System, user guide, page 17, https://www.sei.org/projects-and-tools/tools/weap/\nlambda_ = 2.45\n\n\n# Specific heat of dry air [J kg-1 K-1]\nC_p = 1010.0\n\n# the density of air [kg m-3]\nrho = 1.204\n\n# the soild flux [W m-2]. 
It was assumed this value is zero due to the significantly small impact to the model and difficulty of estimation\nF = 0.0\n\n################## constnats for Q_e, latent heat transfer by plant transpiration end #######################\n\n\n\n\n" }, { "alpha_fraction": 0.6554391980171204, "alphanum_fraction": 0.670831561088562, "avg_line_length": 34.28705596923828, "blob_id": "e103897d08ee8123d48322c285fe610454ca7716", "content_id": "665f7d05da617124bc5b9eb22f72dd2cca200ae7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37618, "license_type": "permissive", "max_line_length": 192, "num_lines": 1066, "path": "/Util.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#######################################################\n# author :Kensaku Okada [[email protected]]\n# create date : 12 Dec 2016\n# last edit date: 14 Dec 2016\n#######################################################\n\n##########import package files##########\nfrom scipy import stats\nimport datetime\nimport calendar\nfrom textwrap import wrap\nimport os as os\nimport numpy as np\nimport matplotlib.pyplot as plt\n# from sklearn import datasets\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport csv\nimport random\nimport glob\n#######################################################\n\ndef flipCoin( p ):\n '''\n\n :param p:0.0 to 1.0\n :return:\n '''\n r = random.random()\n return r < p\n\n\ndef getStartDateDateType():\n '''\n return the start date as date type\n :return:\n '''\n return datetime.date(int(constant.SimulationStartDate[0:4]), int(constant.SimulationStartDate[4:6]), int(constant.SimulationStartDate[6:8]))\n\ndef getEndDateDateType():\n '''\n return the end date as date type\n :return:\n '''\n return datetime.date(int(constant.SimulationEndDate[0:4]), int(constant.SimulationEndDate[4:6]), 
int(constant.SimulationEndDate[6:8]))\n\n\ndef getSimulationDaysInt():\n '''\n calculate the simulation days.\n\n :return: simulationDaysInt\n '''\n # get start date and end date of simulation from constant class as String\n startDateDateType = getStartDateDateType()\n # print \"type(startDateDateType):{}\".format(type(startDateDateType))\n # print \"startDateDateType:{}\".format(startDateDateType)\n endDateDateType = getEndDateDateType()\n # print \"endDateDateType:{}\".format(endDateDateType)\n # print \"int(constant.SimulationEndDate[0:4]):{}\".format(int(constant.SimulationEndDate[0:4]))\n # print \"int(constant.SimulationEndDate[5:6]):{}\".format(int(constant.SimulationEndDate[4:6]))\n # print \"int(constant.SimulationEndDate[7:8]):{}\".format(int(constant.SimulationEndDate[6:8]))\n simulationDays = endDateDateType - startDateDateType\n # print \"simulationDays:{}\".format(simulationDays)\n # print \"type(simulationDays):{}\".format(type(simulationDays))\n # convert the data type\n simulationDaysInt = simulationDays.days + 1\n # print \"simulationDaysInt:{}\".format(simulationDaysInt)\n # print \"type(simulationDaysInt):{}\".format(type(simulationDaysInt))\n\n return simulationDaysInt\n\n\ndef calcSimulationDaysInt():\n '''\n calculate the simulation days.\n\n :return: simulationDaysInt\n '''\n # get start date and end date of simulation from constant class as String\n startDateDateType = getStartDateDateType()\n # print \"type(startDateDateType):{}\".format(type(startDateDateType))\n # print \"startDateDateType:{}\".format(startDateDateType)\n endDateDateType = getEndDateDateType()\n # print \"endDateDateType:{}\".format(endDateDateType)\n # print \"int(constant.SimulationEndDate[0:4]):{}\".format(int(constant.SimulationEndDate[0:4]))\n # print \"int(constant.SimulationEndDate[5:6]):{}\".format(int(constant.SimulationEndDate[4:6]))\n # print \"int(constant.SimulationEndDate[7:8]):{}\".format(int(constant.SimulationEndDate[6:8]))\n simulationDays = 
endDateDateType - startDateDateType\n # print \"simulationDays:{}\".format(simulationDays)\n # print \"type(simulationDays):{}\".format(type(simulationDays))\n # convert the data type\n simulationDaysInt = simulationDays.days + 1\n # print \"simulationDaysInt:{}\".format(simulationDaysInt)\n # print \"type(simulationDaysInt):{}\".format(type(simulationDaysInt))\n\n return simulationDaysInt\n\n\ndef getSimulationMonthsInt():\n # get start date and end date of simulation from constant class as String\n startDateDateType = getStartDateDateType()\n # print \"type(startDateDateType):{}\".format(type(startDateDateType))\n # print \"startDateDateType:{}\".format(startDateDateType)\n endDateDateType = getEndDateDateType()\n return (endDateDateType.year - startDateDateType.year)*12 + endDateDateType.month - startDateDateType.month + 1\n\ndef getDaysFirstMonth():\n startDate = getStartDateDateType()\n _, daysFirstMonth = calendar.monthrange(startDate.year, startDate.month)\n # print \"month_day:{}\".format(month_day)\n return daysFirstMonth\n\ndef getDaysLastMonth():\n lastDate = getEndDateDateType()\n _, daysLastMonth = calendar.monthrange(lastDate.year, lastDate.month)\n # print \"month_day:{}\".format(month_day)\n return daysLastMonth\n\ndef getDaysFirstMonthForGivenPeriod():\n daysFirstMonth = getDaysFirstMonth()\n startDate = getStartDateDateType()\n days = startDate.day\n # print \"day:{}\".format(day)\n # print \"type(day):{}\".format(type(day))\n daysFirstMonthForGivenPeriod = daysFirstMonth - days + 1\n\n return daysFirstMonthForGivenPeriod\n\ndef getNumOfDaysFromJan1st(date):\n jan1stdate = datetime.date(date.year, 1, 1)\n daysFromJan1st = date - jan1stdate\n\n return daysFromJan1st.days\n\ndef getOnly15thDay(hourlySolarradiation):\n # print (hourlySolarradiation.shape)\n # print (type(hourlySolarradiation.shape))\n\n # hourlySolarradiationOnly15th = np.array([])\n hourlySolarradiationOnly15th = []\n\n date = datetime.datetime(getStartDateDateType().year, 
getStartDateDateType().month, getStartDateDateType().day)\n\n for i in range (0, hourlySolarradiation.shape[0]):\n\n # print (date.day)\n if date.day == 15:\n # np.append(hourlySolarradiationOnly15th, hourlySolarradiation[i])\n hourlySolarradiationOnly15th.append(hourlySolarradiation[i])\n # hourlySolarradiationOnly15th.append(hourlySolarradiation[i])\n\n # print(\"hourlySolarradiation[i]:{}\".format(hourlySolarradiation[i]))\n # print(\"hourlySolarradiationOnly15th:{}\".format(hourlySolarradiationOnly15th))\n # set the date object 1 hour ahead.\n date += datetime.timedelta(hours=1)\n\n # print (\"hourlySolarradiationOnly15th.shape[0]:{}\".format(len(hourlySolarradiationOnly15th)))\n\n return hourlySolarradiationOnly15th\n\ndef getSummerDays(year):\n '''\n\n :param years:\n :return:\n '''\n return datetime.date(year, constant.SummerPeriodEndMM,constant.SummerPeriodEndDD) - datetime.date(year, constant.SummerPeriodStartMM, constant.SummerPeriodStartDD)\n\ndef dateFormConversionYyyyMmDdToMnSlashddSlashYyyy(yyyymmdd):\n yyyy = yyyymmdd[0:4]\n mm = yyyymmdd[4:6]\n dd = yyyymmdd[6:8]\n mmSlashddSlashyyyy = mm + \"/\" + dd + \"/\" + yyyy\n\n return mmSlashddSlashyyyy\n\ndef readData(fileName, relativePath = \"\", skip_header=0, d=','):\n '''\n retrieve the data from the file named fileName\n you may have to modify the path below in other OSs.\n in Linux, Mac OS, the partition is \"/\". In Windows OS, the partition is \"\\\" (backslash).\n os.sep means \"\\\" in windows. 
So the following path is adjusted for Windows OS\n\n param filename: filename\n param relativePath: the relative path from \"data\" folder to the folder where there is a file you want to import\n param d: the type of data separator, which s \",\" by default\n return: a numpy.array of the data\n '''\n # read a file in \"data\" folder\n if relativePath == \"\":\n filePath = os.path.dirname(__file__).replace('/', os.sep) + '\\\\' + 'data\\\\' + fileName\n print(\"filePath:{}\".format(filePath))\n print(\"os.path.dirname(__file__):{}\".format(os.path.dirname(__file__)))\n\n else:\n filePath = os.path.dirname(__file__).replace('/', os.sep) + '\\\\' + 'data\\\\' + relativePath + '\\\\' + fileName\n # print \"filePath:{}\".format(filePath)\n\n if skip_header == 0:\n return np.genfromtxt(filePath, delimiter=d, dtype=None)\n else:\n return np.genfromtxt(filePath, delimiter=d, dtype=None, skip_header=skip_header)\n\n\ndef exportCSVFile(dataMatrix, fileName=\"exportFile\", relativePath=\"\"):\n '''\n export dataMatrix\n :param dataMatrix:\n :param path:\n :param fileName:\n :return: None\n '''\n # print (dataMatrix)\n\n currentDir = os.getcwd()\n # change the directory to export\n if relativePath == \"\":\n os.chdir(currentDir + \"/exportData\")\n else:\n os.chdir(currentDir + relativePath)\n # print \"os.getcwd():{}\".format(os.getcwd())\n\n f = open(fileName + \".csv\", 'w') # open the file with writing mode\n csvWriter = csv.writer(f, lineterminator=\"\\n\")\n # print \"dataMatrix:{}\".format(dataMatrix)\n for data in dataMatrix:\n csvWriter.writerow(data)\n f.close() # close the file\n\n # take back the current directory\n os.chdir(currentDir)\n # print \"os.getcwd():{}\".format(os.getcwd())\n\ndef importDictionaryAsCSVFile(fileName=\"exportFile\", relativePath=\"\"):\n '''\n import the values of dictionary\n\n :param fileName:\n :param relativePath:\n :return:\n '''\n currentDir = os.getcwd()\n # print (\"currentDir:{}\".format(currentDir))\n # currentDir = 
unicode(currentDir, encoding='shift-jis')\n # print (\"currentDir:{}\".format(currentDir))\n\n # change the directory to export\n if relativePath == \"\":\n os.chdir(currentDir + \"/exportData\")\n else:\n os.chdir(currentDir + relativePath)\n\n dict = Counter()\n for key, val in csv.reader(open(fileName+\".csv\")):\n dict[key] = val\n\n # take back the current directory\n os.chdir(currentDir)\n # print \"os.getcwd():{}\".format(os.getcwd())\n\n return dict\n\n\ndef exportDictionaryAsCSVFile(dictionary, fileName=\"exportFile\", relativePath=\"\"):\n '''\n export the values of dictionary\n\n :param dictionary:\n :param fileName:\n :param relativePath:\n :return:\n '''\n currentDir = os.getcwd()\n # change the directory to export\n if relativePath == \"\":\n os.chdir(currentDir + \"/exportData\")\n else:\n os.chdir(currentDir + relativePath)\n # print (\"dictionary:{}\".format(dictionary))\n\n w = csv.writer(open(fileName + \".csv\", \"w\"))\n for key, val in dictionary.items():\n w.writerow([key, val])\n\n # take back the current directory\n os.chdir(currentDir)\n # print \"os.getcwd():{}\".format(os.getcwd())\n\n\ndef getArraysFromData(fileName, simulatorClass):\n '''\n read data from a file, process the data and return them as arrays\n :param fileName: String\n :return:\n '''\n\n # get the simulation days set in the constant class\n simulationDaysInt = calcSimulationDaysInt()\n # print \"simulationDaysInt:{}\".format(simulationDaysInt)\n # get start date and end date of simulation from constant class as String\n startDateDateType = getStartDateDateType()\n # print \"startDateDateType:{}\".format(startDateDateType)\n # print \"type(startDateDateType):{}\".format(type(startDateDateType))\n endDateDateType = getEndDateDateType()\n\n # if there are dates where there are any missing lines, skip the date and subtract the number from the simulation days\n trueSimulationDaysInt = simulationDaysInt\n missingDates = np.array([])\n\n # automatically changes its length 
dependent on the amoutn of imported data\n year = np.zeros(simulationDaysInt * constant.hourperDay, dtype=np.int)\n # print \"year.shape:{}\".format(year.shape)\n month = np.zeros(simulationDaysInt * constant.hourperDay, dtype=np.int)\n # print \"month:{}\".format(month.shape)\n day = np.zeros(simulationDaysInt * constant.hourperDay, dtype=np.int)\n hour = np.zeros(simulationDaysInt * constant.hourperDay, dtype=np.int)\n # dates = np.chararray(simulationDaysInt * constant.hourperDay)\n dates = [\"\"] * (simulationDaysInt * constant.hourperDay)\n # print \"dates:{}\".format(dates)\n\n # [W/m^2]\n hourlyHorizontalDiffuseOuterSolarIrradiance = np.zeros(simulationDaysInt * constant.hourperDay)\n # [W/m^2]\n hourlyHorizontalTotalOuterSolarIrradiance = np.zeros(simulationDaysInt * constant.hourperDay)\n # [W/m^2]\n hourlyHorizontalDirectOuterSolarIrradiance = np.zeros(simulationDaysInt * constant.hourperDay)\n # [deg C]\n hourlyHorizontalTotalBeamMeterBodyTemperature = np.zeros(simulationDaysInt * constant.hourperDay)\n # [deg C]\n hourlyHorizonalDirectBeamMeterBodyTemperature = np.zeros(simulationDaysInt * constant.hourperDay)\n # [deg C]\n hourlyAirTemperature = np.zeros(simulationDaysInt * constant.hourperDay)\n # [%]\n hourlyRelativeHumidity = np.zeros(simulationDaysInt * constant.hourperDay)\n\n # import the file removing the header\n fileData = readData(fileName, relativePath=\"\", skip_header=1)\n # print \"fileData:{}\".format(fileData)\n # print \"fileData.shape:{}\".format(fileData.shape)\n\n # change the date format\n simulationStartDate = dateFormConversionYyyyMmDdToMnSlashddSlashYyyy(constant.SimulationStartDate)\n simulationEndDate = dateFormConversionYyyyMmDdToMnSlashddSlashYyyy(constant.SimulationEndDate)\n # print (\"simulationStartDate:{}\".format(simulationStartDate))\n # print (\"simulationEndDate:{}\".format(simulationEndDate))\n # print (\"fileData.shape:{}\".format(fileData.shape))\n\n ########## store the imported data to lists\n # index for data 
storing\n index = 0\n for hourlyData in fileData:\n # for day in range(0, simulationDaysInt):\n\n # print\"hourlyData:{}\".format(hourlyData)\n dateList = hourlyData[0].split(\"/\")\n # print \"dateList:{}\".format(dateList)\n # print \"month:{}\".format(month)\n # print \"day:{}\".format(day)\n\n # exclude the data out of the set start date and end date\n if datetime.date(int(dateList[2]), int(dateList[0]), int(dateList[1])) < startDateDateType or \\\n datetime.date(int(dateList[2]), int(dateList[0]), int(dateList[1])) > endDateDateType:\n continue\n\n # print \"datetime.date(int(dateList[2]), int(dateList[0]), int(dateList[1])):{}\".format(datetime.date(int(dateList[2]), int(dateList[0]), int(dateList[1])))\n # print \"startDateDateType:{}\".format(startDateDateType)\n # print \"endDateDateType:{}\".format(endDateDateType)\n\n year[index] = int(dateList[2])\n month[index] = int(dateList[0])\n day[index] = int(dateList[1])\n hour[index] = hourlyData[1]\n dates[index] = hourlyData[0]\n # print \"hourlyData[0]:{}\".format(hourlyData[0])\n # print \"dates:{}\".format(dates)\n # print \"index:{}, year:{}, hour[index]:{}\".format(index, year, hour)\n # print \"hourlyData[0]:{}\".format(hourlyData[0])\n # print \"year[index]:{}\".format(year[index])\n # [W/m^2]\n hourlyHorizontalDiffuseOuterSolarIrradiance[index] = hourlyData[4]\n # [W/m^2]\n hourlyHorizontalTotalOuterSolarIrradiance[index] = hourlyData[2]\n # the direct beam solar radiation is not directly got from the file, need to calculate from \"the total irradiance - the diffuse irradiance\"\n # [W/m^2]\n hourlyHorizontalDirectOuterSolarIrradiance[index] = hourlyHorizontalTotalOuterSolarIrradiance[index] \\\n - hourlyHorizontalDiffuseOuterSolarIrradiance[index]\n # unit: [celusis]\n hourlyHorizontalTotalBeamMeterBodyTemperature[index] = hourlyData[7]\n # unit: [celusis]\n hourlyHorizonalDirectBeamMeterBodyTemperature[index] = hourlyData[8]\n # unit: [celusis]\n hourlyAirTemperature[index] = hourlyData[5]\n # print 
\"hourlyAirTemperature:{}\".format(hourlyAirTemperature)\n # unit: [-] <- [%]\n hourlyRelativeHumidity = hourlyData[6] * 0.01\n\n # print \"hour[index] - hour[index-1]:{}\".format(hourlyData[1] - hour[index-1])\n # print \"year[index]:{}, month[index]:{}, day[index]:{}, hour[index]:{}\".format(year[index], month[index], day[index], hour[index])\n # print \"year[index]:{}, month[index]:{}, day[index]:{}, hour[index]:{}\".format(year[index-1], month[index-1], day[index-1], hour[index-1])\n # print \"datetime.datetime(year[index], month[index], day[index], hour = hour[index]):{}\".format(datetime.datetime(year[index], month[index], day[index], hour = hour[index]))\n # print \"datetime.datetime(year[index-1], month[index-1], day[index-1]),hour = hour[index-1]:{}\".format(datetime.datetime(year[index-1], month[index-1], day[index-1],hour = hour[index-1]))\n # if index <> 0 and datetime.timedelta(hours=1) <> datetime.datetime(year[index], month[index], day[index], hour = hour[index]) - \\\n # datetime.datetime(year[index-1], month[index-1], day[index-1], hour = hour[index-1]):\n # missingDates = np.append(missingDates, hourlyData)\n index += 1\n # print \"year:{}\".format(year)\n # print \"month:{}\".format(month)\n # print \"day:{}\".format(day)\n # print \"hour:{}\".format(hour)\n # print \"hourlyHorizontalTotalOuterSolarIrradiance:{}\".format(hourlyHorizontalTotalOuterSolarIrradiance)\n # print \"hourlyHorizontalTotalBeamMeterBodyTemperature:{}\".format(hourlyHorizontalTotalBeamMeterBodyTemperature)\n # print \"hourlyHorizontalDirectOuterSolarIrradiance:{}\".format(hourlyHorizontalDirectOuterSolarIrradiance)\n # print \"hourlyHorizonalDirectBeamMeterBodyTemperature.shape:{}\".format(hourlyHorizonalDirectBeamMeterBodyTemperature.shape)\n # print \"hourlyAirTemperature:{}\".format(hourlyAirTemperature)\n # print \"hourlyAirTemperature.shape:{}\".format(hourlyAirTemperature.shape)\n\n # set the values to the object\n simulatorClass.setYear(year)\n 
simulatorClass.setMonth(month)\n simulatorClass.setDay(day)\n simulatorClass.setHour(hour)\n simulatorClass.setImportedHourlyHorizontalDirectSolarRadiation(hourlyHorizontalDirectOuterSolarIrradiance)\n simulatorClass.setImportedHourlyHorizontalDiffuseSolarRadiation(hourlyHorizontalDiffuseOuterSolarIrradiance)\n simulatorClass.setImportedHourlyHorizontalTotalBeamMeterBodyTemperature(hourlyHorizontalTotalBeamMeterBodyTemperature)\n simulatorClass.setImportedHourlyAirTemperature(hourlyAirTemperature)\n simulatorClass.hourlyRelativeHumidity = hourlyRelativeHumidity\n\n\n ##########file import (TucsonHourlyOuterEinvironmentData) end##########\n\n return year, month, day, hour, hourlyHorizontalDiffuseOuterSolarIrradiance, \\\n hourlyHorizontalTotalOuterSolarIrradiance, \\\n hourlyHorizontalDirectOuterSolarIrradiance, \\\n hourlyHorizontalTotalBeamMeterBodyTemperature, \\\n hourlyAirTemperature\n\n\ndef deriveOtherArraysFromImportedData(simulatorClass):\n # Other data can be added in the future\n\n\n # set the the flag indicating daytime or nighttime\n hourlyHorizontalDirectOuterSolarIrradiance = simulatorClass.getImportedHourlyHorizontalDirectSolarRadiation()\n hourlyDayOrNightFlag = np.array([constant.daytime if i > 0.0 else constant.nighttime for i in hourlyHorizontalDirectOuterSolarIrradiance])\n simulatorClass.hourlyDayOrNightFlag = hourlyDayOrNightFlag\n\n\ndef convertFromJouleToWattHour(joule):\n '''\n [J] == [W*sec] -> [W*hour]\n :param joule:\n :return:\n '''\n return joule / constant.minuteperHour /constant.secondperMinute\n\ndef convertWattPerSquareMeterEachHourToJoulePerSaureMeterEachDay(hourlySolarIrradiance):\n '''\n Unit conversion: [average W / m^2 each hour] -> [J / m^2 each day]\n '''\n\n # convert W / m^2 (= J/(s * m^2)) into J/(hour * m^2)\n SolarRadiantEnergyPerHour = hourlySolarIrradiance * constant.secondperMinute * constant.minuteperHour\n\n dailySolarEnergy = np.zeros(int(SolarRadiantEnergyPerHour.shape[0]/constant.hourperDay))\n for i in range 
(0, dailySolarEnergy.shape[0]):\n dailySolarEnergy[i] = sum(SolarRadiantEnergyPerHour[i*constant.hourperDay : (i+1)*constant.hourperDay])\n return dailySolarEnergy\n\n\ndef convertFromgramTokilogram(weightg):\n '''\n convert the unit from g to kg\n\n param: weightg, weight (g)\n return: weight(kg)\n '''\n return weightg/1000.0\n\n\ndef convertWhTokWh(electricityYield):\n '''\n convert the unit from Wh to kWh\n :param electricityYieldkW:\n :return:\n '''\n return electricityYield / 1000.0\n\n\ndef convertFromMJperHourSquareMeterToWattperSecSquareMeter(MJperSquareMeter):\n '''\n change the unit of light intensity from MJ/hour/m^2 to Watt/sec/m^2 (unit of light energy in terms of energy),\n which is for OPV electricity generation\n\n param: MJperSquareMeter (MJ/hour/m^2)\n return: (Watt/m^2) = (J/sec/m^2)\n '''\n return MJperSquareMeter *10.0**6 / 60.0 / 60.0\n\ndef convertFromHourlyPPFDWholeDayToDLI(hourlyPPFDWholePeriod):\n '''\n [umol m^-2 s^-1] -> [mol m^-2 day^-1]\n :param hourlyPPFDWholePeriod:\n :return:DLI\n '''\n DLIWholePeriod = np.zeros(calcSimulationDaysInt())\n\n # convert the unit: [umol m^-2 s^-1] -> [umol m^-2 day^-1]\n for day in range (0, calcSimulationDaysInt()):\n for hour in range(0, hourlyPPFDWholePeriod.shape[0]/calcSimulationDaysInt()):\n DLIWholePeriod[day] += hourlyPPFDWholePeriod[day * constant.hourperDay + hour] * constant.secondperMinute * constant.minuteperHour\n\n # convert the unit: [umol m^-2 day^-1] -> [mol m^-2 day^-1]\n DLIWholePeriod = DLIWholePeriod / float(10**6)\n # print \"DLIWholePeriod:{}\".format(DLIWholePeriod)\n return DLIWholePeriod\n\n# def convertFromJouleperDayperAreaToWattper(hourlyPPFDWholePeriod):\n# '''\n# [umol m^-2 s^-1] -> [mol m^-2 day^-1]\n# :param hourlyPPFDWholePeriod:\n# :return:DLI\n# '''\n# DLIWholePeriod = np.zeros(calcSimulationDaysInt())\n#\n# # convert the unit: [umol m^-2 s^-1] -> [umol m^-2 day^-1]\n# for day in range (0, calcSimulationDaysInt()):\n# for hour in range(0, 
hourlyPPFDWholePeriod.shape[0]/calcSimulationDaysInt()):\n# DLIWholePeriod[day] += hourlyPPFDWholePeriod[day * constant.hourperDay + hour] * constant.secondperMinute * constant.minuteperHour\n#\n# # convert the unit: [umol m^-2 day^-1] -> [mol m^-2 day^-1]\n# DLIWholePeriod = DLIWholePeriod / float(10**6)\n# # print \"DLIWholePeriod:{}\".format(DLIWholePeriod)\n# return DLIWholePeriod\n\ndef convertFromWattperSecSquareMeterToPPFD(WattperSquareMeter):\n '''\n change the unit of light intensity from MJ/m^2 to μmol/m^2/s (unit of PPFD in terms of photon desnity for photosynthesis),\n which is for photosynthesis plant production\n source of the coefficient\n http://www.apogeeinstruments.com/conversion-ppf-to-watts/ : 1/0.219 = 4.57\n http://www.egc.com/useful_info_lighting.php: 1/0.327 = 3.058103976\n\n param: WattperSecSquare (Watt/m^2) = (J/sec/m^2)\n return: (μmol/m^2/s in solar radiation)\n '''\n return WattperSquareMeter * constant.wattToPPFDConversionRatio\n\ndef convertUnitShootFreshMassToShootFreshMassperArea(shootFreshMassList):\n '''\n :return:\n '''\n # unit convert [g/head] -> [g/m^2]\n shootFreshMassListPerCultivationFloorArea = shootFreshMassList * constant.plantDensity\n return shootFreshMassListPerCultivationFloorArea\n\ndef convertcwtToKg(cwt):\n # unit convert [cwt] -> [kg]\n return cwt * constant.kgpercwt\n\n\ndef convertHourlyTemperatureToDailyAverageTemperature(hourlyTemperature):\n '''\n Unit conversion: [g/head] -> [g/m^2]\n '''\n dailyAverageTemperature = np.zeros(int(hourlyTemperature.shape[0]/constant.hourperDay))\n for i in range (0, hourlyTemperature.shape[0]):\n dailyAverageTemperature[i] = np.average(hourlyTemperature[i*constant.hourperDay : (i+1)*constant.hourperDay])\n\n return dailyAverageTemperature\n\n\ndef convertPoundToKg(pound):\n return pound / (1.0 / 0.45359237)\n\ndef convertKgToPound(kg):\n return kg * (1.0 / 0.45359237)\n\n\ndef saveFigure (filename):\n '''\n save the figure with given file name at the curent directory\n 
param: filename: file name\n return: :\n '''\n # (relative to your python current working directory)\n path = os.path.dirname(__file__).replace('/', os.sep)\n os.chdir(path)\n figure_path = './exportData/'\n # figure_path = '../'\n\n # set to True in order to automatically save the generated plots\n filename = '{}'.format(filename)\n # print \"figure_path + filename:{}\".format(figure_path + filename)\n\n plt.savefig(figure_path + filename)\n\ndef plotMultipleData(x, yList, yLabelList, title = \"data\", xAxisLabel = \"x\", yAxisLabel = \"y\", yMin = None, yMax = None):\n '''\n Plot single input feature x data with multiple corresponding response values a scatter plot\n :param x:\n :param yList:\n :param yLabelList:\n :param title:\n :param xAxisLabel:\n :param yAxisLabe:\n :return: None\n '''\n\n fig = plt.figure() # Create a new figure object for plotting\n ax = fig.add_subplot(111)\n\n markerList = np.array([\",\", \"o\", \"v\", \"^\", \"<\", \">\", \"1\", \"2\", \"3\", \"4\", \"8\", \"s\", \"p\", \"*\", \"h\", \"H\", \"+\", \"x\", \"D\", \"d\",])\n markerList = markerList[ 0 : yLabelList.shape[0] ]\n\n # load iris data\n # iris = datasets.load_iris()\n for i in range (0, yList.shape[0]):\n # plt.scatter(x, yList[i], plt.cm.hot(float(i) / yList.shape[0]), color=plt.cm.hot(float(i) / yList.shape[0]), marker='o', label = yLabelList[i])\n # for color print\n plt.scatter(x, yList[i], s=8, color=plt.cm.hot(float(i) / yList.shape[0]), marker='o', label=yLabelList[i])\n # for monochrome print\n # plt.scatter(x, yList[i], s=8, color= str(float(i) / yList.shape[0]*0.80), marker=markerList[i], label=yLabelList[i])\n\n # add explanatory note\n plt.legend()\n # add labels to each axis\n plt.xlabel(xAxisLabel)\n plt.ylabel(yAxisLabel)\n # add title\n plt.title(title)\n\n if yMin is not None:\n plt.ylim(ymin = yMin)\n if yMax is not None:\n plt.ylim(ymax = yMax)\n # ax.set_title(\"\\n\".join(wrap(title + \"OPVPricePerArea: \" + str(OPVPricePerAreaUSD), 60)))\n plt.pause(.1) # 
required on some systems so that rendering can happen\n\ndef plotData(x, t, title = \"data\", xAxisLabel = \"x\", yAxisLabel = \"t\", OPVPricePerAreaUSD = constant.OPVPricePerAreaUSD, arbitraryRegressionLine = False, \\\n coeff0 = 0.0, coeff1 = 0.0, coeff2 = 0.0, coeff3 = 0.0, coeff4 = 0.0, coeff5 = 0.0):\n \"\"\"\n Plot single input feature x data with corresponding response\n values t as a scatter plot\n :param x: sequence of 1-dimensional input data features\n :param t: sequence of 1-dimensional responses\n :param title: the title of the plot\n :param xAxisLabel: x-axix label of the plot\n :param xAxisLabel: y-axix label of the plot\n ;OPVPricePerAreaUSD: the OPV Price Per Area (USD/m^2)\n :return: None\n \"\"\"\n #print \"x:{}\".format(x)\n #print \"t:{}\".format(t)\n\n #ax.ticklabel_format(style='plain',axis='y')\n fig = plt.figure() # Create a new figure object for plotting\n ax = fig.add_subplot(111)\n\n plt.scatter(x, t, edgecolor='b', color='w', s = 8, marker='o')\n\n plt.xlabel(xAxisLabel)\n plt.ylabel(yAxisLabel)\n #plt.title(title + \"OPVPricePerArea: \" + OPVPricePerAreaUSD)\n plt.title(title)\n\n # add the OPV price per area [USD/m^2]\n # ax.set_title(\"\\n\".join(wrap(title + \" (OPVPricePerArea: \" + str(OPVPricePerAreaUSD)+\"[USD/m^2])\", 60)))\n\n if arbitraryRegressionLine:\n xLine = np.linspace(0, np.max(x), 100)\n y = coeff0 + coeff1 * xLine + coeff2 * xLine**2 + coeff3 * xLine**3 + coeff4 * xLine**4 + coeff5 * xLine**5\n plt.plot(xLine, y)\n\n plt.pause(.1) # required on some systems so that rendering can happen\n\n\ndef plotTwoData(x, y1, y2, title = \"data\", xAxisLabel = \"x\", yAxisLabel = \"t\", y1Label = \"data1\", y2Label = \"data2\"):\n '''\n Plot single input feature x data with two corresponding response values y1 and y2 as a scatter plot\n :param x:\n :param y1:\n :param y2:\n :param title:\n :param xAxisLabel:\n :param yAxisLabel:\n :return: None\n '''\n\n fig = plt.figure() # Create a new figure object for plotting\n ax = 
fig.add_subplot(111)\n\n # for color printing\n plt.scatter(x, y1, edgecolor='red', color='red', s = 8, marker='o', label = y1Label)\n plt.scatter(x, y2, edgecolor='blue', color='blue', s = 8, marker='o', label = y2Label)\n # for monochrome printing\n # plt.scatter(x, y1, edgecolor='0.1', color='0.1', s = 8, marker='o', label = y1Label)\n # plt.scatter(x, y2, edgecolor='0.7', color='0.8', s = 8, marker='x', label = y2Label)\n\n plt.legend()\n plt.xlabel(xAxisLabel)\n plt.ylabel(yAxisLabel)\n plt.title(title)\n # ax.set_title(\"\\n\".join(wrap(title + \"OPVPricePerArea: \" + str(OPVPricePerAreaUSD), 60)))\n plt.pause(.1) # required on some systems so that rendering can happen\n\ndef plotTwoDataMultipleYaxes(x, y1, y2, title, xAxisLabel, yAxisLabel1, yAxisLabel2, yLabel1, yLabel2):\n '''\n\n :param OPVCoverageList:\n :param unitDailyFreshWeightIncreaseList:\n :param electricityYield:\n :param title:\n :param xAxisLabel:\n :param yAxisLabel1:\n :param yAxisLabel2:\n :param yLabel1:\n :param yLabel2:\n :return:\n '''\n\n # Create a new figure object for plotting\n # fig = plt.figure()\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n\n # for color printing\n ax1.scatter(x, y1, edgecolor='red', color='red', s = 8, marker='o', label = yLabel1)\n ax2.scatter(x, y2, edgecolor='blue', color='blue', s = 8, marker='o', label = yLabel2)\n # for monochrome printing\n # ax1.scatter(x, y1, edgecolor='0.1', color='0.1', s = 8, marker='o', label = yLabel1)\n # ax2.scatter(x, y2, edgecolor='0.7', color='0.8', s = 8, marker='x', label = yLabel2)\n\n\n # add the explanatory note\n ax1.legend()\n ax2.legend()\n ax1.set_xlabel(xAxisLabel)\n ax1.set_ylabel(yAxisLabel1)\n ax2.set_ylabel(yAxisLabel2)\n plt.title(title)\n # ax.set_title(\"\\n\".join(wrap(title + \"OPVPricePerArea: \" + str(OPVPricePerAreaUSD), 60)))\n plt.pause(.1) # required on some systems so that rendering can happen\n\n\ndef plotCvResults(cv_loss, train_loss, cv_loss_title, figw=800, figh=420, mydpi=96, filepath=None, 
ylbl='Log Loss'):\n\n plt.figure(figsize=(figw / mydpi, figh / mydpi), dpi=mydpi)\n\n print ('>>>>> cv_loss.shape', cv_loss.shape)\n\n x = np.arange(0, cv_loss.shape[0])\n # cv_loss = np.mean(cv_loss, 0)\n # train_loss = np.mean(train_loss, 0)\n\n # put y-axis on same scale for all plots\n min_ylim = min(list(cv_loss) + list(train_loss))\n min_ylim = int(np.floor(min_ylim))\n max_ylim = max(list(cv_loss) + list(train_loss))\n max_ylim = int(np.ceil(max_ylim))\n\n print ('min_ylim={0}, max_ylim={1}'.format(min_ylim, max_ylim))\n\n plt.subplot(121)\n plt.plot(x, cv_loss, linewidth=2)\n plt.xlabel('Model Order')\n plt.ylabel(ylbl)\n plt.title(cv_loss_title)\n plt.pause(.1) # required on some systems so that rendering can happen\n plt.ylim(min_ylim, max_ylim)\n\n plt.subplot(122)\n plt.plot(x, train_loss, linewidth=2)\n plt.xlabel('Model Order')\n plt.ylabel(ylbl)\n plt.title('Train Loss')\n plt.pause(.1) # required on some systems so that rendering can happen\n plt.ylim(min_ylim, max_ylim)\n\n plt.subplots_adjust(right=0.95, wspace=0.25, bottom=0.2)\n plt.draw()\n\n if filepath:\n # plt.savefig(filepath, format='pdf')\n # print (\"filepath:{}\".format(filepath))\n plt.savefig(filepath)\n\ndef plotDataAndModel(x, y, w, title='Plot of data + appx curve (green curve)',filepath=None):\n plotDataSimple(x, y)\n plt.title(title + \"_\" + str(len(w)-1) + \"th_order\")\n plotModel(x, w, color='g')\n if filepath:\n plt.savefig(filepath, format='png')\n\ndef plotDataSimple(x, y):\n \"\"\"\n Plot single input feature x data with corresponding response\n values y as a scatter plot\n :param x: sequence of 1-dimensional input data features\n :param y: sequence of 1-dimensional responses\n :return: None\n \"\"\"\n plt.figure() # Create a new figure object for plotting\n plt.scatter(x, y, edgecolor='b', color='w', s = 8, marker='o')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.xlim (min(x)*0.98, max(x)*1.02)\n plt.ylim (min(y)*0.98, max(y)*1.02)\n plt.title('Data')\n plt.pause(.1) # 
required on some systems so that rendering can happen\n\n\ndef plotModel(x, w, color='r'):\n \"\"\"\n Plot the curve for an n-th order polynomial model:\n t = w0*x^0 + w1*x^1 + w2*x^2 + ... wn*x^n\n This works by creating a set of x-axis (plotx) points and\n then use the model parameters w to determine the corresponding\n t-axis (plott) points on the model curve.\n :param x: sequence of 1-dimensional input data features\n :param w: n-dimensional sequence of model parameters: w0, w1, w2, ..., wn\n :param color: matplotlib color to plot model curve\n :return: the plotx and plott values for the plotted curve\n \"\"\"\n # NOTE: this assumes a figure() object has already been created.\n plotx = np.linspace(min(x) - 0.25, max(x) + 0.25, 100)\n plotX = np.zeros((plotx.shape[0], w.size))\n for k in range(w.size):\n plotX[:, k] = np.power(plotx, k)\n plott = np.dot(plotX, w)\n plt.plot(plotx, plott, color=color, markersize = 10, linewidth=2)\n plt.pause(.1) # required on some systems so that rendering can happen\n return plotx, plott\n\n\ndef sigma(m, n, func, s=0):\n '''\n calculate the summation for a given function.\n Reference: https://qiita.com/SheepCloud/items/b8bd929c4f35dfd7b1bd\n :param m: initial index\n :param n: final index. The term with the final index is calculated\n :param func: the function\n :param s: the default value before summing f(m). this is usually 0.0\n :return:\n '''\n # print(\"m:{}, n:{}, s:{}\".format(m, n, s))\n\n\n if m > n: return s\n return sigma(m + 1, n, func, s + func(m))\n\n\nclass Counter(dict):\n \"\"\"\n A counter keeps track of counts for a set of keys.\n\n The counter class is an extension of the standard python\n dictionary type. It is specialized to have number values\n (integers or floats), and includes a handful of additional\n functions to ease the task of counting data. In particular,\n all keys are defaulted to have value 0. 
Using a dictionary:\n\n a = {}\n print a['test']\n\n would give an error, while the Counter class analogue:\n\n >>> a = Counter()\n >>> print a['test']\n 0\n\n returns the default 0 value. Note that to reference a key\n that you know is contained in the counter,\n you can still use the dictionary syntax:\n\n >>> a = Counter()\n >>> a['test'] = 2\n >>> print a['test']\n 2\n\n This is very useful for counting things without initializing their counts,\n see for example:\n\n >>> a['blah'] += 1\n >>> print a['blah']\n 1\n\n The counter also includes additional functionality useful in implementing\n the classifiers for this assignment. Two counters can be added,\n subtracted or multiplied together. See below for details. They can\n also be normalized and their total count and arg max can be extracted.\n \"\"\"\n\n def __getitem__(self, idx):\n self.setdefault(idx, 0)\n return dict.__getitem__(self, idx)\n\n def incrementAll(self, keys, count):\n \"\"\"\n Increments all elements of keys by the same count.\n\n >>> a = Counter()\n >>> a.incrementAll(['one','two', 'three'], 1)\n >>> a['one']\n 1\n >>> a['two']\n 1\n \"\"\"\n for key in keys:\n self[key] += count\n\n def argMax(self):\n \"\"\"\n Returns the key with the highest value.\n \"\"\"\n if len(self.keys()) == 0: return None\n all = self.items()\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]\n\n def sortedKeys(self):\n \"\"\"\n Returns a list of keys sorted by their values. 
Keys\n with the highest values will appear first.\n\n >>> a = Counter()\n >>> a['first'] = -2\n >>> a['second'] = 4\n >>> a['third'] = 1\n >>> a.sortedKeys()\n ['second', 'third', 'first']\n \"\"\"\n sortedItems = self.items()\n compare = lambda x, y: sign(y[1] - x[1])\n sortedItems.sort(cmp=compare)\n return [x[0] for x in sortedItems]\n\n def totalCount(self):\n \"\"\"\n Returns the sum of counts for all keys.\n \"\"\"\n return sum(self.values())\n\n def normalize(self):\n \"\"\"\n Edits the counter such that the total count of all\n keys sums to 1. The ratio of counts for all keys\n will remain the same. Note that normalizing an empty\n Counter will result in an error.\n \"\"\"\n total = float(self.totalCount())\n if total == 0: return\n for key in self.keys():\n self[key] = self[key] / total\n\n def divideAll(self, divisor):\n \"\"\"\n Divides all counts by divisor\n \"\"\"\n divisor = float(divisor)\n for key in self:\n self[key] /= divisor\n\n def copy(self):\n \"\"\"\n Returns a copy of the counter\n \"\"\"\n return Counter(dict.copy(self))\n\n def __mul__(self, y):\n \"\"\"\n Multiplying two counters gives the dot product of their vectors where\n each unique label is a vector element.\n\n >>> a = Counter()\n >>> b = Counter()\n >>> a['first'] = -2\n >>> a['second'] = 4\n >>> b['first'] = 3\n >>> b['second'] = 5\n >>> a['third'] = 1.5\n >>> a['fourth'] = 2.5\n >>> a * b\n 14\n \"\"\"\n sum = 0\n x = self\n if len(x) > len(y):\n x, y = y, x\n for key in x:\n if key not in y:\n continue\n sum += x[key] * y[key]\n return sum\n\n def __radd__(self, y):\n \"\"\"\n Adding another counter to a counter increments the current counter\n by the values stored in the second counter.\n\n >>> a = Counter()\n >>> b = Counter()\n >>> a['first'] = -2\n >>> a['second'] = 4\n >>> b['first'] = 3\n >>> b['third'] = 1\n >>> a += b\n >>> a['first']\n 1\n \"\"\"\n for key, value in y.items():\n self[key] += value\n\n def __add__(self, y):\n \"\"\"\n Adding two counters gives a 
counter with the union of all keys and\n counts of the second added to counts of the first.\n\n >>> a = Counter()\n >>> b = Counter()\n >>> a['first'] = -2\n >>> a['second'] = 4\n >>> b['first'] = 3\n >>> b['third'] = 1\n >>> (a + b)['first']\n 1\n \"\"\"\n addend = Counter()\n for key in self:\n if key in y:\n addend[key] = self[key] + y[key]\n else:\n addend[key] = self[key]\n for key in y:\n if key in self:\n continue\n addend[key] = y[key]\n return addend\n\n def __sub__(self, y):\n \"\"\"\n Subtracting a counter from another gives a counter with the union of all keys and\n counts of the second subtracted from counts of the first.\n\n >>> a = Counter()\n >>> b = Counter()\n >>> a['first'] = -2\n >>> a['second'] = 4\n >>> b['first'] = 3\n >>> b['third'] = 1\n >>> (a - b)['first']\n -5\n \"\"\"\n addend = Counter()\n for key in self:\n if key in y:\n addend[key] = self[key] - y[key]\n else:\n addend[key] = self[key]\n for key in y:\n if key in self:\n continue\n addend[key] = -1 * y[key]\n return addend\n" }, { "alpha_fraction": 0.7753486633300781, "alphanum_fraction": 0.8066340088844299, "avg_line_length": 97.25926208496094, "blob_id": "60219ffbbd5b3754eef8cf18fc08b3b76f9c0b0a", "content_id": "323ae655991981e83c463636d8026a8cabcf2b48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2653, "license_type": "permissive", "max_line_length": 752, "num_lines": 27, "path": "/README.md", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# The modeling and optimization code for my graduate research \"Modeling and Optimization of crop production and energy generation for economic profit in an organic photovoltaics integrated greenhouse\" by Kensaku Okada, Murat Kacira, Young-Jun Son, Lingling An\n\nThis simulation program was developed for my graduate reserach. 
The simulator estimates the overall economic profit of lettuce crop production in a greenhouse integrated with OPV film as part of the greenhouse cover. The model calculated the solar irradiance to a tilted surface, electric energy generated by OPV modules installed on greenhouse roof, transmittance through multi-span greenhouse roof, solar irradiance to lettuce in the greenhouse, the growth of lettuce yield, energy consumed by cooling and heating, cost and sales of electric energy and lettuce respectively, and finally the total economic profit. It enables evaluating various organic PV coverage ratios as well as traditional inorganic PV module by changing the model specification.\n\nMIDACO solver (http://www.midaco-solver.com/) was adopted for the optimization. It solved mixed integer non-linear programming (MINLP) problem by combining an extended evolutionary Ant Colony Optimization (ACO) algorithm with the Oracle Penalty Method for constrained handling.\n\n\n# Big picture of the simulator\n![fig 2](https://user-images.githubusercontent.com/6435299/45592781-d9d89580-b9b1-11e8-9433-6ff1bba15c25.png)\n\n\n# Optimization model\n![image](https://user-images.githubusercontent.com/6435299/49426796-f46a2000-f7e4-11e8-836a-3148a503497d.png)\n\n\n# How to run the simulator\n\n<li>Open SimulatorMain.py</li>\n<li>If you want to run the model only with the manually defined values, let case = \"OneCaseSimulation\" at Line 22 </li>\n<li>If you want to iterate the simulation only with different OPV coverage ratio, let case = \"OptimizeOnlyOPVCoverageRatio\" at Line 23 </li>\n<li>If you want to optimize the parameters (OPV coverage ratio, Summer period start date, Summer period end date) with the model using MIDACO Solver, let case = \"OptimizationByMINLPSolver\" at Line 24 (This option needs MIDACO solver (http://www.midaco-solver.com/) paid license. Please purchase it at the website and crease a new folder called MIDACO having the dll file you gonna get). 
</li>\n\nPlease claim issues if you face any problem in the program.\n\n# Documents\n### Graduation Thesis using this model: https://github.com/kensaku-okada/Greenhouse-with-OPV-film-Model/blob/master/documents/graduatePaperKensakuOkada.pdf\n### Defense Presentation Slide: https://github.com/kensaku-okada/Greenhouse-with-OPV-film-Model/blob/master/documents/defensePresentationDraft2_Kensakul_Okada.pptx\n" }, { "alpha_fraction": 0.659728467464447, "alphanum_fraction": 0.6840910315513611, "avg_line_length": 66.8603515625, "blob_id": "750b1abea6376b6e3c3067970e09e7b4f25eb31e", "content_id": "41c761dc26889ae8fbe50be8972bf90965ce5b88", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42280, "license_type": "permissive", "max_line_length": 207, "num_lines": 623, "path": "/SolarIrradianceMultiSpanRoof.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n##########import package files##########\nfrom scipy import stats\nimport sys\nimport datetime\nimport os as os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util as util\n#######################################################\n\n\ndef getAngleBetweenIncientRayAndHorizontalAxisPerpendicularToGHSpan(simulatorClass, hourlyModuleAzimuthAngle):\n # alpha: solar altitude angle\n alpha = simulatorClass.hourlySolarAltitudeAngle\n hourlySolarAzimuthAngle = simulatorClass.hourlySolarAzimuthAngle\n\n E = np.arcsin( np.sin(alpha) / np.sqrt(np.sin(alpha)**2 + (np.cos(alpha)*np.cos(hourlyModuleAzimuthAngle - hourlySolarAzimuthAngle))**2))\n # It was interpreted that the reference of the model Soriano et al., 2004,\"A Study of Direct Solar Radiation transmittance in Asymmetrical Multi-span Greenhouses using Scale Models and Simulation Models\"\n # need the angle E be expressed less than pi/2, when the solar position 
changes from east to east side in the sky passing meridian\n\n # By definition, E wants to take more than pi/2 [rad] when the sun moves from east to west, which occurs at noon.\n E = np.array([math.pi - E[i] if i!=0 and E[i] > 0.0 and E[i]-E[i-1] < 0.0 else E[i] for i in range (0, E.shape[0])])\n\n return E\n\ndef getAngleBetweenIncientRayAndHorizontalAxisParallelToGHSpan(simulatorClass, hourlyModuleAzimuthAngle):\n # alpha: solar altitude angle\n alpha = simulatorClass.hourlySolarAltitudeAngle\n hourlySolarAzimuthAngle = simulatorClass.hourlySolarAzimuthAngle\n\n EParallel = np.arcsin( np.sin(alpha) / np.sqrt(np.sin(alpha)**2 + (np.cos(alpha)*np.sin(hourlyModuleAzimuthAngle - hourlySolarAzimuthAngle))**2))\n\n return EParallel\n\n\ndef getTransmittanceForPerpendicularIrrThroughMultiSpanRoofFacingEastOrNorth(simulatorClass, directSolarRadiationToOPVEastDirection, \\\n EPerpendicularEastOrNorthFacingRoof):\n \"\"\"\n\n :param simulatorClass:\n :return:\n \"\"\"\n alpha = constant.roofAngleWestOrSouth\n beta = constant.roofAngleEastOrNorth\n L_1 = constant.greenhouseRoofWidthWestOrSouth\n L_2 = constant.greenhouseRoofWidthEastOrNorth\n\n # the transmittance of roof surfaces 1 (facing west or south)\n T_1 = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n # the transmittance of roof surfaces 2 (facing east or north)\n T_2 = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n # the reflectance of roof surfaces 1 (facing west or south)\n F_1 = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n # the reflectance of roof surfaces 2 (facing west or south)\n F_2 = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n\n # the transmittance/reflectance of solar irradiance directly transmitted to the soil through roof each direction of roof (surfaces east and west) (A10)\n T_12 = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n # the transmittance/reflectance on the roof facing west of solar irradiance reflected by the surface 
facing west and transmitted to the soil (surfaces west or south)\n T_r11 = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n F_r11 = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n # the transmittance/reflectance on the roof facing east of solar irradiance reflected by the surface facing west and transmitted to the soil (surfaces east or north)\n T_r12 = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n F_r12 = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n\n # transmittance through multispan roof\n T_matPerpendicular = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n # T_matParallel = np.zeros(directSolarRadiationToOPVEastDirection.shape[0])\n # print(\"num of iteration at getTransmittanceForPerpendicularIrrThroughMultiSpanRoofFacingEastOrNorth:{}\".format(directSolarRadiationToOPVEastDirection.shape[0]))\n\n ##############################################################################################################################\n # calculate the transmittance of perpendicular direction from the EAST facing roof after penetrating multi-span roof per hour\n ##############################################################################################################################\n for i in range(0, directSolarRadiationToOPVEastDirection.shape[0]):\n\n # print(\"i:{}\".format(i))\n # print(\"EPerpendicularEastOrNorthFacingRoof[i]:{}\".format(EPerpendicularEastOrNorthFacingRoof[i]))\n # print(\"directSolarRadiationToOPVEastDirection[i]:{}\".format(directSolarRadiationToOPVEastDirection[i]))\n\n # if the solar irradiance at a certain time is zero, then skip the element\n if directSolarRadiationToOPVEastDirection[i] == 0.0: continue\n\n # case A1: if the roof-slope angle of the west side is greater than the angle E formed by the incident ray with the horizontal axis perpendicular to the greenhouse span\n # It was assumed the direct solar radiation is 0 when math.pi - alpha <= 
EPerpendicularEastOrNorthFacingRoof[i]:\n elif EPerpendicularEastOrNorthFacingRoof[i] <= alpha:\n # print (\"call case A1\")\n\n # the number of intercepting spans, which can very depending on E.\n m = getNumOfInterceptingSPans(alpha, EPerpendicularEastOrNorthFacingRoof[i])\n # print(\"case A1 i:{}, m:{}, sys.getrecursionlimit():{}\".format(i, m,sys.getrecursionlimit()))\n\n # fraction (percentage) of light which does not pass through the first span [-]\n l_a = m * L_1 * math.sin(alpha + EPerpendicularEastOrNorthFacingRoof[i]) - (m + 1) * L_2 * math.sin(beta - EPerpendicularEastOrNorthFacingRoof[i])\n # fraction (percentage) of light which crosses the first span before continuing on towards the others [-]\n l_b = m * L_2 * math.sin(beta - EPerpendicularEastOrNorthFacingRoof[i]) - (m - 1) * L_1 * math.sin(alpha + EPerpendicularEastOrNorthFacingRoof[i])\n # print(\"l_a at case A1:{}\".format(l_a))\n # print(\"l_b at case A1:{}\".format(l_b))\n\n # claculate the incidence angle for each facing roof\n incidentAngleForEastOrNorthRoof = getIncidentAngleForEastOrNorthRoof(EPerpendicularEastOrNorthFacingRoof[i], beta)\n incidentAngleForWestOrSouthhRoof = getIncidentAngleForWestOrSouthRoof(EPerpendicularEastOrNorthFacingRoof[i], alpha)\n # print(\"incidentAngleForEastOrNorthRoof at case A1:{}\".format(incidentAngleForEastOrNorthRoof))\n # print(\"incidentAngleForWestOrSouthhRoof at case A1:{}\".format(incidentAngleForWestOrSouthhRoof))\n\n # calculate the transmittance and reflectance to each roof\n T_2[i], F_2[i] = fresnelEquation(incidentAngleForEastOrNorthRoof)\n T_1[i], F_1[i] = fresnelEquation(incidentAngleForWestOrSouthhRoof)\n # print(\"T_1[i]:{}, T_2[i]:{}, F_1[i]:{}, F_2[i]:{}\".format(T_1[i], T_2[i], F_1[i], F_2[i]))\n\n T_matPerpendicular[i] = getTransmittanceThroughMultiSpanCoveringCaseA1ForEastOrNorthFacingRoof(l_a, l_b, m, T_1[i], F_1[i], T_2[i], F_2[i])\n # print(\"T_matPerpendicular[i]:{}\".format(T_matPerpendicular[i]))\n\n # case A2.1: if the angle E is 
greater than the roof angle of the north side beta (beta < E < 2*beta)\n elif (alpha < EPerpendicularEastOrNorthFacingRoof[i] and EPerpendicularEastOrNorthFacingRoof[i] < 2.0*alpha) or \\\n (math.pi - 2.0*alpha < EPerpendicularEastOrNorthFacingRoof[i] and EPerpendicularEastOrNorthFacingRoof[i] < math.pi - alpha):\n # print (\"call case A2.1\")\n l_1, l_2, T_1[i], F_1[i], T_2[i], F_2[i], T_12[i] = getSolarIrradianceDirectlhyTransmittedToPlants(\\\n alpha, beta, L_1, L_2, EPerpendicularEastOrNorthFacingRoof[i])\n\n # get the angle E reflected from the west or south facing roof and transmit through multi-span roofs\n reflectedE = getReflectedE(EPerpendicularEastOrNorthFacingRoof[i], alpha)\n # get the incidence angle for each facing roof\n incidentAngleOfReflectedLightForEastOrNorthRoof = getIncidentAngleForEastOrNorthRoof(reflectedE, beta)\n incidentAngleOfReflectedLightForWestOrSouthRoof = getIncidentAngleForWestOrSouthRoof(reflectedE, alpha)\n\n # get the Transmittance and reflection on each roof from the reflected irradiance\n T_r12[i], F_r12[i] = fresnelEquation(incidentAngleOfReflectedLightForEastOrNorthRoof)\n T_r11[i], F_r11[i] = fresnelEquation(incidentAngleOfReflectedLightForWestOrSouthRoof)\n\n # the number of intercepting spans, which can very depending on E.\n x = abs(2.0 * alpha - EPerpendicularEastOrNorthFacingRoof[i])\n m = getNumOfInterceptingSPans(alpha, x)\n # print(\"case A2.1 i:{}, m:{}, sys.getrecursionlimit():{}\".format(i, m,sys.getrecursionlimit()))\n\n\n # l_a: fraction (percentage) of reflected light which does not pass through the first span [-], equation (A14)\n # l_b: fraction (percentage) of reflected light which crosses the first span before continuing on towards the others [-], equation (A15)\n l_a, l_b = getFractionOfTransmittanceOfReflectedSolarIrradiance(alpha, beta, L_1, L_2, m, EPerpendicularEastOrNorthFacingRoof[i])\n\n T_matPerpendicular[i] = 
getTransmittanceOfReflectedLightThroughMultiSpanCoveringCaseA2_1ForEastOrNorthFacingRoof(l_a, l_b, m, T_r11[i], T_r12[i], F_r11[i], F_r12[i], F_1[i], l_1, l_2) +\\\n + T_12[i]\n\n # print(\"T_matPerpendicular[i]:{}\".format(T_matPerpendicular[i]))\n\n # case A2.2: if the angle E is greater than the roof angle of the north side beta (2*beta < E < 3*beta)\n elif (2.0*alpha < EPerpendicularEastOrNorthFacingRoof[i] and EPerpendicularEastOrNorthFacingRoof[i] < 3.0*alpha) or \\\n (math.pi - 3.0 * alpha < EPerpendicularEastOrNorthFacingRoof[i] and EPerpendicularEastOrNorthFacingRoof[i] < math.pi - 2.0 * alpha):\n # print (\"call case A2.2\")\n\n l_1, l_2, T_1[i], F_1[i], T_2[i], F_2[i], T_12[i] = getSolarIrradianceDirectlhyTransmittedToPlants(\\\n alpha, beta, L_1, L_2, EPerpendicularEastOrNorthFacingRoof[i])\n\n # get the angle E reflected from the west or south facing roof and transmit through multi-span roofs\n reflectedE = getReflectedE(EPerpendicularEastOrNorthFacingRoof[i], alpha)\n # get the incidence angle for each facing roof\n incidentAngleOfReflectedLightForEastOrNorthRoof = getIncidentAngleForEastOrNorthRoof(reflectedE, beta)\n incidentAngleOfReflectedLightForWestOrSouthRoof = getIncidentAngleForWestOrSouthRoof(reflectedE, alpha)\n\n # get the Transmittance and reflection on each roof from the reflected irradiance\n T_r12[i], F_r12[i] = fresnelEquation(incidentAngleOfReflectedLightForEastOrNorthRoof)\n T_r11[i], F_r11[i] = fresnelEquation(incidentAngleOfReflectedLightForWestOrSouthRoof)\n\n # the number of intercepting spans, which can very depending on E.\n x = abs(2.0 * alpha - EPerpendicularEastOrNorthFacingRoof[i])\n m = getNumOfInterceptingSPans(alpha, x)\n\n # l_a: fraction (percentage) of reflected light which does not pass through the first span [-], equation (A14)\n # l_b: fraction (percentage) of reflected light which crosses the first span before continuing on towards the others [-], equation (A15)\n l_a, l_b = 
getFractionOfTransmittanceOfReflectedSolarIrradiance(alpha, beta, L_1, L_2, m, EPerpendicularEastOrNorthFacingRoof[i])\n\n T_matPerpendicular[i] = getTransmittanceOfReflectedLightThroughMultiSpanCoveringCaseA2_1ForEastOrNorthFacingRoof(l_a, l_b, m, T_r11[i], T_r12[i], F_r11[i], F_r12[i], F_1[i], l_1, l_2) \\\n + T_12[i]\n # print(\"T_matPerpendicular[i]:{}\".format(T_matPerpendicular[i]))\n\n # case A2.3: if 3.0 * alpha < EPerpendicularEastOrNorthFacingRoof[i]\n # since this model assumes north-south direction greenhouse, the E can be more than pi/2.0, and thus the following range was set\n elif 3.0 * alpha < EPerpendicularEastOrNorthFacingRoof[i] and EPerpendicularEastOrNorthFacingRoof[i] < (math.pi - 3.0 * alpha):\n # print (\"call case A2.3\")\n _, _, _, _, _, _, T_12[i] = getSolarIrradianceDirectlhyTransmittedToPlants(alpha, beta, L_1, L_2, EPerpendicularEastOrNorthFacingRoof[i])\n T_matPerpendicular[i] = T_12[i]\n # print(\"T_matPerpendicular[i]:{}\".format(T_matPerpendicular[i]))\n\n # print(\"T_1 :{}\".format(T_1))\n # print(\"F_1 :{}\".format(F_1))\n # print(\"T_2 :{}\".format(T_2))\n # print(\"F_2 :{}\".format(F_2))\n # print(\"T_12 :{}\".format(T_12))\n # print(\"T_r11 :{}\".format(T_r11))\n # print(\"F_r11 :{}\".format(F_r11))\n # print(\"T_r12 :{}\".format(T_r12))\n # print(\"F_r12 :{}\".format(F_r12))\n # print(\"T_matPerpendicular :{}.format(T_matPerpendicular))\n\n return T_matPerpendicular\n\ndef getTransmittanceForPerpendicularIrrThroughMultiSpanRoofFacingWestOrSouth(simulatorClass, directSolarRadiationToOPVWestDirection, \\\n EPerpendicularWestOrSouthFacingRoof):\n \"\"\"\n :return:\n \"\"\"\n alpha = constant.roofAngleWestOrSouth\n beta = constant.roofAngleEastOrNorth\n L_1 = constant.greenhouseRoofWidthWestOrSouth\n L_2 = constant.greenhouseRoofWidthEastOrNorth\n\n # the Transmittances of roof surfaces 1 (facing west or south)\n T_1 = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n # the Transmittances of roof surfaces 2 (facing east 
or north)\n T_2 = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n # the reflectance of roof surfaces 1 (facing west or south)\n F_1 = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n # the reflectance of roof surfaces 2 (facing west or south)\n F_2 = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n\n # the transmittance/reflectance of solar irradiance directly transmitted to the soil through roof each direction of roof (surfaces east and west) (A10)\n T_12 = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n\n # the transmittance/reflectance on the roof facing west of solar irradiance reflected by the surface facing east and transmitted to the soil (surfaces west or south)\n T_r21 = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n F_r21 = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n # the transmittance/reflectance on the roof facing east of solar irradiance reflected by the surface facing east and transmitted to the soil (surfaces east or north)\n T_r22 = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n F_r22 = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n\n # transmittance through multispan roof\n T_matPerpendicular = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n # T_matParallel = np.zeros(directSolarRadiationToOPVWestDirection.shape[0])\n\n\n # calculate the solar irradiacne from the EAST facing roof after penetrating multi-span roof per hour\n for i in range(0, directSolarRadiationToOPVWestDirection.shape[0]):\n # print(\"i:{}\".format(i))\n # print(\"EPerpendicularWestOrSouthFacingRoof[i]:{}\".format(EPerpendicularWestOrSouthFacingRoof[i]))\n # print(\"directSolarRadiationToOPVWestDirection[i]:{}\".format(directSolarRadiationToOPVWestDirection[i]))\n\n # if the solar irradiance at a certain time is zero, then skip the element\n if directSolarRadiationToOPVWestDirection[i] == 0.0: continue\n\n # case A1: if the roof-slope angle of the west side is greater 
than the angle E formed by the incident ray with the horizontal axis perpendicular to the greenhouse span\n # It was assumed the direct solar radiation is 0 when EPerpendicularWestOrSouthFacingRoof[i] <= alpha\n # elif EPerpendicularWestOrSouthFacingRoof[i] <= alpha or math.pi - alpha <= EPerpendicularWestOrSouthFacingRoof[i]:\n elif math.pi - alpha <= EPerpendicularWestOrSouthFacingRoof[i]:\n # print (\"call case A1\")\n\n # since the original model does not suppose EPerpendicular is more than pi/2 (the cause it assume the angle of the greenhouse is east-west, not north-south where the sun croees the greenhouse)\n # EPerpendicular is converted into pi - EPerpendicular when it is more than pi/2\n if EPerpendicularWestOrSouthFacingRoof[i] > math.pi/2.0:\n EPerpendicularWestOrSouthFacingRoof[i] = math.pi - EPerpendicularWestOrSouthFacingRoof[i]\n\n # print(\"EPerpendicularWestOrSouthFacingRoof_CaseA1[i]:{}\".format(EPerpendicularWestOrSouthFacingRoof[i]))\n # the number of intercepting spans, which can very depending on E.\n m = getNumOfInterceptingSPans(alpha, EPerpendicularWestOrSouthFacingRoof[i])\n\n # fraction (percentage) of light which does not pass through the first span [-]\n l_a = m * L_1 * math.sin(alpha + EPerpendicularWestOrSouthFacingRoof[i]) - (m + 1) * L_2 * math.sin(beta - EPerpendicularWestOrSouthFacingRoof[i])\n # fraction (percentage) of light which crosses the first span before continuing on towards the others [-]\n l_b = m * L_2 * math.sin(beta - EPerpendicularWestOrSouthFacingRoof[i]) - (m - 1) * L_1 * math.sin(alpha + EPerpendicularWestOrSouthFacingRoof[i])\n # print(\"l_a at case A1:{}\".format(l_a))\n # print(\"l_b at case A1:{}\".format(l_b))\n\n # the following functions works to if you do not rollback EPerpendicularWestOrSouthFacingRoof\n # claculate the incidence angle for each facing roof\n incidentAngleForEastOrNorthRoof = getIncidentAngleForEastOrNorthRoofWithBeamComingFromWestOrSouth(EPerpendicularWestOrSouthFacingRoof[i], beta)\n 
incidentAngleForWestOrSouthRoof = getIncidentAngleForWestOrSouthRoofWithBeamComingFromWestOrSouth(EPerpendicularWestOrSouthFacingRoof[i], alpha)\n # print(\"incidentAngleForEastOrNorthRoof at case A1:{}\".format(incidentAngleForEastOrNorthRoof))\n # print(\"incidentAngleForWestOrSouthRoof at case A1:{}\".format(incidentAngleForWestOrSouthRoof))\n\n # calculate the transmittance and reflectance to each roof\n T_2[i], F_2[i] = fresnelEquation(incidentAngleForEastOrNorthRoof)\n T_1[i], F_1[i] = fresnelEquation(incidentAngleForWestOrSouthRoof)\n # print(\"T_1[i]:{}, T_2[i]:{}, F_1[i]:{}, F_2[i]:{}\".format(T_1[i], T_2[i], F_1[i], F_2[i]))\n\n T_matPerpendicular[i] = getTransmittanceThroughMultiSpanCoveringCaseA1ForWestOrSouthFacingRoof(l_a, l_b, m, T_1[i], F_1[i], T_2[i], F_2[i])\n # print(\"T_matPerpendicular[i]:{}\".format(T_matPerpendicular[i]))\n\n # case A2.1: if the angle E is greater than the roof angle of the north side beta (beta < E < 2*beta)\n elif (alpha < EPerpendicularWestOrSouthFacingRoof[i] and EPerpendicularWestOrSouthFacingRoof[i] < 2.0*alpha) or \\\n (math.pi - 2.0 * alpha < EPerpendicularWestOrSouthFacingRoof[i] and EPerpendicularWestOrSouthFacingRoof[i] < math.pi - alpha):\n # print (\"call case A2.1\")\n\n l_1, l_2, T_1[i], F_1[i], T_2[i], F_2[i], T_12[i] = getSolarIrradianceDirectlhyTransmittedToPlants(\\\n alpha, beta, L_1, L_2, EPerpendicularWestOrSouthFacingRoof[i])\n\n # get the angle E reflected from the west or south facing roof and transmit through multi-span roofs\n reflectedE = getReflectedE(EPerpendicularWestOrSouthFacingRoof[i], alpha)\n # get the incidence angle for each facing roof\n incidentAngleOfReflectedLightForEastOrNorthRoof = getIncidentAngleForEastOrNorthRoof(reflectedE, beta)\n incidentAngleOfReflectedLightForWestOrSouthRoof = getIncidentAngleForWestOrSouthRoof(reflectedE, alpha)\n\n # get the Transmittance and reflection on each roof from the reflected irradiance\n T_r22[i], F_r22[i] = 
fresnelEquation(incidentAngleOfReflectedLightForEastOrNorthRoof)\n T_r21[i], F_r21[i] = fresnelEquation(incidentAngleOfReflectedLightForWestOrSouthRoof)\n\n # the number of intercepting spans, which can very depending on E.\n x = abs(2.0 * alpha - EPerpendicularWestOrSouthFacingRoof[i])\n m = getNumOfInterceptingSPans(alpha, x)\n\n # l_a: fraction (percentage) of reflected light which does not pass through the first span [-], equation (A14)\n # l_b: fraction (percentage) of reflected light which crosses the first span before continuing on towards the others [-], equation (A15)\n l_a, l_b = getFractionOfTransmittanceOfReflectedSolarIrradiance(alpha, beta, L_1, L_2, m, EPerpendicularWestOrSouthFacingRoof[i])\n # print(\"l_a:{}, l_b:{}\".format(l_a, l_b))\n\n # # fraction (percentage) of light which does not pass through the first span [-], equation (A14)\n # l_a = L_2 * m * math.sin(EPerpendicularWestOrSouthFacingRoof[i] - beta) - L_1 * (m - 1) * math.sin(alpha + 2.0 * beta - EPerpendicularWestOrSouthFacingRoof[i])\n # # fraction (percentage) of light which crosses the first span before continuing on towards the others [-], equation (A15)\n # l_b = L_1 * math.sin(alpha + 2.0 * beta - EPerpendicularWestOrSouthFacingRoof[i]) - L_2 * math.sin(EPerpendicularWestOrSouthFacingRoof[i] - beta)\n\n T_matPerpendicular[i] = getTransmittanceOfReflectedLightThroughMultiSpanCoveringCaseA2_1ForWestOrSouthFacingRoof(l_a, l_b, m, T_r21[i], T_r22[i], F_r21[i], F_r22[i], F_1[i], l_1, l_2)\\\n + T_12[i]\n # print(\"T_matPerpendicular[i]:{}\".format(T_matPerpendicular[i]))\n\n # case A2.2: if the angle E is greater than the roof angle of the north side beta (2*beta < E < 3*beta)\n elif (2.0*alpha < EPerpendicularWestOrSouthFacingRoof[i] and EPerpendicularWestOrSouthFacingRoof[i] < 3.0*alpha) or \\\n (math.pi - 3.0 * alpha < EPerpendicularWestOrSouthFacingRoof[i] and EPerpendicularWestOrSouthFacingRoof[i] < math.pi - 2.0 * alpha):\n # print (\"call case A2.2\")\n\n l_1, l_2, T_1[i], 
F_1[i], T_2[i], F_2[i], T_12[i] = getSolarIrradianceDirectlhyTransmittedToPlants(\\\n alpha, beta, L_1, L_2, EPerpendicularWestOrSouthFacingRoof[i])\n\n # get the angle E reflected from the west or south facing roof and transmit through multi-span roofs\n reflectedE = getReflectedE(EPerpendicularWestOrSouthFacingRoof[i], alpha)\n # get the incidence angle for each facing roof\n incidentAngleOfReflectedLightForEastOrNorthRoof = getIncidentAngleForEastOrNorthRoof(reflectedE, beta)\n incidentAngleOfReflectedLightForWestOrSouthRoof = getIncidentAngleForWestOrSouthRoof(reflectedE, alpha)\n\n # get the Transmittance and reflection on each roof from the reflected irradiance\n T_r22[i], F_r22[i] = fresnelEquation(incidentAngleOfReflectedLightForEastOrNorthRoof)\n T_r21[i], F_r21[i] = fresnelEquation(incidentAngleOfReflectedLightForWestOrSouthRoof)\n\n # the number of intercepting spans, which can very depending on E.\n x = abs(2.0 * alpha - EPerpendicularWestOrSouthFacingRoof[i])\n m = getNumOfInterceptingSPans(alpha, x)\n\n # fraction (percentage) of light which does not pass through the first span [-], equation (A16)\n l_a = L_2 * (1-m) * math.sin(EPerpendicularWestOrSouthFacingRoof[i] - beta) + L_1 * m * math.sin(alpha + 2.0 * beta - EPerpendicularWestOrSouthFacingRoof[i])\n # fraction (percentage) of light which crosses the first span before continuing on towards the others [-], equation (A17)\n l_b = L_2 * math.sin(EPerpendicularWestOrSouthFacingRoof[i] - beta) - L_1 * math.sin(alpha + 2.0 * beta - EPerpendicularWestOrSouthFacingRoof[i])\n # print(\"l_a:{}, l_b:{}\".format(l_a, l_b))\n\n T_matPerpendicular[i] = getTransmittanceOfReflectedLightThroughMultiSpanCoveringCaseA2_1ForWestOrSouthFacingRoof(l_a, l_b, m, T_r21[i], T_r22[i], F_r21[i], F_r22[i], F_1[i], l_1, l_2) \\\n + T_12[i]\n # print(\"T_matPerpendicular[i]:{}\".format(T_matPerpendicular[i]))\n\n # case A2.3: if 3.0 * alpha < EPerpendicularEastOrNorthFacingRoof[i]\n # since this model assumes north-south 
direction greenhouse, the E can be more than pi/2.0, and thus the following range was set\n elif (3.0*alpha < EPerpendicularWestOrSouthFacingRoof[i] and EPerpendicularWestOrSouthFacingRoof[i] < (math.pi - 3.0*alpha)):\n # print (\"call case A2.3\")\n _, _, _, _, _, _, T_12[i] = getSolarIrradianceDirectlhyTransmittedToPlants(alpha, beta, L_1, L_2, EPerpendicularWestOrSouthFacingRoof[i])\n T_matPerpendicular[i] = T_12[i]\n\n # print(\"T_1 :{}\".format(T_1))\n # print(\"F_1 :{}\".format(F_1))\n # print(\"T_2 :{}\".format(T_2))\n # print(\"F_2 :{}\".format(F_2))\n # print(\"T_12 :{}\".format(T_12))\n # print(\"T_r21 :{}\".format(T_r21))\n # print(\"F_r21 :{}\".format(F_r21))\n # print(\"T_r22 :{}\".format(T_r22))\n # print(\"F_r22 :{}\".format(F_r22))\n # print(\"T_matPerpendicular :{}\".format(T_matPerpendicular))\n\n return T_matPerpendicular\n\n\ndef getNumOfInterceptingSPans(alpha, EPerpendicularWestOrSouthFacingRoof):\n '''\n # the number of intercepting spans, which can very depending on E.\n '''\n m = int(1.0 / 2.0 * (1 + math.tan(alpha) / math.tan(EPerpendicularWestOrSouthFacingRoof)))\n # print(\"WestOrSouth case A1 i:{}, m:{}, math.tan(alpha):{}, math.tan(EPerpendicularEastOrNorthFacingRoof[i]):{}\".format(i, m, math.tan(alpha), math.tan(EPerpendicularEastOrNorthFacingRoof[i])))\n # if the angle between the incident light and the horizontal axis is too small, the m can be too large, which cause a system error at Util.sigma by iterating too much. Thus, the upper limit was set\n if m > constant.mMax: m = constant.mMax\n\n return m\n\ndef getTransmittanceForParallelIrrThroughMultiSpanRoof(simulatorClass, EParallelEastOrNorthFacingRoof):\n '''\n In the parallel direction to the grenhouse roof, the agle of the roof is 0. 
There is no reflection transmitted to other part of the roof.\n :return:\n '''\n ##############################################################################################################################\n # calculate the transmittance of perpendicular direction from the EAST facing roof after penetrating multi-span roof per hour\n ##############################################################################################################################\n # the transmittance of roof surfaces\n T = np.zeros(simulatorClass.getDirectSolarRadiationToOPVEastDirection().shape[0])\n # the reflectance of roof surfaces\n F = np.zeros(simulatorClass.getDirectSolarRadiationToOPVEastDirection().shape[0])\n\n for i in range(0, simulatorClass.getDirectSolarRadiationToOPVEastDirection().shape[0]):\n\n # calculate the transmittance and reflectance to each roof\n T[i], F[i] = fresnelEquation(EParallelEastOrNorthFacingRoof[i])\n\n return T\n\ndef getIncidentAngleForEastOrNorthRoof(EPerpendicularEastOrNorthFacingRoof, beta):\n # calculate the incident angle [rad]\n # the incident angle should be the angle between the solar irradiance and the normal to the tilted roof\n return abs(math.pi / 2.0 - abs(beta + EPerpendicularEastOrNorthFacingRoof))\n\n # if beta + EPerpendicularEastOrNorthFacingRoof < math.pi/2.0:\n # return math.pi/2.0 - abs(beta + EPerpendicularEastOrNorthFacingRoof)\n # # if the angle E + alpha is over pi/2 (the sun pass the normal to the tilted roof )\n # else:\n # return abs(beta + EPerpendicularEastOrNorthFacingRoof) - math.pi / 2.0\n\ndef getIncidentAngleForWestOrSouthRoof(EPerpendicularWestOrSouthFacingRoof, alpha):\n # calculate the incident angle [rad]\n # the incident angle should be the angle between the solar irradiance and the normal to the tilted roof\n return abs(math.pi/2.0 - abs(alpha - EPerpendicularWestOrSouthFacingRoof))\n\n\ndef getIncidentAngleForEastOrNorthRoofWithBeamComingFromWestOrSouth(EPerpendicularEastOrNorthFacingRoof, beta):\n # 
calculate the incident angle [rad]\n # the incident angle should be the angle between the solar irradiance and the normal to the tilted roof\n return abs(math.pi / 2.0 - abs(beta - EPerpendicularEastOrNorthFacingRoof))\n\ndef getIncidentAngleForWestOrSouthRoofWithBeamComingFromWestOrSouth(EPerpendicularWestOrSouthFacingRoof, alpha):\n # calculate the incident angle [rad]\n # the incident angle should be the angle between the solar irradiance and the normal to the tilted roof\n return abs(math.pi / 2.0 - abs(alpha + EPerpendicularWestOrSouthFacingRoof))\n\n\ndef fresnelEquation(SolarIrradianceIncidentAngle):\n '''\n calculate the transmittance and reflectance for a given incidnet angle and index of reflectances\n reference:\n http://hyperphysics.phy-astr.gsu.edu/hbase/phyopt/freseq.html\n https://www.youtube.com/watch?v=ayxFyRF-SrM\n https://ja.wikipedia.org/wiki/%E3%83%95%E3%83%AC%E3%83%8D%E3%83%AB%E3%81%AE%E5%BC%8F\n :return: transmittance, reflectance\n '''\n\n # reference: https://www.filmetrics.com/refractive-index-database/Polyethylene/PE-Polyethene\n PEFilmRefractiveIndex = constant.PEFilmRefractiveIndex\n # reference: https://en.wikipedia.org/wiki/Refractive_index\n AirRefractiveIndex = constant.AirRefractiveIndex\n\n # print(\"SolarIrradianceIncidentAngle:{}\".format(SolarIrradianceIncidentAngle))\n\n # Snell's law, calculating the transmittance raw after refractance\n transmittanceAngle = math.asin(AirRefractiveIndex/PEFilmRefractiveIndex*math.sin(SolarIrradianceIncidentAngle))\n\n # S (perpendicular) wave\n perpendicularlyPolarizedTransmittance = 2.0 * AirRefractiveIndex*math.cos(SolarIrradianceIncidentAngle) / \\\n (AirRefractiveIndex*math.cos(SolarIrradianceIncidentAngle) + PEFilmRefractiveIndex*math.cos(transmittanceAngle))\n perpendicularlyPolarizedReflectance = (AirRefractiveIndex*math.cos(SolarIrradianceIncidentAngle) - PEFilmRefractiveIndex*math.cos(transmittanceAngle)) / \\\n (AirRefractiveIndex*math.cos(SolarIrradianceIncidentAngle) + 
PEFilmRefractiveIndex*math.cos(transmittanceAngle))\n\n # P (parallel) wave\n parallelPolarizedTransmittance = 2.0 * AirRefractiveIndex*math.cos(SolarIrradianceIncidentAngle) / \\\n (PEFilmRefractiveIndex*math.cos(SolarIrradianceIncidentAngle) + AirRefractiveIndex*math.cos(transmittanceAngle))\n parallelPolarizedReflectance = (PEFilmRefractiveIndex*math.cos(SolarIrradianceIncidentAngle) - AirRefractiveIndex*math.cos(transmittanceAngle)) / \\\n (PEFilmRefractiveIndex*math.cos(SolarIrradianceIncidentAngle) + AirRefractiveIndex*math.cos(transmittanceAngle))\n\n # according to https://www.youtube.com/watch?v=ayxFyRF-SrM at around 17:00, the reflection can be negative when the phase of light changes by 180 degrees\n # Here it was assumed the phase shift does not influence the light intensity (absolute stength), and so the negative sign was changed into the positive\n perpendicularlyPolarizedReflectance = abs(perpendicularlyPolarizedReflectance)\n parallelPolarizedReflectance = abs(parallelPolarizedReflectance)\n\n\n # Assuming that sunlight included diversely oscilating radiation by 360 degrees, the transmittance and reflectance was averaged with those of parpendicular and parallel oscilation\n transmittanceForSolarIrradiance = (perpendicularlyPolarizedTransmittance + parallelPolarizedTransmittance) / 2.0\n ReflectanceForSolarIrradiance = (perpendicularlyPolarizedReflectance + parallelPolarizedReflectance) / 2.0\n\n return transmittanceForSolarIrradiance, ReflectanceForSolarIrradiance\n\n\ndef getTransmittanceThroughMultiSpanCoveringCaseA1ForEastOrNorthFacingRoof(l_a, l_b, m, T_1, F_1, T_2, F_2):\n '''\n the equation number in the reference: (A8), page 252\n '''\n # print(\"l_a:{}, l_b:{}, m:{}, T_1:{}, F_1:{}, T_2:{}, F_2:{}\".format(l_a, l_b, m, T_1, F_1, T_2, F_2))\n\n transmittanceThroughMultiSpanCoveringCaseA1ForEastOrNorthFacingRoof = (l_a*T_2*(F_1*util.sigma(0, m-2, lambda s: (T_1*T_2)**s,0) + (T_1*T_2)**(m-1)) + \\\n l_b*T_2*(F_1*util.sigma(0, m-1, lambda s: 
(T_1*T_2)**s,0) + (T_1*T_2)**m)) / (l_a + l_b)\n # print(\"transmittanceThroughMultiSpanCoveringCaseA1ForEastOrNorthFacingRoof:{}\".format(transmittanceThroughMultiSpanCoveringCaseA1ForEastOrNorthFacingRoof))\n return transmittanceThroughMultiSpanCoveringCaseA1ForEastOrNorthFacingRoof\n\ndef getTransmittanceThroughMultiSpanCoveringCaseA1ForWestOrSouthFacingRoof(l_a, l_b, m, T_1, F_1, T_2, F_2):\n '''\n the equation number in the reference: (A8), page 252\n the content of this function is same as getTransmittanceThroughMultiSpanCoveringCaseA1ForEastOrNorthFacingRoof, but made this just for clarifying the meaning of variables.\n '''\n\n transmittanceThroughMultiSpanCoveringCaseA1ForWestOrSouthFacingRoof = (l_a*T_1*(F_2*util.sigma(0, m-2, lambda x: (T_1*T_2)**x,0) + (T_1*T_2)**(m-1)) + \\\n l_b*T_1*(F_2*util.sigma(0, m-1, lambda x: (T_1*T_2)**x,0) + (T_1*T_2)**m)) / (l_a + l_b)\n # print(\"transmittanceThroughMultiSpanCoveringCaseA1ForWestOrSouthFacingRoof:{}\".format(transmittanceThroughMultiSpanCoveringCaseA1ForWestOrSouthFacingRoof))\n return transmittanceThroughMultiSpanCoveringCaseA1ForWestOrSouthFacingRoof\n\ndef getReflectedE(E, roofAngle):\n incidentAngle = getIncidentAngleForWestOrSouthRoof(E, roofAngle)\n # the reflected incident angle E' is pi - (pi - alpha) - (pi/2.0 - incidentAngle))\n return abs(roofAngle - incidentAngle - math.pi/2.0)\n\n\ndef getFractionOfTransmittanceOfReflectedSolarIrradiance(alpha, beta, L_1, L_2, m, EPerpendicular):\n\n # The original source G.P.A. Bot 1983, \"Greenhouse Climate: from physical processes to a dynamic model\" does not seem to suppose EPerpendicular becomes more than pi/2, thus,\n # l_a and l_b became negative when EPerpendicular > pi/2 indeed. 
Thus it was converted to the rest of the angle\n if EPerpendicular > math.pi/2.0:\n EPerpendicular = math.pi/2.0 - EPerpendicular\n\n # fraction (percentage) of light which does not pass through the first span [-], equation (A14)\n l_a = L_2 * m * math.sin(EPerpendicular - beta) - L_1 * (m - 1) * math.sin(alpha + 2.0 * beta - EPerpendicular)\n # print(\"m:{}, L_1:{}, L_2:{}, alpha:{}, beta:{}\".format(m, L_1, L_2, alpha, beta))\n # print(\"EPerpendicular - beta):{}\".format(math.sin(EPerpendicular - beta)))\n # print(\"math.sin(alpha + 2.0*beta - EPerpendicular):{}\".format(math.sin(alpha + 2.0 * beta - EPerpendicular)))\n # fraction (percentage) of light which crosses the first span before continuing on towards the others [-], equation (A15)\n l_b = L_1 * math.sin(alpha + 2.0 * beta - EPerpendicular) - L_2 * math.sin(EPerpendicular - beta)\n # print(\"l_a:{}, l_b:{}\".format(l_a, l_b))\n\n return l_a, l_b\n\ndef getTransmittanceOfReflectedLightThroughMultiSpanCoveringCaseA2_1ForEastOrNorthFacingRoof(l_a, l_b, m, T_r11, T_r12, F_r11, F_r12, F_1, l_1, l_2):\n '''\n the equation number in the reference: (A13)\n '''\n # print(\"called from CaseA2_1: l_a:{}, l_b:{}, m:{}, T_r11:{}, T_r12:{}, F_r11:{}, F_r12:{}, F_1:{}, l_1:{}, l_2:{}\".format(l_a, l_b, m, T_r11, T_r12, F_r11, F_r12, F_1, l_1, l_2))\n\n # transmittanceOfReflectedLight = (F_1*l_a*T_r11*(F_r12*util.sigma(2, m-2, lambda s: (T_r11*T_r12)**s,0.0) + (T_r11*T_r12)**(m-1)) + \\\n # F_1*l_b*T_r11*(F_r12*util.sigma(0, m-3, lambda s: util.sigma(0, s, lambda n: (T_r11*T_r12)**n,0),0.0) + \\\n # util.sigma(0, m-2, lambda s: (T_r11 * T_r12)**s, 0.0))) / (l_1 + l_2)\n\t\t#\n # # print (\"transmittanceOfReflectedLight:{}\".format(transmittanceOfReflectedLight))\n\t\t#\n # return transmittanceOfReflectedLight\n\n return (F_1*l_a*T_r11*(F_r12*util.sigma(2, m-2, lambda s: (T_r11*T_r12)**s,0.0) + (T_r11*T_r12)**(m-1)) + \\\n F_1*l_b*T_r11*(F_r12*util.sigma(0, m-3, lambda s: util.sigma(0, s, lambda n: 
(T_r11*T_r12)**n,0),0.0) + \\\n util.sigma(0, m-2, lambda s: (T_r11 * T_r12)**s, 0.0))) / (l_1 + l_2)\n\ndef getTransmittanceOfReflectedLightThroughMultiSpanCoveringCaseA2_1ForWestOrSouthFacingRoof(l_a, l_b, m, T_r21, T_r22, F_r22, F_r21, F_2, l_1, l_2):\n '''\n the content of this function is same as getTransmittanceThroughMultiSpanCoveringCaseA2_1ForEastOrNorthFacingRoof, but made this just for clarifying the meaning of variables.\n '''\n # print(\"T_r21:{}, T_r22:{}, F_r21:{}, F_r22:{}, F_2:{}, l_1:{}, l_2:{}\".format(T_r21, T_r22, F_r21, F_r22, F_2, l_1, l_2))\n\n return (F_2*l_a*T_r21*(F_r22*util.sigma(2, m-2, lambda s: (T_r21*T_r22)**s,0) + (T_r21*T_r22)**(m-1)) + \\\n F_2*l_b*T_r21*(F_r22*util.sigma(0, m-3, lambda s: util.sigma(0, s, lambda n: (T_r21*T_r22)**n,0),0) + \\\n util.sigma(0, m-2, lambda s: (T_r21 * T_r22)**s, 0))) / (l_1 + l_2)\n\n\ndef getTransmittanceThroughMultiSpanCoveringCaseA2_2ForEastOrNorthFacingRoof(l_a, l_b, m, T_r11, T_r12, F_r11, F_r12, F_1, l_1, l_2):\n '''\n the equation number in the reference: (A18)\n '''\n return (l_a*F_1 + T_r11*F_r12*util.sigma(0, m-1, lambda s: (T_r11*T_r12)**s,0) + \\\n l_b*F_1*T_r11*T_r12*util.sigma(0, m - 2, lambda s: util.sigma(0, s, lambda n: (T_r11 * T_r12)**n, 0), 0))/(l_1+l_2)\n\ndef getTransmittanceThroughMultiSpanCoveringCaseA2_2ForWestOrSouthFacingRoof(l_a, l_b, m, T_r21, T_r22, F_r21, F_r22, F_2, l_1, l_2):\n '''\n the equation number in the reference: (A18)\n the content of this function is same as getTransmittanceThroughMultiSpanCoveringCaseA2_2ForEastOrNorthFacingRoof, but made this just for clarifying the meaning of variables.\n '''\n return (l_a*F_2 + T_r21*F_r22*util.sigma(0, m-1, lambda s: (T_r21*T_r22)**s,0) + \\\n l_b*F_2*T_r21*T_r22*util.sigma(0, m - 2, lambda s: util.sigma(0, s, lambda n: (T_r21 * T_r22)**n, 0), 0))/(l_1+l_2)\n\n\ndef getSolarIrradianceDirectlhyTransmittedToPlants(alpha, beta, L_1, L_2, EPerpendicular):\n '''\n get direct radiation directly transmitted to the soil 
through roof surfaces\n :return:\n '''\n # the portion of the beam of incident light that travels through the first side (west or south side) of the roof\n # the following formula is from Soriano et al. (2004), but this seems to be wrong\n # l_1 = L_1 * math.cos(alpha - EPerpendicular)\n # the following formula was cited from the original source of this model: G. P. A. Bot, 1983 \"Greenhouse Climate: from physical processes to a dynamic model\", page 90\n # l_1 = L_1 * math.sin(alpha + EPerpendicular)\n # In addition, the difference of greenhouse direction was considered (l_1 is for west facing roof, and l_2 is for east facing roof. The solar radiation comes from the east in the morning)\n # if the sunlight comes from east (right side in the figure)\n if EPerpendicular < math.pi:\n l_1 = L_1 * math.sin(EPerpendicular - alpha)\n # the portion of the beam of incident light that travels through the second side (east or north side) of the roof.\n l_2 = L_2 * math.sin(EPerpendicular + beta)\n # if the sunlight comes from west (right side in the figure)\n else:\n l_1 = L_1 * math.sin(EPerpendicular + alpha)\n # the portion of the beam of incident light that travels through the second side (east or north side) of the roof.\n l_2 = L_2 * math.sin(EPerpendicular - beta)\n # print(\"l_1:{}\".format(l_1))\n # print(\"l_2:{}\".format(l_2))\n\n # get the incidence angle for each facing roof\n incidentAngleForEastOrNorthRoof = getIncidentAngleForEastOrNorthRoof(EPerpendicular, beta)\n incidentAngleForWestOrSouthRoof = getIncidentAngleForWestOrSouthRoof(EPerpendicular, alpha)\n # print(\"incidentAngleForEastOrNorthRoof :{}\".format(incidentAngleForEastOrNorthRoof))\n # print(\"incidentAngleForWestOrSouthRoof :{}\".format(incidentAngleForWestOrSouthRoof))\n\n\n # get the transmittance\n T_2, F_2 = fresnelEquation(incidentAngleForEastOrNorthRoof)\n T_1, F_1 = fresnelEquation(incidentAngleForWestOrSouthRoof)\n # print(\"T_1:{}, F_1:{}, T_2:{}, F_2:{}\".format(T_1, F_1, T_2, 
F_2))\n\n # the transmittance of solar irradiance directly transmitted to the soil through roof each direction of roof (surfaces east and west) (A10)\n T_12 = (T_1 * l_1 + T_2 * l_2) / (l_1 + l_2)\n # print(\"T_12:{}\".format(T_12))\n\n return l_1, l_2, T_1, F_1, T_2, F_2, T_12\n\n\ndef getIntegratedT_matFromBothRoofs(T_matForPerpendicularIrrEastOrNorthFacingRoof, T_matForPerpendicularIrrWestOrSouthFacingRoof):\n '''\n :return: integratedT_mat\n '''\n integratedT_mat = np.zeros(T_matForPerpendicularIrrEastOrNorthFacingRoof.shape[0])\n\n for i in range (0, integratedT_mat.shape[0]):\n if T_matForPerpendicularIrrEastOrNorthFacingRoof[i] == 0.0 and T_matForPerpendicularIrrWestOrSouthFacingRoof[i] == 0.0: continue\n elif T_matForPerpendicularIrrEastOrNorthFacingRoof[i] != 0.0 and T_matForPerpendicularIrrWestOrSouthFacingRoof[i] == 0.0:\n integratedT_mat[i] = T_matForPerpendicularIrrEastOrNorthFacingRoof[i]\n elif T_matForPerpendicularIrrEastOrNorthFacingRoof[i] == 0.0 and T_matForPerpendicularIrrWestOrSouthFacingRoof[i] != 0.0:\n integratedT_mat[i] = T_matForPerpendicularIrrWestOrSouthFacingRoof[i]\n # if both t_mat are not 0\n else:\n integratedT_mat[i] = (T_matForPerpendicularIrrEastOrNorthFacingRoof[i] + T_matForPerpendicularIrrWestOrSouthFacingRoof[i]) / 2.0\n\n return integratedT_mat\n\n" }, { "alpha_fraction": 0.6849796175956726, "alphanum_fraction": 0.7073588371276855, "avg_line_length": 41.435752868652344, "blob_id": "4d1e9f6e988cea0bf08758567bedbd7e932f9429", "content_id": "669db81a575e9b9f9ef6e73de3393eef18d5b535", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22797, "license_type": "permissive", "max_line_length": 266, "num_lines": 537, "path": "/GreenhouseEnergyBalance.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n#############command to print out all array data\n# np.set_printoptions(threshold=np.inf)\n# print 
(\"directSolarRadiationToOPVWestDirection:{}\".format(directSolarRadiationToOPVWestDirection))\n# np.set_printoptions(threshold=1000)\n#############\n\n# ####################################################################################################\n# # Stop execution here...\n# sys.exit()\n# # Move the above line to different parts of the assignment as you implement more of the functionality.\n# ####################################################################################################\n\n##########import package files##########\nimport datetime\nimport sys\nimport os\nimport numpy as np\nimport math\nimport Lettuce\nimport CropElectricityYeildSimulatorConstant as constant\nimport GreenhouseEnergyBalanceConstant as energyBalanceConstant\nimport Util\nfrom dateutil.relativedelta import *\nfrom time import strptime\n#######################################################\n\ndef getGHEnergyConsumptionByCoolingHeating(simulatorClass):\n\t'''\n\tall of the energy data calculated in this function is the average energy during each hour, not the total energy during eah hour.\n\n\treference:\n\t\t1\n\t\tUnknown author, Greenhouse Steady State Energy Balance Model\n\t\thttps://fac.ksu.edu.sa/sites/default/files/lmhdr_lthlth_wlrb.pdf\n\t\tor\n\t\thttp://ecoursesonline.iasri.res.in/mod/page/view.php?id=1635\n\t\taccessed on May 18 2018\n\t\t2\n\t\tIdso, S. B. 1981. A set of equations for full spectrum and 8-µm to 14-µm and 10.5-µm to\n\t\t12.5-µm thermal radiation from cloudless skies. Water Resources Research, 17: 295-\n\t\t304. 
https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/WR017i002p00295\n\n\t'''\n\n\t# get the necessary data\n\t# unit: W m-2\n\tdirectSolarIrradianceToPlants = simulatorClass.directSolarIrradianceToPlants\n\t# unit: W m-2\n\tdiffuseSolarIrradianceToPlants = simulatorClass.diffuseSolarIrradianceToPlants\n\t# unit: Celsius degree\n\thourlyAirTemperatureOutside = simulatorClass.getImportedHourlyAirTemperature()\n\t# unit: -\n\trelativeHumidityOutside = simulatorClass.hourlyRelativeHumidity\n\n\t# the amount of direct and diffuse shortwave solar radiation in the greenhouse [W m-2]\n\t################# get Q_sr start ################\n\t'''\n\tin the reference papre, the formula is,\n\tQ_sr = tau_c * S_l * I_sr * A_f\n\twhere tau_c is transmissivity of the greenhouse covering materials for solar radiation,\n\tS_l is shading level, and I_sr is the amount of solar radiation energy received per unit are and per unit time on a horizontal surface outside the greenhouse [W m2]. \n\tHowever, since tau_c * S_l * I_sr is already calculated as the sum of directSolarIrradianceToPlants and diffuseSolarIrradianceToPlants, I arranged the formula as below.\n\t'''\n\t# unit: W m-2\n\ttotalSolarIrradianceToPlants = directSolarIrradianceToPlants + diffuseSolarIrradianceToPlants\n\n\tQ_sr = totalSolarIrradianceToPlants\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"Q_sr:{}\".format(Q_sr))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\t################# get Q_sr end ################\n\n\n\n\n\t# latent heat energy flux due to plant transpiration [W m-2]\n\t################# get Q_lh (Q_e) start ################\n\t# '''\n\t# todo delete this comment if not necessary\n\t# # rate of transpiration [Kg_H2O sec-1 m-2]\n\t# # according to Ricardo Aroca et al. (2008). 
Mycorrhizal and non-mycorrhizal Lactuca sativa plants exhibit contrasting responses to exogenous ABA during drought stress and recovery\n\t# this value was around 6.6 [mg_H2O hour-1 cm-2]. This should be converted as below.\n\t# '''\n\t# ET = 6.6 / 1000.0 / (constant.secondperMinute * constant.minuteperHour) * 10000.0\n\n\tQ_lh = getLatentHeatTransferByTranspiration(simulatorClass, totalSolarIrradianceToPlants)\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"Q_lh:{}\".format(Q_lh))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\t################# get Q_lh (Q_e) end ################\n\n\t# sensible heat from conduction and convection through the greenhouse covering material [W m-2]\n\t################# get Q_sh (Q_cd in reference 1) start ################\n\t# # the area of greenhouse covers [m2]\n\t# A_c = constant.greenhouseTotalRoofArea\n\n\t# inside (set point) air temperature [Celsius degree]\n\tT_iC = Lettuce.getGreenhouseTemperatureEachHour(simulatorClass)\n\t# inside air temperature [K]\n\tT_iK = T_iC + 273.0\n\n\t# outside air temperature [C]\n\tT_oC = hourlyAirTemperatureOutside\n\t# outside air temperature [K]\n\tT_oK = hourlyAirTemperatureOutside + 273.0\n\n\tQ_sh = energyBalanceConstant.U * (T_iC - T_oC)\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"Q_sh:{}\".format(Q_sh))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\t################# get Q_sh (Q_cd in reference 1) end ################\n\n\t# net thermal radiation through the greenhouse covers to the atmosphere [W m-2], the difference between the thermal radiation emitted from the surface and the thermal radiation gained from the atmosphere\n\t################# get Q_lw (Q_t in reference 1) start ################\n\t# ambient vapor pressure outside [Pa]\n\t# source: https://www.weather.gov/media/epz/wxcalc/vaporPressure.pdf\n\t_, e_a = 
getSturatedAndActualVaporPressure(T_oC, relativeHumidityOutside)\n\t# print(\"e_a.shape:{}\".format(e_a.shape))\n\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"e_a:{}\".format(e_a))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\n\t# apparent emissivity of the sky (Idso, 1981)\n\t# source: reference 2\n\tepsilon_sky = 0.7 - 5.95 * 10.0**(-7) * e_a * math.e**(1500.0 / T_oK)\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"epsilon_sky.shape:{}, epsilon_sky:{}\".format(epsilon_sky.shape, epsilon_sky))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\n\t# transmissivity of the shading shading curtain\n\ttau_os = constant.shadingTransmittanceRatio\n\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"T_o:{}\".format(T_o))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\t# the sky temperature (the Swinbank model (1963) ) [K]\n\tT_sky = 0.0552 * T_oK**1.5\n\t# print(\"T_sky.shape:{}\".format(T_sky.shape))\n\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"T_sky:{}\".format(T_sky))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\n\tQ_lw = energyBalanceConstant.delta * energyBalanceConstant.tau_tc * tau_os * \\\n\t\t\t\t(energyBalanceConstant.epsilon_i * T_iK**4.0 - epsilon_sky * T_sky**4.0)\n\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"Q_lw.shape:{}, Q_lw:{}\".format(Q_lw.shape, Q_lw))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\t################# get Q_lw (Q_t in reference 1) end ################\n\n\t# energy removed by ventilation air or added by heating[W m-2]\n\t################# get Q_v start ################\n\t# if positive, cooling. 
if negative, heating\n\tQ_v = getQ_v(simulatorClass, Q_sr, Q_lh, Q_sh, Q_lw)\n\t# Q_v = Q_sr - Q_lh - Q_sh - Q_lw\n\t# print(\"Q_v.shape:{}\".format(Q_v.shape))\n\t################# get Q_v end ################\n\n\n\t# set the data to the object. all units are W m-2\n\tsimulatorClass.Q_v[\"coolingOrHeatingEnergy W m-2\"] = Q_v\n\tsimulatorClass.Q_sr[\"solarIrradianceToPlants W m-2\"] = Q_sr\n\tsimulatorClass.Q_lh[\"latentHeatByTranspiration W m-2\"] = Q_lh\n\tsimulatorClass.Q_sh[\"sensibleHeatFromConductionAndConvection W m-2\"] = Q_sh\n\tsimulatorClass.Q_lw[\"longWaveRadiation W m-2\"] = Q_lw\n\t# print(\"Q_sr.shape:{}\".format(Q_sr.shape))\n\t# print(\"Q_lh.shape:{}\".format(Q_lh.shape))\n\t# print(\"Q_sh.shape:{}\".format(Q_sh.shape))\n\t# print(\"Q_lw.shape:{}\".format(Q_lw.shape))\n\n\ndef getLatentHeatTransferByTranspiration(simulatorClass, totalSolarIrradianceToPlants):\n\t'''\n\treference:\n\t1\n\tPollet, S. and Bleyaert, P. 2000.\n\tAPPLICATION OF THE PENMAN-MONTEITH MODEL TO CALCULATE THE EVAPOTRANSPIRATION OF HEAD LETTUCE (Lactuca sativa L. var.capitata) IN GLASSHOUSE CONDITIONS\n\thttps://www.actahort.org/books/519/519_15.htm\n\t2\n\tAndriolo, J.L., da Luz, G.L., Witter, M.H., Godoi, R.S., Barros, G.T., Bortolotto, O.C.\n\t(2005). Growth and yield of lettuce plant under salinity. Horticulture Brasileira,\n\t23(4), 931-934.\n\n\t'''\n\n\t# inside (set point) air temperature [Celsius degree]\n\tT_iC = Lettuce.getGreenhouseTemperatureEachHour(simulatorClass)\n\n\thourlyDayOrNightFlag = simulatorClass.hourlyDayOrNightFlag\n\trelativeHumidityInGH = np.array([constant.setPointHumidityDayTime if i == constant.daytime else constant.setPointHumidityNightTime for i in hourlyDayOrNightFlag])\n\n\t# leaf temperature [Celsius degree]. It was assumed that the difference between the leaf temperature and the air temperature was always 2. 
This is just an assumption of unofficial experiment at Kacira Lab at CEAC in University of Arizona\n\tT_l = T_iC + 2.0\n\t# dimention of leaf [m]. This is just an assumption of unofficial experiment at Kacira Lab at CEAC in University of Arizona\n\td = 0.14\n\n\t# arerodynamic resistance of the leaf [s m-1]\n\t# source: reference No 1, Pollet et al. 2000\n\tr_a = 840.0 * (d/abs(T_l - T_iC))**(1.0/4.0)\n\t# print(\"r_a:{}\".format(r_a))\n\n\t# the leaf area index [-]\n\t# source: reference No 2, Andriolo et al. 2005\n\t# L = 4.3\n\t# source: Van Henten (1994)\n\tL = simulatorClass.LeafAreaIndex_J_VanHenten1994\n\t# print(\"leaf area index (L):{}\".format(L))\n\n\t# arerodynamic resistance of the crop [s m-1]\n\t# source: reference No 1, Pollet et al. 2000\n\tr_b = r_a /(2.0 * L)\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"r_b:{}\".format(r_b))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\n\t# short wave radiation [W m-2]\n\t# Need to figure out why dividing the solar irradiance inside by 0.844. see the differnce. -> This is probably because the suthor considered some light prevention by internal equipment. 
Considering the definition, it should be same as the soalr irradiance to plants\n\t# I_s = 0.844 * totalSolarIrradianceToPlants\n\tI_s = totalSolarIrradianceToPlants\n\n\t############### calc the vapor pressure deficit start ###############\n\t# saturated vapore pressure [Pa]\n\t# source: http://cronklab.wikidot.com/calculation-of-vapour-pressure-deficit\n\t# source: https://www.weather.gov/media/epz/wxcalc/vaporPressure.pdf\n\te_s, e_a = getSturatedAndActualVaporPressure(T_iC, relativeHumidityInGH)\n\t# vapor pressure deficit [Pa]\n\tD_Pa = e_s - e_a\n\t# unit conversion: Pa (vapor pressure deficit) -> g m-2 (humidity deficit)\n\t# source: http://mackenmov.sunnyday.jp/macken/plantf/term/housa/hosa.html\n\tVH = 217.0 * e_s / (T_iC + 273.15)\n\tD = (1.0 - relativeHumidityInGH) * VH\n\n\t############### calc the vapor pressure deficit end ###############\n\n\t# the stomatal resistance [sec m-1]\n\t# source: reference No 1, Pollet et al. 2000\n\tr_s = 164.0*(31.029+I_s)/(6.740+I_s) * (1 + 0.011*(D - 3.0)**2) * (1 + 0.016*(T_iC - 16.4)**2)\n\n\t# crop resistance [sec m-1]\n\t# source: reference No 1, Pollet et al. 
2000\n\tr_c = r_s / L\n\n\t################## calc psychrometric constant start ##################\n\t# print(\"energyBalanceConstant.c_p / (energyBalanceConstant.epsilon * energyBalanceConstant.lambda_:{}\".format(energyBalanceConstant.c_p / (energyBalanceConstant.epsilon * energyBalanceConstant.lambda_)))\n\t# print(\"energyBalanceConstant.P:{}\".format(energyBalanceConstant.P))\n\t# psychrometric constant\n\t# source: http://www.fao.org/docrep/X0490E/x0490e07.htm\n\tgamma = energyBalanceConstant.c_p * energyBalanceConstant.P / (energyBalanceConstant.epsilon * energyBalanceConstant.lambda_)\n\t# gamma = 0.665 * 10**(-3) * energyBalanceConstant.P\n\t# print(\"gamma:{}\".format(gamma))\n\n\t# gamma_star = gamma * (1 + r_c / r_b)\n\tgamma_star = gamma * (1 + r_c / r_b)\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"gamma_star:{}\".format(gamma_star))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\n\t################## calc psychrometric constant end ##################\n\n\t# slope of saturation vapore pressure - temperature curve [kPa C-1]\n\t# source: http://www.fao.org/docrep/X0490E/x0490e0k.htm\n\t# source: http://edis.ifas.ufl.edu/pdffiles/ae/ae45900.pdf\n\ts = 4098.0 * 610.8 * math.e**((17.27 * T_iC)/(T_iC + 273.3)) / ((T_iC + 273.3)**2.0)\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"s:{}\".format(s))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\n\t# net all wave radiation above the crop surface == above the canopy [W m-2]\n\t# R_n = totalSolarIrradianceToPlants\n\t# source: (Stanghellini, 1987)\n\tR_n = 0.86 * (1.0 - np.exp(-0.7 * L)) * I_s\n\n\t# The Penman-Monteith equation\n\tQ_lh = s * (R_n - energyBalanceConstant.F) / (s + gamma_star) + (energyBalanceConstant.rho * energyBalanceConstant.C_p * D / r_b) / (s + gamma_star)\n\t# when L (leaf area index is 0.0 Q_lh (r_b and r_c) beomes Nan. 
To avoid it, Nan is converted into 0.0)\n\tQ_lh = np.nan_to_num(Q_lh)\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"Q_lh:{}\".format(Q_lh))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\n\t# simulatorClass.r_a = r_a\n\t# print(\"r_a.shape:{}\".format(r_a.shape))\n\t# simulatorClass.L = L\n\t# print(L.shape)\n\t# simulatorClass.r_b = r_b\n\t# print(r_b.shape)\n\t# simulatorClass.e_a = e_a\n\t# print(e_a.shape)\n\t# simulatorClass.e_s = e_s\n\t# print(e_s.shape)\n\t# simulatorClass.r_s = r_s\n\t# print(r_s.shape)\n\t# simulatorClass.r_c = r_c\n\t# print(r_c.shape)\n\t# # simulatorClass.gamma = gamma\n\t# # print(gamma.shape)\n\t# simulatorClass.gamma_star = gamma_star\n\t# print(gamma_star.shape)\n\t# simulatorClass.s = s\n\t# print(s.shape)\n\t# simulatorClass.R_n = R_n\n\t# print(R_n.shape)\n\n\treturn Q_lh\n\n\ndef getSturatedAndActualVaporPressure(actualT, relativeHumidity):\n\te_s = 610.7 * 10.0**((7.5 * actualT)/(237.3+actualT))\n\te_a = e_s * relativeHumidity\n\treturn e_s, e_a\n\n\ndef getQ_v(simulatorClass, Q_sr, Q_lh, Q_sh, Q_lw):\n\t'''\n\tconsider the greenhouse size (floor area, roofa are, wall area), calc Q_v\n\t'''\n\n\t# unit: W\n\tQ_srW = Q_sr * constant.greenhouseFloorArea\n\t# unit: W\n\tQ_lhW = Q_lh * constant.greenhouseCultivationFloorArea\n\t# unit: W\n\tQ_shW = Q_sh * (constant.greenhouseTotalRoofArea + constant.greenhouseSideWallArea)\n\t# unit: W\n\t# it was assumed the greenhouse ceiling area (not the roof area because it would be strange that we get more long wave radiation as the angle of the roof increases) was same as the floor area.\n\tQ_lwW = Q_lw * constant.greenhouseFloorArea\n\n\t# unit: W\n\t# In the default definition, when Q_lhW, Q_shW, and Q_lwW are positive, heat energy gets out of the greenhouse. 
Thus, the unit was converted into negative value\n\tQ_vW = Q_srW - (Q_lhW + Q_shW + Q_lwW)\n\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"simulatorClass.shootFreshMassList:{}\".format(simulatorClass.shootFreshMassList))\n\t# print(\"simulatorClass.summerPeriodFlagArray:{}\".format(simulatorClass.summerPeriodFlagArray))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\n\t# if it is the summer period or the preparation day for the next cultivation (the fresh mass is zero), let Q_vw zero.\n\tQ_vW = np.array([0.0 if simulatorClass.shootFreshMassList[i] == 0.0 or simulatorClass.summerPeriodFlagArray[i] == 1.0 else Q_vW[i] for i in range(Q_vW.shape[0])])\n\n\tsimulatorClass.Q_vW[\"coolingOrHeatingEnergy W\"] = Q_vW\n\tsimulatorClass.Q_srW[\"solarIrradianceToPlants W\"] = Q_srW\n\tsimulatorClass.Q_lhW[\"sensibleHeatFromConductionAndConvection W\"] = Q_lhW\n\tsimulatorClass.Q_shW[\"latentHeatByTranspiration W\"] = Q_shW\n\tsimulatorClass.Q_lwW[\"longWaveRadiation W\"] = Q_lwW\n\n\t# unit: W m-2\n\treturn Q_vW / constant.greenhouseFloorArea\n\ndef getGHHeatingEnergyCostForPlants(requiredHeatingEnergyForPlants, simulatorClass):\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"requiredHeatingEnergyForPlants:{}\".format(requiredHeatingEnergyForPlants))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\n\t# unit: W\n\trequiredHeatingEnergyConsumptionForPlants = {\"W\": requiredHeatingEnergyForPlants / constant.heatingEquipmentEfficiency}\n\t# unit conversion: W (= J sec-1) -> MJ\n\trequiredHeatingEnergyConsumptionForPlants[\"MJ\"] = requiredHeatingEnergyConsumptionForPlants[\"W\"] * constant.secondperMinute * constant.minuteperHour / 1000000.0\n\t# unit conversion: MJ -> ft3\n\trequiredHeatingEnergyConsumptionForPlants[\"ft3\"] = requiredHeatingEnergyConsumptionForPlants[\"MJ\"] / constant.naturalGasSpecificEnergy[\"MJ ft-3\"]\n\t# 
print(\"requiredHeatingEnergyConsumptionForPlants:{}\".format(requiredHeatingEnergyConsumptionForPlants))\n\n\t# get the price of natural gas\n\tfileName = constant.ArizonaPriceOfNaturalGasDeliveredToResidentialConsumers\n\t# import the file removing the header\n\tfileData = Util.readData(fileName, relativePath=\"\", skip_header=5, d=',')\n\t# print (\"fileData.shape:{}\".format(fileData.shape))\n\n\t# reverse the file data becasue the data starts from the newest date. requiredHeatingEnergyForPlants starts from the old time.\n\tfileData = fileData[::-1]\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print (\"fileData:{}\".format(fileData))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\t\n\n\t# unit ft3 month-1\n\tmonthlyRequiredGHHeatingEnergyForPlants = getMonthlyRequiredGHHeatingEnergyForPlants(requiredHeatingEnergyConsumptionForPlants[\"ft3\"], simulatorClass)\n\t# set the data to the object\n\tsimulatorClass.monthlyRequiredGHHeatingEnergyForPlants = monthlyRequiredGHHeatingEnergyForPlants\n\n\tmonthlyHeatingCostForPlants = np.zeros(monthlyRequiredGHHeatingEnergyForPlants.shape[0])\n\n\tindex = 0\n\tfor fileDataLine in fileData:\n\n\t\t# split first column into month and year\n\t\tmonth = strptime(fileDataLine[0].split()[0],'%b').tm_mon\n\t\t# print(\"month:{}\".format(month))\n\t\tyear = fileDataLine[0].split()[1]\n\n\t\t# unit: USD thousand ft-3\n\t\tmonthlyNaturalGasPrice = float(fileDataLine[1])\n\t\t# print(\"monthlyNaturalGasPrice:{}\".format(monthlyNaturalGasPrice))\n\n\t\t# exclude the data out of the set start month and end month\n\t\tif datetime.date(int(year), int(month), 1) + relativedelta(months=1) <= Util.getStartDateDateType() or \\\n\t\t\t\t\t\tdatetime.date(int(year), int(month), 1) > Util.getEndDateDateType():\n\t\t# if datetime.date(int(year[i]), int(month[i]), 1) + relativedelta(months=1) <= Util.getStartDateDateType() or \\\n\t\t# \t\t\t\tdatetime.date(int(year[i]), 
int(month[i]), 1) > Util.getEndDateDateType():\n\t\t\t\tcontinue\n\n\t\tmonthlyHeatingCostForPlants[index] = (monthlyNaturalGasPrice / 1000.0) * monthlyRequiredGHHeatingEnergyForPlants[index]\n\t\t# print \"monthlyData:{}\".format(monthlyData)\n\t\tindex += 1\n\n\t# print(\"monthlyHeatingCostForPlants:{}\".format(monthlyHeatingCostForPlants))\n\n\ttotalHeatingCostForPlants = sum(monthlyHeatingCostForPlants)\n\n\treturn totalHeatingCostForPlants\n\n\ndef getMonthlyRequiredGHHeatingEnergyForPlants(requiredHeatingEnergyConsumptionForPlants, simulatorClass):\n\n\tmonth = simulatorClass.getMonth()\n\n\tnumOfMonths = Util.getSimulationMonthsInt()\n\tmonthlyRequiredGHHeatingEnergyForPlants = np.zeros(numOfMonths)\n\tmonthIndex = 0\n\t# insert the initial value\n\tmonthlyRequiredGHHeatingEnergyForPlants[0] = requiredHeatingEnergyConsumptionForPlants[0]\n\tfor i in range(1, month.shape[0]):\n\n\t\tmonthlyRequiredGHHeatingEnergyForPlants[monthIndex] += requiredHeatingEnergyConsumptionForPlants[i]\n\t\tif month[i - 1] != month[i]:\n\t\t\t# move onto the next month\n\t\t\tmonthIndex += 1\n\n\treturn monthlyRequiredGHHeatingEnergyForPlants\n\ndef getGHCoolingEnergyCostForPlants(requiredCoolingEnergyForPlants, simulatorClass):\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"requiredCoolingEnergyForPlants:{}\".format(requiredCoolingEnergyForPlants))\n\t# np.set_printoptions(threshold=1000)\n\t# ############\n\n\t# unit: W\n\trequiredCoolingEnergyConsumptionForPlants = {\"W\": requiredCoolingEnergyForPlants / constant.PadAndFanCOP}\n\n\t# unit conversion W -> kWh\n\trequiredCoolingEnergyConsumptionForPlants[\"kWh\"] = requiredCoolingEnergyConsumptionForPlants[\"W\"] / 1000.0\n\t# ############command to print out all array data\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"requiredCoolingEnergyConsumptionForPlants[\\\"kWh\\\"]:{}\".format(requiredCoolingEnergyConsumptionForPlants[\"kWh\"]))\n\t# 
np.set_printoptions(threshold=1000)\n\t# ############\n\n\t# get the imported electricity retail price\n\t# unit: cent kWh-1\n\tmonthlyElectricityRetailPrice = simulatorClass.monthlyElectricityRetailPrice\n\t# print(\"monthlyElectricityRetailPrice:{}\".format(monthlyElectricityRetailPrice))\n\n\t# unit kWh month-1\n\tmonthlyRequiredGHCoolingEnergyForPlants = getMonthlyRequiredGHCoolingEnergyForPlants(requiredCoolingEnergyConsumptionForPlants[\"kWh\"], simulatorClass)\n\t# set the data to the object\n\tsimulatorClass.monthlyRequiredGHCoolingEnergyForPlants = monthlyRequiredGHCoolingEnergyForPlants\n\n\t# unit: usd month-1\n\tmonthlyCoolingCostForPlants = np.zeros(monthlyRequiredGHCoolingEnergyForPlants.shape[0])\n\n\tindex = 0\n\tfor monthlyData in monthlyElectricityRetailPrice:\n\n\t\tyear = monthlyData[1]\n\t\tmonth = monthlyData[0]\n\t\t# exclude the data out of the set start month and end month\n\t\t# print(\"monthlyData:{}\".format(monthlyData))\n\t\tif datetime.date(int(year), int(month), 1) + relativedelta(months=1) <= Util.getStartDateDateType() or \\\n\t\t\t\t\t\tdatetime.date(int(year), int(month), 1) > Util.getEndDateDateType():\n\t\t\t\tcontinue\n\n\t\t# the electricity retail cost for cooling. 
unit: USD month-1\n\t\tmonthlyCoolingCostForPlants[index] = (monthlyData[2] / 100.0 ) * monthlyRequiredGHCoolingEnergyForPlants[index]\n\t\tindex += 1\n\n\t# print(\"monthlyCoolingCostForPlants:{}\".format(monthlyCoolingCostForPlants))\n\n\ttotalCoolingCostForPlants = sum(monthlyCoolingCostForPlants)\n\n\treturn totalCoolingCostForPlants\n\n# \telectricityCunsumptionByPad = getElectricityCunsumptionByPad(simulatorClass)\n# \telectricityCunsumptionByFan = getElectricityCunsumptionByFan(simulatorClass)\n# def getElectricityCunsumptionByPad(simulatorClass):\n# def getElectricityCunsumptionByFan(simulatorClass):\n\n\ndef getMonthlyRequiredGHCoolingEnergyForPlants(requiredCoolingEnergyConsumptionForPlants, simulatorClass):\n\n\tmonth = simulatorClass.getMonth()\n\n\tnumOfMonths = Util.getSimulationMonthsInt()\n\tmonthlyRequiredGHCoolingEnergyForPlants = np.zeros(numOfMonths)\n\tmonthIndex = 0\n\t# insert the initial value\n\tmonthlyRequiredGHCoolingEnergyForPlants[0] = requiredCoolingEnergyConsumptionForPlants[0]\n\tfor i in range(1, month.shape[0]):\n\n\t\tmonthlyRequiredGHCoolingEnergyForPlants[monthIndex] += requiredCoolingEnergyConsumptionForPlants[i]\n\t\tif month[i - 1] != month[i]:\n\t\t\t# move onto the next month\n\t\t\tmonthIndex += 1\n\n\treturn monthlyRequiredGHCoolingEnergyForPlants\n\n" }, { "alpha_fraction": 0.6403176188468933, "alphanum_fraction": 0.6524315476417542, "avg_line_length": 55.4349250793457, "blob_id": "739ad56c1fd3e7274ef33229f21d581088b93ad5", "content_id": "b70d02d1aa4b7eaa6c4a541944783930568de56f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34262, "license_type": "permissive", "max_line_length": 170, "num_lines": 607, "path": "/SimulatorMain.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#######################################################\n# author :Kensaku Okada [[email protected]]\n# 
create date : 19 Dec 2017\n# last edit date: 19 Dec 2017\n######################################################\n\n##########import package files##########\n# from scipy import stats\nimport datetime\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport CropElectricityYeildSimulator1 as Simulator1\n# import TwoDimLeastSquareAnalysis as TwoDimLS\nimport Util\nimport CropElectricityYeildSimulatorConstant as constant\n# import importlib\n\ncase = \"OneCaseSimulation\"\n# case = \"OptimizeOnlyOPVCoverageRatio\"\n# case = \"OptimizationByMINLPSolver\"\n\nif case == \"OneCaseSimulation\":\n print(\"run OneCaseSimulation\")\n\n # get the 2-D data for least square method\n simulatorClass = Simulator1.simulateCropElectricityYieldProfit1()\n # print \"profitVSOPVCoverageData:{}\".format(profitVSOPVCoverageData)\n\n print(\"OneCaseSimulation finished\")\n\n # ####################################################################################################\n # Stop execution here...\n sys.exit()\n # Move the above line to different parts of the assignment as you implement more of the functionality.\n # ####################################################################################################\n\n\n# # Least Square method\n# if case == \"LeastSquareMethod\":\n# print(\"run LeastSquareMethod\")\n#\n# # get the 2-D data for least square method\n# simulatorClass = Simulator1.simulateCropElectricityYieldProfit1()\n# # print \"profitVSOPVCoverageData:{}\".format(profitVSOPVCoverageData)\n#\n# # create the instance\n# twoDimLeastSquare = TwoDimLS.TwoDimLeastSquareAnalysis(profitVSOPVCoverageData)\n# # print \"twoDimLeastSquare.getXaxis():{}\".format(twoDimLeastSquare.getXAxis())\n#\n# x = twoDimLeastSquare.getXAxis()\n# y = twoDimLeastSquare.getYAxis()\n#\n# ########################### 10-fold CV (Cross Validation)\n# NumOfFold = 10\n# maxorder = 15\n# k10BestPolyOrder, min_log_mean_10cv_loss = 
twoDimLeastSquare.runCrossValidation(NumOfFold, maxorder, x, y,\n# randomize_data=True,\n# cv_loss_title='10-fold CV Loss',\n# filepath='./exportData/10fold-CV.png')\n#\n# # curve fitting (least square method) with given order w\n# w = twoDimLeastSquare.getApproximatedFittingCurve(k10BestPolyOrder)\n#\n# # This polyfit is just for generating non-optimal order figure. Commend out this except debugging or experiment\n# w = np.polyfit(x, y, 15)\n# w = w[::-1]\n#\n# # plot the best order curve with the data points\n# Util.plotDataAndModel(x, y, w, filepath='./exportData/bestPolynomialKFold.png')\n# print ('\\n======================')\n# print ('10-fold the best order = {0}. loss = {1}, func coefficients w = {2}'.format(k10BestPolyOrder, min_log_mean_10cv_loss, w))\n#\n# ########################### LOOCV (Leave One Out Cross Validation)\n# NumOfFold = twoDimLeastSquare.getXAxis().shape[0]\n# loocv_best_poly, min_log_mean_loocv_loss = twoDimLeastSquare.runCrossValidation(NumOfFold, maxorder, x, y,\n# randomize_data=True,\n# cv_loss_title='LOOCV Loss',\n# filepath='./exportData/LOOCV.png')\n#\n# # curve fitting (least square method) with given order w\n# wLOOCV = twoDimLeastSquare.getApproximatedFittingCurve(k10BestPolyOrder)\n# # This polyfit is just for generating non-optimal order figure. Commend out this except debugging or experiment\n# # wLOOCV = np.polyfit(x, y, 8)\n# # wLOOCV = wLOOCV[::-1]\n#\n# # plot the best order curve with the data points\n# Util.plotDataAndModel(x, y, wLOOCV, filepath='./exportData/bestPolynomialLOOCV.png')\n# print ('\\n======================')\n# print ('\\n(LOOCV) the best order = {0}. 
loss = {1}, func coefficients w = {2}'.format(loocv_best_poly, min_log_mean_loocv_loss, w))\n\nelif case == \"OptimizeOnlyOPVCoverageRatio\":\n # print (\"run Simulator1.simulateCropElectricityYieldProfit1\")\n # simulatorClass = Simulator1.simulateCropElectricityYieldProfit1()\n\n print (\"run Simulator1.optimizeOPVCoverageRatio\")\n\n ####################################################################################################\n ################ parameter preparation for optimization of OPV coverage ratio start ################\n ####################################################################################################\n # x-axis\n OPVCoverageDelta = 0.01\n # OPVCoverageDelta = 0.001\n # the array for x-axis (OPV area [m^2])\n OPVCoverages = np.array([i * 0.01 for i in range (0, int(1.0/OPVCoverageDelta)+1)])\n # print(\"OPVCoverages:{}\".format(OPVCoverages))\n\n # total DLI to plants\n totalDLIstoPlants = np.zeros(OPVCoverages.shape[0], dtype=float)\n\n # electricity yield for a given period: [kwh] for a given period\n totalkWhopvouts = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalkWhopvoutsPerRoofArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalkWhopvoutsPerGHFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n\n # monthly electricity sales per area with each OPV film coverage [USD/month]\n # monthlyElectricitySalesListEastRoof = np.zeros(OPVCoverages.shape[0], Util.getSimulationMonthsInt()), dtype=float)\n # monthlyElectricitySalesListWestRoof = np.zeros(OPVCoverages.shape[0], Util.getSimulationMonthsInt()), dtype=float)\n\n # electricity sales with each OPV film coverage [USD]\n totalElectricitySales = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalElectricitySalesPerOPVArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalElectricitySalesPerGHFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n # electricitySalesListperAreaEastRoof = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\n # 
electricitySalesListperAreaWestRoof = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\n\n # electricity cost with each OPV film coverage [USD]\n totalElectricityCosts = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalElectricityCostsPerOPVArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalElectricityCostsPerGHFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n\n # electricity profit with each OPV film coverage [USD]\n totalElectricityProfits = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalElectricityProfitsPerCultivationFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalElectricityProfitsPerGHFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n\n # plant yield for a given period. unit:\n # totalGrowthFreshWeightsPerHead = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalGrowthFreshWeightsPerCultivationFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalGrowthFreshWeightsPerGHFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n # unit harvested fresh mass weight for a whole given period with each OPV film coverage. 
unit: kg m-2\n totalHarvestedShootFreshMassPerCultivationFloorAreaKgPerDay = np.zeros(OPVCoverages.shape[0], dtype=float)\n\n # plant sales per square meter with each OPV film coverage: [USD/m^2] totalPlantSaleses =\n totalPlantSalesesPerCultivationFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n totalPlantSalesesPerGHFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n\n # plant cost per square meter with each OPV film coverage: [USD/m^2]\n totalPlantCostsPerGHFloorArea = np.zeros(OPVCoverages.shape[0], dtype = float)\n\n # plant profit per square meter with each OPV film coverage: [USD/m^2]\n totalPlantProfitsPerGHFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n # plant profit with each OPV film coverage: [USD/m^2]\n totalPlantProfits = np.zeros(OPVCoverages.shape[0], dtype=float)\n\n # economicProfit summing the electricity and plant profit [USD]\n totalEconomicProfits = np.zeros(OPVCoverages.shape[0], dtype=float)\n # economicProfit summing the electricity and plant profit per area [USD m-2]\n economicProfitPerGHFloorArea = np.zeros(OPVCoverages.shape[0], dtype=float)\n\n ##################################################################################################\n ################ parameter preparation for optimization of OPV coverage ratio end ################\n ##################################################################################################\n\n for i in range(0, OPVCoverages.shape[0]):\n #set OPV coverage ratio [-]\n constant.OPVAreaCoverageRatio = OPVCoverages[i]\n # constantFilePath = os.path.dirname(__file__).replace('/', os.sep) + '\\\\' + 'CropElectricityYeildSimulatorConstant.py'\n # os.execv(constantFilePath, [os.path.abspath(constantFilePath)])\n # reload(constant)\n\n # change the other relevant parameters\n constant.OPVArea = OPVCoverages[i] * constant.greenhouseTotalRoofArea\n constant.OPVAreaFacingEastOrNorthfacingRoof = OPVCoverages[i] * (constant.greenhouseRoofTotalAreaEastOrNorth / 
constant.greenhouseTotalRoofArea)\n constant.OPVAreaFacingWestOrSouthfacingRoof = OPVCoverages[i] * (constant.greenhouseRoofTotalAreaWestOrSouth / constant.greenhouseTotalRoofArea)\n print(\"i:{}, constant.OPVArea:{}\".format(i, constant.OPVArea))\n\n # run the simulation\n simulatorClass = Simulator1.simulateCropElectricityYieldProfit1()\n\n # total DLI to plant during the whole simulation days with each OPV coverage ratio\n totalDLIstoPlants[i] = sum(simulatorClass.totalDLItoPlants)\n\n # store the data from the simulator\n # unit: kwh\n totalkWhopvouts[i] = sum(simulatorClass.totalkWhopvoutPerday)\n # unit: kwh/m^2\n # print(\"totalkWhopvoutsPerRoofArea = sum(simulatorClass.totalkWhopvoutPerAreaPerday):{}\".format(sum(simulatorClass.totalkWhopvoutPerAreaPerday)))\n totalkWhopvoutsPerRoofArea[i] = sum(simulatorClass.totalkWhopvoutPerAreaPerday)\n totalkWhopvoutsPerGHFloorArea[i] = totalkWhopvouts[i] / constant.greenhouseFloorArea\n\n # print(\"simulatorClass.totalElectricitySales:{}\".format(simulatorClass.totalElectricitySales))\n # unit:: USD\n totalElectricitySales[i] = simulatorClass.totalElectricitySales\n # print(\"simulatorClass.totalElectricitySalesPerAreaPerMonth:{}\".format(simulatorClass.totalElectricitySalesPerAreaPerMonth))\n # unit: USD/m^2\n totalElectricitySalesPerOPVArea[i] = sum(simulatorClass.totalElectricitySalesPerAreaPerMonth)\n totalElectricitySalesPerGHFloorArea[i] = totalElectricitySales[i] / constant.greenhouseFloorArea\n # unit: USD\n # print(\"simulatorClass.totalOPVCostUSDForDepreciation:{}\".format(simulatorClass.totalOPVCostUSDForDepreciation))\n totalElectricityCosts[i] = simulatorClass.totalOPVCostUSDForDepreciation\n # unit: USD/m^2\n # print(\"simulatorClass.getOPVCostUSDForDepreciationPerOPVArea:{}\".format(simulatorClass.getOPVCostUSDForDepreciationPerOPVArea()))\n totalElectricityCostsPerOPVArea[i] = simulatorClass.getOPVCostUSDForDepreciationPerOPVArea()\n totalElectricityCostsPerGHFloorArea[i] = totalElectricityCosts[i] / 
constant.greenhouseFloorArea\n\n # electricity profits\n totalElectricityProfits[i] = totalElectricitySales[i] - totalElectricityCosts[i]\n # electricity profits per greenhouse floor. unit: USD/m^2\n totalElectricityProfitsPerGHFloorArea[i] = totalPlantSalesesPerGHFloorArea[i] - totalElectricityCostsPerGHFloorArea[i]\n\n # plant yield for a given period. unit:kg\n # totalGrowthFreshWeightsPerHead[i] =\n totalGrowthFreshWeightsPerCultivationFloorArea[i] = sum(simulatorClass.shootFreshMassPerAreaKgPerDay)\n totalGrowthFreshWeightsPerGHFloorArea[i] = totalGrowthFreshWeightsPerCultivationFloorArea[i] * constant.greenhouseCultivationFloorArea / constant.greenhouseFloorArea\n\n # unit harvested fresh mass weight for a whole given period with each OPV film coverage. unit: kg m-2\n # totalHarvestedShootFreshMassPerAreaKgPerHead[i] =\n totalHarvestedShootFreshMassPerCultivationFloorAreaKgPerDay[i] = sum(simulatorClass.harvestedShootFreshMassPerAreaKgPerDay)\n\n\n # plant sales per square meter with each OPV film coverage: [USD/m^2]\n # print(\"simulatorClass.totalPlantSalesperSquareMeter:{}\".format(simulatorClass.totalPlantSalesperSquareMeter))\n totalPlantSalesesPerCultivationFloorArea[i] = simulatorClass.totalPlantSalesperSquareMeter\n # print(\"simulatorClass.totalPlantSalesPerGHFloorArea:{}\".format(simulatorClass.totalPlantSalesPerGHFloorArea))\n totalPlantSalesesPerGHFloorArea[i] = simulatorClass.totalPlantSalesPerGHFloorArea\n\n # plant cost per square meter with each OPV film coverage: [USD/m^2]\n # print(\"simulatorClass.totalPlantProductionCostPerGHFloorArea:{}\".format(simulatorClass.totalPlantProductionCostPerGHFloorArea))\n totalPlantCostsPerGHFloorArea[i] = simulatorClass.totalPlantProductionCostPerGHFloorArea\n\n # plant profit per square meter with each OPV film coverage: [USD/m^2]\n totalPlantProfitsPerGHFloorArea[i] = totalPlantSalesesPerGHFloorArea[i] - totalPlantCostsPerGHFloorArea[i]\n totalPlantProfits[i] = totalPlantProfitsPerGHFloorArea[i] * 
constant.greenhouseFloorArea\n\n # plant profit with each OPV film coverage: [USD/m^2]\n totalEconomicProfits[i] = totalElectricityProfits[i] + totalPlantProfitsPerGHFloorArea[i] * constant.greenhouseFloorArea\n\n economicProfitPerGHFloorArea[i] = simulatorClass.economicProfitPerGHFloorArea\n\n ######################################################\n ##### display the optimization results start #########\n ######################################################\n\n # print \"plantCostperSquareMeter:{}\".format(plantCostperSquareMeter)\n # print \"unitDailyHarvestedFreshWeightList:{}\".format(unitDailyHarvestedFreshWeightList)\n # print \"plantSalesperSquareMeterList:{}\".format(plantSalesperSquareMeterList)\n\n ################# plot the electricity yield with different OPV coverage for given period #################\n title = \"total electricity sales per GH floor area vs OPV film\"\n xAxisLabel = \"OPV Coverage Ratio [-]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n yAxisLabel = \"electricity sales per GH floor area[USD/m^2]\"\n Util.plotData(OPVCoverages, totalElectricitySalesPerGHFloorArea, title, xAxisLabel, yAxisLabel)\n Util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n ######################################################################################## #############\n\n ################# plot the electricity yield with different OPV coverage for given period #################\n title = \"harvested plant weights vs OPV film\"\n xAxisLabel = \"OPV Coverage Ratio [-]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n yAxisLabel = \"harvested plant weight [kg]\"\n Util.plotData(OPVCoverages, totalHarvestedShootFreshMassPerCultivationFloorAreaKgPerDay, title, xAxisLabel, yAxisLabel)\n Util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n 
######################################################################################## #############\n\n ################# plot the electricity yield with different OPV coverage for given period #################\n title = \"DLI to plants vs OPV film\"\n xAxisLabel = \"OPV Coverage Ratio [-]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n yAxisLabel = \"DLI to plants [mol/m^2/day]\"\n Util.plotData(OPVCoverages, totalDLIstoPlants, title, xAxisLabel, yAxisLabel)\n Util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n ######################################################################################## #############\n\n ################## plot various sales and cost\n plotDataSet = np.array([totalPlantSalesesPerGHFloorArea, totalPlantCostsPerGHFloorArea, totalElectricitySalesPerGHFloorArea, totalElectricityCostsPerGHFloorArea])\n labelList = np.array([\"totalPlantSalesesPerGHFloorArea\", \"totalPlantCostsPerGHFloorArea\", \"totalElectricitySalesPerGHFloorArea\", \"totalElectricityCostsPerGHFloorArea\"])\n title = \"Various sales and cost\"\n xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n yAxisLabel = \"Prcie per GH Floor area [USD/m^2]\"\n Util.plotMultipleData(np.linspace(0, OPVCoverages.shape[0], OPVCoverages.shape[0]), plotDataSet, labelList, title, xAxisLabel, yAxisLabel)\n Util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n #######################################################################\n\n ################# plot the electricity yield with different OPV coverage for given period #################\n title = \"electricity yield with a given area vs OPV film\"\n xAxisLabel = \"OPV Coverage Ratio [-]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n yAxisLabel = \"electricity yield [kwh]\"\n Util.plotData(OPVCoverages, totalkWhopvouts, title, xAxisLabel, 
yAxisLabel)\n Util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n ######################################################################################## #############\n\n ################# plot the plant profit with different OPV coverage for given period #################\n title = \"plant profit with a given area vs OPV film\"\n xAxisLabel = \"OPV Coverage Ratio [-]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n yAxisLabel = \"plant profit for a given period [USD]\"\n Util.plotData(OPVCoverages, totalPlantProfitsPerGHFloorArea, title, xAxisLabel, yAxisLabel)\n Util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n ######################################################################################## #############\n\n ######################## plot by two y-axes ##########################\n title = \"plant yield per area and electricity yield vs OPV film\"\n xAxisLabel = \"OPV Coverage Ratio [-]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n yAxisLabel1 = \"plant fresh weight per cultivation floor for given period [kg/m^2]\"\n yAxisLabel2 = \"Electricity yield [kWh]\"\n yLabel1 = \"totalGrowthFreshWeights * greenhouseCultivationFloorArea\"\n yLabel2 = \"electricityYield[Kwh]\"\n Util.plotTwoDataMultipleYaxes(OPVCoverages, totalGrowthFreshWeightsPerCultivationFloorArea * constant.greenhouseCultivationFloorArea, \\\n totalkWhopvouts, title, xAxisLabel, yAxisLabel1, yAxisLabel2, yLabel1, yLabel2)\n Util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n #######################################################################\n\n ######################## plot by two y-axes ##########################\n title = \"plant yield per area and electricity yield per foot print vs OPV film\"\n xAxisLabel = \"OPV Coverage Ratio [-]: \" + constant.SimulationStartDate + \"-\" + 
constant.SimulationEndDate\n yAxisLabel1 = \"plant fresh weight per foot print for given period [kg/m^2]\"\n yAxisLabel2 = \"Electricity yield per foot print [kW*h/m^2]\"\n yLabel1 = \"totalGrowthFreshWeightsPerGHFloorArea\"\n yLabel2 = \"totalkWhopvoutsPerGHFloorArea\"\n Util.plotTwoDataMultipleYaxes(OPVCoverages, totalGrowthFreshWeightsPerGHFloorArea, \\\n totalkWhopvoutsPerGHFloorArea, title, xAxisLabel, yAxisLabel1, yAxisLabel2, yLabel1, yLabel2)\n Util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # #######################################################################\n\n # data export\n Util.exportCSVFile(\n np.array([OPVCoverages, totalkWhopvouts, totalkWhopvoutsPerGHFloorArea, totalGrowthFreshWeightsPerCultivationFloorArea * constant.greenhouseCultivationFloorArea,\\\n totalGrowthFreshWeightsPerGHFloorArea,totalHarvestedShootFreshMassPerCultivationFloorAreaKgPerDay ]).T, \\\n \"PlantAndElectricityYieldWholeAndPerFootPrint\")\n Util.exportCSVFile(\n np.array([OPVCoverages, totalElectricitySalesPerGHFloorArea, totalElectricityCostsPerGHFloorArea, totalPlantSalesesPerGHFloorArea, \\\n totalPlantCostsPerGHFloorArea,totalEconomicProfits, economicProfitPerGHFloorArea]).T, \\\n \"SalesAndCostPerFootPrint\")\n\n # plotting this graph is the coal of this simulation!!!\n ################# plot the economic profit with different OPV coverage for given period\n title = \"whole economic profit with a given area vs OPV film\"\n xAxisLabel = \"OPV Coverage Ratio [-]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n yAxisLabel = \"economic profit for a given period [USD]\"\n Util.plotData(OPVCoverages, totalEconomicProfits, title, xAxisLabel, yAxisLabel)\n Util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # #######################################################################\n\n\n ################# plot the economic profit with different OPV 
coverage for given period per GH area\n title = \"whole economic profit per GH area vs OPV film\"\n xAxisLabel = \"OPV Coverage Ratio [-]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n yAxisLabel = \"economic profit for a given period [USD]\"\n Util.plotData(OPVCoverages, economicProfitPerGHFloorArea, title, xAxisLabel, yAxisLabel)\n Util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # #######################################################################\n\n\n ####################################################################################################\n # Stop execution here...\n # sys.exit()\n # Move the above line to different parts of the assignment as you implement more of the functionality.\n ####################################################################################################\n\n\n###########################################################################################################\n########################### Mixed integer non-liner programming with constraints###########################\n###########################################################################################################\nelif case == \"OptimizationByMINLPSolver\":\n print (\"run SimulatorMINLPc.py\")\n\n ########################################################################\n #\n # This is an example call of MIDACO 5.0\n # -------------------------------------\n #\n # MIDACO solves Multi-Objective Mixed-Integer Non-Linear Problems:\n #\n #\n # Minimize F_1(X),... F_O(X) where X(1,...N-NI) is CONTINUOUS\n # and X(N-NI+1,...N) is DISCRETE\n #\n # subject to G_j(X) = 0 (j=1,...ME) equality constraints\n # G_j(X) >= 0 (j=ME+1,...M) inequality constraints\n #\n # and bounds XL <= X <= XU\n #\n #\n # The problem statement of this example is given below. You can use\n # this example as template to run your own problem. 
To do so: Replace\n # the objective functions 'F' (and in case the constraints 'G') given\n # here with your own problem and follow the below instruction steps.\n #\n ########################################################################\n ###################### OPTIMIZATION PROBLEM ########################\n ########################################################################\n def inputParameters(x):\n '''\n substitute the parameters to the simulator\n return: None\n '''\n\n # unit: [-]\n OPVAreaCoverageRatio = x[0]\n # set OPV coverage ratio [-]\n constant.OPVAreaCoverageRatio = OPVAreaCoverageRatio\n print(\"constant.OPVAreaCoverageRatio :{}\".format(constant.OPVAreaCoverageRatio ))\n\n # change the other relevant parameters\n constant.OPVArea = OPVAreaCoverageRatio * constant.greenhouseTotalRoofArea\n constant.OPVAreaFacingEastOrNorthfacingRoof = OPVAreaCoverageRatio * (constant.greenhouseRoofTotalAreaEastOrNorth / constant.greenhouseTotalRoofArea)\n constant.OPVAreaFacingWestOrSouthfacingRoof = OPVAreaCoverageRatio * (constant.greenhouseRoofTotalAreaWestOrSouth / constant.greenhouseTotalRoofArea)\n\n # unit: [days] <- be careful this!!!!!!!!!! 
The hour is calculated from the beginning of the year (Jan 1st 0 am)\n shadingCurtainDeployStartDateSpring = x[1]\n shadingCurtainDeployEndDateSpring = x[2]\n shadingCurtainDeployStartDateFall = x[3]\n shadingCurtainDeployEndDateFall = x[4]\n # simulationEndDate = Util.getEndDateDateType()\n year = Util.getStartDateDateType().year\n\n\n shadingCurtainDeployStartDateSpring = datetime.date(year=year, month=1, day=1) + datetime.timedelta(days=shadingCurtainDeployStartDateSpring)\n shadingCurtainDeployEndDateSpring = datetime.date(year=year, month=1, day=1) + datetime.timedelta(days=shadingCurtainDeployEndDateSpring)\n shadingCurtainDeployStartDateFall = datetime.date(year=year, month=1, day=1) + datetime.timedelta(days=shadingCurtainDeployStartDateFall)\n shadingCurtainDeployEndDateFall = datetime.date(year=year, month=1, day=1) + datetime.timedelta(days=shadingCurtainDeployEndDateFall)\n print(\"shadingCurtainDeployStartDateSpring:{}\".format(shadingCurtainDeployStartDateSpring))\n print(\"shadingCurtainDeployEndDateSpring:{}\".format(shadingCurtainDeployEndDateSpring))\n print(\"shadingCurtainDeployStartDateFall:{}\".format(shadingCurtainDeployStartDateFall))\n print(\"shadingCurtainDeployEndDateFall:{}\".format(shadingCurtainDeployEndDateFall))\n\n # set the shading curtain deployment periods\n constant.ShadingCurtainDeployStartMMSpring = shadingCurtainDeployStartDateSpring.month\n constant.ShadingCurtainDeployStartDDSpring = shadingCurtainDeployStartDateSpring.day\n constant.ShadingCurtainDeployEndMMSpring = shadingCurtainDeployEndDateSpring.month\n constant.ShadingCurtainDeployEndDDSpring = shadingCurtainDeployEndDateSpring.day\n constant.ShadingCurtainDeployStartMMFall = shadingCurtainDeployStartDateFall.month\n constant.ShadingCurtainDeployStartDDFall = shadingCurtainDeployStartDateFall.day\n constant.ShadingCurtainDeployEndMMFall = shadingCurtainDeployEndDateFall.month\n constant.ShadingCurtainDeployEndDDFall = shadingCurtainDeployEndDateFall.day\n 
print(\"constant.ShadingCurtainDeployStartMMFall:{}\".format(constant.ShadingCurtainDeployStartMMFall))\n print(\"constant.ShadingCurtainDeployStartDDFall:{}\".format(constant.ShadingCurtainDeployStartDDFall))\n print(\"constant.ShadingCurtainDeployEndMMFall:{}\".format(constant.ShadingCurtainDeployEndMMFall))\n print(\"constant.ShadingCurtainDeployEndDDFall:{}\".format(constant.ShadingCurtainDeployEndDDFall))\n\n return None\n\n def problem_function(x):\n '''\n :param x: decision variables. In this model, the variables are pv module coverage ratio,\n '''\n\n f = [0.0] * 1 # Initialize array for objectives F(X)\n g = [0.0] * 3 # Initialize array for constraints G(X)\n\n # input the parameters at each iteration\n inputParameters(x)\n\n # Objective functions F(X)\n # call the simulator\n simulatorClass = Simulator1.simulateCropElectricityYieldProfit1()\n # print(\"simulatorClass.economicProfitPerGHFloorArea:{}\".format(simulatorClass.economicProfitPerGHFloorArea))\n # since we need to maximize the objective function, the minus sign is added\n f[0] = -simulatorClass.economicProfitPerGHFloorArea\n\n # Equality constraints G(X) = 0 MUST COME FIRST in g[0:me-1]\n # No eauality constraints\n # Inequality constraints G(X) >= 0 MUST COME SECOND in g[me:m-1]\n shadingCurtainDeployStartDateSpring = x[1]\n shadingCurtainDeployEndDateSpring = x[2]\n shadingCurtainDeployStartDateFall = x[3]\n shadingCurtainDeployEndDateFall = x[4]\n g[0] = shadingCurtainDeployEndDateSpring - (shadingCurtainDeployStartDateSpring + 1)\n g[1] = shadingCurtainDeployStartDateFall - (shadingCurtainDeployEndDateSpring + 1)\n g[2] = shadingCurtainDeployEndDateFall - (shadingCurtainDeployStartDateFall + 1)\n\n # print(\"f:{}, g:{}\".format(f,g))\n\n return f, g\n\n ########################################################################\n ######################### MAIN PROGRAM #############################\n ########################################################################\n\n key = 
'Kensaku_Okada_(University_of_Arizona)_[ACADEMIC-SINGLE-USER]'\n\n problem = {} # Initialize dictionary containing problem specifications\n option = {} # Initialize dictionary containing MIDACO options\n\n problem['@'] = problem_function # Handle for problem function name\n\n ########################################################################\n ### Step 1: Problem definition #####################################\n ########################################################################\n\n # STEP 1.A: Problem dimensions\n ##############################\n problem['o'] = 1 # Number of objectives\n problem['n'] = 5 # Number of variables (in total)\n problem['ni'] = 4 # Number of integer variables (0 <= ni <= n)\n problem['m'] = 3 # Number of constraints (in total)\n problem['me'] = 0 # Number of equality constraints (0 <= me <= m)\n\n # STEP 1.B: Lower and upper bounds 'xl' & 'xu'\n ##############################################\n # get the simulation period by hour [hours]\n numOfSimulationDays = Util.getSimulationDaysInt()-1\n # print(\"numOfSimulationDays:{}\".format(numOfSimulationDays))\n problem['xl'] = [0.0, 1, 1, 1, 1]\n problem['xu'] = [1.0, numOfSimulationDays, numOfSimulationDays, numOfSimulationDays, numOfSimulationDays]\n\n # STEP 1.C: Starting point 'x'\n ##############################\n\n # start from the minimum values\n # problem['x'] = problem['xl'] # Here for example: starting point = lower bounds\n # # start from the middle values\n # problem['x'] = [(problem['xl'][i] + problem['xu'][i])/2.0 for i in range(0, len(problem['xl'])) ] # start from the middle\n # # start from the maximum values\n # problem['x'] = problem['xu'] # Here for example: starting point = lower bounds\n # print(\"problem['x']:{}\".format(problem['x']))\n # start from the minimum values for OPV coverage ratio and middle values for numOfSimulationDays, numOfSimulationDays, numOfSimulationDays, numOfSimulationDays\n # problem['x'] = [0.0, 183, 183, 183, 183]\n # start from the 
minimum values for numOfSimulationDays, numOfSimulationDays, numOfSimulationDays, numOfSimulationDays and middle value for OPV coverage ratio\n # problem['x'] = [0.5, 1, 1, 1, 1]\n # start from the max values for OPV coverage ratio and middle value for numOfSimulationDays, numOfSimulationDays, numOfSimulationDays, numOfSimulationDays\n problem['x'] = [0.0, 183, 183, 183, 183]\n\n\n ########################################################################\n ### Step 2: Choose stopping criteria and printing options ###########\n ########################################################################\n\n # STEP 2.A: Stopping criteria\n #############################\n # option['maxeval'] = 10000 # Maximum number of function evaluation (e.g. 1000000), 999999999 (-> disabled)\n option['maxeval'] = 500 # Maximum number of function evaluation (e.g. 1000000), 999999999 (-> disabled)\n option['maxtime'] = 60 * 60 * 24 # Maximum time limit in Seconds (e.g. 1 Day = 60*60*24)\n\n # STEP 2.B: Printing options\n ############################\n option['printeval'] = 10 # Print-Frequency for current best solution (e.g. 1000)\n option['save2file'] = 1 # Save SCREEN and SOLUTION to TXT-files [0=NO/1=YES]\n\n ########################################################################\n ### Step 3: Choose MIDACO parameters (FOR ADVANCED USERS) ###########\n ########################################################################\n\n # this parameter defines the accuracy for the constraint violation. It is considered an equality constraints (G(X)) to be feasible if |G(X)| <= PARAM(1).\n # An inequality is considered feasible, if G(X) ≥ -PARAM(1). 
If the user sets PARAM(1) = 0, MIDACO uses a default accuracy of 0.001.\n option['param1'] = 0.0 # ACCURACY\n # this defines the initial seed for MIDACO's internal pseudo random number generator.\n option['param2'] = 0.0 # SEED\n\n option['param3'] = 0.0 # FSTOP\n\n option['param4'] = 0.0 # ALGOSTOP\n\n # option['param5'] = 500.0005 # EVALSTOP\n option['param5'] = 0.0 # EVALSTOP\n\n # This parameter forces MIDACO to focus its search process around the current best solution and\n\t# thus makes it more greedy or local. The larger the FOCUS value, the closer MIDACO will focus its search on the current best solution.\n\t# option['param6'] = 0.0 # FOCUS\n # option['param6'] = 1.0 # FOCUS\n option['param6'] = 10.0 # FOCUS\n # option['param6'] = 20.0 # FOCUS\n\n option['param7'] = 0.0 # ANTS\n\n option['param8'] = 0.0 # KERNEL\n\n # This parameter specifies a user given oracle parameter to the penalty function within MIDACO.\n # This parameter is only relevant for constrained problems.\n option['param9'] = 0.0 # ORACLE\n\n option['param10'] = 0.0 # PARETOMAX\n\n option['param11'] = 0.0 # EPSILON\n\n option['param12'] = 0.0 # CHARACTER\n\n ########################################################################\n ### Step 4: Choose Parallelization Factor ############################\n ########################################################################\n\n # option['parallel'] = 10 # Serial: 0 or 1, Parallel: 2,3,4,5,6,7,8...\n option['parallel'] = 1 # Serial: 0 or 1, Parallel: 2,3,4,5,6,7,8...\n\n ########################################################################\n ############################ Run MIDACO ################################\n ########################################################################\n\n # add the directory to import MIDACO files\n sys.path.append(\"./MIDACO\")\n import midaco\n\n if __name__ == '__main__':\n solution = midaco.run(problem, option, key)\n\n # print solution['f']\n # print solution['g']\n # print solution['x']\n\n 
print(\"simulation finished.\")\n\n\n########################### Reinforcement learning (q learning)###########################\nelif case == \"ShadingCurtainReinforcementLearning\":\n # run simulateCropElectricityYieldProfit1 to set values to an object of CropElectricityYieldSimulator1\n cropElectricityYieldSimulator1, qLearningAgentsShadingCurtain = Simulator1.simulateCropElectricityYieldProfitRLShadingCurtain()\n\n\n" }, { "alpha_fraction": 0.6831513047218323, "alphanum_fraction": 0.689473032951355, "avg_line_length": 66.73442077636719, "blob_id": "12ab9714fcb76643e1f9ee06111d5df57db53a14", "content_id": "f5560232b85a448b6ad1b996b45be3421a96878a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24993, "license_type": "permissive", "max_line_length": 173, "num_lines": 369, "path": "/CropElectricityYieldProfitRLShadingCurtain.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "##########import package files##########\nfrom scipy import stats\nimport datetime\nimport sys\nimport os as os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util as util\nimport OPVFilm\n#import Lettuce\nimport CropElectricityYeildSimulatorDetail as simulatorDetail\nimport QlearningAgentShadingCurtain as QRLshadingCurtain\nimport SimulatorClass as SimulatorClass\n#######################################################\n\ndef simulateCropElectricityYieldProfitRLShadingCurtain():\n '''\n\n :return:\n '''\n print (\"start modeling: datetime.datetime.now():{}\".format(datetime.datetime.now()))\n\n # declare the class\n cropElectricityYieldSimulator1 = SimulatorClass.CropElectricityYieldSimulator1()\n\n ##########file import (TucsonHourlyOuterEinvironmentData) start##########\n fileName = \"20130101-20170101\" + \".csv\"\n year, \\\n month, \\\n day, \\\n hour, \\\n 
hourlyHorizontalDiffuseOuterSolarIrradiance, \\\n hourlyHorizontalTotalOuterSolarIrradiance, \\\n hourlyHorizontalDirectOuterSolarIrradiance, \\\n hourlyHorizontalTotalBeamMeterBodyTemperature, \\\n hourlyAirTemperature, cropElectricityYieldSimulator1 = util.getArraysFromData(fileName, cropElectricityYieldSimulator1)\n ##########file import (TucsonHourlyOuterEinvironmentData) end##########\n\n # set the values to the object\n cropElectricityYieldSimulator1.setYear(year)\n cropElectricityYieldSimulator1.setMonth(month)\n cropElectricityYieldSimulator1.setDay(day)\n cropElectricityYieldSimulator1.setHour(hour)\n\n ##########solar irradiance to OPV calculation start##########\n # calculate with real data\n # hourly average [W m^-2]\n directSolarRadiationToOPVEastDirection, directSolarRadiationToOPVWestDirection, diffuseSolarRadiationToOPV, albedoSolarRadiationToOPV = \\\n simulatorDetail.calcOPVmoduleSolarIrradianceGHRoof(year, month, day, hour, hourlyHorizontalDiffuseOuterSolarIrradiance, \\\n hourlyHorizontalDirectOuterSolarIrradiance, \"EastWestDirectionRoof\")\n # [W m^-2] per hour\n totalSolarRadiationToOPV = (directSolarRadiationToOPVEastDirection + directSolarRadiationToOPVWestDirection) / 2.0 + diffuseSolarRadiationToOPV + albedoSolarRadiationToOPV\n\n # # calculate without real data.\n # simulatedDirectSolarRadiationToOPVEastDirection, \\\n # simulatedDirectSolarRadiationToOPVWestDirection, \\\n # simulatedDiffuseSolarRadiationToOPV, \\\n # simulatedAlbedoSolarRadiationToOPV = simulatorDetail.calcOPVmoduleSolarIrradianceGHRoof(year, month, day, hour)\n # # [W m^-2] per hour\n # simulatedTotalSolarRadiationToOPV = simulatedDirectSolarRadiationToOPVEastDirection + simulatedDirectSolarRadiationToOPVWestDirection + \\\n # simulatedDiffuseSolarRadiationToOPV + simulatedAlbedoSolarRadiationToOPV\n # print \"directSolarRadiationToOPV:{}\".format(directSolarRadiationToOPV)\n # print \"diffuseSolarRadiationToOPV:{}\".format(diffuseSolarRadiationToOPV)\n # print 
\"groundReflectedSolarradiationToOPV:{}\".format(groundReflectedSolarradiationToOPV)\n\n # unit change: [W m^-2] -> [umol m^-2 s^-1] == PPFD\n directPPFDToOPVEastDirection = util.convertFromWattperSecSquareMeterToPPFD(directSolarRadiationToOPVEastDirection)\n directPPFDToOPVWestDirection = util.convertFromWattperSecSquareMeterToPPFD(directSolarRadiationToOPVWestDirection)\n diffusePPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(diffuseSolarRadiationToOPV)\n groundReflectedPPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(albedoSolarRadiationToOPV)\n totalPPFDToOPV = directPPFDToOPVEastDirection + directPPFDToOPVWestDirection + diffusePPFDToOPV + groundReflectedPPFDToOPV\n # print\"diffusePPFDToOPV.shape:{}\".format(diffusePPFDToOPV.shape)\n ########## set the matrix to the object ###########\n cropElectricityYieldSimulator1.setDirectPPFDToOPVEastDirection(directPPFDToOPVEastDirection)\n cropElectricityYieldSimulator1.setDirectPPFDToOPVWestDirection(directPPFDToOPVWestDirection)\n cropElectricityYieldSimulator1.setDiffusePPFDToOPV(diffusePPFDToOPV)\n cropElectricityYieldSimulator1.setGroundReflectedPPFDToOPV(groundReflectedPPFDToOPV)\n ###################################################\n\n # unit change: hourly [umol m^-2 s^-1] -> [mol m^-2 day^-1] == DLI :number of photons received in a square meter per day\n directDLIToOPVEastDirection = util.convertFromHourlyPPFDWholeDayToDLI(directPPFDToOPVEastDirection)\n directDLIToOPVWestDirection = util.convertFromHourlyPPFDWholeDayToDLI(directPPFDToOPVWestDirection)\n diffuseDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(diffusePPFDToOPV)\n groundReflectedDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(groundReflectedPPFDToOPV)\n totalDLIToOPV = directDLIToOPVEastDirection + directDLIToOPVWestDirection + diffuseDLIToOPV + groundReflectedDLIToOPV\n # print \"directDLIToOPVEastDirection:{}\".format(directDLIToOPVEastDirection)\n # print \"diffuseDLIToOPV.shape:{}\".format(diffuseDLIToOPV.shape)\n # print 
\"groundReflectedDLIToOPV:{}\".format(groundReflectedDLIToOPV)\n ########## set the matrix to the object ##########\n cropElectricityYieldSimulator1.setDirectDLIToOPVEastDirection(directDLIToOPVEastDirection)\n cropElectricityYieldSimulator1.setDirectDLIToOPVWestDirection(directDLIToOPVWestDirection)\n cropElectricityYieldSimulator1.setDiffuseDLIToOPV(diffuseDLIToOPV)\n cropElectricityYieldSimulator1.setGroundReflectedDLIToOPV(groundReflectedDLIToOPV)\n ##################################################\n\n # ################## plot the difference of real data and simulated data start######################\n # Title = \"difference of the model output with real data and with no data\"\n # xAxisLabel = \"time [hour]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"total Solar irradiance [W m^-2]\"\n # util.plotTwoData(np.linspace(0, simulationDaysInt * constant.hourperDay, simulationDaysInt * constant.hourperDay), \\\n # totalSolarRadiationToOPV, simulatedTotalSolarRadiationToOPV ,Title, xAxisLabel, yAxisLabel, \"with real data\", \"wth no data\")\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ################## plot the difference of real data and simulated data end######################\n\n # ################## plot the distribution of direct and diffuse PPFD start######################\n # Title = \"TOTAL outer PPFD to OPV\"\n # xAxisLabel = \"time [hour]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"PPFD [umol m^-2 s^-1]\"\n # util.plotData(np.linspace(0, simulationDaysInt * constant.hourperDay, simulationDaysInt * constant.hourperDay), \\\n # directPPFDToOPV + diffusePPFDToOPV + groundReflectedPPFDToOPV, Title, xAxisLabel, yAxisLabel)\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ################## plot the distribution of direct and diffuse PPFD 
end######################\n\n # ################## plot the distribution of direct and diffuse solar DLI start######################\n # Title = \"direct and diffuse outer DLI to OPV\"\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"DLI [mol m^-2 day^-1]\"\n # y1Label = \"(directDLIToOPVEastDirection+directDLIToOPVWestDirection)/2.0\"\n # y2Label = \"diffuseDLIToOPV\"\n # util.plotTwoData(np.linspace(0, simulationDaysInt, simulationDaysInt), (directDLIToOPVEastDirection+directDLIToOPVWestDirection)/2.0, diffuseDLIToOPV, Title,\n # xAxisLabel, yAxisLabel, y1Label, y2Label)\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ################## plot the distribution of direct and diffuse solar DLI end######################\n\n # ################## plot the distribution of various DLI to OPV film start######################\n # Title = \"various DLI to OPV film\"\n # plotDataSet = np.array([directDLIToOPVEastDirection, directDLIToOPVWestDirection, diffuseDLIToOPV,\n # groundReflectedDLIToOPV])\n # labelList = np.array([\"directDLIToOPVEastDirection\", \"directDLIToOPVWestDirection\", \"diffuseDLIToOPV\",\n # \"groundReflectedDLIToOPV\"])\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"DLI [mol m^-2 day^-1]\"\n # util.plotMultipleData(np.linspace(0, simulationDaysInt, simulationDaysInt), plotDataSet, labelList, Title,\n # xAxisLabel, yAxisLabel)\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ################## plot the distribution of various DLI to OPV film end######################\n\n ################## calculate the daily electricity yield per area start#####################\n # TODO maybe we need to consider the tilt of OPV and OPV material for the temperature of OPV film. 
right now, just use the measured temperature\n # get the daily electricity yield per area per day ([J/m^2] per day) based on the given light intensity ([Celsius],[W/m^2]).\n dailyJopvoutperArea = simulatorDetail.calcDailyElectricityYieldSimulationperArea(hourlyHorizontalTotalBeamMeterBodyTemperature, \\\n directSolarRadiationToOPVEastDirection + directSolarRadiationToOPVWestDirection,\n diffuseSolarRadiationToOPV,\n albedoSolarRadiationToOPV)\n\n # unit Exchange [J/m^2] -> [wh / m^2]\n dailyWhopvoutperArea = util.convertFromJouleToWattHour(dailyJopvoutperArea)\n # unit Exchange [Wh/ m^2] -> [kWh/m^2]\n dailykWhopvoutperArea = util.convertWhTokWh(dailyWhopvoutperArea)\n # ################### plot the electricity yield per area with given OPV film\n # title = \"electricity yield per area vs OPV film\"\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"Electricity yield per OPV area [kWh/m^2/day]\"\n # util.plotData(np.linspace(0, simulationDaysInt, simulationDaysInt), dailykWhopvoutperArea, title, xAxisLabel, yAxisLabel)\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n ################### calculate the daily electricity yield per area end#####################\n\n ################## calculate the daily electricity sales start#####################\n # convert the year of each hour to the year to each day\n yearOfeachDay = year[::24]\n # convert the month of each hour to the month to each day\n monthOfeachDay = month[::24]\n # get the monthly electricity sales per area [USD/month/m^2]\n monthlyElectricitySalesperArea = simulatorDetail.getMonthlyElectricitySalesperArea(dailyJopvoutperArea, yearOfeachDay, monthOfeachDay)\n # set the value to the object\n cropElectricityYieldSimulator1.setMonthlyElectricitySalesperArea(monthlyElectricitySalesperArea)\n # print 
\"cropElectricityYieldSimulator1.getMonthlyElectricitySalesperArea():{}\".format(cropElectricityYieldSimulator1.getMonthlyElectricitySalesperArea())\n ################## calculate the daily electricity sales end#####################\n\n ##################calculate the electricity cost per area start######################################\n if constant.ifConsiderOPVCost is True:\n initialOPVCostUSD = constant.OPVPricePerAreaUSD * OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio)\n # [USD]\n OPVCostUSDForDepreciation = initialOPVCostUSD * (util.getSimulationDaysInt() / constant.OPVDepreciationPeriodDays)\n # set the value to the object\n cropElectricityYieldSimulator1.setOPVCostUSDForDepreciationperArea(\n OPVCostUSDForDepreciation / OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio))\n else:\n # set the value to the object. the value is zero if not consider the purchase cost\n cropElectricityYieldSimulator1.setOPVCostUSDForDepreciationperArea(0.0)\n ##################calculate the electricity cost per area end######################################\n\n ################## calculate the daily plant yield start#####################\n # [String]\n plantGrowthModel = constant.TaylorExpantionWithFluctuatingDLI\n # cultivation days per harvest [days/harvest]\n cultivationDaysperHarvest = constant.cultivationDaysperHarvest\n # OPV coverage ratio [-]\n OPVCoverage = constant.OPVAreaCoverageRatio\n # boolean\n hasShadingCurtain = constant.hasShadingCurtain\n # PPFD [umol m^-2 s^-1]\n ShadingCurtainDeployPPFD = constant.ShadingCurtainDeployPPFD\n\n # calculate plant yield given an OPV coverage and model :daily [g/unit]. 
the shading curtain influence is considered in this function.\n shootFreshMassList, unitDailyFreshWeightIncrease, accumulatedUnitDailyFreshWeightIncrease, unitDailyHarvestedFreshWeight = \\\n simulatorDetail.calcPlantYieldSimulation(plantGrowthModel, cultivationDaysperHarvest, OPVCoverage, \\\n (directPPFDToOPVEastDirection + directPPFDToOPVWestDirection) / 2.0, diffusePPFDToOPV, groundReflectedPPFDToOPV,\n hasShadingCurtain, ShadingCurtainDeployPPFD, cropElectricityYieldSimulator1)\n # set the values to the instance\n cropElectricityYieldSimulator1.setShootFreshMassList(shootFreshMassList)\n cropElectricityYieldSimulator1.setUnitDailyFreshWeightIncrease(unitDailyFreshWeightIncrease)\n cropElectricityYieldSimulator1.setAccumulatedUnitDailyFreshWeightIncrease(accumulatedUnitDailyFreshWeightIncrease)\n cropElectricityYieldSimulator1.setUnitDailyHarvestedFreshWeight(unitDailyHarvestedFreshWeight)\n\n # the DLI to plants [mol/m^2/day]\n TotalDLItoPlants = simulatorDetail.getTotalDLIToPlants(OPVCoverage, (directPPFDToOPVEastDirection + directPPFDToOPVWestDirection) / 2.0, diffusePPFDToOPV,\n groundReflectedPPFDToOPV, \\\n hasShadingCurtain, ShadingCurtainDeployPPFD, cropElectricityYieldSimulator1)\n # set the value to the instance\n cropElectricityYieldSimulator1.setTotalDLItoPlantsBaselineShadingCuratin(TotalDLItoPlants)\n\n # print \"TotalDLItoPlants:{}\".format(TotalDLItoPlants)\n # print \"TotalDLItoPlants.shape:{}\".format(TotalDLItoPlants.shape)\n\n # ######################### plot a graph showing only shootFreshMassList per unit\n # title = \"plant yield per head vs time (OPV coverage \" + str(int(100 * OPVCoverage)) + \"%)\"\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"plant fresh weight[g/head]\"\n # util.plotData(np.linspace(0, util.getSimulationDaysInt(), util.getSimulationDaysInt()), shootFreshMassList, title, xAxisLabel, yAxisLabel)\n # util.saveFigure(title + \" \" + 
constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # #######################################################################\n\n # # unit conversion; get the plant yield per day per area: [g/unit] -> [g/m^2]\n # shootFreshMassListperArea = util.convertUnitShootFreshMassToShootFreshMassperArea(shootFreshMassList)\n # # unit conversion: [g/m^2] -> [kg/m^2]\n # shootFreshMassListperAreaKg = util.convertFromgramTokilogram(shootFreshMassListperArea)\n # ######################## plot a graph showing only shootFreshMassList per square meter\n # title = \"plant yield per area vs time (OPV coverage \" + str(int(100 * OPVCoverage)) + \"%)\"\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n # yAxisLabel = \"plant fresh weight[kg/m^2]\"\n # util.plotData(np.linspace(0, util.getSimulationDaysInt(), util.getSimulationDaysInt()), shootFreshMassListperAreaKg, title, xAxisLabel, yAxisLabel)\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\n # ######################################################################\n\n ################## plot various unit Plant Yield vs time\n plotDataSet = np.array([shootFreshMassList, unitDailyFreshWeightIncrease, accumulatedUnitDailyFreshWeightIncrease, unitDailyHarvestedFreshWeight])\n labelList = np.array([\"shootFreshMassList\", \"unitDailyFreshWeightIncrease\", \"accumulatedUnitDailyFreshWeightIncrease\", \"unitDailyHarvestedFreshWeight\"])\n title = \"Various unit Plant Yield vs time (OPV coverage \" + str(int(100 * OPVCoverage)) + \"%)\"\n xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\n yAxisLabel = \"Unit plant Fresh Weight [g/unit]\"\n util.plotMultipleData(np.linspace(0, util.getSimulationDaysInt(), util.getSimulationDaysInt()), plotDataSet, labelList, title, xAxisLabel, yAxisLabel)\n util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + 
constant.SimulationEndDate)\n ###########################################################################\n ################## calculate the daily plant yield end#####################\n\n ################## calculate the daily plant sales start#####################\n # unit conversion; get the daily plant yield per given period per area: [g/unit] -> [g/m^2]\n dailyHarvestedFreshWeightperArea = util.convertUnitShootFreshMassToShootFreshMassperArea(unitDailyHarvestedFreshWeight)\n # unit conversion: [g/m^2] -> [kg/m^2]1\n dailyHarvestedFreshWeightperAreaKg = util.convertFromgramTokilogram(dailyHarvestedFreshWeightperArea)\n # get the sales price of plant [USD/m^2]\n # if the average DLI during each harvest term is more than 17 mol/m^2/day, discount the price\n # TODO may need to improve the affect of Tipburn\n dailyPlantSalesperSquareMeter = simulatorDetail.getPlantSalesperSquareMeter(year, dailyHarvestedFreshWeightperAreaKg, TotalDLItoPlants)\n\n plantSalesperSquareMeter = sum(dailyPlantSalesperSquareMeter)\n # print \"dailyPlantSalesperSquareMeter.shape:{}\".format(dailyPlantSalesperSquareMeter.shape)\n\n print (\"(The baseline) plantSalesperSquareMeter [USD/m^2]:{}\".format(plantSalesperSquareMeter))\n ################## calculate the daily plant sales end#####################\n\n\n ################## calculate the daily plant cost start#####################\n # plant operation cost per square meter for given simulation period [USD/m^2]\n plantCostperSquareMeter = simulatorDetail.getPlantCostperSquareMeter(util.getSimulationDaysInt())\n ################## calculate the daily plant cost end#####################\n\n ################## calculate the plant profit start#######################\n ###### calculate the plant profit per square meter [USD/m^2]\n plantProfitperSquareMeter = plantSalesperSquareMeter - plantCostperSquareMeter\n # print \"plantProfitperSquareMeterList[i]:{}\".format(plantProfitperSquareMeterList[i])\n # print 
\"plantProfitperSquareMeterList[{}]:{}\".format(i, plantProfitperSquareMeterList[i])\n plantProfit = plantProfitperSquareMeter * constant.greenhouseCultivationFloorArea\n\n # print \"plantCostperSquareMeter:{}\".format(plantCostperSquareMeter)\n # print \"unitDailyHarvestedFreshWeightList:{}\".format(unitDailyHarvestedFreshWeightList)\n # print \"plantSalesperSquareMeterList:{}\".format(plantSalesperSquareMeterList)\n\n print (\"(The baseline) plantProfit by normal simulation [USD]:{}\".format(plantProfit))\n ################## calculate the plant profit end#######################\n\n #####################################################################################################\n ################## reinforcement learning plant simulation start#####################################\n #####################################################################################################\n\n ################## calculate the plant sales with RL shading curtain start##########################\n if constant.isShadingCurtainReinforcementLearning:\n\n # declare the instance for RL\n # qLearningAgentsShadingCurtain = QRLshadingCurtain.QLearningAgentShadingCurtain(cropElectricityYieldSimulator1, \\\n # numTraining=1500, numTesting = 1, epsilon=0.18, gamma=0.999, alpha=0.2e-6)\n qLearningAgentsShadingCurtain = QRLshadingCurtain.QLearningAgentShadingCurtain(cropElectricityYieldSimulator1, \\\n numTraining=1200, numTesting = 1, epsilon=0.18, gamma=0.99, alpha=0.2e-6)\n\n\n # set values necessary for RL training/testing\n # for dLIEachdayThroughInnerStructure on a certain day\n hourlyInnerLightIntensityPPFDThroughInnerStructure = simulatorClass.getHourlyInnerLightIntensityPPFDThroughInnerStructure()\n # set dLIThroughInnerStructure to the object\n dLIThroughInnerStructure = util.convertFromHourlyPPFDWholeDayToDLI(hourlyInnerLightIntensityPPFDThroughInnerStructure)\n qLearningAgentsShadingCurtain.setDLIThroughInnerStructure(dLIThroughInnerStructure)\n\n 
################################ Training #################################\n if constant.ifRunTraining:\n #training the approximate q value function. returns the wegiths of q value function\n qLearningAgentsShadingCurtain = simulatorDetail.trainWeightsRLShadingCurtainDayStep(hasShadingCurtain, qLearningAgentsShadingCurtain, simulatorClass)\n # print (\"qLearningAgentsShadingCurtain.weights:{}\".format(qLearningAgentsShadingCurtain.weights))\n\n ################################ Save the trained weight #################################\n # save the calculated weight by the training\n if constant.ifSaveCalculatedWeight:\n util.exportDictionaryAsCSVFile(qLearningAgentsShadingCurtain.weights, constant.fileNameQLearningTrainedWeight)\n\n # load the calculated weight by the training\n if constant.ifLoadWeight:\n qLearningAgentsShadingCurtain.weights = util.importDictionaryAsCSVFile(constant.fileNameQLearningTrainedWeight, relativePath=\"\")\n print (\"loaded qLearningAgentsShadingCurtain.weights:{}\".format(qLearningAgentsShadingCurtain.weights))\n\n ################################ Testing ##################################\n # with the trained q value function,\n plantSalesperSquareMeterRLShadingCurtainList = simulatorDetail.testWeightsRLShadingCurtainDayStep(hasShadingCurtain, \\\n qLearningAgentsShadingCurtain, simulatorClass)\n\n print (\"(RL) plantSalesperSquareMeterRLShadingCurtain [USD/m^2]:{}\".format(plantSalesperSquareMeterRLShadingCurtainList))\n\n ################## calculate the plant cost start#####################\n # plant operation cost per square meter for given simulation period [USD/m^2]\n # plantCostperSquareMeter = simulatorDetail.getPlantCostperSquareMeter(simulationDaysInt)\n ################## calculate the plant cost end#####################\n\n ################## calculate the plant economic profit start#######################\n ###### calculate the plant profit per square meter [USD/m^2]\n plantProfitperSquareMeterRLShadingCurtainList = 
plantSalesperSquareMeterRLShadingCurtainList - plantCostperSquareMeter\n # print \"plantProfitperSquareMeterList[i]:{}\".format(plantProfitperSquareMeterList[i])\n # print \"plantProfitperSquareMeterList[{}]:{}\".format(i, plantProfitperSquareMeterList[i])\n plantProfitRLShadingCurtainList = plantProfitperSquareMeterRLShadingCurtainList * constant.greenhouseCultivationFloorArea\n\n print (\"(RL) plantProfitRLShadingCurtainList [USD]:{}\".format(plantProfitRLShadingCurtainList))\n\n # set the result of profits\n qLearningAgentsShadingCurtain.plantProfitRLShadingCurtainList = plantProfitRLShadingCurtainList\n ################## calculate the plant economic profit end#######################\n\n else:\n print (\"reinforcement learning shading curtain waa not assumed. skip the simulation\")\n\n #####################################################################################################\n ################## reinforcement learning plant simulation end#####################################\n #####################################################################################################\n\n print (\"end modeling: datetime.datetime.now():{}\".format(datetime.datetime.now()))\n\n # print actions\n print (\"qLearningAgentsShadingCurtain.policies:{}\".format(qLearningAgentsShadingCurtain.policies))\n # print DLI to plants\n print (\"qLearningAgentsShadingCurtain.dLIEachDayToPlants:{}\".format(qLearningAgentsShadingCurtain.dLIEachDayToPlants))\n\n\n\n return simulatorClass, qLearningAgentsShadingCurtain\n\n # ####################################################################################################\n # Stop execution here...\n # sys.exit()\n # Move the above line to different parts of the assignment as you implement more of the functionality.\n # ####################################################################################################" }, { "alpha_fraction": 0.6185367107391357, "alphanum_fraction": 0.6705409288406372, "avg_line_length": 
59.31685256958008, "blob_id": "d0a92ba9137aef533d3a51fbe6592be69b73b914", "content_id": "113f6878245dc931babb95b560045eb28acbda15", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26844, "license_type": "permissive", "max_line_length": 201, "num_lines": 445, "path": "/ShadingCurtain.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n#############command to print out all array data\n# np.set_printoptions(threshold=np.inf)\n# print (\"directSolarRadiationToOPVWestDirection:{}\".format(directSolarRadiationToOPVWestDirection))\n# np.set_printoptions(threshold=1000)\n#############\n\n# ####################################################################################################\n# # Stop execution here...\n# sys.exit()\n# # Move the above line to different parts of the assignment as you implement more of the functionality.\n# ####################################################################################################\n\n##########import package files##########\nfrom scipy import stats\nimport datetime\nimport calendar\nimport sys\nimport os as os\nimport numpy as np\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util as util\nimport OPVFilm\n#import Lettuce\nimport CropElectricityYeildSimulatorDetail as simulatorDetail\nimport SimulatorClass\n#######################################################\n\n\n# def getHourlyShadingCurtainDeploymentPatternChangingEachMonthPrep():\n# \t'''\n# \tcalculate the shading curtain deployment start and end hour each month so that the average DLI becomes as optimal (constant.DLIforTipBurn) as possible\n# \tThe optimal hour is calculated by averaging the solar irradiance each month\n# \t'''\n#\n# \t# get the num of simulation days\n# \tsimulationDaysInt = util.getSimulationDaysInt()\n#\n# \t# declare the class and instance\n# \tsimulatorClass = 
SimulatorClass.SimulatorClass()\n#\n# \t##########file import (TucsonHourlyOuterEinvironmentData) start##########\n# \tfileName = constant.environmentData\n# \tyear, \\\n# \tmonth, \\\n# \tday, \\\n# \thour, \\\n# \thourlyHorizontalDiffuseOuterSolarIrradiance, \\\n# \thourlyHorizontalTotalOuterSolarIrradiance, \\\n# \thourlyHorizontalDirectOuterSolarIrradiance, \\\n# \thourlyHorizontalTotalBeamMeterBodyTemperature, \\\n# \thourlyAirTemperature = util.getArraysFromData(fileName, simulatorClass)\n# \t##########file import (TucsonHourlyOuterEinvironmentData) end##########\n#\n# \t# set the imported data\n# \tsimulatorClass.hourlyHorizontalDirectOuterSolarIrradiance = hourlyHorizontalDirectOuterSolarIrradiance\n# \tsimulatorClass.hourlyHorizontalDiffuseOuterSolarIrradiance = hourlyHorizontalDiffuseOuterSolarIrradiance\n# \tsimulatorClass.hourlyHorizontalTotalOuterSolarIrradiance = hourlyHorizontalTotalOuterSolarIrradiance\n# \tsimulatorClass.hourlyHorizontalTotalBeamMeterBodyTemperature = hourlyHorizontalTotalBeamMeterBodyTemperature\n# \tsimulatorClass.hourlyAirTemperature = hourlyAirTemperature\n#\n# \t# set new data which can be derived from the imported data\n# \tutil.deriveOtherArraysFromImportedData(simulatorClass)\n#\n# \t################################################################################\n# \t##########solar irradiance to OPV calculation with imported data start##########\n# \t################################################################################\n# \tif constant.ifUseOnlyRealData == True:\n#\n# \t\t# calculate with real data\n# \t\t# hourly average [W m^-2]\n# \t\tdirectSolarRadiationToOPVEastDirection, \\\n# \t\tdirectSolarRadiationToOPVWestDirection, \\\n# \t\tdiffuseSolarRadiationToOPV, \\\n# \t\talbedoSolarRadiationToOPV = simulatorDetail.calcOPVmoduleSolarIrradianceGHRoof(simulatorClass)\n#\n# \t\t# set the calculated data\n# \t\tsimulatorClass.setDirectSolarRadiationToOPVEastDirection(directSolarRadiationToOPVEastDirection)\n# 
\t\tsimulatorClass.setDirectSolarRadiationToOPVWestDirection(directSolarRadiationToOPVWestDirection)\n# \t\tsimulatorClass.setDiffuseSolarRadiationToOPV(diffuseSolarRadiationToOPV)\n# \t\tsimulatorClass.setAlbedoSolarRadiationToOPV(albedoSolarRadiationToOPV)\n#\n# \t\t# [W m^-2] per hour\n# \t\ttotalSolarRadiationToOPV = (simulatorClass.getDirectSolarRadiationToOPVEastDirection() + simulatorClass.getDirectSolarRadiationToOPVWestDirection()) / 2.0 \\\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t + simulatorClass.getDiffuseSolarRadiationToOPV() + simulatorClass.getAlbedoSolarRadiationToOPV()\n#\n# \t\t###################data export start##################\n# \t\tutil.exportCSVFile(np.array(\n# \t\t\t[year, month, day, hour, simulatorClass.getDirectSolarRadiationToOPVEastDirection(), simulatorClass.getDirectSolarRadiationToOPVWestDirection(), \\\n# \t\t\t simulatorClass.getDiffuseSolarRadiationToOPV(), simulatorClass.getAlbedoSolarRadiationToOPV(), totalSolarRadiationToOPV]).T,\n# \t\t\t\t\t\t\t\t\t\t\t \"hourlyMeasuredSolarRadiations\")\n# \t\t###################data export end##################\n#\n# \t\t###################data export start##################\n# \t\tutil.exportCSVFile(np.array([year, month, day, hour, hourlyHorizontalTotalOuterSolarIrradiance, totalSolarRadiationToOPV]).T,\n# \t\t\t\t\t\t\t\t\t\t\t \"hourlyMeasuredSolarRadiationToHorizontalAndTilted\")\n# \t\t###################data export end##################\n#\n# \t\t# unit change: [W m^-2] -> [umol m^-2 s^-1] == PPFD\n# \t\tdirectPPFDToOPVEastDirection = util.convertFromWattperSecSquareMeterToPPFD(directSolarRadiationToOPVEastDirection)\n# \t\tdirectPPFDToOPVWestDirection = util.convertFromWattperSecSquareMeterToPPFD(directSolarRadiationToOPVWestDirection)\n# \t\tdiffusePPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(diffuseSolarRadiationToOPV)\n# \t\tgroundReflectedPPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(albedoSolarRadiationToOPV)\n# \t\t# 
print\"diffusePPFDToOPV.shape:{}\".format(diffusePPFDToOPV.shape)\n#\n# \t\t# set the matrix to the object\n# \t\tsimulatorClass.setDirectPPFDToOPVEastDirection(directPPFDToOPVEastDirection)\n# \t\tsimulatorClass.setDirectPPFDToOPVWestDirection(directPPFDToOPVWestDirection)\n# \t\tsimulatorClass.setDiffusePPFDToOPV(diffusePPFDToOPV)\n# \t\tsimulatorClass.setGroundReflectedPPFDToOPV(groundReflectedPPFDToOPV)\n#\n# \t\t# unit change: hourly [umol m^-2 s^-1] -> [mol m^-2 day^-1] == daily light integral (DLI) :number of photons received in a square meter per day\n# \t\tdirectDLIToOPVEastDirection = util.convertFromHourlyPPFDWholeDayToDLI(directPPFDToOPVEastDirection)\n# \t\tdirectDLIToOPVWestDirection = util.convertFromHourlyPPFDWholeDayToDLI(directPPFDToOPVWestDirection)\n# \t\tdiffuseDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(diffusePPFDToOPV)\n# \t\tgroundReflectedDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(groundReflectedPPFDToOPV)\n# \t\ttotalDLIToOPV = (directDLIToOPVEastDirection + directDLIToOPVWestDirection) / 2.0 + diffuseDLIToOPV + groundReflectedDLIToOPV\n# \t\t# print \"directDLIToOPVEastDirection:{}\".format(directDLIToOPVEastDirection)\n# \t\t# print \"diffuseDLIToOPV.shape:{}\".format(diffuseDLIToOPV.shape)\n# \t\t# print \"groundReflectedDLIToOPV:{}\".format(groundReflectedDLIToOPV)\n#\n# \t\t# set the array to the object\n# \t\tsimulatorClass.setDirectDLIToOPVEastDirection(directDLIToOPVEastDirection)\n# \t\tsimulatorClass.setDirectDLIToOPVWestDirection(directDLIToOPVWestDirection)\n# \t\tsimulatorClass.setDiffuseDLIToOPV(diffuseDLIToOPV)\n# \t\tsimulatorClass.setGroundReflectedDLIToOPV(groundReflectedDLIToOPV)\n#\n# \t########################################################################################################################\n# \t################# calculate solar irradiance without real data (estimate solar irradiance) start #######################\n# 
\t########################################################################################################################\n# \telif constant.ifUseOnlyRealData == False:\n#\n# \t\t# activate the mode to use the formulas for estimation. This is used for branching the solar irradiance to PV module. See OPVFilm.py\n# \t\tsimulatorClass.setEstimateSolarRadiationMode(True)\n#\n# \t\t# calculate the solar radiation to the OPV film\n# \t\t# [W m^-2] per hour\n# \t\testimatedDirectSolarRadiationToOPVEastDirection, \\\n# \t\testimatedDirectSolarRadiationToOPVWestDirection, \\\n# \t\testimatedDiffuseSolarRadiationToOPV, \\\n# \t\testimatedAlbedoSolarRadiationToOPV = simulatorDetail.calcOPVmoduleSolarIrradianceGHRoof(simulatorClass)\n# \t\testimatedTotalSolarRadiationToOPV = (\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\testimatedDirectSolarRadiationToOPVEastDirection + estimatedDirectSolarRadiationToOPVWestDirection) / 2.0 + estimatedDiffuseSolarRadiationToOPV + estimatedAlbedoSolarRadiationToOPV\n#\n# \t\t# set the calc results\n# \t\t# [W m^-2] per hour\n# \t\tsimulatorClass.setDirectSolarRadiationToOPVEastDirection(estimatedDirectSolarRadiationToOPVEastDirection)\n# \t\tsimulatorClass.setDirectSolarRadiationToOPVWestDirection(estimatedDirectSolarRadiationToOPVWestDirection)\n# \t\tsimulatorClass.setDiffuseSolarRadiationToOPV(estimatedDiffuseSolarRadiationToOPV)\n# \t\tsimulatorClass.setAlbedoSolarRadiationToOPV(estimatedAlbedoSolarRadiationToOPV)\n#\n# \t\t# [estimated data] unit change:\n# \t\testimatedDirectPPFDToOPVEastDirection = util.convertFromWattperSecSquareMeterToPPFD(estimatedDirectSolarRadiationToOPVEastDirection)\n# \t\testimatedDirectPPFDToOPVWestDirection = util.convertFromWattperSecSquareMeterToPPFD(estimatedDirectSolarRadiationToOPVWestDirection)\n# \t\testimatedDiffusePPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(estimatedDiffuseSolarRadiationToOPV)\n# \t\testimatedGroundReflectedPPFDToOPV = 
util.convertFromWattperSecSquareMeterToPPFD(estimatedAlbedoSolarRadiationToOPV)\n# \t\t# print(\"estimatedDirectPPFDToOPVEastDirection:{}\".format(estimatedDirectPPFDToOPVEastDirection))\n# \t\t# print(\"estimatedDirectPPFDToOPVWestDirection:{}\".format(estimatedDirectPPFDToOPVWestDirection))\n#\n# \t\t# set the variables\n# \t\tsimulatorClass.setDirectPPFDToOPVEastDirection(estimatedDirectPPFDToOPVEastDirection)\n# \t\tsimulatorClass.setDirectPPFDToOPVWestDirection(estimatedDirectPPFDToOPVWestDirection)\n# \t\tsimulatorClass.setDiffusePPFDToOPV(estimatedDiffusePPFDToOPV)\n# \t\tsimulatorClass.setGroundReflectedPPFDToOPV(estimatedGroundReflectedPPFDToOPV)\n#\n# \t\t# [estimated data] unit change:\n# \t\testimatedDirectDLIToOPVEastDirection = util.convertFromHourlyPPFDWholeDayToDLI(estimatedDirectPPFDToOPVEastDirection)\n# \t\testimatedDirectDLIToOPVWestDirection = util.convertFromHourlyPPFDWholeDayToDLI(estimatedDirectPPFDToOPVWestDirection)\n# \t\testimatedDiffuseDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(estimatedDiffusePPFDToOPV)\n# \t\testimatedGroundReflectedDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(estimatedGroundReflectedPPFDToOPV)\n# \t\t# estimatedTotalDLIToOPV = (estimatedDirectDLIToOPVEastDirection + estimatedDirectDLIToOPVWestDirection) / 2.0 + estimatedDiffuseDLIToOPV + estimatedGroundReflectedDLIToOPV\n# \t\t# set the variables\n# \t\tsimulatorClass.setDirectDLIToOPVEastDirection(estimatedDirectDLIToOPVEastDirection)\n# \t\tsimulatorClass.setDirectDLIToOPVWestDirection(estimatedDirectDLIToOPVWestDirection)\n# \t\tsimulatorClass.setDiffuseDLIToOPV(estimatedDiffuseDLIToOPV)\n# \t\tsimulatorClass.setGroundReflectedDLIToOPV(estimatedGroundReflectedDLIToOPV)\n#\n# \t\t# deactivate the mode to the default value.\n# \t\tsimulatorClass.setEstimateSolarRadiationMode(False)\n#\n# \t\t# data export of solar irradiance\n# \t\tutil.exportCSVFile(np.array([year, month, day, hour,\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
simulatorClass.getDirectSolarRadiationToOPVEastDirection(),\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t simulatorClass.getDirectSolarRadiationToOPVWestDirection(),\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t simulatorClass.getDiffuseSolarRadiationToOPV(),\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t simulatorClass.getAlbedoSolarRadiationToOPV(),\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t estimatedTotalSolarRadiationToOPV]).T,\n# \t\t\t\t\t\t\t\t\t\t\t \"SolarIrradianceToHorizontalSurface\")\n# \t\t################# calculate solar irradiance without real data (estimate the data) end #######################\n#\n#\n# \t###############################################################################################\n# \t###################calculate the solar irradiance through multi span roof start################\n# \t###############################################################################################\n# \t# The calculated irradiance is stored to the object in this function\n# \tsimulatorDetail.getDirectSolarIrradianceThroughMultiSpanRoof(simulatorClass)\n# \t# data export\n# \tutil.exportCSVFile(\n# \t\tnp.array([year, month, day, hour, simulatorClass.integratedT_mat, simulatorClass.getHourlyDirectSolarRadiationAfterMultiSpanRoof(), ]).T,\n# \t\t\"directSolarRadiationAfterMultiSpanRoof\")\n# \t###########################################################################################\n# \t###################calculate the solar irradiance through multi span roof end##############\n# \t###########################################################################################\n#\n# \t###########################################################################################\n# \t###################calculate the solar irradiance to plants start##########################\n# \t###########################################################################################\n# \t# get/set cultivation days per harvest [days/harvest]\n# \tcultivationDaysperHarvest = 
constant.cultivationDaysperHarvest\n# \tsimulatorClass.setCultivationDaysperHarvest(cultivationDaysperHarvest)\n#\n# \t# get/set OPV coverage ratio [-]\n# \tOPVCoverage = constant.OPVAreaCoverageRatio\n# \tsimulatorClass.setOPVAreaCoverageRatio(OPVCoverage)\n#\n# \t# get/set OPV coverage ratio during fallow period[-]\n# \tOPVCoverageSummerPeriod = constant.OPVAreaCoverageRatioSummerPeriod\n# \tsimulatorClass.setOPVCoverageRatioSummerPeriod(OPVCoverageSummerPeriod)\n#\n# \t# get if we assume to have shading curtain\n# \thasShadingCurtain = constant.hasShadingCurtain\n# \tsimulatorClass.setIfHasShadingCurtain(hasShadingCurtain)\n#\n# \t# get the direct solar irradiance after penetrating multi span roof [W/m^2]\n# \thourlyDirectSolarRadiationAfterMultiSpanRoof = simulatorClass.getHourlyDirectSolarRadiationAfterMultiSpanRoof()\n#\n# \t# OPVAreaCoverageRatio = simulatorClass.getOPVAreaCoverageRatio()\n# \tOPVAreaCoverageRatio = constant.OPVAreaCoverageRatio\n# \thasShadingCurtain = simulatorClass.getIfHasShadingCurtain()\n# \t# ShadingCurtainDeployPPFD = simulatorClass.getShadingCurtainDeployPPFD()\n# \tOPVPARTransmittance = constant.OPVPARTransmittance\n#\n#\n# \t# make the list of OPV coverage ratio at each hour changing during summer\n# \tOPVAreaCoverageRatioChangingInSummer = OPVFilm.getDifferentOPVCoverageRatioInSummerPeriod(OPVAreaCoverageRatio, simulatorClass)\n#\n# \t# ############command to print out all array data\n# \t# np.set_printoptions(threshold=np.inf)\n# \t# print(\"OPVAreaCoverageRatioChangingInSummer:{}\".format(OPVAreaCoverageRatioChangingInSummer))\n# \t# np.set_printoptions(threshold=1000)\n# \t# ############\n#\n# \t# consider the transmission ratio of OPV film\n# \thourlyDirectSolarRadiationAfterOPVAndRoof = hourlyDirectSolarRadiationAfterMultiSpanRoof * (1 - OPVAreaCoverageRatioChangingInSummer) \\\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t+ hourlyDirectSolarRadiationAfterMultiSpanRoof * OPVAreaCoverageRatioChangingInSummer * 
OPVPARTransmittance\n# \t# print \"OPVAreaCoverageRatio:{}, HourlyInnerLightIntensityPPFDThroughOPV:{}\".format(OPVAreaCoverageRatio, HourlyInnerLightIntensityPPFDThroughOPV)\n#\n# \t# consider the light reduction by greenhouse inner structures and equipments like pipes, poles and gutters\n# \thourlyDirectSolarRadiationAfterInnerStructure = (1 - constant.GreenhouseShadeProportionByInnerStructures) * hourlyDirectSolarRadiationAfterOPVAndRoof\n#\n#\n# \ttransmittanceThroughShadingCurtainChangingEachMonth = getHourlyShadingCurtainDeploymentPatternChangingEachMonthMain(simulatorClass, hourlyDirectSolarRadiationAfterInnerStructure)\n#\n# \treturn transmittanceThroughShadingCurtainChangingEachMonth\n\n# def getHourlyShadingCurtainDeploymentPatternChangingEachMonthMain(simulatorClass, hourlyDirectSolarRadiationAfterInnerStructure):\ndef getHourlyShadingCurtainDeploymentPatternChangingEachMonthMain(simulatorClass):\n\t'''\n\tcalculate the shading curtain deployement pattern which changes each month.\n\tThe deployment is judges by comparing the average DLI and the optimal DLI that is defined at CropElectricityYeildSimulatorConstant\n\n\t'''\n\n\tdirectSolarIrradianceBeforeShadingCurtain = simulatorClass.directSolarIrradianceBeforeShadingCurtain\n\tdiffuseSolarIrradianceBeforeShadingCurtain = simulatorClass.diffuseSolarIrradianceBeforeShadingCurtain\n\n\ttotalSolarIrradianceBeforeShadingCurtain = directSolarIrradianceBeforeShadingCurtain + diffuseSolarIrradianceBeforeShadingCurtain\n\t# print(\"totalSolarIrradianceBeforeShadingCurtain:{}\".format(totalSolarIrradianceBeforeShadingCurtain))\n\n\t################### calc shading deployment pattern start ###########################\n\tif constant.IsShadingCurtainDeployOnlyDayTime == True and constant.IsDifferentShadingCurtainDeployTimeEachMonth == True:\n\n\t\t# 1 = no shading curatin, the transmittance of shading curtain = deploey curtain\n\t\ttransmittanceThroughShadingCurtainChangingEachMonth = 
np.zeros(len(totalSolarIrradianceBeforeShadingCurtain))\n\n\t\t#############\n\t\t# 1: deploy shading curtain, 0 = not deploy\n\t\tshadingHours = {\n\t\t\t# \"0\": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t# ,\"12\": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t# ,\"12-13\": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t# , \"11-13\":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t# , \"11-14\":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t# , \"10-14\":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t# , \"10-15\":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t# , \"9-15\": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t# , \"9-16\": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t# , \"8-16\": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t# , \"8-17\": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]\n\t\t\t# , \"7-17\": [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]\n\t\t\t# , \"7-18\": [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]\n\t\t\t# , \"6-18\": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]\n\t\t\t# , \"6-19\": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]\n\t\t\t# , \"5-19\": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]\n\t\t\t# , \"5-20\": [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]\n\t\t\t# , \"4-20\": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]\n\t\t\t# , \"4-21\": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]\n\t\t\t# , \"3-21\": [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 0, 0]\n\t\t\t# , \"3-22\": [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]\n\t\t\t# , \"2-22\": [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]\n\t\t\t# , \"2-23\": [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\t\t\t# , \"1-23\": [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\t\t\t# , \"0-23\": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\t\t\t0: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t, 1: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t, 2: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t, 3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t, 4: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t, 5: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t, 6: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t, 7: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t, 8: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t, 9: [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]\n\t\t\t, 10: [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]\n\t\t\t, 11: [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]\n\t\t\t, 12: [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]\n\t\t\t, 13: [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]\n\t\t\t, 14: [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]\n\t\t\t, 15: [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]\n\t\t\t, 16: [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]\n\t\t\t, 17: [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 0, 0, 0]\n\t\t\t, 18: [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]\n\t\t\t, 19: [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]\n\t\t\t, 20: [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]\n\t\t\t, 21: [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]\n\t\t\t, 22: [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\t\t\t, 23: [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\t\t\t, 24: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n\t\t}\n\t\t# print(\"len(shadingHours):{}\".format(len(shadingHours)))\n\n\t\t##############set the initial values start##############\n\t\t# get the num of simulation months, rounding up.\n\t\t# e.g. when the simulation period is 1st Jan to 15th Jan, it is 1.\n\t\t# e.g. when the simulation period is 1st 16th Nov to 1st Dec, it is 2.\n\t\t# simulationMonths = util.getSimulationMonthsInt()\n\n\t\t# unit: hour\n\t\tsimulationHours = util.getSimulationDaysInt() * constant.hourperDay\n\t\t# print(\"simulationHours:{}\".format(simulationHours))\n\n\t\tcurrnetDate = util.getStartDateDateType()\n\t\tcurrentYear = util.getStartDateDateType().year\n\t\tcurrentMonth = util.getStartDateDateType().month\n\t\tcurrentDay = util.getStartDateDateType().day\n\n\t\t# SimulatedMonths = 1\n\t\thour = 0\n\t\t##############set the initial values end##############\n\n\t\t# total sollar irradiance per day which does not cause tipburn. 
unit conversion: DLI(mol m-2 day) -> W m-2\n\t\t# totalSolarIrradiancePerDayNoTipburn = constant.DLIforTipBurn * 1000000.0 / constant.minuteperHour / constant.secondperMinute / constant.wattToPPFDConversionRatio\n\n\t\t# loop each hour\n\t\thour = 0\n\t\twhile hour < simulationHours:\n\t\t\t# print(\"hour:{}\".format(hour))\n\n\t\t\t# get the number of days each month\n\t\t\t_, currentMonthDays = calendar.monthrange(currnetDate.year, currnetDate.month)\n\t\t\t# print(\"currentMonthDays:{}\".format(currentMonthDays))\n\n\t\t\tdaysOfEachMonth = (datetime.date(currentYear, currentMonth, currentMonthDays) - currnetDate).days + 1\n\t\t\t# print(\"daysOfEachMonth:{}\".format(daysOfEachMonth))\n\n\t\t\t# loop for each shading curtain deployment pattern\n\t\t\tfor i in range(0, len(shadingHours)):\n\n\t\t\t\t# if shading curtain: constant.shadingTransmittanceRatio, if no shading: 1.0 transmittance\n\t\t\t\tTransmittanceThroughShadingCurtain = np.array([constant.shadingTransmittanceRatio if j == 1 else 1.0 for j in shadingHours[i]])\n\t\t\t\t# print(\"TransmittanceThroughShadingCurtain:{}\".format(TransmittanceThroughShadingCurtain))\n\n\t\t\t\t# extend the shading curatin pattern for whole month\n\t\t\t\ttransmittanceThroughShadingCurtainWholeMonth = []\n\t\t\t\t[transmittanceThroughShadingCurtainWholeMonth .extend(TransmittanceThroughShadingCurtain) for k in range(0, daysOfEachMonth)]\n\t\t\t\t# print(\"len(transmittanceThroughShadingCurtainWholeMonth ):{}\".format(len(transmittanceThroughShadingCurtainWholeMonth )))\n\n\t\t\t\t# get the solar irradiance through shading curtain\n\t\t\t\thourlyDirectSolarRadiationAfterShadingCurtain = totalSolarIrradianceBeforeShadingCurtain[hour : hour + daysOfEachMonth * constant.hourperDay] * transmittanceThroughShadingCurtainWholeMonth\n\t\t\t\t# print(\"hourlyDirectSolarRadiationAfterShadingCurtain:{}\".format(hourlyDirectSolarRadiationAfterShadingCurtain))\n\t\t\t\t# 
print(\"len(hourlyDirectSolarRadiationAfterShadingCurtain):{}\".format(len(hourlyDirectSolarRadiationAfterShadingCurtain)))\n\t\t\t\t# print(\"sum(hourlyDirectSolarRadiationAfterShadingCurtain):{}\".format(sum(hourlyDirectSolarRadiationAfterShadingCurtain)))\n\n\t\t\t\t# convert the unit from W m-2 to DLI (mol m-2 day-1)\n\t\t\t\tDLIAfterShadingCurtain = sum(hourlyDirectSolarRadiationAfterShadingCurtain) / daysOfEachMonth * constant.wattToPPFDConversionRatio * constant.secondperMinute * constant.minuteperHour / 1000000.0\n\t\t\t\t# print(\"DLIAfterShadingCurtain:{}\".format(DLIAfterShadingCurtain))\n\n\t\t\t\t# print(\"i:{}\".format(i))\n\t\t\t\t# if the average DLI is less than the optimal DLI\n\t\t\t\tif DLIAfterShadingCurtain <= constant.DLIforTipBurn:\n\t\t\t\t\t# store the transmittance which shading curatin deployed\n\t\t\t\t\ttransmittanceThroughShadingCurtainChangingEachMonth[hour : hour + daysOfEachMonth * constant.hourperDay] = transmittanceThroughShadingCurtainWholeMonth\n\t\t\t\t\t# increment hour\n\t\t\t\t\thour += daysOfEachMonth * constant.hourperDay\n\t\t\t\t\t# variable update\n\t\t\t\t\t# print(\"before currnetDate:{}\".format(currnetDate))\n\t\t\t\t\tcurrnetDate = currnetDate + datetime.timedelta(days = daysOfEachMonth)\n\t\t\t\t\t# print(\"after currnetDate:{}\".format(currnetDate))\n\t\t\t\t\tcurrentYear = currnetDate.year\n\t\t\t\t\tcurrentMonth = currnetDate.month\n\n\t\t\t\t\t# move to the next month. 
break the for loop\n\t\t\t\t\tbreak\n\n\t\t\t\t# if the average solar irradiance does not become below the optimal DLI, then store the solar irradiance for shadingHours[24]\n\t\t\t\telif i == 24:\n\t\t\t\t\t# store the transmittance which shading curatin deployed\n\t\t\t\t\ttransmittanceThroughShadingCurtainChangingEachMonth[hour : hour + daysOfEachMonth * constant.hourperDay] = transmittanceThroughShadingCurtainWholeMonth\n\t\t\t\t\t# increment hour\n\t\t\t\t\thour += daysOfEachMonth * constant.hourperDay\n\t\t\t\t\t# variable update\n\t\t\t\t\t# print(\"before currnetDate:{}\".format(currnetDate))\n\t\t\t\t\tcurrnetDate = currnetDate + datetime.timedelta(days = daysOfEachMonth)\n\t\t\t\t\t# print(\"after currnetDate:{}\".format(currnetDate))\n\t\t\t\t\tcurrentYear = currnetDate.year\n\t\t\t\t\tcurrentMonth = currnetDate.month\n\n\t\t\t\t\t# move to the next month. break the for loop\n\t\t\t\t\tbreak\n\n\t\t# store the data\n\t\tsimulatorClass.transmittanceThroughShadingCurtainChangingEachMonth = transmittanceThroughShadingCurtainChangingEachMonth\n\n\t\treturn transmittanceThroughShadingCurtainChangingEachMonth\n\n\telse:\n\t\tprint(\"error: please let constant.IsShadingCurtainDeployOnlyDayTime == True and constant.IsDifferentShadingCurtainDeployTimeEachMonth == True\")\n\t\t####################################################################################################\n\t\t# Stop execution here...\n\t\tsys.exit()\n\t\t# Move the above line to different parts of the assignment as you implement more of the functionality.\n\t\t####################################################################################################\n\n\n# print \"hourlyInnerLightIntensityPPFDThroughInnerStructure:{}\".format(hourlyInnerLightIntensityPPFDThroughInnerStructure)\n\n\n\n# if __name__ == '__main__':\n# \ttransmittanceThroughShadingCurtainChangingEachMonth = getHourlyShadingCurtainDeploymentPatternChangingEachMonthPrep()\n# 
\tprint(\"transmittanceThroughShadingCurtainChangingEachMonth:{}\".format(transmittanceThroughShadingCurtainChangingEachMonth))\n#\n# \t# export the data\n\n\n\n" }, { "alpha_fraction": 0.6478849053382874, "alphanum_fraction": 0.654906690120697, "avg_line_length": 44.26356506347656, "blob_id": "dd56150e905607bcef116a1da11ca22cc14c9b81", "content_id": "b2fda2a2d5888e053e3d2ca1e4c113e81475197e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5839, "license_type": "permissive", "max_line_length": 177, "num_lines": 129, "path": "/PlantGrowthModelValidation.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#######################################################\n# author :Kensaku Okada [[email protected]]\n# create date : 21 April 2018\n#######################################################\n\n# ####################################################################################################\n# np.set_printoptions(threshold=np.inf)\n# print \"hourlySolarIncidenceAngle:{}\".format(np.degrees(hourlySolarIncidenceAngle))\n# np.set_printoptions(threshold=1000)\n# ####################################################################################################\n\n# ####################################################################################################\n# # Stop execution here...\n# sys.exit()\n# # Move the above line to different parts of the assignment as you implement more of the functionality.\n# ####################################################################################################\n\n##########import package files##########\nimport os as os\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util\nimport datetime\nimport Lettuce\nimport PlantGrowthModelE_J_VanHentenConstant as VanHentenConstant\nimport 
SimulatorClass\nimport PlantGrowthModelE_J_VanHenten\n\n\ndef importPlantGrowthModelValidationData(fileName, simulatorClass):\n\n\t# fileData = Util.readData(fileName, \"\", skip_header=1,d='\\t')\n\tfileData = Util.readData(fileName, \"\", skip_header=1,d=',')\n\tprint(\"fileData.shape:{}\".format(fileData.shape))\n\t# ####################################################################################################\n\t# # np.set_printoptions(threshold=np.inf)\n\t# np.set_printoptions(threshold=np.inf)\n\t# print(\"filedata:{}\".format(fileData))\n\t# np.set_printoptions(threshold=1000)\n\t# ####################################################################################################\n\n\tyear = np.zeros(fileData.shape[0], dtype=int)\n\tmonth = np.zeros(fileData.shape[0], dtype=int)\n\tday = np.zeros(fileData.shape[0], dtype=int)\n\thour = np.zeros(fileData.shape[0], dtype=int)\n\tGHSolarIrradiance = np.zeros(fileData.shape[0])\n\tGHAirTemperature = np.zeros(fileData.shape[0])\n\n\tfor i in range(0, fileData.shape[0]):\n\t\tyear[i] = int(fileData[i][0])\n\t\tmonth[i] = int(fileData[i][1])\n\t\tday[i] = int(fileData[i][2])\n\t\thour[i] = int(fileData[i][3])\n\t\tGHSolarIrradiance[i] = fileData[i][4]\n\t\tGHAirTemperature[i] = fileData[i][5]\n\n\t# print(\"year[0]:{}\".format(year[0]))\n\t# set the imported values to the object\n\tsimulatorClass.setYear(year)\n\tsimulatorClass.setMonth(month)\n\tsimulatorClass.setDay(day)\n\tsimulatorClass.setHour(hour)\n\t# simulatorClass.GHSolarIrradianceValidationData = GHSolarIrradiance\n\tsimulatorClass.directSolarIrradianceToPlants = GHSolarIrradiance/2.0\n\tsimulatorClass.diffuseSolarIrradianceToPlants = GHSolarIrradiance/2.0\n\n\tsimulatorClass.GHAirTemperatureValidationData = GHAirTemperature\n\n\treturn simulatorClass\n\n########################################################################################################################################\n# Note: To run this file and validation, please change 
constant.SimulationStartDate and constant.SimulationEndDate according to the date of imported data.\n########################################################################################################################################\n\n# data import\n# get the num of simulation days\nsimulationDaysInt = Util.getSimulationDaysInt()\n\n# declare the class and instance\nsimulatorClass = SimulatorClass.SimulatorClass()\n\n# set spececific numbers to the instance\n# simulatorDetail.setSimulationSpecifications(simulatorClass)\n\n##########file import (TucsonHourlyOuterEinvironmentData) start##########\nfileName = constant.environmentData\nyear, \\\nmonth, \\\nday, \\\nhour, \\\nhourlyHorizontalDiffuseOuterSolarIrradiance, \\\nhourlyHorizontalTotalOuterSolarIrradiance, \\\nhourlyHorizontalDirectOuterSolarIrradiance, \\\nhourlyHorizontalTotalBeamMeterBodyTemperature, \\\nhourlyAirTemperature = Util.getArraysFromData(fileName, simulatorClass)\n##########file import (TucsonHourlyOuterEinvironmentData) end##########\n\n# # set the imported data\n# simulatorClass.hourlyHorizontalDirectOuterSolarIrradiance = hourlyHorizontalDirectOuterSolarIrradiance\n# simulatorClass.hourlyHorizontalDiffuseOuterSolarIrradiance = hourlyHorizontalDiffuseOuterSolarIrradiance\n# simulatorClass.hourlyHorizontalTotalOuterSolarIrradiance = hourlyHorizontalTotalOuterSolarIrradiance\n# simulatorClass.hourlyHorizontalTotalBeamMeterBodyTemperature = hourlyHorizontalTotalBeamMeterBodyTemperature\n# simulatorClass.hourlyAirTemperature = hourlyAirTemperature\n\n\n# import the weather data with which you want to validate the plant growth model. 
This data overwrite the originally imported data above.\nfileName = constant.plantGrowthModelValidationData\nimportPlantGrowthModelValidationData(fileName, simulatorClass)\n\n# set new data which can be derived from the imported data\nUtil.deriveOtherArraysFromImportedData(simulatorClass)\n\nFWPerHead, WFreshWeightIncrease, WAccumulatedFreshWeightIncrease, WHarvestedFreshWeight = PlantGrowthModelE_J_VanHenten.calcUnitDailyFreshWeightE_J_VanHenten1994(simulatorClass)\n\n# data export\nUtil.exportCSVFile(np.array([simulatorClass.getYear(),simulatorClass.getMonth(), simulatorClass.getDay(), simulatorClass.getHour(), \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t FWPerHead, WFreshWeightIncrease, WHarvestedFreshWeight]).T, \"plantGrowthModelValidationdata\")\n\n# print(\"day:{}\".format(day))\n# print(\"simulatorClass.getDay():{}\".format(simulatorClass.getDay()))\n# print(\"simulatorClass.getDay().shape:{}\".format(simulatorClass.getDay().shape))\n# print(\"FWPerHead.shape:{}\".format(FWPerHead.shape))\n\n# Util.exportCSVFile(np.array([year,month, day, hour, \\\n# \t\t\t\t\t\t\t\t\t\t\t\t\t\t FWPerHead, WFreshWeightIncrease, WHarvestedFreshWeight]).T, \"plantGrowthModelValidationdata\")\n" }, { "alpha_fraction": 0.6455585956573486, "alphanum_fraction": 0.6507675051689148, "avg_line_length": 73.15560150146484, "blob_id": "8d76ac59992450329dc0246a7b5ed24247a8503d", "content_id": "ffd6e6e0bec3d405a95a72f3702308cf7b7cc633", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 75832, "license_type": "permissive", "max_line_length": 492, "num_lines": 1009, "path": "/CropElectricityYeildSimulator1.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\n#############command to print out all array data\r\n# np.set_printoptions(threshold=np.inf)\r\n# print (\"directSolarRadiationToOPVWestDirection:{}\".format(directSolarRadiationToOPVWestDirection))\r\n# 
np.set_printoptions(threshold=1000)\r\n#############\r\n\r\n# ####################################################################################################\r\n# # Stop execution here...\r\n# sys.exit()\r\n# # Move the above line to different parts of the assignment as you implement more of the functionality.\r\n# ####################################################################################################\r\n\r\n##########import package files##########\r\nimport datetime\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nimport math\r\nimport CropElectricityYeildSimulatorConstant as constant\r\nimport Util as util\r\nimport OPVFilm\r\n#import Lettuce\r\nimport CropElectricityYeildSimulatorDetail as simulatorDetail\r\nimport SimulatorClass\r\n#######################################################\r\n\r\n\r\ndef simulateCropElectricityYieldProfit1():\r\n '''\r\n version 1.0\r\n simulator of crop and electricity yield and the total profit\r\n '''\r\n\r\n # print (\"start modeling: datetime.datetime.now():{}\".format(datetime.datetime.now()))\r\n\r\n # get the num of simulation days\r\n simulationDaysInt = util.getSimulationDaysInt()\r\n\r\n # declare the class and instance\r\n simulatorClass = SimulatorClass.SimulatorClass()\r\n\r\n # set spececific numbers to the instance\r\n # simulatorDetail.setSimulationSpecifications(simulatorClass)\r\n\r\n ##########file import (TucsonHourlyOuterEinvironmentData) start##########\r\n fileName = constant.environmentData\r\n year, \\\r\n month, \\\r\n day, \\\r\n hour, \\\r\n hourlyHorizontalDiffuseOuterSolarIrradiance, \\\r\n hourlyHorizontalTotalOuterSolarIrradiance, \\\r\n hourlyHorizontalDirectOuterSolarIrradiance, \\\r\n hourlyHorizontalTotalBeamMeterBodyTemperature, \\\r\n hourlyAirTemperature = util.getArraysFromData(fileName, simulatorClass)\r\n ##########file import (TucsonHourlyOuterEinvironmentData) end##########\r\n\r\n # set the imported data\r\n simulatorClass.hourlyHorizontalDirectOuterSolarIrradiance 
= hourlyHorizontalDirectOuterSolarIrradiance\r\n simulatorClass.hourlyHorizontalDiffuseOuterSolarIrradiance = hourlyHorizontalDiffuseOuterSolarIrradiance\r\n simulatorClass.hourlyHorizontalTotalOuterSolarIrradiance = hourlyHorizontalTotalOuterSolarIrradiance\r\n simulatorClass.hourlyHorizontalTotalBeamMeterBodyTemperature = hourlyHorizontalTotalBeamMeterBodyTemperature\r\n simulatorClass.hourlyAirTemperature = hourlyAirTemperature\r\n\r\n # ################## plot the imported direct and diffuse solar radiation start######################\r\n # Title = \"imported (measured horizontal) direct and diffuse solar radiation\"\r\n # xAxisLabel = \"time [hour]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"total Solar irradiance [W m^-2]\"\r\n # util.plotTwoData(np.linspace(0, simulationDaysInt * constant.hourperDay, simulationDaysInt * constant.hourperDay), \\\r\n # simulatorClass.getImportedHourlyHorizontalDirectSolarRadiation(), simulatorClass.getImportedHourlyHorizontalDiffuseSolarRadiation() ,Title, xAxisLabel, yAxisLabel, \\\r\n # \"hourlyHorizontalDirectOuterSolarIrradiance\", \"hourlyHorizontalDiffuseOuterSolarIrradiance\")\r\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ################## plot the imported direct and diffuse solar radiation end######################\r\n\r\n # print (\"hourlyHorizontalDirectOuterSolarIrradiance:{}\".format(hourlyHorizontalDirectOuterSolarIrradiance))\r\n # print (\"max(simulatorClass.getImportedHourlyHorizontalDirectSolarRadiation()):{}\".format(max(simulatorClass.getImportedHourlyHorizontalDirectSolarRadiation())))\r\n\r\n # set new data which can be derived from the imported data\r\n util.deriveOtherArraysFromImportedData(simulatorClass)\r\n\r\n ################################################################################\r\n ##########solar irradiance to OPV calculation with imported data start##########\r\n 
################################################################################\r\n if constant.ifUseOnlyRealData == True:\r\n\r\n # calculate with real data\r\n # hourly average [W m^-2]\r\n directSolarRadiationToOPVEastDirection, \\\r\n directSolarRadiationToOPVWestDirection, \\\r\n diffuseSolarRadiationToOPV, \\\r\n albedoSolarRadiationToOPV = simulatorDetail.calcOPVmoduleSolarIrradianceGHRoof(simulatorClass)\r\n\r\n # set the calculated data\r\n simulatorClass.setDirectSolarRadiationToOPVEastDirection(directSolarRadiationToOPVEastDirection)\r\n simulatorClass.setDirectSolarRadiationToOPVWestDirection(directSolarRadiationToOPVWestDirection)\r\n simulatorClass.setDiffuseSolarRadiationToOPV(diffuseSolarRadiationToOPV)\r\n simulatorClass.setAlbedoSolarRadiationToOPV(albedoSolarRadiationToOPV)\r\n\r\n # [W m^-2] per hour\r\n totalSolarRadiationToOPV = (simulatorClass.getDirectSolarRadiationToOPVEastDirection() + simulatorClass.getDirectSolarRadiationToOPVWestDirection() ) / 2.0 \\\r\n + simulatorClass.getDiffuseSolarRadiationToOPV() + simulatorClass.getAlbedoSolarRadiationToOPV()\r\n\r\n # if constant.ifExportFigures:\r\n # ##################plot the various real light intensity to OPV film start######################\r\n # Title = \"various real light intensity to OPV film\"\r\n # plotDataSet = np.array([simulatorClass.getDirectSolarRadiationToOPVEastDirection(), simulatorClass.getDirectSolarRadiationToOPVWestDirection(), \\\r\n # simulatorClass.getDiffuseSolarRadiationToOPV(), simulatorClass.getAlbedoSolarRadiationToOPV()])\r\n # labelList = np.array([\"direct To East Direction\", \"direct To West Direction\", \"diffuse\", \"albedo\"])\r\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"[W m^-2]\"\r\n # util.plotMultipleData(np.linspace(0, simulationDaysInt * constant.hourperDay, simulationDaysInt * constant.hourperDay), plotDataSet, labelList, Title, xAxisLabel, yAxisLabel)\r\n # 
util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ##################plot the various real light intensity to OPV film end######################\r\n\r\n # ##################plot the difference of total solar radiation with imported data (radiation to horizontal surface) and simulated data (radiation to tilted surface start######################\r\n # hourlyHorizontalTotalOuterSolarIrradiance = simulatorClass.getImportedHourlyHorizontalDirectSolarRadiation() + simulatorClass.getImportedHourlyHorizontalDiffuseSolarRadiation()\r\n # Title = \"total solar radiation to OPV with measured data\"\r\n # xAxisLabel = \"time [hour]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"total Solar irradiance [W m^-2]\"\r\n # util.plotTwoData(np.linspace(0, simulationDaysInt * constant.hourperDay, simulationDaysInt * constant.hourperDay), \\\r\n # hourlyHorizontalTotalOuterSolarIrradiance, totalSolarRadiationToOPV, Title, xAxisLabel, yAxisLabel, \"measured horizontal\", \"tilted with measured\")\r\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ##################plot the difference of total solar radiation with imported data (radiation to horizontal surface) and simulated data (radiation to tilted surface end######################\r\n\r\n\r\n ###################data export start##################\r\n util.exportCSVFile(np.array([year, month, day, hour,\r\n hourlyHorizontalDirectOuterSolarIrradiance,\r\n hourlyHorizontalDiffuseOuterSolarIrradiance,\r\n hourlyHorizontalTotalOuterSolarIrradiance,\r\n simulatorClass.getDirectSolarRadiationToOPVEastDirection(),\r\n simulatorClass.getDirectSolarRadiationToOPVWestDirection(),\r\n simulatorClass.getDiffuseSolarRadiationToOPV(),\r\n simulatorClass.getAlbedoSolarRadiationToOPV(),\r\n totalSolarRadiationToOPV]).T,\r\n \"hourlyMeasuredSolarRadiations\")\r\n ###################data export 
end##################\r\n\r\n # unit change: [W m^-2] -> [umol m^-2 s^-1] == PPFD\r\n directPPFDToOPVEastDirection = util.convertFromWattperSecSquareMeterToPPFD(directSolarRadiationToOPVEastDirection)\r\n directPPFDToOPVWestDirection = util.convertFromWattperSecSquareMeterToPPFD(directSolarRadiationToOPVWestDirection)\r\n diffusePPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(diffuseSolarRadiationToOPV)\r\n groundReflectedPPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(albedoSolarRadiationToOPV)\r\n # print\"diffusePPFDToOPV.shape:{}\".format(diffusePPFDToOPV.shape)\r\n\r\n # set the matrix to the object\r\n simulatorClass.setDirectPPFDToOPVEastDirection(directPPFDToOPVEastDirection)\r\n simulatorClass.setDirectPPFDToOPVWestDirection(directPPFDToOPVWestDirection)\r\n simulatorClass.setDiffusePPFDToOPV(diffusePPFDToOPV)\r\n simulatorClass.setGroundReflectedPPFDToOPV(groundReflectedPPFDToOPV)\r\n\r\n # unit change: hourly [umol m^-2 s^-1] -> [mol m^-2 day^-1] == daily light integral (DLI) :number of photons received in a square meter per day\r\n directDLIToOPVEastDirection = util.convertFromHourlyPPFDWholeDayToDLI(directPPFDToOPVEastDirection)\r\n directDLIToOPVWestDirection = util.convertFromHourlyPPFDWholeDayToDLI(directPPFDToOPVWestDirection)\r\n diffuseDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(diffusePPFDToOPV)\r\n groundReflectedDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(groundReflectedPPFDToOPV)\r\n totalDLIToOPV = (directDLIToOPVEastDirection+directDLIToOPVWestDirection) / 2.0 + diffuseDLIToOPV + groundReflectedDLIToOPV\r\n # print \"directDLIToOPVEastDirection:{}\".format(directDLIToOPVEastDirection)\r\n # print \"diffuseDLIToOPV.shape:{}\".format(diffuseDLIToOPV.shape)\r\n # print \"groundReflectedDLIToOPV:{}\".format(groundReflectedDLIToOPV)\r\n\r\n # set the array to the object\r\n simulatorClass.setDirectDLIToOPVEastDirection(directDLIToOPVEastDirection)\r\n 
simulatorClass.setDirectDLIToOPVWestDirection(directDLIToOPVWestDirection)\r\n simulatorClass.setDiffuseDLIToOPV(diffuseDLIToOPV)\r\n simulatorClass.setGroundReflectedDLIToOPV(groundReflectedDLIToOPV)\r\n\r\n # If necessary, get the solar radiation data only on 15th day\r\n if util.getSimulationDaysInt() > 31 and constant.ifGet15thDayData:\r\n # measured horizontal data\r\n hourlyMeasuredHorizontalTotalSolarRadiationOnly15th = util.getOnly15thDay(hourlyHorizontalTotalOuterSolarIrradiance)\r\n # measured tilted value\r\n hourlyMeasuredTiltedTotalSolarRadiationOnly15th = util.getOnly15thDay(totalSolarRadiationToOPV)\r\n\r\n yearOnly15th = util.getOnly15thDay(year)\r\n monthOnly15th = util.getOnly15thDay(month)\r\n dayOnly15th = util.getOnly15thDay(day)\r\n hourOnly15th = util.getOnly15thDay(hour)\r\n # data export\r\n util.exportCSVFile(np.array([yearOnly15th, monthOnly15th, dayOnly15th, hourOnly15th, hourlyMeasuredHorizontalTotalSolarRadiationOnly15th, hourlyMeasuredTiltedTotalSolarRadiationOnly15th]).T, \"hourlyMeasuredTotalSolarRadiationOnly15th\")\r\n\r\n ########################################################################################################################\r\n ################# calculate solar irradiance without real data (estimate solar irradiance) start #######################\r\n ########################################################################################################################\r\n elif constant.ifUseOnlyRealData == False:\r\n\r\n # activate the mode to use the formulas for estimation. This is used for branching the solar irradiance to PV module. 
See OPVFilm.py\r\n simulatorClass.setEstimateSolarRadiationMode(True)\r\n\r\n # calculate the solar radiation to the OPV film\r\n # [W m^-2] per hour\r\n estimatedDirectSolarRadiationToOPVEastDirection, \\\r\n estimatedDirectSolarRadiationToOPVWestDirection, \\\r\n estimatedDiffuseSolarRadiationToOPV, \\\r\n estimatedAlbedoSolarRadiationToOPV = simulatorDetail.calcOPVmoduleSolarIrradianceGHRoof(simulatorClass)\r\n estimatedTotalSolarRadiationToOPV = (estimatedDirectSolarRadiationToOPVEastDirection + estimatedDirectSolarRadiationToOPVWestDirection) / 2.0 + estimatedDiffuseSolarRadiationToOPV + estimatedAlbedoSolarRadiationToOPV\r\n\r\n # set the calc results\r\n # [W m^-2] per hour\r\n simulatorClass.setDirectSolarRadiationToOPVEastDirection(estimatedDirectSolarRadiationToOPVEastDirection)\r\n simulatorClass.setDirectSolarRadiationToOPVWestDirection(estimatedDirectSolarRadiationToOPVWestDirection)\r\n simulatorClass.setDiffuseSolarRadiationToOPV(estimatedDiffuseSolarRadiationToOPV)\r\n simulatorClass.setAlbedoSolarRadiationToOPV(estimatedAlbedoSolarRadiationToOPV)\r\n # # modified not to use the following variables\r\n # simulatorClass.setEstimatedDirectSolarRadiationToOPVEastDirection(estimatedDirectSolarRadiationToOPVEastDirection)\r\n # simulatorClass.setEstimatedDirectSolarRadiationToOPVWestDirection(estimatedDirectSolarRadiationToOPVWestDirection)\r\n # simulatorClass.setEstimatedDiffuseSolarRadiationToOPV(estimatedDiffuseSolarRadiationToOPV)\r\n # simulatorClass.setEstimatedAlbedoSolarRadiationToOPV(estimatedAlbedoSolarRadiationToOPV)\r\n\r\n # np.set_printoptions(threshold=np.inf)\r\n # print (\"estimatedDirectSolarRadiationToOPVEastDirection[W m^-2]:{}\".format(estimatedDirectSolarRadiationToOPVEastDirection))\r\n # print (\"estimatedDirectSolarRadiationToOPVWestDirection[W m^-2]:{}\".format(estimatedDirectSolarRadiationToOPVWestDirection))\r\n # print (\"estimatedDiffuseSolarRadiationToOPV[W m^-2]:{}\".format(estimatedDiffuseSolarRadiationToOPV))\r\n # 
print (\"estimatedAlbedoSolarRadiationToOPV[W m^-2]:{}\".format(estimatedAlbedoSolarRadiationToOPV))\r\n # np.set_printoptions(threshold=1000)\r\n\r\n # if constant.ifExportFigures:\r\n # ################## plot the distribution of estimated various DLI to OPV film start######################\r\n # Title = \"estimated various light intensity to tilted OPV film\"\r\n # plotDataSet = np.array([estimatedDirectSolarRadiationToOPVEastDirection, estimatedDirectSolarRadiationToOPVWestDirection, estimatedDiffuseSolarRadiationToOPV, estimatedAlbedoSolarRadiationToOPV])\r\n # labelList = np.array([\"Direct To East Direction\", \"Direct To West Direction\", \"Diffuse\", \"Albedo\"])\r\n # xAxisLabel = \"time [hour]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"[W m^-2]\"\r\n # util.plotMultipleData(np.linspace(0, simulationDaysInt * constant.hourperDay, simulationDaysInt * constant.hourperDay), plotDataSet, labelList, Title, xAxisLabel, yAxisLabel)\r\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ################## plot the distribution of estimated various DLI to OPV film end######################\r\n\r\n # ################## plot the imported horizontal data vs estimated data (the tilt should be zeor) ######################\r\n # title = \"measured and estimated horizontal data (tilt should be zero)\"\r\n # xAxisLabel = \"hourly measured horizontal total outer solar radiation [W m^-2]\" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"hourly estimated horizontal Total outer solar radiation [W m^-2]\"\r\n # util.plotData(hourlyHorizontalTotalOuterSolarIrradiance, estimatedTotalSolarRadiationToOPV, title, xAxisLabel, yAxisLabel, None, True, 0.0, 1.0)\r\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ################### plot the electricity yield per area with given OPV 
film end######################\r\n\r\n # [estimated data] unit change:\r\n estimatedDirectPPFDToOPVEastDirection = util.convertFromWattperSecSquareMeterToPPFD(estimatedDirectSolarRadiationToOPVEastDirection)\r\n estimatedDirectPPFDToOPVWestDirection = util.convertFromWattperSecSquareMeterToPPFD(estimatedDirectSolarRadiationToOPVWestDirection)\r\n estimatedDiffusePPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(estimatedDiffuseSolarRadiationToOPV)\r\n estimatedGroundReflectedPPFDToOPV = util.convertFromWattperSecSquareMeterToPPFD(estimatedAlbedoSolarRadiationToOPV)\r\n # print(\"estimatedDirectPPFDToOPVEastDirection:{}\".format(estimatedDirectPPFDToOPVEastDirection))\r\n # print(\"estimatedDirectPPFDToOPVWestDirection:{}\".format(estimatedDirectPPFDToOPVWestDirection))\r\n\r\n # set the variables\r\n simulatorClass.setDirectPPFDToOPVEastDirection(estimatedDirectPPFDToOPVEastDirection)\r\n simulatorClass.setDirectPPFDToOPVWestDirection(estimatedDirectPPFDToOPVWestDirection)\r\n simulatorClass.setDiffusePPFDToOPV(estimatedDiffusePPFDToOPV)\r\n simulatorClass.setGroundReflectedPPFDToOPV(estimatedGroundReflectedPPFDToOPV)\r\n # # modified not to use the following variables\r\n # simulatorClass.setEstimatedDirectPPFDToOPVEastDirection(estimatedDirectPPFDToOPVEastDirection)\r\n # simulatorClass.setEstimatedDirectPPFDToOPVWestDirection(estimatedDirectPPFDToOPVWestDirection)\r\n # simulatorClass.setEstimatedDiffusePPFDToOPV(estimatedDiffusePPFDToOPV)\r\n # simulatorClass.setEstimatedGroundReflectedPPFDToOPV(estimatedGroundReflectedPPFDToOPV)\r\n\r\n\r\n # [estimated data] unit change:\r\n estimatedDirectDLIToOPVEastDirection = util.convertFromHourlyPPFDWholeDayToDLI(estimatedDirectPPFDToOPVEastDirection)\r\n estimatedDirectDLIToOPVWestDirection = util.convertFromHourlyPPFDWholeDayToDLI(estimatedDirectPPFDToOPVWestDirection)\r\n estimatedDiffuseDLIToOPV = util.convertFromHourlyPPFDWholeDayToDLI(estimatedDiffusePPFDToOPV)\r\n estimatedGroundReflectedDLIToOPV = 
util.convertFromHourlyPPFDWholeDayToDLI(estimatedGroundReflectedPPFDToOPV)\r\n # estimatedTotalDLIToOPV = (estimatedDirectDLIToOPVEastDirection + estimatedDirectDLIToOPVWestDirection) / 2.0 + estimatedDiffuseDLIToOPV + estimatedGroundReflectedDLIToOPV\r\n # set the variables\r\n # # modified not to use the following variables\r\n # simulatorClass.setEstimatedDirectDLIToOPVEastDirection(estimatedDirectDLIToOPVEastDirection)\r\n # simulatorClass.setEstimatedDirectDLIToOPVWestDirection(estimatedDirectDLIToOPVWestDirection)\r\n # simulatorClass.setEstimatedDiffuseDLIToOPV(estimatedDiffuseDLIToOPV)\r\n # simulatorClass.setEestimatedGroundReflectedDLIToOPV(estimatedGroundReflectedDLIToOPV)\r\n simulatorClass.setDirectDLIToOPVEastDirection(estimatedDirectDLIToOPVEastDirection)\r\n simulatorClass.setDirectDLIToOPVWestDirection(estimatedDirectDLIToOPVWestDirection)\r\n simulatorClass.setDiffuseDLIToOPV(estimatedDiffuseDLIToOPV)\r\n simulatorClass.setGroundReflectedDLIToOPV(estimatedGroundReflectedDLIToOPV)\r\n\r\n # deactivate the mode to the default value.\r\n simulatorClass.setEstimateSolarRadiationMode(False)\r\n\r\n # data export of solar irradiance\r\n util.exportCSVFile(np.array([year, month, day, hour,\r\n simulatorClass.directHorizontalSolarRadiation,\r\n simulatorClass.diffuseHorizontalSolarRadiation,\r\n simulatorClass.totalHorizontalSolarRadiation,\r\n simulatorClass.getDirectSolarRadiationToOPVEastDirection(),\r\n simulatorClass.getDirectSolarRadiationToOPVWestDirection(),\r\n simulatorClass.getDiffuseSolarRadiationToOPV(),\r\n simulatorClass.getAlbedoSolarRadiationToOPV(),\r\n estimatedTotalSolarRadiationToOPV]).T,\r\n \"estimatedSolarIrradiance\")\r\n\r\n # If necessary, get the solar radiation data only on 15th day\r\n if util.getSimulationDaysInt() > 31 and constant.ifGet15thDayData:\r\n\r\n # estimated horizontal value\r\n hourlyEstimatedTotalHorizontalSolarRadiationOnly15th = util.getOnly15thDay(simulatorClass.totalHorizontalSolarRadiation)\r\n # estmated 
tilted data\r\n hourlyEstimatedTotalTiltedSolarRadiationToOPVOnly15th = util.getOnly15thDay(estimatedTotalSolarRadiationToOPV)\r\n\r\n yearOnly15th = util.getOnly15thDay(year)\r\n monthOnly15th = util.getOnly15thDay(month)\r\n dayOnly15th = util.getOnly15thDay(day)\r\n hourOnly15th = util.getOnly15thDay(hour)\r\n # data export\r\n util.exportCSVFile(np.array([yearOnly15th, monthOnly15th, dayOnly15th, hourOnly15th, hourlyEstimatedTotalHorizontalSolarRadiationOnly15th, hourlyEstimatedTotalTiltedSolarRadiationToOPVOnly15th]).T,\r\n \"hourlyEstimatedTotalSolarRadiationOnly15th\")\r\n # util.exportCSVFile(hourlyEstimatedTotalSolarRadiationToOPVOnly15th, \"hourlyEstimatedTotalSolarRadiationToOPVOnly15th\")\r\n\r\n\r\n # if constant.ifExportFigures:\r\n # ##################plot the difference of total solar radiation with imported data and simulated data start######################\r\n # Title = \"total solar radiation to OPV with measured horizontal and estimated tilted\"\r\n # xAxisLabel = \"time [hour]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"total Solar irradiance [W m^-2]\"\r\n # util.plotTwoData(np.linspace(0, simulationDaysInt * constant.hourperDay, simulationDaysInt * constant.hourperDay), \\\r\n # hourlyHorizontalTotalOuterSolarIrradiance, estimatedTotalSolarRadiationToOPV ,Title, xAxisLabel, yAxisLabel, \"measured horizontal\", \"estimated tilted\")\r\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ##################plot the difference of total solar radiation with imported data and simulated data end######################\r\n\r\n # ################## plot the difference of total DLI with real data and simulated data start######################\r\n # Title = \"difference of total DLI to tilted OPV with real data and estimation\"\r\n # xAxisLabel = \"time [hour]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = 
\"DLI [mol m^-2 day^-1]\"\r\n # util.plotTwoData(np.linspace(0, simulationDaysInt, simulationDaysInt), \\\r\n # totalDLIToOPV, estimatedTotalDLIToOPV ,Title, xAxisLabel, yAxisLabel, \"with real data\", \"wth no data\")\r\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ################## plot the difference of total DLI with real data and simulated data end######################\r\n\r\n ################# calculate solar irradiance without real data (estimate the data) end #######################\r\n\r\n # # ####################################################################################################\r\n # # Stop execution here...\r\n # sys.exit()\r\n # # Move the above line to different parts of the assignment as you implement more of the functionality.\r\n # # ####################################################################################################\r\n\r\n\r\n # # export measured horizontal and estimated data only when the simulation date is 1 day. 
*Modify the condition if necessary.\r\n # elif constant.ifExportMeasuredHorizontalAndExtimatedData == True and util.getSimulationDaysInt() == 1:\r\n # util.exportCSVFile(np.array([estimatedTotalSolarRadiationToOPV, hourlyHorizontalTotalOuterSolarIrradiance]).T, \"hourlyMeasuredHOrizontalAndEstimatedTotalSolarRadiation\")\r\n\r\n # if constant.ifExportFigures:\r\n # ################## plot the distribution of direct and diffuse PPFD start######################\r\n # Title = \"TOTAL outer PPFD to OPV\"\r\n # xAxisLabel = \"time [hour]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"PPFD [umol m^-2 s^-1]\"\r\n # util.plotData(np.linspace(0, simulationDaysInt * constant.hourperDay, simulationDaysInt * constant.hourperDay), \\\r\n # directPPFDToOPV + diffusePPFDToOPV + groundReflectedPPFDToOPV, Title, xAxisLabel, yAxisLabel)\r\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ################## plot the distribution of direct and diffuse PPFD end######################\r\n\r\n # ################## plot the distribution of direct and diffuse solar DLI with real data start######################\r\n # Title = \"direct and diffuse outer DLI to OPV\"\r\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"DLI [mol m^-2 day^-1]\"\r\n # y1Label = \"(directDLIToOPVEastDirection+directDLIToOPVWestDirection)/2.0\"\r\n # y2Label = \"diffuseDLIToOPV\"\r\n # util.plotTwoData(np.linspace(0, simulationDaysInt, simulationDaysInt), (directDLIToOPVEastDirection+directDLIToOPVWestDirection)/2.0, diffuseDLIToOPV, Title,\r\n # xAxisLabel, yAxisLabel, y1Label, y2Label)\r\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ################## plot the distribution of direct and diffuse solar DLI end######################\r\n\r\n # ################## plot the 
distribution of various DLI to OPV film start######################\r\n # Title = \"various DLI to OPV film\"\r\n # plotDataSet = np.array([simulatorClass.getDirectDLIToOPVEastDirection(), simulatorClass.getDirectDLIToOPVWestDirection(), simulatorClass.getDiffuseDLIToOPV(), simulatorClass.getGroundReflectedDLIToOPV()])\r\n # labelList = np.array([\"directDLIToOPVEastDirection\", \"directDLIToOPVWestDirection\", \"diffuseDLIToOPV\", \"groundReflectedDLIToOPV\"])\r\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"DLI [mol m^-2 day^-1]\"\r\n # util.plotMultipleData(np.linspace(0, simulationDaysInt, simulationDaysInt), plotDataSet, labelList, Title, xAxisLabel, yAxisLabel)\r\n # util.saveFigure(Title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ################## plot the distribution of various DLI to OPV film end######################\r\n\r\n ############################################################################################\r\n ################## calculate the daily electricity yield per area start#####################\r\n ############################################################################################\r\n # TODO: for more accurate modeling, we need actual data (considering the OPV material) for the temperature of OPV film, but right now, just use the imported body temperature.\r\n # get the daily electricity yield per area per day ([J/m^2/day]) based on the given light intensity ([Celsius],[W/m^2]).\r\n # regard the east side and west tilted OPV module differently/\r\n dailyJopvoutperAreaEastRoof = simulatorDetail.getDailyElectricityYieldperArea(simulatorClass, hourlyHorizontalTotalBeamMeterBodyTemperature, \\\r\n simulatorClass.getDirectSolarRadiationToOPVEastDirection(),\r\n simulatorClass.getDiffuseSolarRadiationToOPV(),\r\n simulatorClass.getAlbedoSolarRadiationToOPV())\r\n dailyJopvoutperAreaWestRoof = 
simulatorDetail.getDailyElectricityYieldperArea(simulatorClass, hourlyHorizontalTotalBeamMeterBodyTemperature, \\\r\n simulatorClass.getDirectSolarRadiationToOPVWestDirection(),\r\n simulatorClass.getDiffuseSolarRadiationToOPV(),\r\n simulatorClass.getAlbedoSolarRadiationToOPV())\r\n # print(\"dailyJopvoutperAreaEastRoof:{}\".format(dailyJopvoutperAreaEastRoof))\r\n # print(\"dailyJopvoutperAreaWestRoof:{}\".format(dailyJopvoutperAreaWestRoof))\r\n\r\n # unit change [J/m^2/day] -> [Wh/m^2/day]\r\n # dailyWhopvoutperArea = util.convertFromJouleToWattHour(dailyJopvoutperArea)\r\n dailyWhopvoutperAreaEastRoof = util.convertFromJouleToWattHour(dailyJopvoutperAreaEastRoof)\r\n dailyWhopvoutperAreaWestRoof = util.convertFromJouleToWattHour(dailyJopvoutperAreaWestRoof)\r\n # set the variables\r\n simulatorClass.dailyWhopvoutperAreaEastRoof = dailyWhopvoutperAreaEastRoof\r\n simulatorClass.dailyWhopvoutperAreaWestRoof = dailyWhopvoutperAreaWestRoof\r\n # print(\"dailyWhopvoutperAreaEastRoof:{}\".format(dailyWhopvoutperAreaEastRoof))\r\n # print(\"dailyWhopvoutperAreaWestRoof:{}\".format(dailyWhopvoutperAreaWestRoof ))\r\n\r\n # electricity production unit Exchange [Wh/m^2/day] -> [kWh/m^2/day]\r\n # dailykWhopvoutperArea = util.convertWhTokWh(dailyWhopvoutperArea)\r\n dailykWhopvoutperAreaEastRoof = util.convertWhTokWh(dailyWhopvoutperAreaEastRoof)\r\n dailykWhopvoutperAreaWestRoof = util.convertWhTokWh(dailyWhopvoutperAreaWestRoof)\r\n # set the variables\r\n simulatorClass.dailykWhopvoutperAreaEastRoof = dailykWhopvoutperAreaEastRoof\r\n simulatorClass.dailykWhopvoutperAreaWestRoof = dailykWhopvoutperAreaWestRoof\r\n # print(\"dailykWhopvoutperAreaEastRoof[kWh/m^2/day]:{}\".format(dailykWhopvoutperAreaEastRoof))\r\n # print(\"dailykWhopvoutperAreaWestRoof[kWh/m^2/day]:{}\".format(dailykWhopvoutperAreaWestRoof))\r\n\r\n # the total electricity produced (unit exchange: [kWh/m^2/day] -> [kWh/day])\r\n # consider that the coverage ratio can be different during summer\r\n 
OPVAreaCoverageRatioChangingInSummer = OPVFilm.getDifferentOPVCoverageRatioInSummerPeriod(constant.OPVAreaCoverageRatio, simulatorClass)\r\n # change the number of elements: per hour -> per day\r\n OPVAreaCoverageRatioPerDayChangingInSummer = OPVAreaCoverageRatioChangingInSummer[::constant.hourperDay]\r\n # totalkWhopvoutPerday = dailykWhopvoutperAreaEastRoof * (constant.OPVAreaFacingEastOrNorthfacingRoof) + dailykWhopvoutperAreaWestRoof * (constant.OPVAreaFacingWestOrSouthfacingRoof)\r\n totalkWhopvoutPerday = dailykWhopvoutperAreaEastRoof * OPVAreaCoverageRatioPerDayChangingInSummer * constant.greenhouseTotalRoofArea\r\n\r\n # totalkWhopvoutPerAreaPerday = totalkWhopvoutPerday/constant.greenhouseTotalRoofArea\r\n totalkWhopvoutPerRoofAreaPerday = totalkWhopvoutPerday/constant.greenhouseTotalRoofArea\r\n\r\n # set the calculated value\r\n simulatorClass.totalkWhopvoutPerday = totalkWhopvoutPerday\r\n simulatorClass.totalkWhopvoutPerAreaPerday = totalkWhopvoutPerRoofAreaPerday\r\n # print(\"totalkWhopvoutPerday[kWh/day]:{}\".format(totalkWhopvoutPerday))\r\n # print(\"totalkWhopvoutPerAreaPerday[kWh/m^2/day]:{}\".format(totalkWhopvoutPerAreaPerday))\r\n\r\n # if constant.ifExportFigures:\r\n # ################### plot the electricity yield per area with given OPV film\r\n # title = \"electricity yield per area vs OPV film (east_west average)\"\r\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"Electricity yield per OPV area [kWh/m^2/day]\"\r\n # util.plotData(np.linspace(0, simulationDaysInt, simulationDaysInt), (dailykWhopvoutperAreaEastRoof + dailykWhopvoutperAreaWestRoof)/2.0, title, xAxisLabel, yAxisLabel)\r\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ################### plot the electricity yield per area with given OPV film end\r\n\r\n # data export\r\n util.exportCSVFile(np.array([year[::24], month[::24], day[::24], 
dailykWhopvoutperAreaEastRoof, dailykWhopvoutperAreaWestRoof]).T,\r\n \"dailyElectricEnergyFromRoofsFacingEachDirection\")\r\n ##########################################################################################\r\n ################## calculate the daily electricity yield per area end#####################\r\n ##########################################################################################\r\n\r\n ##########################################################################################\r\n ################## calculate the daily electricity sales per area start###################\r\n ##########################################################################################\r\n # convert the year of each hour to the year to each day\r\n yearOfeachDay = year[::24]\r\n # convert the month of each hour to the month to each day\r\n monthOfeachDay = month[::24]\r\n # get the monthly electricity sales per area [USD/month/m^2]\r\n monthlyElectricitySalesperAreaEastRoof = simulatorDetail.getMonthlyElectricitySalesperArea(dailyJopvoutperAreaEastRoof, yearOfeachDay, monthOfeachDay ,simulatorClass)\r\n monthlyElectricitySalesperAreaWastRoof = simulatorDetail.getMonthlyElectricitySalesperArea(dailyJopvoutperAreaWestRoof, yearOfeachDay, monthOfeachDay, simulatorClass)\r\n # print(\"monthlyElectricitySalesperAreaEastRoof:{}\".format(monthlyElectricitySalesperAreaEastRoof))\r\n # print(\"monthlyElectricitySalesperAreaWastRoof:{}\".format(monthlyElectricitySalesperAreaWastRoof))\r\n # set the value to the object\r\n simulatorClass.setMonthlyElectricitySalesperAreaEastRoof(monthlyElectricitySalesperAreaEastRoof)\r\n simulatorClass.setMonthlyElectricitySalesperAreaWestRoof(monthlyElectricitySalesperAreaWastRoof)\r\n # print \"simulatorClass.getMonthlyElectricitySalesperArea():{}\".format(simulatorClass.getMonthlyElectricitySalesperArea())\r\n\r\n # electricity sales unit Exchange [USD/m^2/month] -> [USD/month]\r\n totalElectricitySalesPerMonth = 
monthlyElectricitySalesperAreaEastRoof * constant.OPVAreaFacingEastOrNorthfacingRoof + monthlyElectricitySalesperAreaWastRoof * constant.OPVAreaFacingWestOrSouthfacingRoof\r\n # print(\"totalElectricitySalesPerMonth[USD/month]:{}\".format(totalElectricitySalesPerMonth))\r\n # the averaged electricity sales [USD/m^2/month]\r\n if constant.OPVArea == 0.0:\r\n totalElectricitySalesPerOPVAreaPerMonth = [0.0]\r\n else:\r\n totalElectricitySalesPerOPVAreaPerMonth = totalElectricitySalesPerMonth / constant.OPVArea\r\n # set the value to the object\r\n simulatorClass.totalElectricitySalesPerAreaPerMonth = totalElectricitySalesPerOPVAreaPerMonth\r\n simulatorClass.totalElectricitySalesPerMonth = totalElectricitySalesPerMonth\r\n simulatorClass.totalElectricitySales = sum(totalElectricitySalesPerMonth)\r\n # print(\"totalElectricitySalesPerMonth:{}\".format(totalElectricitySalesPerMonth))\r\n # print(\"totalElectricitySales:{}\".format(sum(totalElectricitySalesPerMonth)))\r\n ###########################################################################################\r\n ################## calculate the daily electricity sales per area end#####################\r\n ###########################################################################################\r\n\r\n #####################################################################################################\r\n ##################calculate the electricity cost per area start######################################\r\n #####################################################################################################\r\n if constant.ifConsiderOPVCost is True:\r\n # get the depreciation price for the whole simulation period\r\n # it was assumed that the depreciation method is \tstraight line method\r\n # unit: USD\r\n # initialOPVCostUSD = constant.OPVPricePerAreaUSD * OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio)\r\n initialOPVCostUSD = constant.OPVPricePerAreaUSD * 
OPVFilm.getOPVArea(max(simulatorClass.OPVCoverageRatiosConsiderSummerRatio))\r\n # print(\"initialOPVCostUSD:{}\".format(initialOPVCostUSD))\r\n # unit: USD/day\r\n totalOPVCostUSDForDepreciation = initialOPVCostUSD * (util.getSimulationDaysInt() / constant.OPVDepreciationPeriodDays)\r\n\r\n # set the value to the object\r\n # print(\"totalOPVCostUSDForDepreciation:{}\".format(totalOPVCostUSDForDepreciation))\r\n # print(\"OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio):{}\".format(OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio)))\r\n if constant.OPVAreaCoverageRatio == 0.0:\r\n simulatorClass.setOPVCostUSDForDepreciationPerOPVArea(0.0)\r\n else:\r\n simulatorClass.setOPVCostUSDForDepreciationPerOPVArea(totalOPVCostUSDForDepreciation / OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio))\r\n simulatorClass.totalOPVCostUSDForDepreciation = totalOPVCostUSDForDepreciation\r\n simulatorClass.totalOPVCostUSDForDepreciationPerGHFloorArea = totalOPVCostUSDForDepreciation / constant.greenhouseFloorArea\r\n\r\n # print(\"OPVCostUSDForDepreciationPerOPVArea:{}\".format(totalOPVCostUSDForDepreciation / OPVFilm.getOPVArea(constant.OPVAreaCoverageRatio)))\r\n # print(\"totalOPVCostUSDForDepreciation:{}\".format(totalOPVCostUSDForDepreciation))\r\n\r\n else:\r\n # set the value to the object. 
the value is zero if not consider the purchase cost\r\n simulatorClass.setOPVCostUSDForDepreciationPerOPVArea(0.0)\r\n simulatorClass.totalOPVCostUSDForDepreciation = 0.0\r\n simulatorClass.totalOPVCostUSDForDepreciationPerGHFloorArea = 0.0\r\n\r\n ###################################################################################################\r\n ##################calculate the electricity cost per area end######################################\r\n ###################################################################################################\r\n\r\n ################################################################################\r\n ################## calculate the electricity production profit/loss start#######\r\n ################################################################################\r\n # profit == sales - less(cost)\r\n electricityProductionProfit = simulatorClass.totalElectricitySales - simulatorClass.totalOPVCostUSDForDepreciation\r\n electricityProductionProfitPerGHFloorArea = electricityProductionProfit / constant.greenhouseFloorArea\r\n\r\n # set the variable to the object\r\n simulatorClass.electricityProductionProfit = electricityProductionProfit\r\n simulatorClass.electricityProductionProfitPerGHFloorArea = electricityProductionProfitPerGHFloorArea\r\n ################################################################################\r\n ################## calculate the electricity production profit/loss end#########\r\n ################################################################################\r\n\r\n ###############################################################################################\r\n ###################calculate the solar irradiance through multi span roof start################\r\n ###############################################################################################\r\n # The calculated irradiance is stored to the object in this function\r\n 
simulatorDetail.setDirectSolarIrradianceThroughMultiSpanRoof(simulatorClass)\r\n # data export\r\n util.exportCSVFile(np.array([year, month, day, hour, simulatorClass.integratedT_mat, simulatorClass.getHourlyDirectSolarRadiationAfterMultiSpanRoof(),]).T,\r\n \"directSolarRadiationAfterMultiSpanRoof\")\r\n ###########################################################################################\r\n ###################calculate the solar irradiance through multi span roof end##############\r\n ###########################################################################################\r\n\r\n ###########################################################################################\r\n ###################calculate the solar irradiance to plants start##########################\r\n ###########################################################################################\r\n # get/set cultivation days per harvest [days/harvest]\r\n cultivationDaysperHarvest = constant.cultivationDaysperHarvest\r\n simulatorClass.setCultivationDaysperHarvest(cultivationDaysperHarvest)\r\n\r\n # get/set OPV coverage ratio [-]\r\n OPVCoverage = constant.OPVAreaCoverageRatio\r\n simulatorClass.setOPVAreaCoverageRatio(OPVCoverage)\r\n\r\n # get/set OPV coverage ratio during fallow period[-]\r\n OPVCoverageSummerPeriod = constant.OPVAreaCoverageRatioSummerPeriod\r\n simulatorClass.setOPVCoverageRatioSummerPeriod(OPVCoverageSummerPeriod)\r\n\r\n # get if we assume to have shading curtain\r\n hasShadingCurtain = constant.hasShadingCurtain\r\n simulatorClass.setIfHasShadingCurtain(hasShadingCurtain)\r\n\r\n # consider the OPV film, shading curtain, structure,\r\n simulatorDetail.setSolarIrradianceToPlants(simulatorClass)\r\n\r\n # data export\r\n util.exportCSVFile(np.array([year, month, day, hour, simulatorClass.transmittanceThroughShadingCurtainChangingEachMonth]).T, \"transmittanceThroughShadingCurtain\")\r\n util.exportCSVFile(np.array([year, month, day, hour, 
simulatorClass.directSolarIrradianceToPlants, simulatorClass.diffuseSolarIrradianceToPlants]).T, \"solarIrradianceToPlants\")\r\n\r\n # the DLI to plants [mol/m^2/day]\r\n # totalDLItoPlants = simulatorDetail.getTotalDLIToPlants(OPVCoverage, importedDirectPPFDToOPV, importedDiffusePPFDToOPV, importedGroundReflectedPPFDToOPV,\\\r\n # hasShadingCurtain, shadingCurtainDeployPPFD, simulatorClass)\r\n totalDLItoPlants = simulatorClass.directDLIToPlants + simulatorClass.diffuseDLIToPlants\r\n # print(\"totalDLItoPlants:{}\".format(totalDLItoPlants))\r\n # print \"totalDLItoPlants.shape:{}\".format(totalDLItoPlants.shape)\r\n # unit: DLI/day\r\n simulatorClass.totalDLItoPlants = totalDLItoPlants\r\n\r\n # if constant.ifExportFigures:\r\n # ######################### plot a graph showing the DLI to plants ######################################\r\n # title = \"DLI to plants (OPV coverage \" + str(int(100*OPVCoverage)) + \"%)\"\r\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"DLI[mol/m^2/day]\"\r\n # util.plotData(np.linspace(0, simulationDaysInt, simulationDaysInt), totalDLItoPlants, title, xAxisLabel, yAxisLabel)\r\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # #######################################################################################################\r\n\r\n ########################################################################################\r\n ###################calculate the solar irradiance to plants end#########################\r\n ########################################################################################\r\n\r\n # ####################################################################################################\r\n # # Stop execution here...\r\n # sys.exit()\r\n # # Move the above line to different parts of the assignment as you implement more of the functionality.\r\n # 
####################################################################################################\r\n\r\n #############################################################################\r\n ################## calculate the daily plant yield start#####################\r\n #############################################################################\r\n # # On the model, since it was assumed the temperature in the greenhouse is maintained at the set point by cooling system (pad and fan system), this function is not used.\r\n # # calc/set the thermal time to the object\r\n # simulatorDetail.setThermalTimeToPlants(simulatorClass)\r\n\r\n # get/set the plant growth model name [String]\r\n plantGrowthModel = constant.plantGrowthModel\r\n simulatorClass.setPlantGrowthModel(plantGrowthModel)\r\n\r\n #calculate plant yield given an OPV coverage and model :daily [g/head]\r\n shootFreshMassList, \\\r\n dailyFreshWeightPerHeadIncrease, \\\r\n accumulatedDailyFreshWeightPerHeadIncrease, \\\r\n dailyHarvestedFreshWeightPerHead = simulatorDetail.getPlantYieldSimulation(simulatorClass)\r\n # np.set_printoptions(threshold=np.inf)\r\n # print (\"shootFreshMassList:{}\".format(shootFreshMassList))\r\n # print (\"dailyFreshWeightPerHeadIncrease:{}\".format(dailyFreshWeightPerHeadIncrease))\r\n # print (\"accumulatedDailyFreshWeightPerHeadIncrease:{}\".format(accumulatedDailyFreshWeightPerHeadIncrease))\r\n # print (\"dailyHarvestedFreshWeightPerHead:{}\".format(dailyHarvestedFreshWeightPerHead))\r\n # np.set_printoptions(threshold=100)\r\n\r\n # get the penalized plant fresh weight with too strong sunlight : :daily [g/unit]\r\n # In this research, it was concluded not to assumed the penalty function. According to Jonathan M. Frantz and Glen Ritchie \"2004\". Exploring the Limits of Crop Productivity: Beyond the Limits of Tipburn in Lettuce, the literature on lettuce response to high PPF is not clear, and indeed, I also found there is a contradiction between Fu et al. (2012). 
Effects of different light intensities on anti-oxidative enzyme activity, quality and biomass in lettuce, and Jonathan M. Frantz and Glen Ritchie (2004) on this discussion.\r\n if constant.IfConsiderPhotoInhibition is True:\r\n\r\n penalizedDailyHarvestedFreshWeightPerHead = simulatorDetail.penalizeDailyHarvestedFreshWeightPerHead(dailyHarvestedFreshWeightPerHead, simulatorClass)\r\n print(\"penalizedDailyHarvestedFreshWeightPerHead:{}\".format(penalizedDailyHarvestedFreshWeightPerHead))\r\n\r\n if constant.ifExportFigures:\r\n ######################### plot dailyHarvestedFreshWeightPerHead and penalized dailyHarvestedFreshWeightPerHead\r\n # if no penalty occurs, these variables plot the same dots.\r\n title = \"HarvestedFreshWeight and penalized HarvestedFreshWeight \"\r\n xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n yAxisLabel = \"plant fresh weight[g/unit]\"\r\n util.plotTwoData(np.linspace(0, util.getSimulationDaysInt(), util.getSimulationDaysInt()), \\\r\n dailyHarvestedFreshWeightPerHead, penalizedDailyHarvestedFreshWeightPerHead, title, xAxisLabel, yAxisLabel, \"real data\", \"penalized data\")\r\n util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n #######################################################################\r\n\r\n # ######################### plot a graph showing only shootFreshMassList per unit #######################\r\n # title = \"plant yield per head vs time (OPV coverage \" + str(int(100*OPVCoverage)) + \"%)\"\r\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"plant fresh weight[g/head]\"\r\n # util.plotData(np.linspace(0, simulationDaysInt, simulationDaysInt), shootFreshMassList, title, xAxisLabel, yAxisLabel)\r\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # 
#######################################################################################################\r\n\r\n # ############command to print out all array data\r\n # np.set_printoptions(threshold=np.inf)\r\n # print(\"simulatorClass.LeafAreaIndex_J_VanHenten1994:{}\".format(simulatorClass.LeafAreaIndex_J_VanHenten1994))\r\n # np.set_printoptions(threshold=1000)\r\n # ############\r\n\r\n # data export\r\n util.exportCSVFile(np.array([year[::24], month[::24], day[::24], totalDLItoPlants, shootFreshMassList]).T, \"shootFreshMassAndDLIToPlants\")\r\n # util.exportCSVFile(np.array([year[::24], month[::24], day[::24], simulatorClass.LeafAreaIndex_J_VanHenten1994]).T, \"LeafAreaIndex_J_VanHenten1994\")\r\n\r\n # unit conversion; get the plant yield per day per area: [g/head/day] -> [g/m^2/day]\r\n shootFreshMassPerCultivationFloorAreaPerDay = util.convertUnitShootFreshMassToShootFreshMassperArea(shootFreshMassList)\r\n # print(\"shootFreshMassPerAreaPerDay:{}\".format(shootFreshMassPerAreaPerDay))\r\n # unit [g/head] -> [g/m^2]\r\n harvestedShootFreshMassPerCultivationFloorAreaPerDay = util.convertUnitShootFreshMassToShootFreshMassperArea(dailyHarvestedFreshWeightPerHead)\r\n # print(\"harvestedShootFreshMassPerCultivationFloorAreaPerDay:{}\".format(harvestedShootFreshMassPerCultivationFloorAreaPerDay))\r\n # unit conversion: [g/m^2/day] -> [kg/m^2/day]\r\n shootFreshMassPerCultivationFloorAreaKgPerDay = util.convertFromgramTokilogram(shootFreshMassPerCultivationFloorAreaPerDay)\r\n harvestedShootFreshMassPerCultivationFloorAreaKgPerDay = util.convertFromgramTokilogram(harvestedShootFreshMassPerCultivationFloorAreaPerDay)\r\n # print(\"harvestedShootFreshMassPerCultivationFloorAreaKgPerDay:{}\".format(harvestedShootFreshMassPerCultivationFloorAreaKgPerDay))\r\n\r\n # set the value to the object\r\n simulatorClass.shootFreshMassPerAreaKgPerDay = shootFreshMassPerCultivationFloorAreaKgPerDay\r\n simulatorClass.harvestedShootFreshMassPerAreaKgPerDay = 
harvestedShootFreshMassPerCultivationFloorAreaKgPerDay\r\n simulatorClass.totalHarvestedShootFreshMass = sum(harvestedShootFreshMassPerCultivationFloorAreaKgPerDay) * constant.greenhouseCultivationFloorArea\r\n # print(\"shootFreshMassPerAreaKgPerDay:{}\".format(shootFreshMassPerCultivationFloorAreaKgPerDay))\r\n # print(\"harvestedShootFreshMassPerCultivationFloorAreaKgPerDay:{}\".format(harvestedShootFreshMassPerCultivationFloorAreaKgPerDay))\r\n # print(\"simulatorClass.totalHarvestedShootFreshMass:{}\".format(simulatorClass.totalHarvestedShootFreshMass))\r\n\r\n if constant.ifExportFigures:\r\n # ######################## plot a graph showing only shootFreshMassList per square meter ########################\r\n # title = \"plant yield per area vs time (OPV coverage \" + str(int(100*OPVCoverage)) + \"%)\"\r\n # xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n # yAxisLabel = \"plant fresh weight[kg/m^2]\"\r\n # util.plotData(np.linspace(0, simulationDaysInt, simulationDaysInt), shootFreshMassPerAreaKgPerDay, title, xAxisLabel, yAxisLabel)\r\n # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n # ###############################################################################################################\r\n\r\n ################## plot various unit Plant Yield vs time\r\n # plotDataSet = np.array([shootFreshMassList, dailyFreshWeightPerHeadIncrease, accumulatedDailyFreshWeightPerHeadIncrease, dailyHarvestedFreshWeightPerHead])\r\n # labelList = np.array([\"shootFreshMassList\", \"dailyFreshWeightPerHeadIncrease\", \"accumulatedDailyFreshWeightPerHeadIncrease\", \"dailyHarvestedFreshWeightPerHead\"])\r\n plotDataSet = np.array([shootFreshMassList, dailyFreshWeightPerHeadIncrease, dailyHarvestedFreshWeightPerHead])\r\n labelList = np.array([\"shootFreshMassList\", \"dailyFreshWeightPerHeadIncrease\", \"dailyHarvestedFreshWeightPerHead\"])\r\n title = \"Various 
unit Plant Yield vs time (OPV coverage \" + str(int(100*OPVCoverage)) + \"%)\"\r\n xAxisLabel = \"time [day]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n yAxisLabel = \"Unit plant Fresh Weight [g/unit]\"\r\n yMin = 0.0\r\n # yMax = 1850.0\r\n yMax = 300.0\r\n util.plotMultipleData(np.linspace(0, simulationDaysInt, simulationDaysInt), plotDataSet, labelList, title, xAxisLabel, yAxisLabel, yMin, yMax)\r\n util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n #######################################################################\r\n\r\n # data export\r\n util.exportCSVFile(np.array([shootFreshMassList, dailyFreshWeightPerHeadIncrease, dailyHarvestedFreshWeightPerHead]).T, \"VariousPlantYieldVsTime\")\r\n\r\n ##########################################################################\r\n ################## calculate the daily plant yield end####################\r\n ##########################################################################\r\n\r\n ##########################################################################\r\n ################## calculate the daily plant sales start##################\r\n ##########################################################################\r\n # get the sales price of plant [USD/m^2]\r\n # It was assumed that there is no tipburn.\r\n # unit: USD/m^2/day\r\n dailyPlantSalesPerSquareMeter = simulatorDetail.getPlantSalesperSquareMeter(simulatorClass)\r\n # print (\"dailyPlantSalesperSquareMeter.shape:{}\".format(dailyPlantSalesperSquareMeter.shape))\r\n # unit: USD/m^2\r\n # This sales is the sales per unit cultivation area, not per the whole greenhouse floor area.\r\n totalPlantSalesPerCultivationFloorArea = sum(dailyPlantSalesPerSquareMeter)\r\n # print (\"totalPlantSalesperSquareMeter(USD):{}\".format(totalPlantSalesPerCultivationFloorArea))\r\n # unit: USD\r\n totalplantSales = totalPlantSalesPerCultivationFloorArea * 
constant.greenhouseCultivationFloorArea\r\n print (\"totalplantSales(USD):{}\".format(totalplantSales))\r\n totalPlantSalesPerGHFloorArea = totalplantSales / constant.greenhouseFloorArea\r\n\r\n\r\n # set the variable to the object\r\n simulatorClass.totalPlantSalesperSquareMeter = totalPlantSalesPerCultivationFloorArea\r\n simulatorClass.totalplantSales = totalplantSales\r\n simulatorClass.totalPlantSalesPerGHFloorArea = totalPlantSalesPerGHFloorArea\r\n print(\"totalPlantSalesPerGHFloorArea:{}\".format(totalPlantSalesPerGHFloorArea))\r\n ##########################################################################\r\n ################## calculate the daily plant sales end####################\r\n ##########################################################################\r\n\r\n ######################################################################################################\r\n ################## calculate the daily plant cost (greenhouse operation cost) start###################\r\n ######################################################################################################\r\n # it was assumed that the cost for growing plants is significantly composed of labor cost and electricity and fuel energy cost for heating/cooling (including pad and fan system)\r\n\r\n # get the cost for cooling and heating\r\n totalHeatingCostForPlants, totalCoolingCostForPlants = simulatorDetail.getGreenhouseOperationCostForGrowingPlants(simulatorClass)\r\n\r\n # data export\r\n util.exportCSVFile(np.array([simulatorClass.Q_v[\"coolingOrHeatingEnergy W m-2\"], simulatorClass.Q_sr[\"solarIrradianceToPlants W m-2\"], simulatorClass.Q_lh[\"latentHeatByTranspiration W m-2\"], \\\r\n simulatorClass.Q_sh[\"sensibleHeatFromConductionAndConvection W m-2\"], simulatorClass.Q_lw[\"longWaveRadiation W m-2\"]]).T, \"energeBalance(W m-2)\")\r\n # # data export\r\n # util.exportCSVFile(np.array([simulatorClass.s, simulatorClass.gamma_star, simulatorClass.r_s, \\\r\n # simulatorClass.r_b, 
simulatorClass.e_s, simulatorClass.e_a, simulatorClass.R_n, \\\r\n # simulatorClass.r_a, simulatorClass.r_b, simulatorClass.L, simulatorClass.r_c ]).T, \"latentHeatCalcData\")\r\n\r\n totalHeatingCostForPlantsPerGHFloorArea = totalHeatingCostForPlants / constant.greenhouseFloorArea\r\n totalCoolingCostForPlantsPerGHFloorArea = totalCoolingCostForPlants / constant.greenhouseFloorArea\r\n\r\n totalLaborCost = simulatorDetail.getLaborCost(simulatorClass)\r\n totalLaborCostPerGHFloorArea = totalLaborCost / constant.greenhouseFloorArea\r\n # set the values to the object\r\n simulatorClass.totalLaborCost = totalLaborCost\r\n simulatorClass.totalLaborCostPerGHFloorArea = totalLaborCostPerGHFloorArea\r\n\r\n totalPlantProductionCost = totalHeatingCostForPlants + totalCoolingCostForPlants + totalLaborCost\r\n totalPlantProductionCostPerGHFloorArea = totalHeatingCostForPlantsPerGHFloorArea + totalCoolingCostForPlantsPerGHFloorArea + totalLaborCostPerGHFloorArea\r\n # set the values to the object\r\n simulatorClass.totalPlantProductionCost = totalPlantProductionCost\r\n simulatorClass.totalPlantProductionCostPerGHFloorArea = totalPlantProductionCostPerGHFloorArea\r\n print(\"totalPlantProductionCost:{}\".format(totalPlantProductionCost))\r\n print(\"totalPlantProductionCostPerGHFloorArea:{}\".format(totalPlantProductionCostPerGHFloorArea))\r\n ######################################################################################################\r\n ################## calculate the daily plant cost (greenhouse operation cost) end#####################\r\n ######################################################################################################\r\n\r\n ################################################################################\r\n ################## calculate the plant production profit/los start##############\r\n ################################################################################\r\n totalPlantProfit = totalplantSales - 
totalPlantProductionCost\r\n totalPlantProfitPerGHFloorArea = totalPlantSalesPerGHFloorArea - totalPlantProductionCostPerGHFloorArea\r\n # set the values to the object\r\n simulatorClass.totalPlantProfit = totalPlantProfit\r\n simulatorClass.totalPlantProfitPerGHFloorArea = totalPlantProfitPerGHFloorArea\r\n # print(\"totalPlantProfit:{}\".format(totalPlantProfit))\r\n # print(\"totalPlantProfitPerGHFloorArea:{}\".format(totalPlantProfitPerGHFloorArea))\r\n\r\n\r\n ################################################################################\r\n ################## calculate the plant production profit/loss end###############\r\n ################################################################################\r\n\r\n ################################################################################\r\n ################## calculate the total economic profit/loss start###############\r\n ################################################################################\r\n # get the economic profit\r\n economicProfit = totalPlantProfit + electricityProductionProfit\r\n economicProfitPerGHFloorArea = totalPlantProfitPerGHFloorArea + electricityProductionProfitPerGHFloorArea\r\n # set the values to the object\r\n simulatorClass.economicProfit = economicProfit\r\n simulatorClass.economicProfitPerGHFloorArea = economicProfitPerGHFloorArea\r\n print(\"economicProfit:{}\".format(economicProfit))\r\n print(\"economicProfitPerGHFloorArea:{}\".format(economicProfitPerGHFloorArea))\r\n\r\n ##############################################################################\r\n ################## calculate the total economic profit/loss end###############\r\n ##############################################################################\r\n\r\n # data export\r\n # util.exportCSVFile(np.array([[simulatorClass.totalHarvestedShootFreshMass, simulatorClass.totalElectricitySales], [simulatorClass.totalOPVCostUSDForDepreciation], \\\r\n # [totalplantSales], [totalPlantSalesPerGHFloorArea], 
[totalPlantProductionCost], [totalPlantProductionCostPerGHFloorArea], \\\r\n # [economicProfit], [economicProfitPerGHFloorArea]]).T, \"yieldProfitSalesCost\")\r\n # print(\"simulatorClass.totalHarvestedShootFreshMass:{}\".format(simulatorClass.totalHarvestedShootFreshMass))\r\n util.exportCSVFile(np.array([[simulatorClass.totalHarvestedShootFreshMass], [simulatorClass.totalElectricitySales], [simulatorClass.totalOPVCostUSDForDepreciation], \\\r\n [totalplantSales], [totalPlantSalesPerGHFloorArea], [totalPlantProductionCost], [totalPlantProductionCostPerGHFloorArea], \\\r\n [economicProfit], [economicProfitPerGHFloorArea]]).T, \"yieldProfitSalesCost\")\r\n\r\n # print (\"end modeling: datetime.datetime.now():{}\".format(datetime.datetime.now()))\r\n\r\n return simulatorClass\r\n\r\n\r\n# def optimizeOPVCoverageRatio(simulatorClass):\r\n# ############################################################################################\r\n# ###################Simulation with various opv film coverage ratio start####################\r\n# ############################################################################################\r\n# # Choose the simulation type\r\n# # simulationType = \"economicProfitWithRealSolar\"\r\n# # simulationType = \"plantAndElectricityYieldWithRealSolar\"\r\n# # simulationType = \"RealSolarAndEstimatedSolarComparison\"\r\n# simulationType = \"PlantAndElectricityYieldAndProfitOnEachOPVCoverage\"\r\n# # simulationType = \"stop\"\r\n#\r\n#\r\n# if simulationType == \"PlantAndElectricityYieldAndProfitOnEachOPVCoverage\":\r\n# #########initial parameters(statistics) value start##########\r\n# # substitute the constant value here. 
You do not need to change the values in the constant class when you want to try other variables\r\n#\r\n# #cultivation days per harvest [days/harvest]\r\n# cultivationDaysperHarvest = constant.cultivationDaysperHarvest\r\n# #the coverage ratio by OPV on the roofTop.\r\n# # OPVAreaCoverageRatio = constant.OPVAreaCoverageRatio\r\n# # #the area of OPV on the roofTop.\r\n# # OPVArea = OPVAreaCoverageRatio * constant.greenhouseRoofArea\r\n# # OPV film unit price[USD/m^2]\r\n# hasShadingCurtain = constant.hasShadingCurtain\r\n# # PPFD [umol m^-2 s^-1]\r\n# shadingCurtainDeployPPFD = constant.shadingCurtainDeployPPFD\r\n# #plant growth model type\r\n# # plantGrowthModel = constant.TaylorExpantionWithFluctuatingDLI\r\n# plantGrowthModel = constant.plantGrowthModel\r\n#\r\n# #if you continue to grow plants during the fallow period, then True\r\n# ifGrowForSummerPeriod = constant.ifGrowForSummerPeriod\r\n# simulatorClass.setIfGrowForSummerPeriod(ifGrowForSummerPeriod)\r\n#\r\n# # x-axis\r\n# OPVCoverageDelta = 0.01\r\n# #OPVCoverageDelta = 0.001\r\n# #the array for x-axis (OPV area [m^2])\r\n# OPVCoverageList = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# #########initial parameters(statistics) value end##########\r\n#\r\n# #########define objective functions start################\r\n# # electricity yield for a given period: [J] for a given period\r\n# electricityYield = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# # unit plant yield for a given period: [g] for a given period\r\n# unitPlantYield = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# # unit total daily plant fresh mass increase for a given period with each OPV film coverage: [g] for a given period\r\n# unitDailyFreshWeightList = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# # unit harvested fresh mass weight for a given period with each OPV film coverage: [g] for a given period\r\n# dailyHarvestedFreshWeightPerHeadList = np.zeros(int(1.0/OPVCoverageDelta), dtype = 
float)\r\n# # plant sales per square meter with each OPV film coverage: [USD/m^2]\r\n# plantSalesperSquareMeterList = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# # plant cost per square meter with each OPV film coverage: [USD/m^2]\r\n# # plantCostperSquareMeterList = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# # plant profit per square meter with each OPV film coverage: [USD/m^2]\r\n# plantProfitperSquareMeterList = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# # plant profit with each OPV film coverage: [USD/m^2]\r\n# plantProfitList = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n#\r\n# # monthly electricity sales per area with each OPV film coverage [USD/month]\r\n# monthlyElectricitySalesListEastRoof = np.zeros((int(1.0/OPVCoverageDelta), util.getSimulationMonthsInt()), dtype = float)\r\n# monthlyElectricitySalesListWestRoof = np.zeros((int(1.0/OPVCoverageDelta), util.getSimulationMonthsInt()), dtype = float)\r\n#\r\n# # electricity sales with each OPV film coverage [USD]\r\n# electricitySalesList = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# # electricitySalesListperAreaEastRoof = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# # electricitySalesListperAreaWestRoof = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n#\r\n# # electricity sales with each OPV film coverage [USD]\r\n# electricityProfitList = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# # economicProfit summing the electricity and plant profit [USD]\r\n# economicProfitList = np.zeros(int(1.0/OPVCoverageDelta), dtype = float)\r\n# #########define objective function end################\r\n#\r\n# # convert the year of each hour to the year to each day\r\n# yearOfeachDay = simulatorClass.getYear()[::24]\r\n# # convert the month of each hour to the month to each day\r\n# monthOfeachDay = simulatorClass.getMonth()[::24]\r\n# # print \"monthOfeachDay:{}\".format(monthOfeachDay)\r\n# # print 
\"monthOfeachDay.shape:{}\".format(monthOfeachDay.shape)\r\n#\r\n# # variable = OPV coverage ratio. loop by OPV coverage ratio\r\n# for i in range (0, int(1.0/OPVCoverageDelta)):\r\n# ################## calculate the electricity yield with different OPV coverage for given period start#####################\r\n# OPVCoverageList[i] = i * OPVCoverageDelta\r\n#\r\n# # [J/m^2] per day -> [J] electricity yield for a given period with a given area.\r\n# # sum the electricity yield of east and west direction roofs\r\n# electricityYield[i] += simulatorDetail.getWholeElectricityYieldEachOPVRatio(OPVCoverageList[i], dailyJopvoutperAreaEastRoof, simulatorClass, constant.greenhouseRoofArea / 2.0)\r\n# electricityYield[i] += simulatorDetail.getWholeElectricityYieldEachOPVRatio(OPVCoverageList[i], dailyJopvoutperAreaWestRoof, simulatorClass, constant.greenhouseRoofArea / 2.0)\r\n# # print(\"i:{}, electricityYield[i]:{}\".format(i, electricityYield[i]))\r\n#\r\n# # [J] -> [Wh]: divide by 3600\r\n# electricityYield[i] = util.convertFromJouleToWattHour(electricityYield[i])\r\n# # [Wh] -> [kWh]: divide by 1000\r\n# electricityYield[i] = util.convertFromJouleToWattHour(electricityYield[i])\r\n# ################## calculate the electricity yield with different OPV coverage for given period end#####################\r\n#\r\n# ##################calculate the electricity sales#######################\r\n# # get the monthly electricity sales per area [USD/month/m^2]\r\n# monthlyElectricitySalesperAreaEastRoof = simulatorDetail.getMonthlyElectricitySalesperArea(dailyJopvoutperAreaEastRoof, yearOfeachDay, monthOfeachDay)\r\n# monthlyElectricitySalesperAreaWestRoof = simulatorDetail.getMonthlyElectricitySalesperArea(dailyJopvoutperAreaWestRoof, yearOfeachDay, monthOfeachDay)\r\n#\r\n# # get the monthly electricity sales per each OPV coverage ratio [USD/month]\r\n# monthlyElectricitySalesListEastRoof[i] = simulatorDetail.getMonthlyElectricitySales(OPVCoverageList[i], 
monthlyElectricitySalesperAreaEastRoof, constant.greenhouseRoofArea / 2.0)\r\n# monthlyElectricitySalesListWestRoof[i] = simulatorDetail.getMonthlyElectricitySales(OPVCoverageList[i], monthlyElectricitySalesperAreaWestRoof, constant.greenhouseRoofArea / 2.0)\r\n#\r\n# # get the electricity sales per each OPV coverage ratio for given period [USD], suming the monthly electricity sales.\r\n# electricitySalesList[i] = sum(monthlyElectricitySalesListEastRoof[i]) + sum(monthlyElectricitySalesListWestRoof[i])\r\n# # print\"electricitySalesList:{}\".format(electricitySalesList)\r\n#\r\n# ##################calculate the electricity cost######################################\r\n# if constant.ifConsiderOPVCost is True:\r\n# initialOPVCostUSD = constant.OPVPricePerAreaUSD * OPVFilm.getOPVArea(OPVCoverageList[i])\r\n# OPVCostUSDForDepreciation =initialOPVCostUSD * (util.getSimulationDaysInt() / constant.OPVDepreciationPeriodDays)\r\n# else:\r\n# OPVCostUSDForDepreciation = 0.0\r\n#\r\n# ##################get the electricity profit ######################################\r\n# electricityProfitList[i] = electricitySalesList[i] - OPVCostUSDForDepreciation\r\n# # print (\"electricityYield:{}\".format(electricityYield))\r\n#\r\n#\r\n# # calc the electricity production per area [kWh/m^2]\r\n# print (\"electricity yield per area with 100% coverage ratio [kWh/m^2] was : {}\".format(electricityYield[int(1.0/OPVCoverageDelta) -1] / constant.greenhouseRoofArea))\r\n#\r\n# # ################## plot the electricity yield with different OPV coverage for given period start ################\r\n# # title = \"electricity yield with a given area vs OPV film\"\r\n# # xAxisLabel = \"OPV Coverage Ratio [-]\"\r\n# # yAxisLabel = \"Electricity yield for a given period [kWh]\"\r\n# # util.plotData(OPVCoverageList, electricityYield, title, xAxisLabel, yAxisLabel)\r\n# # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n# # ################## plot the 
electricity yield with different OPV coverage for given period end ##################\r\n#\r\n# # ################## plot the electricity profit with different OPV coverage for given period\r\n# # title = \"electricity profit for a given period vs OPV film coverage ratio\"\r\n# # xAxisLabel = \"OPV Coverage Ratio [-]: \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate\r\n# # yAxisLabel = \"Electricity profit for a given period [USD]\"\r\n# # util.plotData(OPVCoverageList, electricityProfitList, title, xAxisLabel, yAxisLabel)\r\n# # util.saveFigure(title + \" \" + constant.SimulationStartDate + \"-\" + constant.SimulationEndDate)\r\n#\r\n# ################## calculate the daily plant yield and profit start#####################\r\n# # variable = OPV coverage ratio. loop by OPV coverage ratio\r\n# for i in range (0, int(1.0/OPVCoverageDelta)):\r\n#\r\n# ##################calculate the plant yield\r\n# # Since the plants are not tilted, do not use the light intensity to the tilted surface, just use the inmported data or estimated data with 0 degree surface.\r\n# # daily [g/unit]\r\n# # shootFreshMassList, dailyFreshWeightPerHeadIncrease, accumulatedDailyFreshWeightPerHeadIncrease, dailyHarvestedFreshWeightPerHead = \\\r\n# # simulatorDetail.getPlantYieldSimulation(plantGrowthModel, cultivationDaysperHarvest,OPVCoverageList[i], \\\r\n# # (directPPFDToOPVEastDirection + directPPFDToOPVWestDirection)/2.0, diffusePPFDToOPV, groundReflectedPPFDToOPV, hasShadingCurtain, shadingCurtainDeployPPFD, simulatorClass)\r\n# shootFreshMassList, dailyFreshWeightPerHeadIncrease, accumulatedDailyFreshWeightPerHeadIncrease, dailyHarvestedFreshWeightPerHead = \\\r\n# simulatorDetail.getPlantYieldSimulation(plantGrowthModel, cultivationDaysperHarvest,OPVCoverageList[i], \\\r\n# importedDirectPPFDToOPV, importedDiffusePPFDToOPV, importedGroundReflectedPPFDToOPV, hasShadingCurtain, shadingCurtainDeployPPFD, simulatorClass)\r\n#\r\n# # sum the daily increase and get the 
total increase for a given period with a certain OPV coverage ratio\r\n# unitDailyFreshWeightList[i] = sum(dailyFreshWeightPerHeadIncrease)\r\n# # sum the daily increase and get the total harvest weight for a given period with a certain OPV coverage ratio\r\n# dailyHarvestedFreshWeightPerHeadList[i] = sum(dailyHarvestedFreshWeightPerHead)\r\n# # print \"dailyHarvestedFreshWeightPerHead.shape:{}\".format(dailyHarvestedFreshWeightPerHead.shape)\r\n#\r\n# ##################calculate the plant sales\r\n# # unit conversion; get the daily plant yield per given period per area: [g/unit] -> [g/m^2]\r\n# dailyHarvestedFreshWeightperArea = util.convertUnitShootFreshMassToShootFreshMassperArea(dailyHarvestedFreshWeightPerHead)\r\n# # unit conversion: [g/m^2] -> [kg/m^2]1\r\n# dailyHarvestedFreshWeightperAreaKg = util.convertFromgramTokilogram(dailyHarvestedFreshWeightperArea)\r\n# # get the sales price of plant [USD/m^2]\r\n# # if the average DLI during each harvest term is more than 17 mol/m^2/day, discount the price\r\n# # TODO may need to improve the function representing the affect of Tipburn\r\n# dailyPlantSalesperSquareMeter = simulatorDetail.getPlantSalesperSquareMeter(year, dailyHarvestedFreshWeightperAreaKg, totalDLItoPlants)\r\n#\r\n# plantSalesperSquareMeterList[i] = sum(dailyPlantSalesperSquareMeter)\r\n# # print \"dailyPlantSalesperSquareMeter.shape:{}\".format(dailyPlantSalesperSquareMeter.shape)\r\n#\r\n# ##################calculate the plant cost\r\n# # plant operation cost per square meter for given simulation period [USD/m^2]\r\n# plantCostperSquareMeter = simulatorDetail.getPlantCostperSquareMeter(simulationDaysInt)\r\n#\r\n# ##################plant profit per square meter with each OPV film coverage: [USD/m^2]\r\n# plantProfitperSquareMeterList[i] = plantSalesperSquareMeterList[i] - plantCostperSquareMeter\r\n# # print \"plantProfitperSquareMeterList[i]:{}\".format(plantProfitperSquareMeterList[i])\r\n# # print 
\"plantProfitperSquareMeterList[{}]:{}\".format(i, plantProfitperSquareMeterList[i])\r\n# plantProfitList[i] = plantProfitperSquareMeterList[i] * constant.greenhouseCultivationFloorArea\r\n#\r\n# # get the economic profit\r\n# economicProfitList = plantProfitList + electricityProfitList\r\n#\r\n#\r\n#\r\n#\r\n# profitVSOPVCoverageData=np.array(zip(OPVCoverageList, economicProfitList))\r\n# # export the OPVCoverageList and economicProfitList [USD]\r\n# util.exportCSVFile(profitVSOPVCoverageData, \"OPVCoverage-economicProfit\")\r\n#\r\n# print (\"end modeling: datetime.datetime.now():{}\".format(datetime.datetime.now()))\r\n#\r\n# return profitVSOPVCoverageData, simulatorClass\r\n#\r\n# print(\"iteration cot conducted\")\r\n# return None\r\n" }, { "alpha_fraction": 0.3928571343421936, "alphanum_fraction": 0.49843013286590576, "avg_line_length": 31.234176635742188, "blob_id": "89ad0bb00e6d7c580a250ae0685413511cf52b40", "content_id": "1127c52d02d6c7e12030d31abcf832d0804cfc2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5096, "license_type": "permissive", "max_line_length": 145, "num_lines": 158, "path": "/plantGrowthModelTestA_J_Both.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# dliList = np.array([40,40,40,40,40,40,40,40,40,40,\n# 40,40,40,40,40,40,40,40,40,40,\n# 40,40,40,40,40,40,40,40,40,40,\n# 40,40,40,40,40,])\n\ndliList = np.array([22,22,22,22,22,22,22,22,22,22,\n 22,22,22,22,22,22,22,22,22,22,\n 22,22,22,22,22,22,22,22,22,22,\n 22,22,22,22,22,])\n\ndli = dliList[0]\n\nsdm = 0\nd_sdm = 0\nsdmAccumulated = 0\n\na = 0\nb = 0.4822\nc = -0.006225\nt = 35\ndt = 1\n\nfor t in range(1, 1+len(dliList)):\n a = -8.596 + 0.0743 * dli\n\n sdm = math.e ** (a + b * t + c * t**2)\n d_sdm = (b + 2 * c * t) * sdm\n sdmAccumulated += d_sdm\n # print \"t:{} d_sdm:{}\".format(t, d_sdm)\nfm = 
sdm/0.045\nprint \"case1 sdm accumulated version: {}\".format(sdmAccumulated)\n\n##########################################################################\n\ndli = dliList[0]\n\nt = 35\na = -8.596 + 0.0743 * dli\n\nsdm = math.e ** (a + b * t + c * t**2)\n\nprint \"case2 sdm (analytical answer): {}\".format(sdm)\n##########################################################################\n# case 3\n\n# dliList = np.array([22,22,22,22,22,22,22,22,22,22,\n# 22,22,22,22,22,22,22,22,22,22,\n# 22,22,22,22,22,22,22,22,22,22,\n# 22,22,22,22,22,])\ndli = dliList[0]\n\nsdmList = np.zeros(len(dliList)+1)\nd_sdmList = np.zeros(len(dliList)+1)\ndd_sdmList = np.zeros(len(dliList)+1)\n\nd_sdm = 0\nsdmAccumulated = 0\n\na = 0\nb = 0.4822\nc = -0.006225\nt = 35\ndt = 1\n# sdmInit = math.e ** (a + b * 0 + c * 0 **2)\nsdmInit = 0\nsdmList[0] = sdmInit\n\nfor t in range(1, 1+len(dliList)):\n a = -8.596 + 0.0743 * dli\n\n sdmList[t] = math.e ** (a + b * t + c * t**2)\n d_sdmList[t] = (b + 2 * c * t) * sdmList[t]\n dd_sdmList[t] = 2*c*sdmList[t] + (b + 2 * c * t)**2 * sdmList[t]\n # print \"d_sdmList[{}]:{}\".format(t, d_sdmList[t])\n # print \"dd_sdmList[{}]:{}\".format(t, dd_sdmList[t])\n\n # taylor expansion: x_0 = 0, h = 1 (source: http://eman-physics.net/math/taylor.html)\n sdmList[t] = sdmList[t-1] + d_sdmList[t-1] * dt + (1.0/(math.factorial(2)))*dd_sdmList[t-1]*((dt)**2)\n\n # print \"t = {} case3 sdmList[len(dliList)]: {}\".format(t, sdmList[t])\n\nfm = sdm/0.045\n\nprint \"t = {} case3 sdm (Taylor expansion 2nd order approximation): {}\".format(len(dliList), sdmList[len(dliList)])\n\n##############################################################################\n# case4\n#\n# dliList = np.array([22,22,22,22,22,22,22,22,22,22,\n# 22,22,22,22,22,22,22,22,22,22,\n# 22,22,22,22,22,22,22,22,22,22,\n# 22,22,22,22,22,])\ndli = dliList[0]\n\nsdmList = np.zeros(len(dliList)+1)\nd_sdmList = np.zeros(len(dliList)+1)\ndd_sdmList = np.zeros(len(dliList)+1)\nddd_sdmList = 
np.zeros(len(dliList)+1)\n\nd_sdm = 0\nsdmAccumulated = 0\n\na = 0\nb = 0.4822\nc = -0.006225\nt = 35\ndt = 1\n# sdmInit = math.e ** (a + b * 0 + c * 0 **2)\nsdmInit = 0.0001\nsdmList[0] = sdmInit\n# sdmList[0] = math.e ** (a + b * 0.0 + c * 0.0**2)\n\nd_sdmList[0] = (b + 2 * c * 0.0) * sdmList[0]\ndd_sdmList[0] = 2 * c * sdmList[0] + (b + 2 * c * 0.0) ** 2 * sdmList[0]\nddd_sdmList[0] = 2 * c * d_sdmList[0] + 4 * c * (b + 2 * c * t) * sdmList[0] + (b + 2 * c * 0.0) ** 2 * d_sdmList[0]\n\n\n\nfor t in range(1, 1+len(dliList)):\n a = -8.596 + 0.0743 * dli\n\n sdmList[t] = math.e ** (a + b * t + c * t**2)\n d_sdmList[t] = (b + 2 * c * t) * sdmList[t]\n dd_sdmList[t] = 2*c*sdmList[t] + (b + 2 * c * t)**2 * sdmList[t]\n ddd_sdmList[t] = 2*c*d_sdmList[t] + 4*c*(b + 2 * c * t) * sdmList[t] + (b + 2 * c * t)**2 * d_sdmList[t]\n # print \"d_sdmList[{}]:{}\".format(t, d_sdmList[t])\n # print \"dd_sdmList[{}]:{}\".format(t, dd_sdmList[t])\n # print \"ddd_sdmList[t]:{}\".format(t, ddd_sdmList[t])\n sdmList[t] = math.e ** (a + b * t + c * t**2)\n d_sdmList[t] = (b + 2 * c * t) * sdmList[t]\n dd_sdmList[t] = 2*c*sdmList[t] + (b + 2 * c * t)**2 * sdmList[t]\n\n\n\n # taylor expansion: x_0 = 0, h = 1 (source: http://eman-physics.net/math/taylor.html)\n sdmList[t] = sdmList[t-1] + \\\n 1.0/(math.factorial(1))*d_sdmList[t-1] * dt + \\\n 1.0/(math.factorial(2))*dd_sdmList[t-1]*((dt)**2) + \\\n 1.0/(math.factorial(3))*ddd_sdmList[t-1]*((dt)**3)\n\n # sdmList[t] = sdmList[t] + \\\n # 1.0/(math.factorial(1))*d_sdmList[t] * dt + \\\n # 1.0/(math.factorial(2))*dd_sdmList[t]*((dt)**2) + \\\n # 1.0/(math.factorial(3))*ddd_sdmList[t]*((dt)**3)\n\n # print \"t = {} case4 sdmList[len(dliList)]: {}\".format(t, sdmList[t])\n # print \"t = {} case4 (1/(math.factorial(3)))*ddd_sdmList[t-1]*((dt)**3): {}\".format(t, (1.0/(math.factorial(3)))*ddd_sdmList[t-1]*((dt)**3))\n # print \"1/(math.factorial(3)):{}\".format(1.0/(math.factorial(3)))\n # print 
\"1/(math.factorial(2)):{}\".format(1.0/(math.factorial(2)))\n # print \"t:{}, sdmList:{}\".format(t, sdmList)\n\nfm = sdm/0.045\n\nprint \"t = {} case4 sdmList (Taylor expansion 3rd order approximation): {}\".format(len(dliList), sdmList[len(dliList)])\n\n\n\n" }, { "alpha_fraction": 0.7388610243797302, "alphanum_fraction": 0.7496810555458069, "avg_line_length": 57.36073684692383, "blob_id": "0ac96feeb5835f4270ce900a59e89f7d3ff11264", "content_id": "1c760f7431e5eb9b3a52e9274b601f50b995e175", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60359, "license_type": "permissive", "max_line_length": 257, "num_lines": 1034, "path": "/CropElectricityYeildSimulatorDetail.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#######################################################\n# author :Kensaku Okada [[email protected]]\n# create date : 06 Nov 2016\n# last edit date: 14 Dec 2016\n#######################################################\n\n##########import package files##########\nfrom scipy import stats\nimport datetime\nimport sys\nimport os as os\nimport numpy as np\nimport ShadingCurtain\nimport matplotlib.pyplot as plt\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util\nimport OPVFilm\nimport Lettuce\nimport PlantGrowthModelE_J_VanHenten\nimport PlantGrowthModelS_Pearson1997\nimport SolarIrradianceMultiSpanRoof\nfrom dateutil.relativedelta import *\nimport GreenhouseEnergyBalance as energyBalance\n\n#######################################################\n\n# def setSimulationSpecifications(simulatorClass):\n# '''\n# reference of the model:\n# :return:\n# '''\n\ndef calcOPVmoduleSolarIrradianceGHRoof(simulatorClass, roofDirectionNotation=constant.roofDirectionNotation):\n '''\n calculate the every kind of solar irradiance (W/m^2) to the OPV film. 
The tilt angel and direction angle of OPV panes is define in CropElectricityYeildSimulatorConstant.py\n Reference of the model: A. yano et. at., 2009, \"Electrical energy generated by photovoltaic modules mounted inside the roof of a north–south oriented greenhouse\" and M. kacira et. al., \"\"\n Reference URL: \"https://www.actahort.org/books/1037/1037_9.htm\" and \"https://www.sciencedirect.com/science/article/pii/S0960148104000060\"\n '''\n\n year = simulatorClass.getYear()\n month = simulatorClass.getMonth()\n day = simulatorClass.getDay()\n hour = simulatorClass.getHour()\n hourlyHorizontalDiffuseOuterSolarIrradiance = simulatorClass.getImportedHourlyHorizontalDiffuseSolarRadiation()\n hourlyHorizontalDirectOuterSolarIrradiance = simulatorClass.getImportedHourlyHorizontalDirectSolarRadiation()\n\n # [rad] symbol: delta\n hourlyDeclinationAngle = OPVFilm.calcDeclinationAngle(year, month, day)\n # print \"hourlyDecl\tinationAngle:{}\".format(np.degrees(hourlyDeclinationAngle))\n # print \"hourlyDeclinationAngle:{}\".format(hourlyDeclinationAngle)\n\n # [rad] symbol: omega\n hourlySolarHourAngle = OPVFilm.getSolarHourAngleKacira2003(hour)\n # print (\"hourlySolarHourAngle by Kacira2003:{}\".format(np.degrees(hourlySolarHourAngle)))\n\n # # [rad] symbol: omega\n # hourlySolarHourAngle = OPVFilm.getSolarHourAngleYano2009(hour)\n # print (\"hourlySolarHourAngle by Yano2009:{}\".format(np.degrees(hourlySolarHourAngle)))\n\n # [rad] symbol: alpha. elevation angle = altitude angle\n hourlySolarAltitudeAngle = OPVFilm.calcSolarAltitudeAngle(hourlyDeclinationAngle, hourlySolarHourAngle)\n # print \"np.degrees(hourlySolarAltitudeAngle):{}\".format(np.degrees(hourlySolarAltitudeAngle))\n # print \"hourlySolarAltitudeAngle:{}\".format(hourlySolarAltitudeAngle)\n\n # set the solar altitude angle, which is necessary to calculate the solar radiation through multispan roof\n simulatorClass.hourlySolarAltitudeAngle = hourlySolarAltitudeAngle\n\n # [rad] symbol: beta. 
azimuth angle\n hourlySolarAzimuthAngle = OPVFilm.calcSolarAzimuthAngle(hourlyDeclinationAngle, hourlySolarAltitudeAngle, hourlySolarHourAngle)\n # print \"hourlySolarAzimuthAngle:{}\".format(hourlySolarAzimuthAngle)\n\n # set the solar azimuth angle, which is necessary to calculate the solar radiation through multispan roof\n simulatorClass.hourlySolarAzimuthAngle = hourlySolarAzimuthAngle\n\n # used only in Kacira 2003\n # [rad] symbol: theta_z\n hourlyZenithAngle = math.radians(90.0) - hourlySolarAltitudeAngle\n # print \"math.radians(90.0):{}\".format(math.radians(90.0))\n # print \"hourlyZenithAngle:{}\".format(hourlyZenithAngle)\n\n # if the direction of greenhouse is north-south and the roof tilt direction is east-west\n if roofDirectionNotation == \"EastWestDirectionRoof\":\n # [rad] symbol: phi_p\n # module azimuth angle (yano 2009) == surface azimuth angle (kacira 2003)\n # if the OPV module facing east\n hourlyModuleAzimuthAngleEast = math.radians(-90.0)\n # hourlyModuleAzimuthAngleEast = math.radians(180.0)\n # if the OPV module facing west\n hourlyModuleAzimuthAngleWest = math.radians(90.0)\n # hourlyModuleAzimuthAngleWest = math.radians(0.0)\n # if the direction of greenhouse is east-west and the roof tilt direction is north-south\n elif roofDirectionNotation == \"NorthSouthDirectionRoof\":\n hourlyModuleAzimuthAngleNorth = math.radians(180.0)\n # if the OPV module facing west\n hourlyModuleAzimuthAngleSouth = math.radians(0.0)\n\n # set the module azimuth angle, which is necessary to calculate the solar radiation through multispan roof\n simulatorClass.hourlyModuleAzimuthAngleEast = hourlyModuleAzimuthAngleEast\n simulatorClass.hourlyModuleAzimuthAngleWest = hourlyModuleAzimuthAngleWest\n\n # this computation is necessary to calculate the horizontal incidence angle for horizontal direct solar irradiance. 
This data is used at getDirectHorizontalSolarRadiation function\n hourlyModuleAzimuthAngleSouth = math.radians(0.0)\n hourlyHorizontalSolarIncidenceAngle = OPVFilm.calcSolarIncidenceAngleYano2009(hourlySolarAltitudeAngle, hourlySolarAzimuthAngle, hourlyModuleAzimuthAngleSouth, 0)\n # print \"hourlyHorizontalSolarIncidenceAngle:{}\".format(hourlyHorizontalSolarIncidenceAngle)\n\n if roofDirectionNotation == \"EastWestDirectionRoof\":\n #The incident angle of the beam sunlight on the module surface. [rad] symbol: theta_I\n hourlySolarIncidenceAngleEastDirection = OPVFilm.calcSolarIncidenceAngleYano2009(hourlySolarAltitudeAngle, hourlySolarAzimuthAngle, hourlyModuleAzimuthAngleEast)\n hourlySolarIncidenceAngleWestDirection = OPVFilm.calcSolarIncidenceAngleYano2009(hourlySolarAltitudeAngle, hourlySolarAzimuthAngle, hourlyModuleAzimuthAngleWest)\n # print(\"hourlySolarIncidenceAngleEastDirection:{}\".format(hourlySolarIncidenceAngleEastDirection))\n # print(\"hourlySolarIncidenceAngleWestDirection:{}\".format(hourlySolarIncidenceAngleWestDirection))\n # if the direction of greenhouse is east-west and the roof tilt direction is north-south\n elif roofDirectionNotation == \"NorthSouthDirectionRoof\":\n # The suitability of the output value is not examined because our greenhouse was \"EastWestDirectionRoof\" (= north-south direction greenhouse)\n hourlySolarIncidenceAngleEastDirection = OPVFilm.calcSolarIncidenceAngleYano2009(hourlySolarAltitudeAngle, hourlySolarAzimuthAngle, hourlyModuleAzimuthAngleNorth)\n hourlySolarIncidenceAngleWestDirection = OPVFilm.calcSolarIncidenceAngleYano2009(hourlySolarAltitudeAngle, hourlySolarAzimuthAngle, hourlyModuleAzimuthAngleSouth)\n # print (\"hourlySolarIncidenceAngleEastDirection:{}\".format(hourlySolarIncidenceAngleEastDirection))\n # print (\"hourlySolarIncidenceAngleWestDirection:{}\".format(hourlySolarIncidenceAngleWestDirection))\n\n # set the incidence angle\n simulatorClass.hourlySolarIncidenceAngleEastDirection = 
hourlySolarIncidenceAngleEastDirection\n simulatorClass.hourlySolarIncidenceAngleWestDirection = hourlySolarIncidenceAngleWestDirection\n\n # np.set_printoptions(threshold=np.inf)\n # print(\"hourlySolarIncidenceAngleEastDirection:{}\".format(hourlySolarIncidenceAngleEastDirection))\n # print(\"hourlySolarIncidenceAngleWestDirection:{}\".format(hourlySolarIncidenceAngleWestDirection))\n # np.set_printoptions(threshold=1000)\n\n # estimated horizontal solar irradiances [W m^-2]. these values are used only when estimating solar radiations.\n # symbol: I_DH.\n directHorizontalSolarRadiation = OPVFilm.getDirectHorizontalSolarRadiation(hourlySolarAltitudeAngle, hourlyHorizontalSolarIncidenceAngle)\n # print \"directHorizontalSolarRadiation:{}\".format(directHorizontalSolarRadiation)\n # set the data. this is necessary when estimating the solar irradiance under multi-span greenhouse with estimated data\n simulatorClass.directHorizontalSolarRadiation = directHorizontalSolarRadiation\n # symbol: I_S\n diffuseHorizontalSolarRadiation = OPVFilm.getDiffuseHorizontalSolarRadiation(hourlySolarAltitudeAngle, hourlyHorizontalSolarIncidenceAngle)\n simulatorClass.diffuseHorizontalSolarRadiation = diffuseHorizontalSolarRadiation\n # print \"diffuseHorizontalSolarRadiation:{}\".format(diffuseHorizontalSolarRadiation)\n # symbol: I_HT\n totalHorizontalSolarRadiation = directHorizontalSolarRadiation + diffuseHorizontalSolarRadiation\n simulatorClass.totalHorizontalSolarRadiation = totalHorizontalSolarRadiation\n\n # print \"totalHorizontalSolarRadiation:{}\".format(totalHorizontalSolarRadiation)\n\n # tilted surface solar radiation [W m^-2], real / estimated value branch is calculated in this functions\n # symbol: I_TD (= H_b at Kacira 2004). 
direct beam radiation on the tilted surface\n # print (\"call getDirectTitledSolarRadiation for east direction OPV\")\n directTiltedSolarRadiationEastDirection = OPVFilm.getDirectTitledSolarRadiation(simulatorClass, hourlySolarAltitudeAngle, hourlySolarIncidenceAngleEastDirection, \\\n hourlyHorizontalDirectOuterSolarIrradiance)\n # print (\"call getDirectTitledSolarRadiation for west direction OPV\")\n directTiltedSolarRadiationWestDirection = OPVFilm.getDirectTitledSolarRadiation(simulatorClass, hourlySolarAltitudeAngle, hourlySolarIncidenceAngleWestDirection, \\\n hourlyHorizontalDirectOuterSolarIrradiance)\n\n # print(\"directTiltedSolarRadiationEastDirection:{}\".format(directTiltedSolarRadiationEastDirection))\n # print(\"directTiltedSolarRadiationWestDirection:{}\".format(directTiltedSolarRadiationWestDirection))\n\n\n # symbol: I_TS (= H_d_p at Kacira 2004). diffused radiation on the tilted surface.\n diffuseTiltedSolarRadiation = OPVFilm.getDiffuseTitledSolarRadiation(simulatorClass, hourlySolarAltitudeAngle, diffuseHorizontalSolarRadiation, \\\n hourlyHorizontalDiffuseOuterSolarIrradiance)\n # print \"diffuseTiltedSolarRadiation:{}\".format(diffuseTiltedSolarRadiation)\n\n # symbol: I_Trho (= H_gr at Kacira 2004) (albedo radiation = reflectance from the ground)\n albedoTiltedSolarRadiation = OPVFilm.getAlbedoTitledSolarRadiation(simulatorClass, hourlySolarAltitudeAngle, totalHorizontalSolarRadiation, \\\n hourlyHorizontalDirectOuterSolarIrradiance+hourlyHorizontalDiffuseOuterSolarIrradiance)\n\n return directTiltedSolarRadiationEastDirection, directTiltedSolarRadiationWestDirection, diffuseTiltedSolarRadiation, albedoTiltedSolarRadiation\n\n # ####################################################################################################\n # # Stop execution here...\n # sys.exit()\n # # Move the above line to different parts of the assignment as you implement more of the functionality.\n # 
####################################################################################################\n\ndef getDailyElectricityYieldperArea(simulatorClass,hourlyOPVTemperature, directSolarRadiationToOPV, diffuseSolarRadiationToOPV,groundReflectedSolarradiationToOPV):\n '''\n calculate the daily electricity yield per area (m^2).\n :param hourlyOPVTemperature: [celsius]\n :param directSolarRadiationToOPV: [W/m^2]\n :param diffuseSolarRadiationToOPV: [W/m^2]\n :param groundReflectedSolarradiationToOPV:[W/m^2]\n :return:\n '''\n # print \"total solar irradiance:{}\".format(directSolarRadiationToOPV+diffuseSolarRadiationToOPV+groundReflectedSolarradiationToOPV)\n\n # [W/m^2] == [J/s/m^2] -> [J/m^2] per day\n dailyJopvoutperArea = OPVFilm.calcOPVElectricEnergyperArea(simulatorClass, hourlyOPVTemperature, directSolarRadiationToOPV+diffuseSolarRadiationToOPV+groundReflectedSolarradiationToOPV)\n # print \"dailyJopvout:{}\".format(dailyJopvout)\n\n return dailyJopvoutperArea\n\n\ndef setDirectSolarIrradianceThroughMultiSpanRoof(simulatorClass):\n '''\n calculate the solar irradiance to multi-span roof.\n this calculates the solar irradiance to the single span.\n the variables names follow the symbols in the reference.\n Reference of the model: T. Soriano. et al, 2004, \"A Study of Direct Solar Radiation Transmission in Asymmetrical Multi-span Greenhouses using Scale Models and Simulation Models\"\n source: https://www.sciencedirect.com/science/article/pii/S1537511004000455\n\n '''\n # get the direct solar radiation [W/m^2]. 
these values are not directly used to calculate the transmittance, but just used to check the existence of solar irradiance at each hour.
    directSolarRadiationToOPVEastFacingRoof = simulatorClass.getDirectSolarRadiationToOPVEastDirection()
    directSolarRadiationToOPVWestFacingRoof = simulatorClass.getDirectSolarRadiationToOPVWestDirection()
    # print("directSolarRadiationToOPVEastFacingRoof: {}".format(directSolarRadiationToOPVEastFacingRoof))
    # print("directSolarRadiationToOPVWestFacingRoof: {}".format(directSolarRadiationToOPVWestFacingRoof))

    # module azimuth of each roof facing the opposite direction [rad], which is a scalar value
    hourlyModuleAzimuthAngleEast = simulatorClass.hourlyModuleAzimuthAngleEast
    hourlyModuleAzimuthAngleWest = simulatorClass.hourlyModuleAzimuthAngleWest
    # print("hourlyModuleAzimuthAngleEast: {}".format(hourlyModuleAzimuthAngleEast))
    # print("hourlyModuleAzimuthAngleWest: {}".format(hourlyModuleAzimuthAngleWest))

    # angle between the incident ray and the horizontal axis perpendicular to the greenhouse span. This angle is symbolized with E in the reference paper [rad]
    EPerpendicularEastOrNorthFacingRoof = SolarIrradianceMultiSpanRoof.getAngleBetweenIncientRayAndHorizontalAxisPerpendicularToGHSpan(simulatorClass, hourlyModuleAzimuthAngleEast)
    EPerpendicularWestOrSouthFacingRoof = SolarIrradianceMultiSpanRoof.getAngleBetweenIncientRayAndHorizontalAxisPerpendicularToGHSpan(simulatorClass, hourlyModuleAzimuthAngleWest)

    # np.set_printoptions(threshold=np.inf)
    # print("EPerpendicularEastOrNorthFacingRoof: {}".format(EPerpendicularEastOrNorthFacingRoof))
    # print("EPerpendicularWestOrSouthFacingRoof: {}".format(EPerpendicularWestOrSouthFacingRoof))
    # np.set_printoptions(threshold=1000)

    # # Referring to Soriano. et al, (2004), it was found that we can get the direct solar irradiance to horizontal surface inside multi-span greenhouse just by
    # # multiplying the outer solar irradiance to horizontal surface with
    # # angle between the incident ray and the horizontal axis perpendicular to the greenhouse span. This angle is not symbolized in the reference paper.
    # # the following angles should be same in our case, but both were separately calculated for program expandability
    # EParallelEastOrNorthFacingRoof = SolarIrradianceMultiSpanRoof.getAngleBetweenIncientRayAndHorizontalAxisParallelToGHSpan(simulatorClass, hourlyModuleAzimuthAngleEast)
    # EParallelWestOrSouthFacingRoof = SolarIrradianceMultiSpanRoof.getAngleBetweenIncientRayAndHorizontalAxisParallelToGHSpan(simulatorClass, hourlyModuleAzimuthAngleWest)
    # # print("EParallelEastOrNorthFacingRoof: {}".format(EParallelEastOrNorthFacingRoof))
    # # print("EParallelWestOrSouthFacingRoof: {}".format(EParallelWestOrSouthFacingRoof))
    #
    # # get the T_mat for parallel irradiance
    # T_matForParallelIrrEastOrNorthFacingRoof = SolarIrradianceMultiSpanRoof.getTransmittanceForParallelIrrThroughMultiSpanRoof(simulatorClass,EParallelEastOrNorthFacingRoof)
    # T_matForParallelIrrWestOrSouthFacingRoof = SolarIrradianceMultiSpanRoof.getTransmittanceForParallelIrrThroughMultiSpanRoof(simulatorClass,EParallelWestOrSouthFacingRoof)
    # print("T_matForParallelIrrEastOrNorthFacingRoof: {}".format(T_matForParallelIrrEastOrNorthFacingRoof))
    # print("T_matForParallelIrrWestOrSouthFacingRoof: {}".format(T_matForParallelIrrWestOrSouthFacingRoof))

    # get the incidence angles
    hourlySolarIncidenceAngleEastDirection = simulatorClass.hourlySolarIncidenceAngleEastDirection
    hourlySolarIncidenceAngleWestDirection = simulatorClass.hourlySolarIncidenceAngleWestDirection

    # decompose the direct solar irradiance into the components perpendicular / parallel to each roof
    directSolarIrradiancePerpendicularToOPVEastDirection = directSolarRadiationToOPVEastFacingRoof * np.cos(hourlySolarIncidenceAngleEastDirection)
    directSolarIrradianceParallelToOPVEastDirection = directSolarRadiationToOPVEastFacingRoof * np.sin(hourlySolarIncidenceAngleEastDirection)
    directSolarIrradiancePerpendicularToOPVWestDirection = directSolarRadiationToOPVWestFacingRoof * np.cos(hourlySolarIncidenceAngleWestDirection)
    directSolarIrradianceParallelToOPVWestDirection = directSolarRadiationToOPVWestFacingRoof * np.sin(hourlySolarIncidenceAngleWestDirection)

    # np.set_printoptions(threshold=np.inf)
    # print("directSolarIrradiancePerpendicularToOPVEastDirection: {}".format(directSolarIrradiancePerpendicularToOPVEastDirection))
    # print("directSolarIrradianceParallelToOPVEastDirection: {}".format(directSolarIrradianceParallelToOPVEastDirection))
    # print("directSolarIrradiancePerpendicularToOPVWestDirection: {}".format(directSolarIrradiancePerpendicularToOPVWestDirection))
    # print("directSolarIrradianceParallelToOPVWestDirection: {}".format(directSolarIrradianceParallelToOPVWestDirection))
    # np.set_printoptions(threshold=1000)

    # get the T_mat (roof transmittance) for perpendicular irradiance
    # print("getTransmittanceForPerpendicularIrrThroughMultiSpanRoofFacingEastOrNorth start")

    # to avoid the error "RuntimeError: maximum recursion depth exceeded", the maximum recursion limitation is increased.
    # sys.setrecursionlimit(constant.mMax)
    # print("sys.getrecursionlimit():{}".format(sys.getrecursionlimit()))
    T_matForPerpendicularIrrEastOrNorthFacingRoof = SolarIrradianceMultiSpanRoof.getTransmittanceForPerpendicularIrrThroughMultiSpanRoofFacingEastOrNorth(\
        simulatorClass, directSolarIrradiancePerpendicularToOPVEastDirection, EPerpendicularEastOrNorthFacingRoof)
    # print("getTransmittanceForPerpendicularIrrThroughMultiSpanRoofFacingWestOrSouth start: {}")
    T_matForPerpendicularIrrWestOrSouthFacingRoof = SolarIrradianceMultiSpanRoof.getTransmittanceForPerpendicularIrrThroughMultiSpanRoofFacingWestOrSouth(\
        simulatorClass, directSolarIrradiancePerpendicularToOPVWestDirection, EPerpendicularWestOrSouthFacingRoof)
    # roll back the recursive limitation setting. The default number should be changed according to each local env.
    # sys.setrecursionlimit(constant.defaultIterationLimit)

    # set the data
    simulatorClass.T_matForPerpendicularIrrEastOrNorthFacingRoof = T_matForPerpendicularIrrEastOrNorthFacingRoof
    simulatorClass.T_matForPerpendicularIrrWestOrSouthFacingRoof = T_matForPerpendicularIrrWestOrSouthFacingRoof

    # the overall transmittance of multispanroof. The solar irradiance inside the greenhouse can be derived only by multiplying this with the outer solar irradiance for horizontal surface
    integratedT_mat = SolarIrradianceMultiSpanRoof.getIntegratedT_matFromBothRoofs(T_matForPerpendicularIrrEastOrNorthFacingRoof, T_matForPerpendicularIrrWestOrSouthFacingRoof)
    # set the data
    simulatorClass.integratedT_mat = integratedT_mat

    # np.set_printoptions(threshold=np.inf)
    # print("T_matForPerpendicularIrrEastOrNorthFacingRoof: {}".format(T_matForPerpendicularIrrEastOrNorthFacingRoof))
    # print("T_matForPerpendicularIrrWestOrSouthFacingRoof: {}".format(T_matForPerpendicularIrrWestOrSouthFacingRoof))
    # print("integratedT_mat:{}".format(integratedT_mat))
    # np.set_printoptions(threshold=1000)

    # get the solar irradiance inside: this branch uses the measured outer direct irradiance when real data is available
    if constant.ifUseOnlyRealData == True:
        hourlyDirectSolarRadiationAfterMultiSpanRoof = integratedT_mat * simulatorClass.hourlyHorizontalDirectOuterSolarIrradiance
    # this uses the estimated direct solar irradiance (without real data)
    else:
        hourlyDirectSolarRadiationAfterMultiSpanRoof = integratedT_mat * simulatorClass.directHorizontalSolarRadiation

    # set the solar irradiance [W/m^2]
    simulatorClass.setHourlyDirectSolarRadiationAfterMultiSpanRoof(hourlyDirectSolarRadiationAfterMultiSpanRoof)

    # unit change of the imported outer solar radiation: [W m^-2] -> [umol m^-2 s^-1] == PPFD
    hourlyDirectPPFDTAfterMultiSpanRoof = Util.convertFromWattperSecSquareMeterToPPFD(hourlyDirectSolarRadiationAfterMultiSpanRoof)
    # set the solar irradiance [umol m^-2 s^-1] == PPFD
    simulatorClass.setHourlyDirectPPFDAfterMultiSpanRoof(hourlyDirectPPFDTAfterMultiSpanRoof)

    # convert the unit into PPFD and DLI
    directDLIAfterMultiSpanRoof = Util.convertFromHourlyPPFDWholeDayToDLI(hourlyDirectPPFDTAfterMultiSpanRoof)
    # NOTE(review): this stores the DLI value through the PPFD setter, overwriting the hourly PPFD
    # stored just above -- looks like a DLI-specific setter was intended; confirm before relying on
    # getHourlyDirectPPFDAfterMultiSpanRoof downstream.
    simulatorClass.setHourlyDirectPPFDAfterMultiSpanRoof(directDLIAfterMultiSpanRoof)


def setSolarIrradianceToPlants(simulatorClass):
    '''
    # calculate the light intensity to plants after penetrating the roof, considering the sidewall material transmittance, shading curtain, and the greenhouse structure shading
    it was assumed ground reflectance does not significantly affect the solar irradiance to plants
    '''

    directSolarIrradianceBeforeShadingCurtain = OPVFilm.getDirectSolarIrradianceBeforeShadingCurtain(simulatorClass)
    # set the data to the object
    simulatorClass.directSolarIrradianceBeforeShadingCurtain = directSolarIrradianceBeforeShadingCurtain
    diffuseSolarIrradianceBeforeShadingCurtain = OPVFilm.getDiffuseSolarIrradianceBeforeShadingCurtain(simulatorClass)
    # set the data to the object
    simulatorClass.diffuseSolarIrradianceBeforeShadingCurtain = diffuseSolarIrradianceBeforeShadingCurtain

    # get the shading curtain transmittance (stored on simulatorClass by the callee)
    ShadingCurtain.getHourlyShadingCurtainDeploymentPatternChangingEachMonthMain(simulatorClass)
    # #############command to print out all array data
    # np.set_printoptions(threshold=np.inf)
    # print("simulatorClass.transmittanceThroughShadingCurtainChangingEachMonth:{}".format(simulatorClass.transmittanceThroughShadingCurtainChangingEachMonth))
    # np.set_printoptions(threshold=1000)
    # #############

    # calculate the light intensity to plants [W m-2]
    directSolarIrradianceToPlants = OPVFilm.getDirectSolarIrradianceToPlants(simulatorClass, directSolarIrradianceBeforeShadingCurtain)
    diffuseSolarIrradianceToPlants = 
OPVFilm.getDiffuseSolarIrradianceToPlants(simulatorClass, diffuseSolarIrradianceBeforeShadingCurtain)

    # # change this part if you want to see how plant fresh weight changes with the change of solar irradiance to plants
    # directSolarIrradianceToPlants = directSolarIrradianceToPlants * 2.0
    # diffuseSolarIrradianceToPlants = diffuseSolarIrradianceToPlants * 2.0

    # set the data to the object
    simulatorClass.directSolarIrradianceToPlants = directSolarIrradianceToPlants
    simulatorClass.diffuseSolarIrradianceToPlants = diffuseSolarIrradianceToPlants

    # #############command to print out all array data
    # np.set_printoptions(threshold=np.inf)
    # print("directSolarIrradianceToPlants:{}".format(directSolarIrradianceToPlants))
    # print("diffuseSolarIrradianceToPlants:{}".format(diffuseSolarIrradianceToPlants))
    # np.set_printoptions(threshold=1000)
    # #############

    # unit change of the imported outer solar radiation: [W m^-2] -> [umol m^-2 s^-1] == PPFD
    directPPFDToPlants = Util.convertFromWattperSecSquareMeterToPPFD(directSolarIrradianceToPlants)
    diffusePPFDToPlants = Util.convertFromWattperSecSquareMeterToPPFD(diffuseSolarIrradianceToPlants)
    # set the solar irradiance [umol m^-2 s^-1] == PPFD
    simulatorClass.directPPFDToPlants = directPPFDToPlants
    simulatorClass.diffusePPFDToPlants = diffusePPFDToPlants

    # convert the unit into PPFD and DLI
    directDLIToPlants = Util.convertFromHourlyPPFDWholeDayToDLI(directPPFDToPlants)
    diffuseDLIToPlants = Util.convertFromHourlyPPFDWholeDayToDLI(diffusePPFDToPlants)
    simulatorClass.directDLIToPlants = directDLIToPlants
    simulatorClass.diffuseDLIToPlants = diffuseDLIToPlants
    # #############command to print out all array data
    # np.set_printoptions(threshold=np.inf)
    # print("directDLIToPlants:{}".format(directDLIToPlants))
    # print("diffuseDLIToPlants:{}".format(diffuseDLIToPlants))
    # np.set_printoptions(threshold=1000)
    # #############

# def 
setThermalTimeToPlants(simulatorClass):
#     '''
#     calc/set the thermal time to the object: average Celsius temperature per day * days [Celsius d]
#     On the model, since it was assumed the temperature in the greenhouse is maintained at the set point by cooling system (pad and fan system), this function is not used.
#     '''
#     importedHourlyAirTemperature = simulatorClass.getImportedHourlyAirTemperature()
#
#     # TODO assume the greenhouse temperature from the outer air temperature
#     airTemperatureInGreenhouse = importedHourlyAirTemperature
#     simulatorClass.setThermalTimeToPlants(airTemperatureInGreenhouse)


def getPlantYieldSimulation(simulatorClass):
    '''
    calculate the daily plant yield, dispatching on the plant growth model configured in simulatorClass.

    :param cultivationDaysperHarvest: [days / harvest]
    :param OPVAreaCoverageRatio: [-] range(0-1)
    :param directPPFDToOPV: hourly average [umol m^-2 s^-1] == PPFD
    :param diffusePPFDToOPV: hourly average [umol m^-2 s^-1] == PPFD
    :param groundReflectedPPFDToOPV: hourly average [umol m^-2 s^-1] == PPFD
    :param hasShadingCurtain: Boolean
    :param ShadingCurtainDeployPPFD: float [umol m^-2 s^-1] == PPFD
    :param cropElectricityYieldSimulator1: object
    :return: (dailyShootFreshMassList, dailyUnitDailyFreshWeightIncrease,
              dailyAccumulatedUnitDailyFreshWeightIncrease, dailyUnitHarvestedFreshWeight),
              each daily-resolution, fresh weight per head [g/head]
    '''
    plantGrowthModel = simulatorClass.getPlantGrowthModel()

    # get cultivation days per harvest. this may not be used in some plant growth models
    cultivationDaysperHarvest = simulatorClass.getCultivationDaysperHarvest()
    # OPVAreaCoverageRatio = simulatorClass.getOPVAreaCoverageRatio()
    # hasShadingCurtain = simulatorClass.getIfHasShadingCurtain()
    # ShadingCurtainDeployPPFD = simulatorClass.getShadingCurtainDeployPPFD()

    # # This unit conversion was done at getSolarIrradianceToPlants
    # # calculate the light intensity to plants
    # # hourly average PPFD [umol m^-2 s^-1]
    # hourlyInnerPPFDToPlants = OPVFilm.calcHourlyInnerLightIntensityPPFD(directPPFDToMultiSpanRoof + diffusePPFDToMultiSpanRoof + groundReflectedPPFDToMultiSpanRoof, \
    #     OPVAreaCoverageRatio, constant.OPVPARTransmittance, hasShadingCurtain,ShadingCurtainDeployPPFD, simulatorClass)

    # np.set_printoptions(threshold=np.inf)
    # print "OPVAreaCoverageRatio:{}, directPPFDToOPV+diffusePPFDToOPV+groundReflectedPPFDToOPV:{}".format(OPVAreaCoverageRatio, directPPFDToOPV+diffusePPFDToOPV+groundReflectedPPFDToOPV)
    # np.set_printoptions(threshold=1000)

    # calculate the daily increase of unit fresh weight
    # this model considers only solar irradiance, and so this will not be so practical
    # the simulated cultivar is butter head lettuce
    if plantGrowthModel == constant.A_J_Both_Modified_TaylorExpantionWithFluctuatingDLI:
        # unit [g/head]
        shootFreshMassList, unitDailyFreshWeightIncrease, accumulatedUnitDailyFreshWeightIncrease, unitHarvestedFreshWeight = \
            Lettuce.calcUnitDailyFreshWeightBoth2003TaylorExpantionWithVaryingDLI(simulatorClass.directPPFDToPlants + simulatorClass.diffusePPFDToPlants, cultivationDaysperHarvest, simulatorClass)
        # print "shootFreshMassList.shape:{}".format(shootFreshMassList.shape)

    # the simulated cultivar is Berlo and Norden
    elif plantGrowthModel == constant.E_J_VanHenten1994:
        # unit [g/head]
        shootFreshMassList, \
        unitDailyFreshWeightIncrease, \
        accumulatedUnitDailyFreshWeightIncrease, \
        unitHarvestedFreshWeight = \
            PlantGrowthModelE_J_VanHenten.calcUnitDailyFreshWeightE_J_VanHenten1994(simulatorClass)
        # print("shootFreshMassList.shape[0]:{}".format(shootFreshMassList.shape[0]))
        # print("unitDailyFreshWeightIncrease.shape[0]:{}".format(unitDailyFreshWeightIncrease.shape[0]))
        # print("accumulatedUnitDailyFreshWeightIncrease.shape[0]:{}".format(accumulatedUnitDailyFreshWeightIncrease.shape[0]))
        # print("unitHarvestedFreshWeight.shape[0]:{}".format(unitHarvestedFreshWeight.shape[0]))

        # set the data
        simulatorClass.shootFreshMassList = shootFreshMassList

        # Be careful! this model returns hourly weight, not daily weight. so convert the hourly value into daily value.
        # index 23 == the last hour of each day; stride == hours per day
        dailyShootFreshMassList = shootFreshMassList[23::constant.hourperDay]
        # print("dailyShootFreshMassList:{}".format(dailyShootFreshMassList))

        # dailyUnitDailyFreshWeightIncrease = np.array(sum[ unitDailyFreshWeightIncrease[constant.hourperDay*(i-1):constant.hourperDay*i]] \
        #     for i in range (0, unitDailyFreshWeightIncrease.shape[0]/constant.hourperDay ))
        dailyUnitDailyFreshWeightIncrease = Lettuce.getFreshWeightIncrease(dailyShootFreshMassList)
        dailyAccumulatedUnitDailyFreshWeightIncrease = Lettuce.getAccumulatedFreshWeightIncrease(dailyShootFreshMassList)
        dailyUnitHarvestedFreshWeight = Lettuce.getHarvestedFreshWeight(dailyShootFreshMassList)
        # print("dailyUnitDailyFreshWeightIncrease.shape:{}".format(dailyUnitDailyFreshWeightIncrease.shape))
        # print("dailyAccumulatedUnitDailyFreshWeightIncrease.shape:{}".format(dailyAccumulatedUnitDailyFreshWeightIncrease.shape))
        # print("dailyUnitHarvestedFreshWeight.shape:{}".format(dailyUnitHarvestedFreshWeight.shape))
        # print("dailyUnitHarvestedFreshWeight:{}".format(dailyUnitHarvestedFreshWeight))

    # this model was coded, but the result was not better than constant.E_J_VanHenten1994
    elif plantGrowthModel == constant.S_Pearson1997:
        # unit [g/head]
        dailyShootFreshMassList, \
        dailyUnitDailyFreshWeightIncrease, \
        dailyAccumulatedUnitDailyFreshWeightIncrease, \
        dailyUnitHarvestedFreshWeight = \
            PlantGrowthModelS_Pearson1997.calcUnitDailyFreshWeightS_Pearson1997(simulatorClass)

    else:
        print ("no valid model name is assigned. Stop the simulation. Please choose a registered one")
        ####################################################################################################
        # Stop execution here...
        sys.exit()
        # Move the above line to different parts of the assignment as you implement more of the functionality.
        ####################################################################################################

    # set the values to the object
    simulatorClass.dailyShootFreshMass = dailyShootFreshMassList
    simulatorClass.dailyUnitDailyFreshWeightIncrease = dailyUnitDailyFreshWeightIncrease
    simulatorClass.dailyAccumulatedUnitDailyFreshWeightIncrease = dailyAccumulatedUnitDailyFreshWeightIncrease
    simulatorClass.dailyUnitHarvestedFreshWeight = dailyUnitHarvestedFreshWeight

    return dailyShootFreshMassList, dailyUnitDailyFreshWeightIncrease, dailyAccumulatedUnitDailyFreshWeightIncrease, dailyUnitHarvestedFreshWeight


def getTotalDLIToPlants(OPVAreaCoverageRatio, directPPFDToOPV, diffusePPFDToOPV, groundReflectedPPFDToOPV, hasShadingCurtain, ShadingCurtainDeployPPFD, \
    cropElectricityYieldSimulator1):
    '''
    the daily light integral to plants for the given simulation period.

    :param OPVAreaCoverageRatio: [-] range(0-1)
    :param directPPFDToOPV: hourly average [umol m^-2 s^-1]
    :param diffusePPFDToOPV: hourly average [umol m^-2 s^-1]
    :param groundReflectedPPFDToOPV: hourly average [umol m^-2 s^-1]
    :param hasShadingCurtain: Boolean
    :param ShadingCurtainDeployPPFD: [umol m^-2 s^-1]
    :param cropElectricityYieldSimulator1: instance
    :return: daily light integral to plants [mol m^-2 d^-1]
    '''

    # calculate the light intensity to plants
    # hourly average PPFD [umol m^-2 s^-1]
    hourlyInnerPPFDToPlants = OPVFilm.calcHourlyInnerLightIntensityPPFD(directPPFDToOPV+diffusePPFDToOPV+groundReflectedPPFDToOPV, \
        OPVAreaCoverageRatio, constant.OPVPARTransmittance, hasShadingCurtain,ShadingCurtainDeployPPFD, 
cropElectricityYieldSimulator1)

    # convert PPFD to DLI
    innerDLIToPlants = Util.convertFromHourlyPPFDWholeDayToDLI(hourlyInnerPPFDToPlants)
    # print "innerDLIToPlants:{}".format(innerDLIToPlants)

    return innerDLIToPlants


def penalizeDailyHarvestedFreshWeightPerHead(dailyHarvestedFreshWeightPerHead, simulatorClass):
    '''
    Penalize the harvested fresh weight on harvest days when the average DLI of the cultivation
    cycle exceeded the optimum light intensity.

    the function was made based on the data of plant fresh weights for 400 600, and 800 PPFD (umol m^-2 s^-1) in the source below:
    Table 1 at "Effects of different light intensities on anti-oxidative enzyme activity, quality and biomass in lettuce, Weiguo Fu, Pingping Li, Yanyou Wu, Juanjuan Tang"

    The parameters were derived with the solver of Excel 2007, the process is written in "penalizePlantYieldBySolarRadiation.xlsx"

    Table 1. Quality and biomass of above-ground part of lettuce under different light intensity treatments
    Light intensity (μmol m-2 s-1)    Biomass of above-ground part (g plant-1, FW)
    100    127.98 ± 8.32
    200    145.65 ± 7.53
    400    158.45 ± 6.21
    600    162.89 ± 7.13
    800    135.56 ± 5.76

    :param dailyHarvestedFreshWeightPerHead: daily array, non-zero only on harvest days [g/head]
    :param simulatorClass: simulator state object (reads totalDLItoPlants and the growth model)
    :return: daily array of penalized harvested fresh weight [g/head]
    '''

    penalizedUnitDailyHarvestedFreshWeight = np.zeros(dailyHarvestedFreshWeightPerHead.shape[0])

    # the DLI including both direct and diffuse to plants
    totalDLItoPlants = simulatorClass.totalDLItoPlants
    # print("totalDLItoPlants:{}".format(totalDLItoPlants))

    # get the average DLI of each cultivation cycle.
    if simulatorClass.getPlantGrowthModel() == constant.A_J_Both_Modified_TaylorExpantionWithFluctuatingDLI:
        averageDLIonEachCycle = simulatorClass.getAverageDLIonEachCycle()
    else:
        averageDLIonEachCycle = np.zeros(simulatorClass.totalDLItoPlants.shape[0])

    nextCultivationStartDay = 0
    for i in range(0, simulatorClass.totalDLItoPlants.shape[0]):

        # if the date is not the harvest date, then skip.
        if dailyHarvestedFreshWeightPerHead[i] == 0.0:
            continue
        # Right now, E_J_VanHenten1994 is assumed
        else:
            # calc the average DLI during each cultivation cycle
            averageDLIonEachCycle[i] = np.mean(totalDLItoPlants[nextCultivationStartDay:i + 1])
            print("i:{}, averageDLIonEachCycle:{}".format(i, averageDLIonEachCycle[i]))
            # update lastHarvestDay
            # It was assumed to take 3 days to the next cultivation cycle assuming "transplanting shock prevented growth during the first 48 h", and it takes one day for preparation.
            nextCultivationStartDay = i + 3

    # print("averageDLIonEachCycle:{}".format(averageDLIonEachCycle))

    # parameters, which are derived from the data of the reference
    photoPriod = {"hour":14.0}
    optimumLightIntensityDLI = {"mol m-2 d-1": 26.61516313}
    # maximumYieldFW = {"g unit-1": 164.9777479}
    # NOTE(review): getPenalizedUnitFreshWeight returns a plain float, but maximumYieldFW is later
    # indexed as maximumYieldFW["g unit-1"] -- that lookup would raise TypeError on a harvest day.
    # The commented-out dict literal above suggests the intended value; confirm and fix.
    maximumYieldFW = getPenalizedUnitFreshWeight(optimumLightIntensityDLI["mol m-2 d-1"])
    # print("maximumYieldFW:{}".format(maximumYieldFW))
    # convert PPFD to DLI
    # optimumLightIntensityPPFD = {"umol m-2 s-1": 524.1249999}
    # optimumLightIntensityDLI = {"mol m-2 d-1": optimumLightIntensityPPFD["umol m-2 s-1"] * constant.secondperMinute * constant.minuteperHour * photoPriod["hour"] / 1000000.0}

    i = 0
    while i < dailyHarvestedFreshWeightPerHead.shape[0]:

        # if the date is not the harvest date, then skip.
        if dailyHarvestedFreshWeightPerHead[i] == 0.0:
            i += 1
            continue
        else:

            print ("averageDLIonEachCycle:{}".format(averageDLIonEachCycle[i]))
            print ("dailyHarvestedFreshWeightPerHead[i]:{}".format(dailyHarvestedFreshWeightPerHead[i]))
            print("getPenalizedUnitFreshWeight(averageDLIonEachCycle[i]):{}, i:{}".format(getPenalizedUnitFreshWeight(averageDLIonEachCycle[i]), i))

            if averageDLIonEachCycle[i] > optimumLightIntensityDLI["mol m-2 d-1"] and getPenalizedUnitFreshWeight(averageDLIonEachCycle[i]) > 0.0:
                # penalize the plant fresh weight proportionally to the drop predicted by the quadratic fit
                print ("penaize the fresh weight, i:{}".format(i))
                penalizedUnitDailyHarvestedFreshWeight[i] = dailyHarvestedFreshWeightPerHead[i] - dailyHarvestedFreshWeightPerHead[i] / maximumYieldFW["g unit-1"] * (maximumYieldFW["g unit-1"] - 
getPenalizedUnitFreshWeight(averageDLIonEachCycle[i]))

                print("penalizedUnitDailyHarvestedFreshWeight[i]:{}".format(penalizedUnitDailyHarvestedFreshWeight[i]))
                print("unitDailyHarvestedFreshWeight[i]:{}".format(dailyHarvestedFreshWeightPerHead[i]))

            # if the penalty is too strong and the weight becomes zero
            elif averageDLIonEachCycle[i] > optimumLightIntensityDLI["mol m-2 d-1"] and getPenalizedUnitFreshWeight(averageDLIonEachCycle[i]) <= 0.0:
                print ("the light intensity may be too strong. The yield was penalized to zero")
                penalizedUnitDailyHarvestedFreshWeight[i] = 0.0

            # if no penalization occurred
            else:
                penalizedUnitDailyHarvestedFreshWeight[i] = dailyHarvestedFreshWeightPerHead[i]
        i += 1

    return penalizedUnitDailyHarvestedFreshWeight


def getPenalizedUnitFreshWeight(lightIntensityDLI):
    '''
    Quadratic fit of lettuce unit fresh weight [g/head] against DLI [mol m-2 d-1].
    The following parameters were derived from the source mentioned at penalizeDailyHarvestedFreshWeightPerHead.

    :param lightIntensityDLI: daily light integral [mol m-2 d-1]
    :return: predicted unit fresh weight [g/head] (can be negative for very large DLI)
    '''
    a = -0.1563
    b = 8.3199
    c = 54.26
    return a * lightIntensityDLI**2 + b * lightIntensityDLI + c


def getWholeElectricityYieldEachOPVRatio(OPVAreaCoverageRatio, dailyJopvout, cropElectricityYieldSimulator1, greenhouseRoofArea = None):
    '''
    return the total electricity yield for a given period by the given OPV area (OPVAreaCoverageRatio * constant.greenhouseRoofArea).

    :param OPVAreaCoverageRatio: [-] proportion OPVAreaCoverageRatio
    :param dailyJopvout: [J/m^2] per day
    :param cropElectricityYieldSimulator1: simulator instance
    :param greenhouseRoofArea: optional roof area override [m^2]; falls back to constant.greenhouseRoofArea
    :return: total electricity yield produced by the whole OPV area over the period [J]
    '''

    # get the OPV coverage ratio changing during the fallow period
    unfixedOPVCoverageRatio = OPVFilm.getDifferentOPVCoverageRatioInSummerPeriod(OPVAreaCoverageRatio, cropElectricityYieldSimulator1)
    # change the num of list from hourly data (365 * 24) to daily data (365)
    unfixedOPVCoverageRatio = unfixedOPVCoverageRatio[::24]

    if greenhouseRoofArea is None:
        return sum(dailyJopvout * unfixedOPVCoverageRatio * constant.greenhouseRoofArea)
    else:
        return sum(dailyJopvout * 
unfixedOPVCoverageRatio * greenhouseRoofArea)
    # # print "dailyJopvout:{}".format(dailyJopvout)
    # totalJopvout = sum(dailyJopvout)
    # if greenhouseRoofArea is None:
    #     return totalJopvout * unfixedOPVCoverageRatio * constant.greenhouseRoofArea
    # else:
    #     return totalJopvout * unfixedOPVCoverageRatio * greenhouseRoofArea


def getMonthlyElectricitySalesperArea(dailyJopvoutperArea, yearOfeachDay, monthOfeachDay, simulatorClass):
    '''
    Aggregate the daily electricity yield into monthly sales revenue per unit OPV area,
    priced with the imported monthly residential electricity retail price.

    :param dailyJopvoutperArea: daily electricity yield per area [J/m^2/day]
    :param yearOfeachDay: year of each simulated day
    :param monthOfeachDay: month of each simulated day
    :param simulatorClass: simulator state object (the raw retail-price table is stored on it)
    :return: monthly electricity sales per area [USD/month/m^2]
    '''
    # unit: J/m^2/month
    monthlyElectricityYieldperArea = OPVFilm.getMonthlyElectricityProductionFromDailyData(dailyJopvoutperArea, yearOfeachDay, monthOfeachDay)
    # print("monthlyElectricityYieldperArea:{}".format(monthlyElectricityYieldperArea))

    # import the electricity sales price file: source (download the CSV file): https://www.eia.gov/electricity/data/browser/#/topic/7?agg=0,1&geo=0000000001&endsec=vg&freq=M&start=200101&end=201802&ctype=linechart&ltype=pin&rtype=s&maptype=0&rse=0&pin=
    fileName = constant.averageRetailPriceOfElectricityMonthly
    # import the file removing the header
    fileData = Util.readData(fileName, relativePath="", skip_header=1, d='\t')
    # print ("fileData:{}".format(fileData))

    simulatorClass.monthlyElectricityRetailPrice = fileData

    # print "monthlyElectricityYieldperArea.shape[0]:{}".format(monthlyElectricityYieldperArea.shape[0])
    # year = np.zeros(monthlyElectricityYieldperArea.shape[0])
    # month = np.zeros(monthlyElectricityYieldperArea.shape[0])
    monthlyResidentialElectricityPrice = np.zeros(monthlyElectricityYieldperArea.shape[0])

    index = 0
    for monthlyData in fileData:
        # exclude the data out of the set start month and end month
        # print("monthlyData:{}".format(monthlyData))
        # monthlyData layout: [0] month, [1] year, [2] residential retail price [Cents/kWh]
        if datetime.date(int(monthlyData[1]), int(monthlyData[0]), 1) + relativedelta(months=1) <= Util.getStartDateDateType() or \
            datetime.date(int(monthlyData[1]), int(monthlyData[0]), 1) > Util.getEndDateDateType():
            continue

        # year[index] = monthlyData[1]
        # month[index] = monthlyData[0]
        # take the residential electricity retail price
        monthlyResidentialElectricityPrice[index] = monthlyData[2]

        # print "monthlyData:{}".format(monthlyData)
        index += 1

    # print("monthlyResidentialElectricityPrice[Cents/kwh]:{}".format(monthlyResidentialElectricityPrice))

    # unit exchange: [J/m^2] -> [wh/m^2]
    monthlyWhopvoutperArea = Util.convertFromJouleToWattHour(monthlyElectricityYieldperArea)
    # unit exchange: [wh/m^2] -> [kwh/m^2]
    monthlyKWhopvoutperArea = Util.convertWhTokWh(monthlyWhopvoutperArea)
    # print("monthlyKWhopvoutperArea[kwh/m^2]:{}".format(monthlyKWhopvoutperArea))
    # [USD/month/m^2]
    monthlyElectricitySalesperArea = OPVFilm.getMonthlyElectricitySalesperArea(monthlyKWhopvoutperArea, monthlyResidentialElectricityPrice)
    # print "monthlyElectricitySalesperArea:{}".format(monthlyElectricitySalesperArea)

    return monthlyElectricitySalesperArea


def getMonthlyElectricitySales(OPVCoverage, monthlyElectricitySalesperArea, greenhouseRoofArea = None):
    '''
    return the monthly electricity sales given a certain OPV coverage ratio.

    :param OPVCoverage: [-] OPV coverage ratio of the roof
    :param monthlyElectricitySalesperArea: [USD/month/m^2]
    :param greenhouseRoofArea: optional roof area override [m^2]; falls back to constant.greenhouseRoofArea
    :return: monthly electricity sales [USD/month]
    '''
    if greenhouseRoofArea is None:
        return monthlyElectricitySalesperArea * OPVCoverage * constant.greenhouseRoofArea
    else:
        return monthlyElectricitySalesperArea * OPVCoverage * greenhouseRoofArea

# placeholder: not implemented yet, always returns 0
def getElectricitySalesperAreaEachOPVRatio():
    return 0

# placeholder: not implemented yet, always returns 0
def getElectricityCostperArea():
    return 0


def getPlantSalesperSquareMeter(simulatorClass):
    """
    calculate the sales price of lettuce per square meter.
    The referred price is Lettuce, romaine, per lb. 
(453.6 gm) in U.S ( Northeast region: Connecticut, Maine, Massachusetts, New Hampshire, New Jersey, New York, Pennsylvania, Rhode Island, and Vermont.), city average, average price, not seasonally adjusted
    reference URL: https://data.bls.gov/timeseries/APU0000FL2101?amp%253bdata_tool=XGtable&output_view=data&include_graphs=true

    :param simulatorClass: simulator state object (reads totalDLItoPlants)
    :return: plant sales per square meter [USD/m^2/day]
    """

    # get the following data from the object
    totalDLIToPlants = simulatorClass.totalDLItoPlants

    #################### this conversion is not used any more ####################
    # # the price of lettuce per hundredweight [cwt]
    # priceperCwtEachHour = Lettuce.getLettucePricepercwt(year)
    # # unit conversion: cwt -> kg
    # priceperKgEachHour = priceperCwtEachHour / constant.kgpercwt * constant.plantPriceDiscountRatio_justForSimulation
    # # print "harvestedFreshWeightListperAreaKg:{}".format(harvestedFreshWeightListperAreaKg)
    # # print "dailyHarvestedFreshWeightListperAreaKg.shape:{}".format(dailyHarvestedFreshWeightListperAreaKg.shape)
    # # print "priceperKg:{}".format(priceperKg)
    # # convert the price each hour to the price each day
    # priceperKgEachDay = priceperKgEachHour[::24]
    # # print "priceperKgEachDay:{}".format(priceperKgEachDay)
    # # print "priceperKgEachDay.shape:{}".format(priceperKgEachDay.shape)
    #################################################################################0
    # get the retail price of lettuce harvested at each cycle
    # unit: USD/m^2/day
    plantSalesPerSquareMeter = Lettuce.getRetailPricePerArea(simulatorClass)
    # print ("plantSalesPerSquareMeter:{}".format(plantSalesPerSquareMeter))

    if constant.IfConsiderDiscountByTipburn == True:
        # Tipburn discount
        # TODO: need to refine more
        # NOTE(review): the discounted value is bound to "plantSalesperSquareMeter" (lowercase p),
        # a different name from the "plantSalesPerSquareMeter" returned below -- the tipburn
        # discount therefore appears to have no effect; confirm the intended variable.
        plantSalesperSquareMeter = Lettuce.discountPlantSalesperSquareMeterByTipburn(plantSalesPerSquareMeter, totalDLIToPlants)

    return plantSalesPerSquareMeter

def getGreenhouseOperationCostForGrowingPlants(simulatorClass):
    '''
    This function estimates the economic cost for cooling and heating a greenhouse by simulating the energy balance model of a greenhouse.

    :param simulatorClass: simulator state object (Q_vW is read from it after the energy-balance run)
    :return: (totalHeatingCostForPlants, totalCoolingCostForPlants) [USD]
    '''

    # get environment data to calculate the energy for cooling and heating

    # the energy for cooling and heating (stored on simulatorClass by the callee)
    energyBalance.getGHEnergyConsumptionByCoolingHeating(simulatorClass)
    # unit: W
    Q_vW = simulatorClass.Q_vW
    # ############command to print out all array data
    # np.set_printoptions(threshold=np.inf)
    # print("Q_vW:{}".format(Q_vW))
    # np.set_printoptions(threshold=1000)
    # ############

    # if the energy balance is minus, we need to heat to maintain the internal temperature. [W m]
    requiredHeatingEnergyForPlants = np.array([-Q_vW["coolingOrHeatingEnergy W"][i] if Q_vW["coolingOrHeatingEnergy W"][i] < 0.0 else 0.0 for i in range (Q_vW["coolingOrHeatingEnergy W"].shape[0])])
    # if the energy balance is plus, we need to cool to maintain the internal temperature. [W m]
    requiredCoolingEnergyForPlants = np.array([Q_vW["coolingOrHeatingEnergy W"][i] if Q_vW["coolingOrHeatingEnergy W"][i] > 0.0 else 0.0 for i in range (Q_vW["coolingOrHeatingEnergy W"].shape[0])])


    # ############command to print out all array data
    # np.set_printoptions(threshold=np.inf)
    # print("requiredCoolingEnergyForPlants:{}".format(requiredCoolingEnergyForPlants))
    # np.set_printoptions(threshold=1000)
    # ############

    # unit: USD
    totalHeatingCostForPlants = energyBalance.getGHHeatingEnergyCostForPlants(requiredHeatingEnergyForPlants, simulatorClass)
    totalCoolingCostForPlants = energyBalance.getGHCoolingEnergyCostForPlants(requiredCoolingEnergyForPlants, simulatorClass)
    simulatorClass.totalHeatingCostForPlants = totalHeatingCostForPlants
    simulatorClass.totalCoolingCostForPlants = totalCoolingCostForPlants

    # unit: USD m-2
    totalHeatingCostForPlantsPerGHFloorArea = totalHeatingCostForPlants / constant.greenhouseFloorArea
    totalCoolingCostForPlantsPerGHFloorArea = totalCoolingCostForPlants / constant.greenhouseFloorArea
    simulatorClass.totalHeatingCostForPlantsPerGHFloorArea = totalHeatingCostForPlantsPerGHFloorArea
    simulatorClass.totalCoolingCostForPlantsPerGHFloorArea = totalCoolingCostForPlantsPerGHFloorArea


    return totalHeatingCostForPlants, totalCoolingCostForPlants


def getLaborCost(simulatorClass):
    """
    get the total labor cost during the simulation period

    :param simulatorClass: simulator state object (reads harvestedShootFreshMassPerAreaKgPerDay)
    :return: total labor cost [USD]
    """

    harvestedShootFreshMassPerAreaKgPerDay = simulatorClass.harvestedShootFreshMassPerAreaKgPerDay
    # unit:kg
    totalHarvestedShootFreshMass = sum(harvestedShootFreshMassPerAreaKgPerDay) * constant.greenhouseCultivationFloorArea
    # print("totalHarvestedShootFreshMass:{}".format(totalHarvestedShootFreshMass))

    # source: https://onlinelibrary.wiley.com/doi/abs/10.1111/cjag.12161
    # unit: [labors/10000 kg yield]
    necessaryLaborPer10000kgYield = constant.necessaryLaborPer10000kgYield

    # source:https://www.bls.gov/regions/west/news-release/occupationalemploymentandwages_tucson.htm
    # unit:USD/labor/hour
    hourlyWagePerPerson = constant.hourlyWagePerPerson

    # unit:hour/day
    workingHourPerDay = constant.workingHourPerDay

    # labor headcount scaled by yield, times daily working hours, wage, and simulation length
    totalLaborCost = (totalHarvestedShootFreshMass / 10000.0) * necessaryLaborPer10000kgYield * workingHourPerDay * hourlyWagePerPerson * Util.getSimulationDaysInt()
    # print("totalLaborCost:{}".format(totalLaborCost))

    return totalLaborCost


def getPlantCostperSquareMeter(simulationDays):
    '''
    calculate the cost for plant cultivation for the given period, pro-rated from the yearly cost.

    :param simulationDays: number of simulated days
    :return: plant cultivation cost [USD/m^2]
    '''
    # [USD/m^2]
    return constant.plantcostperSquaremeterperYear * simulationDays / constant.dayperYear






################################################# old code below################################

def calcOptimizedOPVAreaMaximizingtotalEconomicProfit(OPVAreaVector, totalEconomicProfitperYearVector):
    '''
    determine the best OPVArea maximizing the economic profit
    param:
        OPVAreaVector
        totalEconomicProfitperYearVector
    return:
        none
    '''
    
maxtotalEconomicProfitperYear = np.max(totalEconomicProfitperYearVector)\n bestOPVArea = OPVAreaVector[np.argmax(totalEconomicProfitperYearVector)]\n print (\"The OPV area maximizing the economic profit is {}m^2 the max economic profit is {}USD/year \".format(bestOPVArea, maxtotalEconomicProfitperYear))\n\n\ndef trainWeightsRLShadingCurtainDayStep(hasShadingCurtain, qLearningAgentsShadingCurtain=None, cropElectricityYieldSimulator1 = None):\n '''\n\n :param hasShadingCurtain:\n :param cropElectricityYieldSimulator1:\n :return:\n '''\n\n if hasShadingCurtain:\n\n # # set values necessary for RL training/testing\n # # for dLIEachdayThroughInnerStructure on a certain day\n # hourlyInnerLightIntensityPPFDThroughInnerStructure = cropElectricityYieldSimulator1.getHourlyInnerLightIntensityPPFDThroughInnerStructure()\n # # set dLIThroughInnerStructure to the object\n # dLIThroughInnerStructure = Util.convertFromHourlyPPFDWholeDayToDLI(hourlyInnerLightIntensityPPFDThroughInnerStructure)\n # qLearningAgentsShadingCurtain.setDLIThroughInnerStructure(dLIThroughInnerStructure)\n\n print (\"training parameters: epsilon={}, gamma={}, alpha={}, period:{}\".format(\\\n qLearningAgentsShadingCurtain.epsilon, qLearningAgentsShadingCurtain.gamma, qLearningAgentsShadingCurtain.alpha, constant.SimulationStartDate + \"-\" + constant.SimulationEndDate))\n for trainingIteration in range (0, qLearningAgentsShadingCurtain.numTraining):\n\n if trainingIteration % 100 == 0:\n # print(\"Iteration checkpoint: datetime.datetime.now():{}. 
trainingIteration:{}\".format(datetime.datetime.now(), trainingIteration ))\n print(\"trainingIteration: {}, qLearningAgentsShadingCurtain.weights:{}, datetime.datetime.now():{}\".format(\\\n trainingIteration, qLearningAgentsShadingCurtain.weights, datetime.datetime.now()))\n\n # training the q value function\n for day in range (0, Util.getSimulationDaysInt()):\n\n state = day\n\n #########################################################################\n ############# set values necessary for RL training features##############\n #########################################################################\n # set day to the instance\n qLearningAgentsShadingCurtain.setDay(day)\n # dLIEachdayThroughInnerStructure on a certain day, necessary to cal DLI to PLants\n # qLearningAgentsShadingCurtain.setDLIEachDayThroughInnerStructure(dLIThroughInnerStructure[state])\n\n #set num of days from Jan 1st.\n daysFromJan1st = Util.getNumOfDaysFromJan1st(Util.getStartDateDateType() + datetime.timedelta(days=day))\n # date on a certain day\n qLearningAgentsShadingCurtain.setDaysFromJan1st(daysFromJan1st)\n\n # action = \"openCurtain\" or \"closeCurtain\"\n # if the state is at the terminal state, action is None.\n action = qLearningAgentsShadingCurtain.getAction(state)\n\n # if the q value is not initialized, initialize the q value. 
if initialized, just get the q value given state and action\n # state = qlearningAgentsShadingCurtain.getQValue(day, action)\n approximateQvalue = qLearningAgentsShadingCurtain.getApproximateQValue(state, action)\n # print (\"approximateQvalue:{}\".format(approximateQvalue))\n # set approximateQvalue to Q\n qLearningAgentsShadingCurtain.setApproximateQValue(approximateQvalue, state, action)\n\n\n # approximatedQvalueNextState = []\n # for action in qLearningAgentsShadingCurtain.getLegalActions(day):\n # approximatedQvalueNextState.append(qLearningAgentsShadingCurtain.getApproximateQValue(day + 1, action))\n # approximateMaxQvalueNextState = max[approximatedQvalueNextState]\n\n # get the maximum q value in the next state\n if (state+1) == Util.getSimulationDaysInt():\n approximateMaxQvalueNextState = 0.0\n else:\n approximateMaxQvalueNextState = qLearningAgentsShadingCurtain.getApproximateValue(state + 1)\n\n # calc the difference between the current q value and the maximum q value in the next state, which is used for updating weights\n\n difference = (qLearningAgentsShadingCurtain.getReward(day) + approximateMaxQvalueNextState) - approximateQvalue\n # print (\"qLearningAgentsShadingCurtain.getReward(day):{}\".format(qLearningAgentsShadingCurtain.getReward(day)))\n # print (\"approximateMaxQvalueNextState:{}\".format(approximateMaxQvalueNextState))\n # print (\"approximateQvalue:{}\".format(approximateQvalue))\n # print (\"difference:{}\".format(difference))\n\n # update weight of the q learning function\n qLearningAgentsShadingCurtain.updateApproximateWeight(difference)\n\n # print (\"qLearningAgentsShadingCurtain.weights:{}\".format(qLearningAgentsShadingCurtain.weights))\n # print (\"check trainingIteration:{}\".format(trainingIteration))\n\n # print (\"qLearningAgentsShadingCurtain.weights:{}\".format(qLearningAgentsShadingCurtain.weights))\n print (\"qLearningAgentsShadingCurtain.approximateQ:{}\".format(qLearningAgentsShadingCurtain.approximateQ))\n\n return 
qLearningAgentsShadingCurtain\n # ####################################################################################################\n # Stop execution here...\n # sys.exit()\n # Move the above line to different parts of the assignment as you implement more of the functionality.\n # ####################################################################################################\n\ndef testWeightsRLShadingCurtainDayStep(hasShadingCurtain, qLearningAgentsShadingCurtain = None, cropElectricityYieldSimulator1=None):\n\n numTesting = qLearningAgentsShadingCurtain.numTesting\n\n if hasShadingCurtain:\n\n # change the exploration rate into zero because in testing, RL does not explore\n qLearningAgentsShadingCurtain.epsilon = 0.0\n # array to store the sales price at each iteration\n plantSalesperSquareMeterList = np.zeros(numTesting)\n\n for testingIteration in range(0, numTesting):\n\n # get values necessary for RL training, which was done at\n # hourlyInnerLightIntensityPPFDThroughInnerStructure = cropElectricityYieldSimulator1.getHourlyInnerLightIntensityPPFDThroughInnerStructure()\n # dLIThroughInnerStructure = Util.convertFromHourlyPPFDWholeDayToDLI(hourlyInnerLightIntensityPPFDThroughInnerStructure)\n # set dLIThroughInnerStructure to the object\n # qLearningAgentsShadingCurtain.setDLIThroughInnerStructure(dLIThroughInnerStructure)\n\n print(\"testingIteration: {}, qLearningAgentsShadingCurtain.weights:{}, datetime.datetime.now():{}, period:{}\".format( \\\n testingIteration, qLearningAgentsShadingCurtain.weights, datetime.datetime.now(), constant.SimulationStartDate + \"-\" + constant.SimulationEndDate ))\n\n # training the q value function\n for day in range(0, Util.getSimulationDaysInt()):\n\n state = day\n #########################################################################\n ############# set values necessary for RL training features##############\n #########################################################################\n # set day to the 
instance\n qLearningAgentsShadingCurtain.setDay(day)\n # dLIEachdayThroughInnerStructure on a certain day, necessary to cal DLI to PLants\n # qLearningAgentsShadingCurtain.setDLIEachDayThroughInnerStructure(dLIThroughInnerStructure[state])\n\n # set num of days from Jan 1st.\n daysFromJan1st = Util.getNumOfDaysFromJan1st(Util.getStartDateDateType() + datetime.timedelta(days=day))\n # date on a certain day\n qLearningAgentsShadingCurtain.setDaysFromJan1st(daysFromJan1st)\n\n # action = \"openCurtain\" or \"closeCurtain\"\n # if the state is at the terminal state, action is None.\n action = qLearningAgentsShadingCurtain.getPolicy(state)\n # store the action at each state at tuples in list for a record.\n qLearningAgentsShadingCurtain.policies[state] = action\n\n ################## calculate the daily plant yield start#####################\n\n #### calc the DLI on a certain state\n dLIEachDayThroughInnerStructure = qLearningAgentsShadingCurtain.getDLIThroughInnerStructureElement(state)\n\n dLIEachDayToPlants = 0.0\n if action == constant.openCurtainString:\n dLIEachDayToPlants = dLIEachDayThroughInnerStructure\n elif action == constant.closeCurtainString:\n dLIEachDayToPlants = dLIEachDayThroughInnerStructure * constant.shadingTransmittanceRatio\n\n #store the DLI ateach state by list for a record. 
since the sequence is important, not use a dictionary.\n qLearningAgentsShadingCurtain.dLIEachDayToPlants[day] = dLIEachDayToPlants\n\n ###### calc plant weight increase with a certain DLI\n # num of days from the latest seeding\n daysFromSeeding = state % constant.cultivationDaysperHarvest\n\n # if the calc method is A.J Both 2003 model\n if qLearningAgentsShadingCurtain.cropElectricityYieldSimulator1.getPlantGrowthModel() == constant.TaylorExpantionWithFluctuatingDLI:\n # daily [g/unit]\n unitDailyFreshWeightIncreaseElement = \\\n Lettuce.calcUnitDailyFreshWeightIncreaseBoth2003Taylor(dLIEachDayToPlants, constant.cultivationDaysperHarvest, daysFromSeeding)\n # update the values to the instance\n qLearningAgentsShadingCurtain.setUnitDailyFreshWeightIncreaseElementShadingCurtain(unitDailyFreshWeightIncreaseElement, state)\n # print (\"1 unitDailyFreshWeightIncrease [g/unit]:{}, state:{}\".format(unitDailyFreshWeightIncreaseElement, state))\n\n else:\n print (\"[test] error: feasture w_2 not considered. 
choosing un-existing plant growth model\")\n ################## calculate the daily plant yield end#####################\n\n ################## calculate the total plant sales start#####################\n print (\"DLI to plants at each day [mol/m^2/m^2]\".format(qLearningAgentsShadingCurtain.dLIEachDayToPlants))\n\n unitPlantWeight = qLearningAgentsShadingCurtain.getUnitDailyFreshWeightIncreaseListShadingCurtain()\n print (\"unitPlantWeight [g/unit]:{}\".format(unitPlantWeight))\n totalUnitPlantWeight = sum(unitPlantWeight)\n\n\n # unit conversion; get the daily plant yield per given period per area: [g/unit] -> [g/m^2]\n unitPlantWeightperArea = Util.convertUnitShootFreshMassToShootFreshMassperArea(unitPlantWeight)\n # unit conversion: [g/m^2] -> [kg/m^2]1\n unitPlantWeightperAreaKg = Util.convertFromgramTokilogram(unitPlantWeightperArea)\n\n # get the sales price of plant [USD/m^2]\n # if the average DLI during each harvest term is more than 17 mol/m^2/day, discount the price\n dailyPlantSalesperSquareMeter = getPlantSalesperSquareMeter(\\\n cropElectricityYieldSimulator1.getYear(), unitPlantWeightperAreaKg, qLearningAgentsShadingCurtain.dLIEachDayToPlants)\n plantSalesperSquareMeter = sum(dailyPlantSalesperSquareMeter)\n plantSalesperSquareMeterList[testingIteration] = plantSalesperSquareMeter\n # print \"dailyPlantSalesperSquareMeter.shape:{}\".format(dailyPlantSalesperSquareMeter.shape)\n\n print (\"plantSalesperSquareMeterList[{}]:{}\".format(testingIteration, plantSalesperSquareMeterList))\n\n ################## calculate the total plant sakes end#####################\n\n else:\n print (\"shading curtain assumed not to be given. 
the function without shading curtain will be made in the future\")\n\n # return the average of testing results\n return plantSalesperSquareMeter\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5610317587852478, "alphanum_fraction": 0.6347305178642273, "avg_line_length": 32.90625, "blob_id": "7f59fe3f3f1d27a24db9cc84c528848cca2f3e8e", "content_id": "31b785d7dedfbb216151639c9655e434a70d9893", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2172, "license_type": "permissive", "max_line_length": 103, "num_lines": 64, "path": "/PlantGrowthModelE_J_VanHentenConstant.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#######################################################\n# author :Kensaku Okada [[email protected]]\n# create date : 15 Jun 2017\n# last edit date: 15 Jun 2017\n#######################################################\n\n# convert ratio of CO2 into SUgar (CH2O)\nc_alpha = 30.0 / 44.0\n# respiratory and syntehsis losses of non-structural material due to growth\nc_beta = 0.8\n# the saturation growth rate at 20[C] [/s]\nc_gr_max = {'s-1' :5.0 * 10**(-6)}\n\nc_gamma = 1.0\n# Q10 factor for growth\nc_Q10_gr = 1.6\n# the maintenance respiration coefficient for shoot at 25[C]\nc_resp_sht = {'s-1' : 3.47 * 10**(-7)}\n# the maintenance respiration coefficient for root at 25[C]\nc_resp_rt = {'s-1' : 1.16 * 10**(-7)}\n# Q10 factor of the maintenance respiration\nc_Q10_resp = 2.0\n# the ratio of the root dry weight to the total crop dry weight\nc_tau = 0.15\n# extinction coefficient\n# c_K = 0.9\n# extinction coefficient for 25 heads/m^2 density\nc_K = 1.0\n# structural leaf area ratio\nc_lar = {'m2 g-2' : 75 * 10**(-3)}\n# density of CO2\nc_omega = {'g m-3' : 1.83 * 10**(-3) }\n# the CO2 compensation point at 20[C]\nc_upperCaseGamma = {'ppm': 40.0}\n# the Q10 value which account for the effect of temperature on upperCaseGamma 
(Γ)\nc_Q10_upperCaseGamma = 2.0\n# light use efficiency at very high CO2 concentrations\nc_epsilon = {'g J-1' : 17.0 * 10 ** (-6)}\n# the boundary layer conductance of lettuce leaves\ng_bnd = {'m s-1' : 0.007}\n# the stomatal resistance\ng_stm = {'m s-1': 0.005 }\n# parameters for the carboxylation conductance\nc_car1 = -1.32 * 10**(-5)\nc_car2 = 5.94 * 10**(-4)\nc_car3 = -2.64 * 10**(-3)\n\ncanopyTemp = {'celsius': 17.5 }\n# canopyTemp = {'celsius': 40 }\n# canopyTemp = {'celsius': 5 }\n\ncarboxilationConductatnce = c_car1 * canopyTemp['celsius']**2 + c_car1 * canopyTemp['celsius'] + c_car3\n# print (carboxilationConductatnce)\n\n\ncanopyTemp = {'celsius': 40 }\ncarboxilationConductatnce = c_car1 * canopyTemp['celsius']**2 + c_car1 * canopyTemp['celsius'] + c_car3\n# print (carboxilationConductatnce)\n\n\ncanopyTemp = {'celsius': 5 }\ncarboxilationConductatnce = c_car1 * canopyTemp['celsius']**2 + c_car1 * canopyTemp['celsius'] + c_car3\n# print (carboxilationConductatnce)\n\n" }, { "alpha_fraction": 0.6578376293182373, "alphanum_fraction": 0.7134172916412354, "avg_line_length": 40.83636474609375, "blob_id": "ed0f0b3af5725e9384be0514d863f539cf36d8ec", "content_id": "756afa429519b736e263291417ca5c1f957f3fe4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2303, "license_type": "permissive", "max_line_length": 216, "num_lines": 55, "path": "/PlantGrowthModelS_Pearson1997Constant.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n##########import package files##########\nfrom scipy import stats\nimport sys\nimport datetime\nimport os as os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util\n#######################################################\n\n# reference of the model: https://www.tandfonline.com/doi/abs/10.1080/14620316.1997.11515538\n# 
\tA validated model to predict the effects of environment on the growth of lettuce (Lactuca sativa L.): Implications for climate change\n\n# Optimum temperature for conversion of storage to structural dry weight [Celsius]\nT_OS = 30.0\n# Partitioning coefficient of storage to structural dry weight [g / (g * day * Celsius)]\n\n#TODO there is no source about the value k. In my model, this was changed in my own way so that the model exibits an appropriate transition of head weight. So, need to validate it.\n# k = 6.9 * 10.0**(-2)\nk = 6.9 * 10.0**(-2) / 4.0\n# Factor to convert CO 2 to plant dry weight [-]\npsi = 30.0/44.0\n# Distance between plants [m]\nh = 0.2\n# Leaf area ratio [1/(m^2 * kg)]\nF_G = 75.0\n# Leaf light utilization efficiency [kg(CO_2) / J ]\nalpha_m = 14.0 * 10**(-9)\n# Photo-respiration constant [kg(CO_2) / (m^2 * s)]\nbeta = 1.0 * 10**(-7)\n# Leaf conductance\ntau = 0.002\n# Thermal time for the cessation of photosynthesis [Celsius * d]\ntheta_m = 1600.0\n# Optimum temperature for photosynthesis [Celsius]\nT_op = 25.0\n# Rate constant for the effect of temperature on photosynthesis [1/Celcius]\nphi = 0.02\n# Respiration rate constant [g(W_S)/g(W_G)]\nR_G = 0.3\n# Ontogenetic respiration rate constant [-]\ngamma = 3.0\n# Rate constant for the effect of temperature on respiration [1/Celsius]\nepsilon = 0.03\n\n\n# optimal temperature [Celusius]\n# reference: Pearson, S. Hadley, P. Wheldon, A.E. 
(1993), \"A reanalysis of the effects of temperature and irradiance on time to flowering in chrysanthemum (Dendranthema grandiflora)\"\n# https://scholar.google.com/citations?user=_xeFP80AAAAJ&hl=en#d=gs_md_cita-d&p=&u=%2Fcitations%3Fview_op%3Dview_citation%26hl%3Den%26user%3D_xeFP80AAAAJ%26citation_for_view%3D_xeFP80AAAAJ%3Au-x6o8ySG0sC%26tzom%3D420\n# Effective temperature is the sub-optimum temperature equivalent of a supra-optimum temperature in terms of developmental rate.\nT_o = T_op\n\n\n" }, { "alpha_fraction": 0.7890337705612183, "alphanum_fraction": 0.789862871170044, "avg_line_length": 39.496368408203125, "blob_id": "0f4428d00f3845ed52e6bbf571d3d270c6c05aef", "content_id": "bed2a282163a96209b4a09cbc43085dfdde28533", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44628, "license_type": "permissive", "max_line_length": 124, "num_lines": 1102, "path": "/SimulatorClass.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "import CropElectricityYeildSimulatorConstant as constant\n\nclass SimulatorClass:\n\n # constructor\n def __init__(self):\n\n self._OPVAreaCoverageRatio = constant.OPVAreaCoverageRatio\n self._OPVCoverageRatioSummerPeriod = constant.OPVAreaCoverageRatioSummerPeriod\n self._OPVCoverageRatiosConsiderSummerRatio = None\n self._plantGrowthModel = constant.plantGrowthModel\n self._shootFreshMassList = None\n self._cultivationDaysperHarvest = constant.cultivationDaysperHarvest\n self._hasShadingCurtain = constant.hasShadingCurtain\n self._shadingCurtainDeployPPFD = constant.plantGrowthModel\n self._profitVSOPVCoverageData = None\n self._monthlyElectricitySalesperArea = None\n self._monthlyElectricitySalesperAreaEastRoof = None\n self._monthlyElectricitySalesperAreaWestRoof = None\n self._totalElectricitySalesPerAreaPerMonth = None\n self._totalElectricitySalesPerMonth = None\n self._totalElectricitySales = None\n 
self._oPVCostUSDForDepreciationPerOPVArea = None\n self._totalOPVCostUSDForDepreciation = None\n self._totalOPVCostUSDForDepreciationPerGHFloorArea = None\n self._electricityProductionProfit = None\n self._electricityProductionProfitPerGHFloorArea = None\n self._hourlyInnerLightIntensityPPFDThroughGlazing = None\n self._hourlyInnerLightIntensityPPFDThroughInnerStructure = None\n self._directPPFDToOPVEastDirection = None\n self._directPPFDToOPVWestDirection = None\n self._diffusePPFDToOPV = None\n self._groundReflectedPPFDToOPV = None\n # self._totalDLItoPlantsBaselineShadingCuratin = None\n self._directDLIToOPVEastDirection = None\n self._directDLIToOPVWestDirection = None\n self._diffuseDLIToOPV = None\n self._groundReflectedDLIToOPV = None\n self._hourlyDirectSolarRadiationAfterMultiSpanRoof = None\n self._hourlyDiffuseSolarRadiationAfterMultiSpanRoof = None\n self._groundReflectedRadiationAfterMultiSpanRoof = None\n self._hourlyDirectPPFDAfterMultiSpanRoof = None\n self._hourlyDiffusePPFDAfterMultiSpanRoof = None\n self._groundReflectedPPFDAfterMultiSpanRoof = None\n self._shootFreshMassList = None\n self._unitDailyFreshWeightIncrease = None\n self._accumulatedUnitDailyFreshWeightIncrease = None\n self._unitDailyHarvestedFreshWeight = None\n self._totalPlantSalesperSquareMeter = None\n self._totalPlantSales = None\n self._totalPlantSalesPerGHFloorArea = None\n self._Q_v = {\"coolingOrHeatingEnergy W m-2\": None}\n self._Q_sr = {\"solarIrradianceToPlants W m-2\": None}\n self._Q_lh = {\"sensibleHeatFromConductionAndConvection W m-2\": None}\n self._Q_sh = {\"latentHeatByTranspiration W m-2\": None}\n self._Q_lw = {\"longWaveRadiation W m-2\": None}\n self._Q_vW = {\"coolingOrHeatingEnergy W\": None}\n self._Q_srW = {\"solarIrradianceToPlants W\": None}\n self._Q_lhW = {\"sensibleHeatFromConductionAndConvection W\": None}\n self._Q_shW = {\"latentHeatByTranspiration W\": None}\n self._Q_lwW = {\"longWaveRadiation W\": None}\n 
self._monthlyRequiredGHHeatingEnergyForPlants = None\n self._totalHeatingCostForPlants = None\n\n self._totalCoolingCostForPlants = None\n\n self._totalLaborCost = None\n self._totalLaborCostPerGHFloorArea = None\n self._totalPlantProductionCost = None\n self._totalPlantProductionCostPerGHFloorArea = None\n self._totalPlantProfit = None\n self._totalPlantProfitPerGHFloorArea = None\n self._economicProfit = None\n self._economicProfitPerGHFloorArea = None\n self._averageDLIonEachCycle = None\n self._year = None\n self._month = None\n self._day = None\n self._hour = None\n self._importedHourlyHorizontalDirectSolarRadiation = None\n self._importedHourlyHorizontalDiffuseSolarRadiation = None\n self._importedHourlyHorizontalTotalBeamMeterBodyTemperature = None\n self._importedHourlyAirTemperature = None\n self._hourlyRelativeHumidity = None\n self._directSolarRadiationToOPVEastDirection = None\n self._directSolarRadiationToOPVWestDirection = None\n self._diffuseSolarRadiationToOPV = None\n self._albedoSolarRadiationToOPV = None\n self._estimatedDirectSolarRadiationToOPVEastDirection = None\n self._estimatedDirectSolarRadiationToOPVWestDirection = None\n self._estimatedDiffuseSolarRadiationToOPV = None\n self._estimatedAlbedoSolarRadiationToOPV = None\n self._hourlySolarIncidenceAngleEastDirection = None\n self._hourlySolarIncidenceAngleWestDirection = None\n self._directSolarIrradianceBeforeShadingCurtain = None\n self._diffuseSolarIrradianceBeforeShadingCurtain = None\n self._directSolarIrradianceToPlants = None\n self._diffuseSolarIrradianceToPlants = None\n self._transmittanceThroughShadingCurtainChangingEachMonth = None\n self._directPPFDToPlants = None\n self._diffusePPFDToPlants = None\n self._directDLIToPlants = None\n self._diffuseDLIToPlants = None\n self._totalDLItoPlants = None\n self._hourlyDayOrNightFlag = None\n self._hourlyHorizontalDiffuseOuterSolarIrradiance = None\n self._hourlyHorizontalTotalOuterSolarIrradiance = None\n 
self._hourlyHorizontalDirectOuterSolarIrradiance = None\n self._hourlyHorizontalTotalBeamMeterBodyTemperature = None\n self._hourlyAirTemperature = None\n # self._ifGrowForSummerPeriod = False\n self._ifGrowForSummerPeriod = None\n\n # if you want to calculate the estimated data which does not require the measured data, set this variable True.\n self._estimateSolarRadiationMode = False\n\n self._ifHasShadingCurtain = None\n self._hourlySolarAltitudeAngle = None\n self._hourlySolarAzimuthAngle = None\n self._hourlyModuleAzimuthAngleEast = None\n self._hourlyModuleAzimuthAngleWest = None\n self._T_matForPerpendicularIrrEastOrNorthFacingRoof = None\n self._T_matForPerpendicularIrrWestOrSouthFacingRoof = None\n self._integratedT_mat = None\n self._directHorizontalSolarRadiation = None\n self._diffuseHorizontalSolarRadiation = None\n self._totalHorizontalSolarRadiation = None\n\n self._dailyWhopvoutperAreaEastRoof = None\n self._dailyWhopvoutperAreaWestRoof = None\n self._dailykWhopvoutperAreaEastRoof = None\n self._dailykWhopvoutperAreaWestRoof = None\n self._totalkWhopvoutPerday = None\n self._monthlyElectricityRetailPrice = None\n self._totalkWhopvoutPerAreaPerday = None\n self._totalkWhopvoutPerAreaPerday = None\n\n self._LeafAreaIndex_J_VanHenten1994 = None\n self._summerPeriodFlagArray = None\n self._dailyShootFreshMass = None\n self._dailyUnitDailyFreshWeightIncrease = None\n self._dailyAccumulatedUnitDailyFreshWeightIncrease = None\n self._dailyUnitHarvestedFreshWeight = None\n self._shootFreshMassPerAreaKgPerDay = None\n self._harvestedShootFreshMassPerAreaKgPerDay = None\n self._totalHarvestedShootFreshMass = None\n\n # variables for validation\n self._GHSolarIrradianceValidationData = None\n self._GHAirTemperatureValidationData = None\n\n #################################################################################################\n ################################## variables for debugging start ################################\n 
#################################################################################################\n\n self._r_a = None\n self._L = None\n self._r_b = None\n self._e_a = None\n self._e_s = None\n self._r_s = None\n self._r_c = None\n self._gamma = None\n self._gamma_star = None\n self._s = None\n self._R_n = None\n\n @property\n def r_a(self):\n return self._r_a\n @r_a.setter\n def r_a(self, r_a):\n self._r_a= r_a\n\n @property\n def L(self):\n return self._L\n @L.setter\n def L(self, L):\n self._L= L\n\n @property\n def r_b(self):\n return self._r_b\n @r_b.setter\n def r_b(self, r_b):\n self._r_b= r_b\n\n @property\n def e_a(self):\n return self._e_a\n @e_a.setter\n def e_a(self, e_a):\n self._e_a= e_a\n\n @property\n def e_s(self):\n return self._e_s\n @e_s.setter\n def e_s(self, e_s):\n self._e_s= e_s\n\n @property\n def r_s(self):\n return self._r_s\n @r_s.setter\n def r_s(self, r_s):\n self._r_s = r_s\n\n @property\n def r_c(self):\n return self._r_c\n @r_c.setter\n def r_c(self, r_c):\n self._r_c = r_c\n\n @property\n def gamma(self):\n return self._gamma\n @gamma.setter\n def gamma(self, gamma):\n self._gamma = gamma\n\n @property\n def gamma_star(self):\n return self._gamma_star\n @gamma_star.setter\n def gamma_star(self, gamma_star):\n self._gamma_star = gamma_star\n\n @property\n def s(self):\n return self._s\n @s.setter\n def s(self, s):\n self._s =s\n\n @property\n def R_n(self):\n return self._R_n\n @R_n.setter\n def R_n(self, R_n):\n self._R_n = R_n\n\n #################################################################################################\n ################################## variables for debugging end ##################################\n #################################################################################################\n\n def setOPVAreaCoverageRatio(self, OPVAreaCoverageRatio):\n self._OPVAreaCoverageRatio = OPVAreaCoverageRatio\n def getOPVAreaCoverageRatio(self):\n return self._OPVAreaCoverageRatio\n\n def 
setOPVCoverageRatioSummerPeriod(self, OPVCoverageRatioSummerPeriod):\n self._OPVCoverageRatioSummerPeriod = OPVCoverageRatioSummerPeriod\n def getOPVCoverageRatioSummerPeriod(self):\n return self._OPVCoverageRatioSummerPeriod\n\n @property\n def OPVCoverageRatiosConsiderSummerRatio(self):\n return self._OPVCoverageRatiosConsiderSummerRatio\n @OPVCoverageRatiosConsiderSummerRatio.setter\n def OPVCoverageRatiosConsiderSummerRatio(self, OPVCoverageRatiosConsiderSummerRatio):\n self._OPVCoverageRatiosConsiderSummerRatio = OPVCoverageRatiosConsiderSummerRatio\n\n def setPlantGrowthModel(self, plantGrowthModel):\n self._plantGrowthModel = plantGrowthModel\n def getPlantGrowthModel(self):\n return self._plantGrowthModel\n\n @property\n def shootFreshMassList(self):\n return self._shootFreshMassList\n @shootFreshMassList.setter\n def shootFreshMassList(self, shootFreshMassList):\n self._shootFreshMassList = shootFreshMassList\n\n def setCultivationDaysperHarvest(self, cultivationDaysperHarvest):\n self._cultivationDaysperHarvest = cultivationDaysperHarvest\n def getCultivationDaysperHarvest(self):\n return self._cultivationDaysperHarvest\n\n def setHasShadingCurtain(self, hasShadingCurtain):\n self._hasShadingCurtain = hasShadingCurtain\n def getHasShadingCurtain(self):\n return self.hasShadingCurtain\n\n def setShadingCurtainDeployPPFD(self, shadingCurtainDeployPPFD):\n self._shadingCurtainDeployPPFD = shadingCurtainDeployPPFD\n def getShadingCurtainDeployPPFD(self):\n return self._shadingCurtainDeployPPFD\n\n def setProfitVSOPVCoverageData(self,profitVSOPVCoverageData):\n self._profitVSOPVCoverageData = profitVSOPVCoverageData\n def getProfitVSOPVCoverageData(self):\n return self._profitVSOPVCoverageData\n\n def setMonthlyElectricitySalesperArea(self, monthlyElectricitySalesperArea):\n self._monthlyElectricitySalesperArea = monthlyElectricitySalesperArea\n def getMonthlyElectricitySalesperArea(self):\n return self._monthlyElectricitySalesperArea\n\n def 
setMonthlyElectricitySalesperAreaEastRoof(self, monthlyElectricitySalesperAreaEastRoof):\n self._monthlyElectricitySalesperAreaEastRoof = monthlyElectricitySalesperAreaEastRoof\n def getMonthlyElectricitySalesperAreaEastRoof(self):\n return self._monthlyElectricitySalesperAreaEastRoof\n\n def setMonthlyElectricitySalesperAreaWestRoof(self, monthlyElectricitySalesperAreaWestRoof):\n self._monthlyElectricitySalesperAreaWestRoof = monthlyElectricitySalesperAreaWestRoof\n\n def getMonthlyElectricitySalesperAreaWestRoof(self):\n return self._monthlyElectricitySalesperAreaWestRoof\n\n @property\n def totalElectricitySalesPerMonth(self):\n return self._totalElectricitySalesPerMonth\n @totalElectricitySalesPerMonth.setter\n def totalElectricitySalesPerMonth(self, totalElectricitySalesPerMonth):\n self._totalElectricitySalesPerMonth = totalElectricitySalesPerMonth\n\n @property\n def totalElectricitySalesPerAreaPerMonth(self):\n return self._totalElectricitySalesPerAreaPerMonth\n @totalElectricitySalesPerAreaPerMonth.setter\n def totalElectricitySalesPerAreaPerMonth(self, totalElectricitySalesPerAreaPerMonth):\n self._totalElectricitySalesPerAreaPerMonth = totalElectricitySalesPerAreaPerMonth\n\n @property\n def totalElectricitySales(self):\n return self._totalElectricitySales\n @totalElectricitySales.setter\n def totalElectricitySales(self, totalElectricitySales):\n self._totalElectricitySales = totalElectricitySales\n\n def setOPVCostUSDForDepreciationPerOPVArea(self, oPVCostUSDForDepreciationPerOPVArea):\n self._oPVCostUSDForDepreciationPerOPVArea = oPVCostUSDForDepreciationPerOPVArea\n def getOPVCostUSDForDepreciationPerOPVArea(self):\n return self._oPVCostUSDForDepreciationPerOPVArea\n\n @property\n def totalOPVCostUSDForDepreciation(self):\n return self._totalOPVCostUSDForDepreciation\n @totalOPVCostUSDForDepreciation.setter\n def totalOPVCostUSDForDepreciation(self, totalOPVCostUSDForDepreciation):\n self._totalOPVCostUSDForDepreciation = 
totalOPVCostUSDForDepreciation\n\n @property\n def totalOPVCostUSDForDepreciationPerGHFloorArea(self):\n return self._totalOPVCostUSDForDepreciationPerGHFloorArea\n @totalOPVCostUSDForDepreciationPerGHFloorArea.setter\n def totalOPVCostUSDForDepreciationPerGHFloorArea(self, totalOPVCostUSDForDepreciationPerGHFloorArea):\n self._totalOPVCostUSDForDepreciationPerGHFloorArea = totalOPVCostUSDForDepreciationPerGHFloorArea\n\n @property\n def electricityProductionProfit(self):\n return self._electricityProductionProfit\n @electricityProductionProfit.setter\n def electricityProductionProfit(self, electricityProductionProfit):\n self._electricityProductionProfit = electricityProductionProfit\n\n @property\n def electricityProductionProfitPerGHFloorArea(self):\n return self._electricityProductionProfitPerGHFloorArea\n @electricityProductionProfitPerGHFloorArea.setter\n def electricityProductionProfitPerGHFloorArea(self, electricityProductionProfitPerGHFloorArea):\n self._electricityProductionProfitPerGHFloorArea = electricityProductionProfitPerGHFloorArea\n\n ######################## measured solar radiation to OPV start ########################\n def setDirectSolarRadiationToOPVEastDirection(self, directSolarRadiationToOPVEastDirection):\n self._directSolarRadiationToOPVEastDirection = directSolarRadiationToOPVEastDirection\n\n def getDirectSolarRadiationToOPVEastDirection(self):\n return self._directSolarRadiationToOPVEastDirection\n\n def setDirectSolarRadiationToOPVWestDirection(self, directSolarRadiationToOPVWestDirection):\n self._directSolarRadiationToOPVWestDirection = directSolarRadiationToOPVWestDirection\n\n def getDirectSolarRadiationToOPVWestDirection(self):\n return self._directSolarRadiationToOPVWestDirection\n\n def setDiffuseSolarRadiationToOPV(self, diffuseSolarRadiationToOPV):\n self._diffuseSolarRadiationToOPV = diffuseSolarRadiationToOPV\n\n def getDiffuseSolarRadiationToOPV(self):\n return self._diffuseSolarRadiationToOPV\n\n def 
setAlbedoSolarRadiationToOPV(self, albedoSolarRadiationToOPV):\n self._albedoSolarRadiationToOPV = albedoSolarRadiationToOPV\n\n def getAlbedoSolarRadiationToOPV(self):\n return self._albedoSolarRadiationToOPV\n ######################## measured solar radiation to OPV end ########################\n\n ######################## estimated solar radiation to OPV start ########################\n def setEstimatedDirectSolarRadiationToOPVEastDirection(self, estimatedDirectSolarRadiationToOPVEastDirection):\n self._estimatedDirectSolarRadiationToOPVEastDirection = estimatedDirectSolarRadiationToOPVEastDirection\n\n def getEstimatedDirectSolarRadiationToOPVEastDirection(self):\n return self._estimatedDirectSolarRadiationToOPVEastDirection\n\n def setEstimatedDirectSolarRadiationToOPVWestDirection(self, estimatedDirectSolarRadiationToOPVWestDirection):\n self._estimatedDirectSolarRadiationToOPVWestDirection = estimatedDirectSolarRadiationToOPVWestDirection\n\n def getEstimatedDirectSolarRadiationToOPVWestDirection(self):\n return self._estimatedDirectSolarRadiationToOPVWestDirection\n\n def setEstimatedDiffuseSolarRadiationToOPV(self, estimatedDiffuseSolarRadiationToOPV):\n self._albedoSolarRadiationToOPV = estimatedDiffuseSolarRadiationToOPV\n\n def getEstimatedDiffuseSolarRadiationToOPV(self):\n return self._estimatedDiffuseSolarRadiationToOPV\n\n def setEstimatedAlbedoSolarRadiationToOPV(self, estimatedAlbedoSolarRadiationToOPV):\n self._estimatedAlbedoSolarRadiationToOPV = estimatedAlbedoSolarRadiationToOPV\n\n def getEstimatedAlbedoSolarRadiationToOPV(self):\n return self._estimatedAlbedoSolarRadiationToOPV\n ######################## estimated solar radiation to OPV end ########################\n\n\n\n def setHourlyInnerLightIntensityPPFDThroughGlazing(self, hourlyInnerLightIntensityPPFDThroughGlazing):\n self._hourlyInnerLightIntensityPPFDThroughGlazing = hourlyInnerLightIntensityPPFDThroughGlazing\n def getHourlyInnerLightIntensityPPFDThroughGlazing(self):\n return 
self._hourlyInnerLightIntensityPPFDThroughGlazing\n\n def setHourlyInnerLightIntensityPPFDThroughInnerStructure(self, hourlyInnerLightIntensityPPFDThroughInnerStructure):\n self._hourlyInnerLightIntensityPPFDThroughInnerStructure = hourlyInnerLightIntensityPPFDThroughInnerStructure\n def getHourlyInnerLightIntensityPPFDThroughInnerStructure(self):\n return self._hourlyInnerLightIntensityPPFDThroughInnerStructure\n\n def setDirectPPFDToOPVEastDirection(self, directPPFDToOPVEastDirection):\n self._directPPFDToOPVEastDirection = directPPFDToOPVEastDirection\n def getDirectPPFDToOPVEastDirection(self):\n return self._directPPFDToOPVEastDirection\n\n def setDirectPPFDToOPVWestDirection(self, directPPFDToOPVWestDirection):\n self._directPPFDToOPVWestDirection = directPPFDToOPVWestDirection\n def getDirectPPFDToOPVWestDirection(self):\n return self._directPPFDToOPVWestDirection\n\n def setDiffusePPFDToOPV(self, diffusePPFDToOPV):\n self._diffusePPFDToOPV = diffusePPFDToOPV\n def getDiffusePPFDToOPV(self):\n return self._diffusePPFDToOPV\n\n def setGroundReflectedPPFDToOPV(self, groundReflectedPPFDToOPV):\n self._groundReflectedPPFDToOPV = groundReflectedPPFDToOPV\n def getGroundReflectedPPFDToOPV(self):\n return self._groundReflectedPPFDToOPV\n\n # def setTotalDLItoPlantsBaselineShadingCuratin(self, totalDLItoPlantsBaselineShadingCuratin):\n # self._totalDLItoPlantsBaselineShadingCuratin = totalDLItoPlantsBaselineShadingCuratin\n # def getTotalDLItoPlantsBaselineShadingCuratin(self):\n # return self._totalDLItoPlantsBaselineShadingCuratin\n\n def setDirectDLIToOPVEastDirection(self, directDLIToOPVEastDirection):\n self._directDLIToOPVEastDirection = directDLIToOPVEastDirection\n def getDirectDLIToOPVEastDirection(self):\n return self._directDLIToOPVEastDirection\n\n def setDirectDLIToOPVWestDirection(self, directDLIToOPVWestDirection):\n self._directDLIToOPVWestDirection = directDLIToOPVWestDirection\n def getDirectDLIToOPVWestDirection(self):\n return 
self._directDLIToOPVWestDirection\n\n def setDiffuseDLIToOPV(self, diffuseDLIToOPV):\n self._diffuseDLIToOPV = diffuseDLIToOPV\n def getDiffuseDLIToOPV(self):\n return self._diffuseDLIToOPV\n\n def setGroundReflectedDLIToOPV(self, groundReflectedDLIToOPV):\n self._groundReflectedDLIToOPV = groundReflectedDLIToOPV\n def getGroundReflectedDLIToOPV(self):\n return self._groundReflectedDLIToOPV\n\n\n ##############################solar irradiance to multi span roof start##############################\n def setHourlyDirectSolarRadiationAfterMultiSpanRoof(self, hourlyDirectSolarRadiationAfterMultiSpanRoof):\n self._hourlyDirectSolarRadiationAfterMultiSpanRoof = hourlyDirectSolarRadiationAfterMultiSpanRoof\n def getHourlyDirectSolarRadiationAfterMultiSpanRoof(self):\n return self._hourlyDirectSolarRadiationAfterMultiSpanRoof\n\n def setHourlyDiffuseSolarRadiationAfterMultiSpanRoof(self, hourlyDiffuseSolarRadiationAfterMultiSpanRoof):\n self._hourlyDiffuseSolarRadiationAfterMultiSpanRoof = hourlyDiffuseSolarRadiationAfterMultiSpanRoof\n def getHourlyDiffuseSolarRadiationAfterMultiSpanRoof(self):\n return self._hourlyDiffuseSolarRadiationAfterMultiSpanRoof\n\n def setGroundReflectedRadiationAfterMultiSpanRoof(self, groundReflectedRadiationAfterMultiSpanRoof):\n self._groundReflectedRadiationAfterMultiSpanRoof = groundReflectedRadiationAfterMultiSpanRoof\n def getGroundReflectedRadiationAfterMultiSpanRoof(self):\n return self._groundReflectedRadiationAfterMultiSpanRoof\n\n def setHourlyDirectPPFDAfterMultiSpanRoof(self, hourlyDirectPPFDAfterMultiSpanRoof):\n self._hourlyDirectPPFDAfterMultiSpanRoof = hourlyDirectPPFDAfterMultiSpanRoof\n def getHourlyDirectPPFDAfterMultiSpanRoof(self):\n return self._hourlyDirectPPFDAfterMultiSpanRoof\n\n def setHourlyDiffusePPFDAfterMultiSpanRoof(self, hourlyDiffusePPFDAfterMultiSpanRoof):\n self._hourlyDiffusePPFDAfterMultiSpanRoof = hourlyDiffusePPFDAfterMultiSpanRoof\n def getHourlyDiffusePPFDAfterMultiSpanRoof(self):\n return 
self._hourlyDiffusePPFDAfterMultiSpanRoof\n\n def setGroundReflectedPPFDAfterMultiSpanRoof(self, groundReflectedPPFDAfterMultiSpanRoof):\n self._groundReflectedPPFDAfterMultiSpanRoof = groundReflectedPPFDAfterMultiSpanRoof\n def getGroundReflectedPPFDAfterMultiSpanRoof(self):\n return self._groundReflectedPPFDAfterMultiSpanRoof\n ##############################solar irradiance to multi span roof end##############################\n\n def setShootFreshMassList(self, shootFreshMassList):\n self._shootFreshMassList = shootFreshMassList\n def getShootFreshMassList(self):\n return self._shootFreshMassList\n\n def setUnitDailyFreshWeightIncrease(self, setUnitDailyFreshWeightIncrease):\n self._unitDailyFreshWeightIncrease = setUnitDailyFreshWeightIncrease\n def getUnitDailyFreshWeightIncrease(self):\n return self._unitDailyFreshWeightIncrease\n\n def setAccumulatedUnitDailyFreshWeightIncrease(self, accumulatedUnitDailyFreshWeightIncrease):\n self._accumulatedUnitDailyFreshWeightIncrease = accumulatedUnitDailyFreshWeightIncrease\n def getAccumulatedUnitDailyFreshWeightIncrease(self):\n return self._accumulatedUnitDailyFreshWeightIncrease\n\n def setUnitDailyHarvestedFreshWeight(self, unitDailyHarvestedFreshWeight):\n self._unitDailyHarvestedFreshWeight = unitDailyHarvestedFreshWeight\n def getUnitDailyHarvestedFreshWeight(self):\n return self._unitDailyHarvestedFreshWeight\n\n @property\n def totalPlantSales(self):\n return self._totalPlantSales\n @totalPlantSales.setter\n def totalPlantSales(self, totalPlantSales):\n self._totalPlantSales = totalPlantSales\n\n @property\n def totalPlantSalesperSquareMeter(self):\n return self._totalPlantSalesperSquareMeter\n @totalPlantSalesperSquareMeter.setter\n def totalPlantSalesperSquareMeter(self, totalPlantSalesperSquareMeter):\n self._totalPlantSalesperSquareMeter = totalPlantSalesperSquareMeter\n\n @property\n def totalPlantSalesPerGHFloorArea(self):\n return self._totalPlantSalesPerGHFloorArea\n 
@totalPlantSalesPerGHFloorArea.setter\n def totalPlantSalesPerGHFloorArea(self, totalPlantSalesPerGHFloorArea):\n self._totalPlantSalesPerGHFloorArea = totalPlantSalesPerGHFloorArea\n\n @property\n def Q_v(self):\n return self._Q_v\n @Q_v.setter\n def Q_v(self, Q_v):\n self._Q_v = Q_v\n\n @property\n def Q_sr(self):\n return self._Q_sr\n @Q_sr.setter\n def Q_sr(self, Q_sr):\n self._Q_sr = Q_sr\n\n @property\n def Q_lh(self):\n return self._Q_lh\n @Q_lh.setter\n def Q_lh(self, Q_lh):\n self._Q_lh = Q_lh\n\n @property\n def Q_sh(self):\n return self._Q_sh\n @Q_sh.setter\n def Q_sh(self, Q_sh):\n self._Q_sh = Q_sh\n\n @property\n def Q_lw(self):\n return self._Q_lw\n @Q_lw.setter\n def Q_lw(self, Q_lw):\n self._Q_lw = Q_lw\n\n @property\n def Q_vW(self):\n return self._Q_vW\n @Q_vW.setter\n def Q_vW(self, Q_vW):\n self._Q_vW = Q_vW\n\n @property\n def Q_srW(self):\n return self._Q_srW\n @Q_srW.setter\n def Q_srW(self, Q_srW):\n self._Q_srW = Q_srW\n\n @property\n def Q_lhW(self):\n return self._Q_lhW\n @Q_lhW.setter\n def Q_lhW(self, Q_lhW):\n self._Q_lhW = Q_lhW\n\n @property\n def Q_shW(self):\n return self._Q_shW\n @Q_shW.setter\n def Q_shW(self, Q_shW):\n self._Q_shW = Q_shW\n\n @property\n def Q_lwW(self):\n return self._Q_lwW\n @Q_lwW.setter\n def Q_lwW(self, Q_lwW):\n self._Q_lwW = Q_lwW\n\n @property\n def monthlyRequiredGHHeatingEnergyForPlants(self):\n return self._monthlyRequiredGHHeatingEnergyForPlants\n @monthlyRequiredGHHeatingEnergyForPlants.setter\n def monthlyRequiredGHHeatingEnergyForPlants(self, monthlyRequiredGHHeatingEnergyForPlants):\n self._monthlyRequiredGHHeatingEnergyForPlants = monthlyRequiredGHHeatingEnergyForPlants\n\n @property\n def totalHeatingCostForPlants(self):\n return self._totalHeatingCostForPlants\n @totalHeatingCostForPlants.setter\n def totalHeatingCostForPlants(self, totalHeatingCostForPlants):\n self._totalHeatingCostForPlants = totalHeatingCostForPlants\n\n\n @property\n def totalLaborCost(self):\n return 
self._totalLaborCost\n @totalLaborCost.setter\n def totalLaborCost(self, totalLaborCost):\n self._totalLaborCost = totalLaborCost\n\n @property\n def totalLaborCostPerGHFloorArea(self):\n return self._totalLaborCostPerGHFloorArea\n @totalLaborCostPerGHFloorArea.setter\n def totalLaborCostPerGHFloorArea(self, totalLaborCostPerGHFloorArea):\n self._totalLaborCostPerGHFloorArea = totalLaborCostPerGHFloorArea\n\n @property\n def totalPlantProductionCost(self):\n return self._totalPlantProductionCost\n @totalPlantProductionCost.setter\n def totalPlantProductionCost(self, totalPlantProductionCost):\n self._totalPlantProductionCost = totalPlantProductionCost\n\n @property\n def totalPlantProductionCostPerGHFloorArea(self):\n return self._totalPlantProductionCostPerGHFloorArea\n @totalPlantProductionCostPerGHFloorArea.setter\n def totalPlantProductionCostPerGHFloorArea(self, totalPlantProductionCostPerGHFloorArea):\n self._totalPlantProductionCostPerGHFloorArea = totalPlantProductionCostPerGHFloorArea\n\n @property\n def totalPlantProfit(self):\n return self._totalPlantProfit\n @totalPlantProfit.setter\n def totalPlantProfit(self, totalPlantProfit):\n self._totalPlantProfit = totalPlantProfit\n\n @property\n def totalPlantProfitPerGHFloorArea(self):\n return self._totalPlantProfitPerGHFloorArea\n @totalPlantProfitPerGHFloorArea.setter\n def totalPlantProfitPerGHFloorArea(self, totalPlantProfitPerGHFloorArea):\n self._totalPlantProfitPerGHFloorArea = totalPlantProfitPerGHFloorArea\n\n @property\n def economicProfit(self):\n return self._economicProfit\n @economicProfit.setter\n def economicProfit(self, economicProfit):\n self._economicProfit = economicProfit\n\n @property\n def economicProfitPerGHFloorArea(self):\n return self._economicProfitPerGHFloorArea\n @economicProfitPerGHFloorArea.setter\n def economicProfitPerGHFloorArea(self, economicProfitPerGHFloorArea):\n self._economicProfitPerGHFloorArea = economicProfitPerGHFloorArea\n\n def setAverageDLIonEachCycle(self, 
averageDLIonEachCycle):\n self._averageDLIonEachCycle = averageDLIonEachCycle\n def getAverageDLIonEachCycle(self):\n return self._averageDLIonEachCycle\n\n def setYear(self, year):\n self._year = year\n def getYear(self):\n return self._year\n\n def setMonth(self, month):\n self._month = month\n def getMonth(self):\n return self._month\n\n def setDay(self, day):\n self._day = day\n def getDay(self):\n return self._day\n\n def setHour(self, hour):\n self._hour = hour\n def getHour(self):\n return self._hour\n\n ######################### imported data start #########################\n def setImportedHourlyHorizontalDirectSolarRadiation(self, importedHourlyHorizontalDirectSolarRadiation):\n self._importedHourlyHorizontalDirectSolarRadiation = importedHourlyHorizontalDirectSolarRadiation\n def getImportedHourlyHorizontalDirectSolarRadiation(self):\n return self._importedHourlyHorizontalDirectSolarRadiation\n\n def setImportedHourlyHorizontalDiffuseSolarRadiation(self, importedHourlyHorizontalDiffuseSolarRadiation):\n self._importedHourlyHorizontalDiffuseSolarRadiation = importedHourlyHorizontalDiffuseSolarRadiation\n def getImportedHourlyHorizontalDiffuseSolarRadiation(self):\n return self._importedHourlyHorizontalDiffuseSolarRadiation\n\n def setImportedHourlyHorizontalTotalBeamMeterBodyTemperature(self, importedHourlyHorizontalTotalBeamMeterBodyTemperature):\n self._importedHourlyHorizontalTotalBeamMeterBodyTemperature = importedHourlyHorizontalTotalBeamMeterBodyTemperature\n def getImportedHourlyHorizontalTotalBeamMeterBodyTemperature(self):\n return self._importedHourlyHorizontalTotalBeamMeterBodyTemperature\n\n def setImportedHourlyAirTemperature(self, importedHourlyAirTemperature):\n self._importedHourlyAirTemperature = importedHourlyAirTemperature\n def getImportedHourlyAirTemperature(self):\n return self._importedHourlyAirTemperature\n\n @property\n def hourlyRelativeHumidity(self):\n return self._hourlyRelativeHumidity\n @hourlyRelativeHumidity.setter\n def 
hourlyRelativeHumidity(self, hourlyRelativeHumidity):\n self._hourlyRelativeHumidity = hourlyRelativeHumidity\n ######################### imported data end #########################\n\n\n ######################### solar radiation to tilted OPV (roof) start #########################\n @property\n def dailyWhopvoutperAreaEastRoof(self):\n return self._dailyWhopvoutperAreaEastRoof\n @dailyWhopvoutperAreaEastRoof.setter\n def dailyWhopvoutperAreaEastRoof(self, dailyWhopvoutperAreaEastRoof):\n self._dailyWhopvoutperAreaEastRoof = dailyWhopvoutperAreaEastRoof\n\n @property\n def dailyWhopvoutperAreaWestRoof(self):\n return self._dailyWhopvoutperAreaWestRoof\n @dailyWhopvoutperAreaWestRoof.setter\n def dailyWhopvoutperAreaWestRoof(self, dailyWhopvoutperAreaWestRoof):\n self._dailyWhopvoutperAreaWestRoof = dailyWhopvoutperAreaWestRoof\n\n @property\n def dailykWhopvoutperAreaEastRoof(self):\n return self._dailykWhopvoutperAreaEastRoof\n @dailykWhopvoutperAreaEastRoof.setter\n def dailykWhopvoutperAreaEastRoof(self, dailykWhopvoutperAreaEastRoof):\n self._dailykWhopvoutperAreaEastRoof = dailykWhopvoutperAreaEastRoof\n\n @property\n def dailykWhopvoutperAreaWestRoof(self):\n return self._dailykWhopvoutperAreaWestRoof\n @dailykWhopvoutperAreaWestRoof.setter\n def dailykWhopvoutperAreaWestRoof(self, dailykWhopvoutperAreaWestRoof):\n self._dailykWhopvoutperAreaWestRoof = dailykWhopvoutperAreaWestRoof\n\n @property\n def totalkWhopvoutPerday(self):\n return self._totalkWhopvoutPerday\n @totalkWhopvoutPerday.setter\n def totalkWhopvoutPerday(self, totalkWhopvoutPerday):\n self._totalkWhopvoutPerday = totalkWhopvoutPerday\n\n @property\n def monthlyElectricityRetailPrice(self):\n return self._monthlyElectricityRetailPrice\n @monthlyElectricityRetailPrice.setter\n def monthlyElectricityRetailPrice(self, monthlyElectricityRetailPrice):\n self._monthlyElectricityRetailPrice = monthlyElectricityRetailPrice\n\n @property\n def totalkWhopvoutPerAreaPerday(self):\n return 
self._totalkWhopvoutPerAreaPerday\n @totalkWhopvoutPerAreaPerday.setter\n def totalkWhopvoutPerAreaPerday(self, totalkWhopvoutPerAreaPerday):\n self._totalkWhopvoutPerAreaPerday = totalkWhopvoutPerAreaPerday\n ######################### solar radiation to tilted OPV (roof) end #########################\n\n def setIfGrowForSummerPeriod(self, ifGrowForSummerPeriod):\n self._ifGrowForSummerPeriod = ifGrowForSummerPeriod\n def getIfGrowForSummerPeriod(self):\n return self._ifGrowForSummerPeriod\n\n def setEstimateSolarRadiationMode(self, estimateSolarRadiationMode):\n self._estimateSolarRadiationMode = estimateSolarRadiationMode\n\n def getEstimateSolarRadiationMode(self):\n return self._estimateSolarRadiationMode\n\n def setIfHasShadingCurtain(self, ifHasShadingCurtain):\n self._ifHasShadingCurtain = ifHasShadingCurtain\n\n def getIfHasShadingCurtain(self):\n return self._ifHasShadingCurtain\n\n ############################################ angles start################\n @property\n def hourlySolarIncidenceAngleEastDirection(self):\n return self._hourlySolarIncidenceAngleEastDirection\n @hourlySolarIncidenceAngleEastDirection.setter\n def hourlySolarIncidenceAngleEastDirection(self, hourlySolarIncidenceAngleEastDirection):\n self._hourlySolarIncidenceAngleEastDirection = hourlySolarIncidenceAngleEastDirection\n\n @property\n def hourlySolarIncidenceAngleWestDirection(self):\n return self._hourlySolarIncidenceAngleWestDirection\n @hourlySolarIncidenceAngleWestDirection.setter\n def hourlySolarIncidenceAngleWestDirection(self, hourlySolarIncidenceAngleWestDirection):\n self._hourlySolarIncidenceAngleWestDirection = hourlySolarIncidenceAngleWestDirection\n\n @property\n def hourlySolarAltitudeAngle(self):\n return self._hourlySolarAltitudeAngle\n\n @hourlySolarAltitudeAngle.setter\n def hourlySolarAltitudeAngle(self, hourlySolarAltitudeAngle):\n self._hourlySolarAltitudeAngle = hourlySolarAltitudeAngle\n\n\n @property\n def hourlySolarAzimuthAngle(self):\n return 
self._hourlySolarAzimuthAngle\n\n @hourlySolarAzimuthAngle.setter\n def hourlySolarAzimuthAngle(self, hourlySolarAzimuthAngle):\n self._hourlySolarAzimuthAngle = hourlySolarAzimuthAngle\n\n\n @property\n def hourlyModuleAzimuthAngleEast(self):\n return self._hourlyModuleAzimuthAngleEast\n\n @hourlyModuleAzimuthAngleEast.setter\n def hourlyModuleAzimuthAngleEast(self, hourlyModuleAzimuthAngleEast):\n self._hourlyModuleAzimuthAngleEast = hourlyModuleAzimuthAngleEast\n\n\n @property\n def hourlyModuleAzimuthAngleWest(self):\n return self._hourlyModuleAzimuthAngleWest\n\n @hourlyModuleAzimuthAngleWest.setter\n def hourlyModuleAzimuthAngleWest(self, hourlyModuleAzimuthAngleWest):\n self._hourlyModuleAzimuthAngleWest = hourlyModuleAzimuthAngleWest\n ############################################ angles end################\n\n ##############################solar irradiance to plants start##############################\n @property\n def directSolarIrradianceBeforeShadingCurtain(self):\n return self._directSolarIrradianceBeforeShadingCurtain\n @directSolarIrradianceBeforeShadingCurtain.setter\n def directSolarIrradianceBeforeShadingCurtain(self, directSolarIrradianceBeforeShadingCurtain):\n self._directSolarIrradianceBeforeShadingCurtain = directSolarIrradianceBeforeShadingCurtain\n\n @property\n def diffuseSolarIrradianceBeforeShadingCurtain(self):\n return self._diffuseSolarIrradianceBeforeShadingCurtain\n @diffuseSolarIrradianceBeforeShadingCurtain.setter\n def diffuseSolarIrradianceBeforeShadingCurtain(self, diffuseSolarIrradianceBeforeShadingCurtain):\n self._diffuseSolarIrradianceBeforeShadingCurtain = diffuseSolarIrradianceBeforeShadingCurtain\n\n @property\n def directSolarIrradianceToPlants(self):\n return self._directSolarIrradianceToPlants\n @directSolarIrradianceToPlants.setter\n def directSolarIrradianceToPlants(self, directSolarIrradianceToPlants):\n self._directSolarIrradianceToPlants = directSolarIrradianceToPlants\n\n @property\n def 
diffuseSolarIrradianceToPlants(self):\n return self._diffuseSolarIrradianceToPlants\n @diffuseSolarIrradianceToPlants.setter\n def diffuseSolarIrradianceToPlants(self, diffuseSolarIrradianceToPlants):\n self._diffuseSolarIrradianceToPlants = diffuseSolarIrradianceToPlants\n\n @property\n def transmittanceThroughShadingCurtainChangingEachMonth(self):\n return self._transmittanceThroughShadingCurtainChangingEachMonth\n @transmittanceThroughShadingCurtainChangingEachMonth.setter\n def transmittanceThroughShadingCurtainChangingEachMonth(self, transmittanceThroughShadingCurtainChangingEachMonth):\n self._transmittanceThroughShadingCurtainChangingEachMonth = transmittanceThroughShadingCurtainChangingEachMonth\n\n @property\n def directPPFDToPlants(self):\n return self._directPPFDToPlants\n @directPPFDToPlants.setter\n def directPPFDToPlants(self, directPPFDToPlants):\n self._directPPFDToPlants = directPPFDToPlants\n\n @property\n def diffusePPFDToPlants(self):\n return self._diffusePPFDToPlants\n @diffusePPFDToPlants.setter\n def diffusePPFDToPlants(self, diffusePPFDToPlants):\n self._diffusePPFDToPlants = diffusePPFDToPlants\n\n @property\n def directDLIToPlants(self):\n return self._directDLIToPlants\n @directDLIToPlants.setter\n def directDLIToPlants(self, directDLIToPlants):\n self._directDLIToPlants = directDLIToPlants\n\n @property\n def diffuseDLIToPlants(self):\n return self._diffuseDLIToPlants\n @diffuseDLIToPlants.setter\n def diffuseDLIToPlants(self, diffuseDLIToPlants):\n self._diffuseDLIToPlants = diffuseDLIToPlants\n\n @property\n def totalDLItoPlants(self):\n return self._totalDLItoPlants\n @totalDLItoPlants.setter\n def totalDLItoPlants(self, totalDLItoPlants):\n self._totalDLItoPlants = totalDLItoPlants\n ##############################solar irradiance to plants end##############################\n\n @property\n def hourlyDayOrNightFlag(self):\n return self._hourlyDayOrNightFlag\n @hourlyDayOrNightFlag.setter\n def hourlyDayOrNightFlag(self, 
hourlyDayOrNightFlag):\n self._hourlyDayOrNightFlag = hourlyDayOrNightFlag\n\n ##############################imported data start##############################\n @property\n def hourlyHorizontalDirectOuterSolarIrradiance(self):\n return self._hourlyHorizontalDirectOuterSolarIrradiance\n @hourlyHorizontalDirectOuterSolarIrradiance.setter\n def hourlyHorizontalDirectOuterSolarIrradiance(self, hourlyHorizontalDirectOuterSolarIrradiance):\n self._hourlyHorizontalDirectOuterSolarIrradiance = hourlyHorizontalDirectOuterSolarIrradiance\n\n @property\n def hourlyHorizontalDiffuseOuterSolarIrradiance(self):\n return self._hourlyHorizontalDiffuseOuterSolarIrradiance\n @hourlyHorizontalDiffuseOuterSolarIrradiance.setter\n def hourlyHorizontalDiffuseOuterSolarIrradiance(self, hourlyHorizontalDiffuseOuterSolarIrradiance):\n self._hourlyHorizontalDiffuseOuterSolarIrradiance = hourlyHorizontalDiffuseOuterSolarIrradiance\n\n @property\n def hourlyHorizontalTotalOuterSolarIrradiance(self):\n return self._hourlyHorizontalTotalOuterSolarIrradiance\n @hourlyHorizontalTotalOuterSolarIrradiance.setter\n def hourlyHorizontalTotalOuterSolarIrradiance(self, hourlyHorizontalTotalOuterSolarIrradiance):\n self._hourlyHorizontalTotalOuterSolarIrradiance = hourlyHorizontalTotalOuterSolarIrradiance\n\n @property\n def hourlyHorizontalTotalBeamMeterBodyTemperature(self):\n return self._hourlyHorizontalTotalBeamMeterBodyTemperature\n @hourlyHorizontalTotalBeamMeterBodyTemperature.setter\n def hourlyHorizontalTotalBeamMeterBodyTemperature(self, hourlyHorizontalTotalBeamMeterBodyTemperature):\n self._hourlyHorizontalTotalBeamMeterBodyTemperature = hourlyHorizontalTotalBeamMeterBodyTemperature\n\n @property\n def hourlyAirTemperature(self):\n return self._hourlyAirTemperature\n @hourlyAirTemperature.setter\n def hourlyAirTemperature(self, hourlyAirTemperature):\n self._hourlyAirTemperature = hourlyAirTemperature\n ##############################imported data end##############################\n\n 
##############################multispan roof transmittance start##############################\n @property\n def T_matForPerpendicularIrrEastOrNorthFacingRoof(self):\n return self._T_matForPerpendicularIrrEastOrNorthFacingRoof\n @T_matForPerpendicularIrrEastOrNorthFacingRoof.setter\n def T_matForPerpendicularIrrEastOrNorthFacingRoof(self, T_matForPerpendicularIrrEastOrNorthFacingRoof):\n self._T_matForPerpendicularIrrEastOrNorthFacingRoof = T_matForPerpendicularIrrEastOrNorthFacingRoof\n\n @property\n def T_matForPerpendicularIrrWestOrSouthFacingRoof(self):\n return self._T_matForPerpendicularIrrWestOrSouthFacingRoof\n @T_matForPerpendicularIrrWestOrSouthFacingRoof.setter\n def T_matForPerpendicularIrrWestOrSouthFacingRoof(self, T_matForPerpendicularIrrWestOrSouthFacingRoof):\n self._T_matForPerpendicularIrrWestOrSouthFacingRoof = T_matForPerpendicularIrrWestOrSouthFacingRoof\n\n @property\n def integratedT_mat(self):\n return self._integratedT_mat\n @integratedT_mat.setter\n def integratedT_mat(self, integratedT_mat):\n self._integratedT_mat = integratedT_mat\n ##############################multispan roof transmittance end##############################\n\n @property\n def directHorizontalSolarRadiation(self):\n return self._directHorizontalSolarRadiation\n @directHorizontalSolarRadiation.setter\n def directHorizontalSolarRadiation(self, directHorizontalSolarRadiation):\n self._directHorizontalSolarRadiation = directHorizontalSolarRadiation\n\n @property\n def diffuseHorizontalSolarRadiation(self):\n return self._diffuseHorizontalSolarRadiation\n @diffuseHorizontalSolarRadiation.setter\n def diffuseHorizontalSolarRadiation(self, diffuseHorizontalSolarRadiation):\n self._diffuseHorizontalSolarRadiation = diffuseHorizontalSolarRadiation\n\n @property\n def totalHorizontalSolarRadiation(self):\n return self._totalHorizontalSolarRadiation\n @totalHorizontalSolarRadiation.setter\n def totalHorizontalSolarRadiation(self, totalHorizontalSolarRadiation):\n 
self._totalHorizontalSolarRadiation = totalHorizontalSolarRadiation\n\n ##############################plant weights (growth)start ##############################\n @property\n def LeafAreaIndex_J_VanHenten1994(self):\n return self._LeafAreaIndex_J_VanHenten1994\n @LeafAreaIndex_J_VanHenten1994.setter\n def LeafAreaIndex_J_VanHenten1994(self, LeafAreaIndex_J_VanHenten1994):\n self._LeafAreaIndex_J_VanHenten1994 = LeafAreaIndex_J_VanHenten1994\n\n @property\n def summerPeriodFlagArray(self):\n return self._summerPeriodFlagArray\n @summerPeriodFlagArray.setter\n def summerPeriodFlagArray(self, summerPeriodFlagArray):\n self._summerPeriodFlagArray = summerPeriodFlagArray\n\n @property\n def dailyShootFreshMass(self):\n return self._dailyShootFreshMass\n @dailyShootFreshMass.setter\n def dailyShootFreshMass(self, dailyShootFreshMass):\n self._dailyShootFreshMass = dailyShootFreshMass\n\n @property\n def dailyUnitDailyFreshWeightIncrease(self):\n return self._dailyUnitDailyFreshWeightIncrease\n @dailyUnitDailyFreshWeightIncrease.setter\n def dailyUnitDailyFreshWeightIncrease(self, dailyUnitDailyFreshWeightIncrease):\n self._dailyUnitDailyFreshWeightIncrease = dailyUnitDailyFreshWeightIncrease\n\n @property\n def dailyAccumulatedUnitDailyFreshWeightIncrease(self):\n return self._dailyAccumulatedUnitDailyFreshWeightIncrease\n @dailyAccumulatedUnitDailyFreshWeightIncrease.setter\n def dailyAccumulatedUnitDailyFreshWeightIncrease(self, dailyAccumulatedUnitDailyFreshWeightIncrease):\n self._dailyAccumulatedUnitDailyFreshWeightIncrease = dailyAccumulatedUnitDailyFreshWeightIncrease\n\n @property\n def dailyUnitHarvestedFreshWeight(self):\n return self._dailyUnitHarvestedFreshWeight\n @dailyUnitHarvestedFreshWeight.setter\n def dailyUnitHarvestedFreshWeight(self, dailyUnitHarvestedFreshWeight):\n self._dailyUnitHarvestedFreshWeight = dailyUnitHarvestedFreshWeight\n\n @property\n def shootFreshMassPerAreaKgPerDay(self):\n return self._shootFreshMassPerAreaKgPerDay\n 
@shootFreshMassPerAreaKgPerDay.setter\n def shootFreshMassPerAreaKgPerDay(self, shootFreshMassPerAreaKgPerDay):\n self._shootFreshMassPerAreaKgPerDay = shootFreshMassPerAreaKgPerDay\n\n @property\n def harvestedShootFreshMassPerAreaKgPerDay(self):\n return self._harvestedShootFreshMassPerAreaKgPerDay\n @harvestedShootFreshMassPerAreaKgPerDay.setter\n def harvestedShootFreshMassPerAreaKgPerDay(self, harvestedShootFreshMassPerAreaKgPerDay):\n self._harvestedShootFreshMassPerAreaKgPerDay = harvestedShootFreshMassPerAreaKgPerDay\n\n @property\n def totalHarvestedShootFreshMass(self):\n return self._totalHarvestedShootFreshMass\n @totalHarvestedShootFreshMass.setter\n def totalHarvestedShootFreshMass(self, totalHarvestedShootFreshMass):\n self._totalHarvestedShootFreshMass = totalHarvestedShootFreshMass\n\n @property\n def GHSolarIrradianceValidationData(self):\n return self._GHSolarIrradianceValidationData\n @GHSolarIrradianceValidationData.setter\n def GHSolarIrradianceValidationData(self, GHSolarIrradianceValidationData):\n self._GHSolarIrradianceValidationData = GHSolarIrradianceValidationData\n\n @property\n def GHAirTemperatureValidationData(self):\n return self._GHAirTemperatureValidationData\n @GHAirTemperatureValidationData.setter\n def GHAirTemperatureValidationData(self, GHAirTemperatureValidationData):\n self._GHAirTemperatureValidationData = GHAirTemperatureValidationData\n\n" }, { "alpha_fraction": 0.7102828025817871, "alphanum_fraction": 0.7268165349960327, "avg_line_length": 52.69626235961914, "blob_id": "201bbdc9f0fb1300d3173e3fc230e11da0534836", "content_id": "dfa54c168f47c580636375de0a333eb0501ff151", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34487, "license_type": "permissive", "max_line_length": 261, "num_lines": 642, "path": "/Lettuce.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 
-*-\n#######################################################\n# author :Kensaku Okada [[email protected]]\n# create date : 12 Dec 2016\n# last edit date: 14 Dec 2016\n#######################################################\n\n##########import package files##########\nimport os as os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util as util\nimport datetime\nimport sys\nfrom dateutil.relativedelta import relativedelta\n#######################################################\n\n\ndef calcDailyInnerLightIntensityPPFDSum (HourlyInnerLightIntensityPPFD, productionCycle, cultivationDaysperHarvest, dayOfCultivation):\n '''\n sum all of the hourly light intensity for a specific cultivation day\n\n param:HourlyInnerLightIntensityPPFD (μmol/m^2/s)\n param:productionCycle (-)\n param:cultivationDaysperHarvest (days)\n param:dayOfCultivation (th day)\n\n return:dailyInnerLightIntensityPPFD (μmol/m^2/s)\n '''\n #unit: (μmol/m^2/s)\n dailyInnerLightIntensityPPFDsum = 0.0\n\n # sum hourly solra radiation to the daily radiation\n for hour in range (0, int(constant.hourperDay)):\n dailyInnerLightIntensityPPFDsum += HourlyInnerLightIntensityPPFD[productionCycle * cultivationDaysperHarvest * int(constant.hourperDay) + dayOfCultivation * int(constant.hourperDay) + hour]\n\n return dailyInnerLightIntensityPPFDsum\n\n\ndef calcDailyFreshWeightIncreaseByShimizuEtAl2008Revised(dailyInnerLightIntensityDLI, cultivationDaysperHarvest):\n '''\n calculate the fresh weight increase per day based on the revised model of Shimizu et at (2008): Hiroshi SHIMIZU, Megumi KUSHIDA and Wataru FUJINUMA, 2008, “A Growth Model for Leaf Lettuce under Greenhouse Environments”\n The detail is described at ProjectB INFO 521\n\n param:dailyInnerLightIntensityPPFD [μmol/m^2/s]\n return: dailyFreshWeightIncrease[g/day]\n '''\n print(\"dailyInnerLightIntensityDLI:{}\".format(dailyInnerLightIntensityDLI))\n 
print(\"cultivationDaysperHarvest:{}\".format(cultivationDaysperHarvest))\n\n # average the light intensity for the cultivation by the period of lighting (photoperiod), which is assumed to be 14 hours.\n dailyInnerLightIntensityDLIAverage = dailyInnerLightIntensityDLI / constant.photoperiod\n # print \"dailyInnerLightIntensityPPFDAverage:{}\".format(dailyInnerLightIntensityPPFDAverage)\n\n # the expected fresh weight at harvest. the unit is [g] coding Eq. 1-3-2-6 and 1-3-2-7\n # the minimum final weight [g]\n finalWeightperHarvest = 8.72\n\n if dailyInnerLightIntensityDLIAverage < 1330:\n # finalWeightperHarvest = 0.00000060 * HourlyinnerLightIntensityPPFDAveragePerproductionCycle**3 - 0.00162758 * HourlyinnerLightIntensityPPFDAveragePerproductionCycle**2 + 1.14477896 * HourlyinnerLightIntensityPPFDAveragePerproductionCycle - 46.39100859\n finalWeightperHarvest = 0.00000060 * dailyInnerLightIntensityDLIAverage ** 3 - 0.00162758 * dailyInnerLightIntensityDLIAverage ** 2 + 1.14477896 * dailyInnerLightIntensityDLIAverage - 46.39100859\n\n # the actual fresh weight of crop per harvest. the unit is g . Eq 1-3-2-4\n dailyFreshWeightIncrease = (9.0977 * finalWeightperHarvest - 17.254) * (7.26 * 10 ** (-5)) ** math.e ** (\n -0.05041 * cultivationDaysperHarvest)\n\n return dailyFreshWeightIncrease\n\n\ndef calcUnitDailyFreshWeightBoth2003TaylorExpantionWithVaryingDLI(hourlyInnerPPFDToPlants, cultivationDaysperHarvest, cropElectricityYieldSimulator1 = None):\n '''\n calculate the unit fresh weight increase per day based on the revised model of Both (2003):\n Both, A., 2003. Ten years of hydroponic lettuce research. 
Knowledgecenter.Illumitex.Com 18, 8.\n\n param:hourlyInnerPPFDToPlants [μmol/m^2/s] per day\n param:cultivationDaysperHarvest [days] per day\n return: dailyFreshWeightIncrease[g/day]\n return: hervestDay[days]: days after seeeding\n '''\n # print \"dailyInnerLightIntensityDLI:{}\".format(dailyInnerLightIntensityDLI)\n # print \"cultivationDaysperHarvest:{}\".format(cultivationDaysperHarvest)\n\n # convert PPFD to DLI\n innerDLIToPlants = util.convertFromHourlyPPFDWholeDayToDLI(hourlyInnerPPFDToPlants)\n # print \"innerDLIToPlants:{}\".format(innerDLIToPlants)\n\n shootFreshMassList, unitDailyFreshWeightIncrease,accumulatedUnitDailyFreshWeightIncrease,unitHarvestedFreshWeight = \\\n calcUnitDailyFreshWeightBoth2003TaylorExpantionWithVaryingDLIDetail(innerDLIToPlants, cultivationDaysperHarvest, cropElectricityYieldSimulator1)\n\n return shootFreshMassList, unitDailyFreshWeightIncrease,accumulatedUnitDailyFreshWeightIncrease,unitHarvestedFreshWeight\n\n\ndef calcUnitDailyFreshWeightBoth2003TaylorExpantionWithVaryingDLIDetail(innerDLIToPlants, cultivationDaysperHarvest, cropElectricityYieldSimulator1 = None):\n '''\n calculate the unit fresh weight increase per day based on the revised model of Both (2003):\n Both, A., 2003. Ten years of hydroponic lettuce research. 
Knowledgecenter.Illumitex.Com 18, 8.\n\n param:hourlyInnerPPFDToPlants [μmol/m^2/s] per day\n param:cultivationDaysperHarvest [days] per day\n return: dailyFreshWeightIncrease[g/day]\n return: hervestDay[days]: days after seeeding\n '''\n\n # if you continue to grow plant during the summer period, then this is true\n # ifGrowForSummerPeriod = cropElectricityYieldSimulator1.getIfGrowForSummerPeriod()\n ifGrowForSummerPeriod = constant.ifGrowForSummerPeriod\n\n # print (\"ifGrowForSummerPeriod:{}\".format(ifGrowForSummerPeriod))\n\n # take date and time\n year = cropElectricityYieldSimulator1.getYear()\n month = cropElectricityYieldSimulator1.getMonth()\n day = cropElectricityYieldSimulator1.getDay()\n\n # change the number of array for DLI\n yearEachDay = year[::24]\n monthEachDay = month[::24]\n dayEachDay = day[::24]\n\n # define statistics for calculation\n\n # daily unit plant weight on each cycle [g]\n shootDryMassList = np.zeros(util.calcSimulationDaysInt())\n # d_shootDryMassList = np.zeros(util.calcSimulationDaysInt())\n # dd_shootDryMassList = np.zeros(util.calcSimulationDaysInt())\n # ddd_shootDryMassList = np.zeros(util.calcSimulationDaysInt())\n\n # daily increase in unit plant weight [g]\n unitDailyFreshWeightIncrease = np.zeros(util.calcSimulationDaysInt())\n # accumulated weight of daily increase in unit plant weight during the whole simulation days [g]\n accumulatedUnitDailyFreshWeightIncrease = np.zeros(util.calcSimulationDaysInt())\n # harvested daily unit plant weight [g]\n unitHarvestedFreshWeight = np.zeros(util.calcSimulationDaysInt())\n # the average light DLi of each cultivation cycle, the data is stored in the element on the harvest date.\n # this data is used to calculate the penalty of plant yield by photo inhibition.\n averageDLIonEachCycle = np.zeros(util.calcSimulationDaysInt())\n # time step[day]\n dt = 1\n\n shootDryMassInit = 0.0001\n ifharvestedLastDay = False\n\n # the initial harvest days\n harvestDaysList = 
np.array(range(cultivationDaysperHarvest - 1, util.getSimulationDaysInt(), cultivationDaysperHarvest))\n # print (\"harvestDaysList:{}\".format(harvestDaysList))\n\n # the variable storing the cultivation start day on each cycle\n CultivationCycleStartDay = datetime.date(yearEachDay[0], monthEachDay[0], dayEachDay[0])\n # CultivationCycleEndtDay = datetime.date(yearEachDay[0], monthEachDay[0], dayEachDay[0])\n\n i = 0\n # print \"cycle * cultivationDaysperHarvest -1:{}\".format(accumulatedUnitDailyFreshWeightIncrease[0 * cultivationDaysperHarvest -1])\n while i < util.getSimulationDaysInt():\n\n DaysperCycle = datetime.timedelta(days = cultivationDaysperHarvest)\n # if ifGrowForSummerPeriod is False the end of the cultivation at a cycle is within the summer period, then skip the cycle (= plus 35 days to index)\n if ifGrowForSummerPeriod is False and i % cultivationDaysperHarvest == 0 and \\\n datetime.date(yearEachDay[i], monthEachDay[i], dayEachDay[i]) + DaysperCycle >= datetime.date(yearEachDay[i], constant.SummerPeriodStartMM, constant.SummerPeriodStartDD) and \\\n datetime.date(yearEachDay[i], monthEachDay[i], dayEachDay[i]) + DaysperCycle <= datetime.date(yearEachDay[i], constant.SummerPeriodEndMM, constant.SummerPeriodEndDD):\n # skip the cultivation cycle\n i += cultivationDaysperHarvest\n continue\n\n # if ifGrowForSummerPeriod is False, and the end of the cultivation at a cycle is not within the summer period, but the first day is within the summer period, then shift the first day to\n # the next day of the summer period, and shift all of the cultivation days in harvestDaysList\n elif ifGrowForSummerPeriod is False and i % cultivationDaysperHarvest == 0 and \\\n datetime.date(yearEachDay[i], monthEachDay[i], dayEachDay[i]) >= datetime.date(yearEachDay[i], constant.SummerPeriodStartMM, constant.SummerPeriodStartDD) and \\\n datetime.date(yearEachDay[i], monthEachDay[i], dayEachDay[i]) <= datetime.date(yearEachDay[i], constant.SummerPeriodEndMM, 
constant.SummerPeriodEndDD):\n # shift the first day to the next day of the summer period\n dateDiff = datetime.date(yearEachDay[i], constant.SummerPeriodEndMM, constant.SummerPeriodEndDD) - datetime.date(yearEachDay[i], monthEachDay[i], dayEachDay[i])\n i += dateDiff.days + 1\n\n # shift each harvest period by dateDiff to keep the harvest period cultivationDaysperHarvest even after atarting the cultivation next to the summer period.\n harvestDaysList += dateDiff.days + 1\n\n continue\n\n # define the initial values on each cycle [g]\n if ifharvestedLastDay == True or i == 0:\n # if i % cultivationDaysperHarvest == 0:\n # print (\"when if ifharvestedLastDay == True, i :{}\".format(i))\n # plant the new seed\n shootDryMassList[i] = shootDryMassInit\n\n CultivationCycleStartDay = datetime.date(yearEachDay[i], monthEachDay[i], dayEachDay[i])\n # print (\"cultivation start date:{}\".format(CultivationCycleStartDay))\n\n ifharvestedLastDay = False\n\n # calculate the plant weight increase\n else:\n # the additional number 1 indicates the difference from the last cultivation day. The difference calculate the increase in calcUnitDailyFreshWeightIncreaseBoth2003TaylorNotForRL\n # daysFromSeeding = i % cultivationDaysperHarvest + 1\n daysFromSeeding = (datetime.date(yearEachDay[i], monthEachDay[i], dayEachDay[i]) - CultivationCycleStartDay).days + 1\n # print(\"daysFromSeeding:{}\".format(daysFromSeeding))\n\n unitDailyFreshWeightIncrease[i] = calcUnitDailyFreshWeightIncreaseBoth2003TaylorNotForRL(innerDLIToPlants[i], shootDryMassList[i], dt, daysFromSeeding)\n\n shootDryMassList[i] = shootDryMassList[i - 1] + unitDailyFreshWeightIncrease[i]\n\n # since all of the initial values of accumulatedUnitDailyFreshWeightIncrease is zero, the value becomes zero when index == 0\n accumulatedUnitDailyFreshWeightIncrease[i] = accumulatedUnitDailyFreshWeightIncrease[i - 1] + unitDailyFreshWeightIncrease[i]\n\n # if it takes 35 days from seedling, harvest the plants!! 
the harvested fresh weight becomes just zero when index is zero because the initial values are zero.\n # print(\"i:{}, np.where(harvestDaysList == i)[0].shape[0]:{}\".format(i, np.where(harvestDaysList == i)[0].shape[0]))\n if np.where(harvestDaysList == i)[0].shape[0] == 1:\n # since the initial element index starts from zero, cultivationDaysperHarvest is minused by 1.\n # if i % cultivationDaysperHarvest == cultivationDaysperHarvest - 1:\n # print(\"harvest plants, i:{}\".format(i))\n\n unitHarvestedFreshWeight[i] = shootDryMassList[i]\n averageDLIonEachCycle[i] = np.mean(innerDLIToPlants[i-(cultivationDaysperHarvest - 1):i+1])\n\n ifharvestedLastDay = True\n\n # delete the harvest day from harvestDaysList\n # harvestDaysList = np.array([harvestDaysList[j] for j in range (0, harvestDaysList.shape[0]) if harvestDaysList[j] != i and harvestDaysList[j] > i ])\n harvestDaysList = np.array([harvestDaysList[j] for j in range (0, harvestDaysList.shape[0]) if harvestDaysList[j] > i ])\n # np.delete(harvestDaysList, np.where(harvestDaysList == i)[0][0])\n # print(\"current harvestDaysList:{}\".format(harvestDaysList))\n\n # increment the counter\n i += 1\n\n # change dry mass weight into fresh mass weight\n # daily increase in unit plant weight [g]\n unitDailyFreshWeightIncrease = unitDailyFreshWeightIncrease * constant.DryMassToFreshMass\n # accumulated weight of daily increase in unit plant weight during the whole simulation days [g]\n accumulatedUnitDailyFreshWeightIncrease = accumulatedUnitDailyFreshWeightIncrease * constant.DryMassToFreshMass\n # harvested daily unit plant weight [g]\n unitHarvestedFreshWeight = unitHarvestedFreshWeight * constant.DryMassToFreshMass\n # daily unit plant weight on each cycle [g]\n shootFreshMassList = shootDryMassList * constant.DryMassToFreshMass\n\n # print \"shootDryMassList:{}\".format(shootDryMassList)\n # print \"unitDailyFreshWeightIncrease:{}\".format(unitDailyFreshWeightIncrease)\n # print 
\"accumulatedUnitDailyFreshWeightIncrease:{}\".format(accumulatedUnitDailyFreshWeightIncrease)\n # print \"unitHarvestedFreshWeight:{}\".format(unitHarvestedFreshWeight)\n\n # set the average light DLi of each cultivation cycle, the data is stored in the element on the harvest date.\n cropElectricityYieldSimulator1.setAverageDLIonEachCycle(averageDLIonEachCycle)\n\n return shootFreshMassList, unitDailyFreshWeightIncrease, accumulatedUnitDailyFreshWeightIncrease, unitHarvestedFreshWeight\n\n\ndef calcUnitDailyFreshWeightIncreaseBoth2003TaylorNotForRL(innerDLIToPlants, shootDryMassList, dt, daysFromSeeding):\n '''\n this function is for general simulation. This is more accurate than calcUnitDailyFreshWeightIncreaseBoth2003Taylor, which should be replaced later.\n\n :param innerDLIToPlants:\n :param shootDryMassList:\n :param dt:\n :return:\n '''\n\n # update each statistic each day\n a = -8.596 + 0.0743 * innerDLIToPlants\n b = 0.4822\n c = -0.006225\n\n\n shootDryMassList = math.e ** (a + b * daysFromSeeding + c * daysFromSeeding ** 2)\n d_shootDryMassList = (b + 2 * c * daysFromSeeding) * shootDryMassList\n dd_shootDryMassList = 2 * c * shootDryMassList + (b + 2 * c * daysFromSeeding) ** 2 * shootDryMassList\n ddd_shootDryMassList = 2 * c * d_shootDryMassList + 4 * c * (b + 2 * c * daysFromSeeding) * shootDryMassList + (b + 2 * c * daysFromSeeding) ** 2 * d_shootDryMassList\n\n # Taylor expansion: x_0 = 0, h = 1 (source: http://eman-physics.net/math/taylor.html)\n shootDryMassIncrease = 1.0 / (math.factorial(1)) * d_shootDryMassList * dt + 1.0 / (math.factorial(2)) * dd_shootDryMassList * ((dt) ** 2) + \\\n 1.0 / (math.factorial(3)) * ddd_shootDryMassList * ((dt) ** 3)\n\n return shootDryMassIncrease\n\n# def calcUnitDailyFreshWeightIncreaseBoth2003Taylor(innerDLIToPlants, cultivationDaysperHarvest, daysFromSeeding):\n# '''\n# this function is only for the Q learning reinforcement learning\n# calculate the unit fresh weight increase per day based on the revised 
model of Both (2003):\n# Both, A., 2003. Ten years of hydroponic lettuce research. Knowledgecenter.Illumitex.Com 18, 8.\n#\n# param:innerDLIToPlants [mol/m^2/day] per day\n# param:cultivationDaysperHarvest [days] per day\n# param:daysFromSeeding [days] per day\n#\n# return: dailyFreshWeightIncrease[g/day]\n# return: hervestDay[days]: days after seeeding\n# '''\n# # print \"dailyInnerLightIntensityDLI:{}\".format(dailyInnerLightIntensityDLI)\n# # print \"cultivationDaysperHarvest:{}\".format(cultivationDaysperHarvest)\n#\n# # daily increase in unit plant weight [g]\n# unitDailyFreshWeightIncrease = np.zeros(1)\n# # accumulated weight of daily increase in unit plant weight during the whole simulation days [g]\n# accumulatedUnitDailyFreshWeightIncrease = np.zeros(1)\n# # harvested daily unit plant weight [g]\n# unitHarvestedFreshWeight = np.zeros(1)\n#\n# # simulationDaysInt = util.calcSimulationDaysInt()\n# simulationDaysInt = 1\n#\n# # num of cultivation cycle\n# NumCultivationCycle = 0\n# # print (\"NumCultivationCycle:{}\".format(NumCultivationCycle))\n#\n# # num of remained days when we cannot finish the cultivation, which is less than the num of cultivation days.\n# CultivationDaysWithNoHarvest = simulationDaysInt - NumCultivationCycle * cultivationDaysperHarvest\n# # print \"CultivationDaysWithNoHarvest:{}\".format(CultivationDaysWithNoHarvest)\n#\n# # define statistics for calculation\n# # daily unit plant weight on each cycle [g]\n# shootDryMassList = np.zeros(len(unitDailyFreshWeightIncrease))\n# d_shootDryMassList = np.zeros(len(unitDailyFreshWeightIncrease))\n# # dd_shootDryMassList = np.zeros(len(unitDailyFreshWeightIncrease))\n# # ddd_shootDryMassList = np.zeros(len(unitDailyFreshWeightIncrease))\n# a = 0\n# b = 0.4822\n# c = -0.006225\n# # time step[day]\n# dt = 1\n#\n# # print \"cycle * cultivationDaysperHarvest -1:{}\".format(accumulatedUnitDailyFreshWeightIncrease[0 * cultivationDaysperHarvest -1])\n#\n# for cycle in range(0, NumCultivationCycle + 
1):\n#\n# # define the initial values on each cycle [g]\n# # shootDryMassInit == the weight on day 0 == weight of seed [g]\n# shootDryMassInit = 0.0001\n# accumulatedUnitDailyFreshWeightIncrease[cycle * cultivationDaysperHarvest] = accumulatedUnitDailyFreshWeightIncrease[\n# cycle * cultivationDaysperHarvest - 1] + shootDryMassInit\n# shootDryMassList[cycle * cultivationDaysperHarvest] = shootDryMassInit\n# d_shootDryMassList[cycle * cultivationDaysperHarvest] = (b + 2 * c * 0.0) * shootDryMassList[cycle * cultivationDaysperHarvest]\n# # dd_shootDryMassList[cycle * cultivationDaysperHarvest] = 2 * c * shootDryMassList[cycle * cultivationDaysperHarvest] + \\\n# # (b + 2 * c * 0.0) ** 2 * shootDryMassList[cycle * cultivationDaysperHarvest]\n# # ddd_shootDryMassList[cycle * cultivationDaysperHarvest] = 2 * c * d_shootDryMassList[cycle * cultivationDaysperHarvest] + \\\n# # 4 * c * (b + 2 * c * 0.0) * shootDryMassList[cycle * cultivationDaysperHarvest] + \\\n# # (b + 2 * c * 0) ** 2 * d_shootDryMassList[cycle * cultivationDaysperHarvest]\n#\n# # print \"shootDryMassList[cycle*cultivationDaysperHarvest]:{}\".format(shootDryMassList[cycle*cultivationDaysperHarvest])\n# # print \"d_shootDryMassList[cycle*cultivationDaysperHarvest]:{}\".format(d_shootDryMassList[cycle*cultivationDaysperHarvest])\n# # print \"dd_shootDryMassList[cycle*cultivationDaysperHarvest]:{}\".format(dd_shootDryMassList[cycle*cultivationDaysperHarvest])\n# # print \"ddd_shootDryMassList[cycle*cultivationDaysperHarvest]:{}\".format(ddd_shootDryMassList[cycle*cultivationDaysperHarvest])\n#\n#\n# # update each statistic each day\n# a = -8.596 + 0.0743 * innerDLIToPlants\n# shootDryMassList = math.e ** (a + b * daysFromSeeding + c * daysFromSeeding ** 2)\n# d_shootDryMassList = (b + 2 * c * daysFromSeeding) * shootDryMassList\n# dd_shootDryMassList = 2 * c * shootDryMassList + (b + 2 * c * daysFromSeeding) ** 2 * shootDryMassList\n# ddd_shootDryMassList = 2 * c * d_shootDryMassList + 4 * c * (b + 2 * c * 
daysFromSeeding) * shootDryMassList + \\\n# (b + 2 * c * daysFromSeeding) ** 2 * d_shootDryMassList\n#\n# # print \"day{}, a:{},shootDryMassList[{}]:{}\".format(day, a, cycle*cultivationDaysperHarvest+day, shootDryMassList[cycle*cultivationDaysperHarvest+day])\n#\n# # Taylor expansion: x_0 = 0, h = 1 (source: http://eman-physics.net/math/taylor.html)\n# # shootDryMassList[cycle * cultivationDaysperHarvest + day] = shootDryMassList[cycle * cultivationDaysperHarvest + day - 1] + \\\n# # 1.0 / (math.factorial(1)) * d_shootDryMassList[\n# # cycle * cultivationDaysperHarvest + day - 1] * dt + \\\n# # 1.0 / (math.factorial(2)) * dd_shootDryMassList[\n# # cycle * cultivationDaysperHarvest + day - 1] * ((dt) ** 2) + \\\n# # 1.0 / (math.factorial(3)) * ddd_shootDryMassList[\n# # cycle * cultivationDaysperHarvest + day - 1] * ((dt) ** 3)\n#\n# # unitDailyFreshWeightIncrease[cycle * cultivationDaysperHarvest + day] = shootDryMassList[cycle * cultivationDaysperHarvest + day] - \\\n# # shootDryMassList[cycle * cultivationDaysperHarvest + day - 1]\n# unitDailyFreshWeightIncrease = d_shootDryMassList\n#\n# # accumulatedUnitDailyFreshWeightIncrease[cycle * cultivationDaysperHarvest + day] = \\\n# # accumulatedUnitDailyFreshWeightIncrease[cycle * cultivationDaysperHarvest + day - 1] + unitDailyFreshWeightIncrease[\n# # cycle * cultivationDaysperHarvest + day]\n#\n# # print \"day:{}, cycle*cultivationDaysperHarvest+day:{}, shootDryMassList[cycle*cultivationDaysperHarvest + day]:{}\".format(\n# # day, cycle * cultivationDaysperHarvest + day, shootDryMassList[cycle * cultivationDaysperHarvest + day])\n#\n# # change dry mass weight into fresh mass weight\n# # daily increase in unit plant weight [g]\n# unitDailyFreshWeightIncrease = unitDailyFreshWeightIncrease * constant.DryMassToFreshMass\n# # accumulated weight of daily increase in unit plant weight during the whole simulation days [g]\n# # accumulatedUnitDailyFreshWeightIncrease = accumulatedUnitDailyFreshWeightIncrease * 
constant.DryMassToFreshMass\n# # harvested daily unit plant weight [g]\n# # unitHarvestedFreshWeight = unitHarvestedFreshWeight * constant.DryMassToFreshMass\n# # daily unit plant weight on each cycle [g]\n# # shootFreshMassList = shootDryMassList * constant.DryMassToFreshMass\n#\n# # print \"shootDryMassList:{}\".format(shootDryMassList)\n# # print \"unitDailyFreshWeightIncrease:{}\".format(unitDailyFreshWeightIncrease)\n# # print \"accumulatedUnitDailyFreshWeightIncrease:{}\".format(accumulatedUnitDailyFreshWeightIncrease)\n# # print \"unitHarvestedFreshWeight:{}\".format(unitHarvestedFreshWeight)\n#\n# return unitDailyFreshWeightIncrease\n\ndef getLettucePricepercwt(year):\n '''\n return the lettuce price per cwt based on the year of sales\n :param year:\n :return:\n '''\n return 0.583 * year - 1130\n\n\ndef getRetailPricePerArea(simulatorClass):\n # the source of the romaine lettuce retail price data\n # https://data.bls.gov/timeseries/APU0000FL2101?amp%253bdata_tool=XGtable&output_view=data&include_graphs=true\n\n # unit: kg/m^2/day\n harvestedShootFreshMassPerAreaKgPerDay = simulatorClass.harvestedShootFreshMassPerAreaKgPerDay\n # print(\"harvestedShootFreshMassPerAreaKgPerDay:{}\".format(harvestedShootFreshMassPerAreaKgPerDay))\n\n # unit: USD/m^2/day\n harvestedFreshMassPricePerAreaPerDay = np.zeros(harvestedShootFreshMassPerAreaKgPerDay.shape[0])\n\n # get the month and year lists\n simulationMonthEachDay = simulatorClass.getMonth()[::24]\n simulationYearEachDay = simulatorClass.getYear()[::24]\n\n # if you refer to the price for greenhouse lettuce\n if constant.sellLettuceByGreenhouseRetailPrice:\n # define the price data\n # source: https://www.ams.usda.gov/mnreports/fvwretail.pdf\n # \"Lettuce Other Boston-Greenhouse\" 1.99 USD each\n\n # make the price data in the same format as constant.romaineLettceRetailPriceFileName\n # get the unit price (USD m-2)\n # romaineLettuceRetailPricePerMonth = getRomainLettucePriceBasedOnHeadPrice()\n\n # get the sales 
price of each harvested lettuce (weight)\n for i in range(0, harvestedShootFreshMassPerAreaKgPerDay.shape[0]):\n # if it is not the harvest date then skip the day. It is also skipped if the head weight does not reach 90% of the defined harvest fresh weight by kg m-2 day-1.\n if harvestedShootFreshMassPerAreaKgPerDay[i] < (constant.harvestDryWeight * constant.DryMassToFreshMass) / 1000 * constant.plantDensity * 0.9 : continue\n\n # unit: USD/m^2/day\n harvestedFreshMassPricePerAreaPerDay[i] = constant.romainLettucePriceBasedOnHeadPrice * constant.plantDensity\n\n else:\n # import the price data\n filename = constant.romaineLettceRetailPriceFileName\n relativePath = constant.romaineLettceRetailPriceFilePath\n romaineLettuceRetailPricePerMonth = util.readData(filename, relativePath, 0, ',')\n # print(\"romaineLettuceRetailPricePerMonth:{}\".format(romaineLettuceRetailPricePerMonth))\n # print(\"type(romaineLettuceRetailPricePerMonth):{}\".format(type(romaineLettuceRetailPricePerMonth)))\n\n # get the sales price of each harvested lettuce (weight)\n for i in range (0, harvestedShootFreshMassPerAreaKgPerDay.shape[0]):\n # if it is not the harvest date then skip the day\n if harvestedShootFreshMassPerAreaKgPerDay[i] == 0.0: continue\n\n # get the unit price (USD pound-1)\n unitRetailPricePerPound = getUnitRomainLettucePrice(simulationMonthEachDay[i], simulationYearEachDay[i], romaineLettuceRetailPricePerMonth)\n\n # unit conversion: 1USD pound-1 -> USD kg-1\n unitRetailPricePerKg = util.convertKgToPound(unitRetailPricePerPound)\n\n # unit: USD/m^2/day\n harvestedFreshMassPricePerAreaPerDay[i] = harvestedShootFreshMassPerAreaKgPerDay[i] * unitRetailPricePerKg\n\n # print(\"In Lettuce.py, harvestedFreshMassPricePerAreaPerDay:{}\".format(harvestedFreshMassPricePerAreaPerDay))\n\n return harvestedFreshMassPricePerAreaPerDay\n\ndef getUnitRomainLettucePrice(month, year, priceInfoList):\n \"\"\"\n return the price of lettuce on a given month and year\n \"\"\"\n # 
print(\"priceInfoList:{}\".format(priceInfoList))\n # print(\"priceInfoList.shape:{}\".format(priceInfoList.shape))\n # print(\"type(month):{}\".format(type(month)))\n # print(\"type(year):{}\".format(type(year)))\n\n # assuming the list has the header, so skip the header\n for i in range (1, priceInfoList.shape[0]):\n priceInfo = priceInfoList[i]\n # print(\"i:{}, priceInfo:{}\".format(i, priceInfo))\n # print(\"year:{}\".format(year))\n # print(\"type(year):{}\".format(type(month)))\n # print(\"month:{}\".format(year))\n # print(\"type(month):{}\".format(type(month)))\n # print(\"priceInfo[1]:{}\".format(priceInfo[1]))\n # print(\"priceInfo[2][0:2]:{}\".format(priceInfo[2][1:]))\n\n if year == int(priceInfo[1]) and month == int(priceInfo[2][1:]):\n # print(\"priceInfo[3]:{}\".format(priceInfo[3]))\n # print(\"type(priceInfo[3]):{}\".format(type(priceInfo[3])))\n # unit: USD pound-1\n return float(priceInfo[3])\n\n print(\"The specified simulation period include the term where there is no lettuce unit price information. 
Simulation stopped.\")\n # ####################################################################################################\n # # Stop execution here...\n sys.exit()\n # # Move the above line to different parts of the assignment as you implement more of the functionality.\n # ####################################################################################################\n\n\ndef getRomainLettucePriceBasedOnHeadPrice():\n\n # get init date\n startDate = util.getStartDateDateType()\n\n numOfSimulationMonths = util.getSimulationMonthsInt()\n\n romainLettucePriceBasedOnHeadPrice = np.zeros((numOfSimulationMonths,4))\n\n for i in range (numOfSimulationMonths):\n romainLettucePriceBasedOnHeadPrice[i][0] = \"dummy\"\n # set year\n romainLettucePriceBasedOnHeadPrice[i][1] = (startDate + relativedelta(months = i)).year\n # set month\n romainLettucePriceBasedOnHeadPrice[i][2] = (startDate + relativedelta(months = i)).month\n # set the head price\n # unit: USD head-1\n romainLettucePriceBasedOnHeadPrice[i][3] = constant.romainLettucePriceBasedOnHeadPrice\n # unit convert: USD head-1 -> USD m-2\n romainLettucePriceBasedOnHeadPrice[i][3] = romainLettucePriceBasedOnHeadPrice[i][3] * constant.plantDensity\n\n return romainLettucePriceBasedOnHeadPrice\n\n\ndef discountPlantSalesperSquareMeterByTipburn(plantSalesperSquareMeter, TotalDLItoPlants):\n '''\n\n :param plantSalesperSquareMeter:\n :param TotalDLItoPlants:\n :return:\n '''\n # cultivationDaysWithoutHarvest = getCultivationDaysWithoutHarvest(plantSalesperSquareMeter)\n cultivationDaysWithoutHarvest = int(util.calcSimulationDaysInt() % constant.cultivationDaysperHarvest)\n # print \"cultivationDaysWithoutHarvest:{}\".format(cultivationDaysWithoutHarvest)\n\n for cycle in range (0, int(util.calcSimulationDaysInt() / constant.cultivationDaysperHarvest)):\n averageDLIperCycle = sum(TotalDLItoPlants[cycle*constant.cultivationDaysperHarvest:(cycle+1)*constant.cultivationDaysperHarvest]) / 
constant.cultivationDaysperHarvest\n # print \"averageDLIperCycle:{}\".format(averageDLIperCycle)\n # if the DLI is more than the amount with which there can be some tipburns, discount the price.\n if averageDLIperCycle > constant.DLIforTipBurn:\n plantSalesperSquareMeter[(cycle+1)*constant.cultivationDaysperHarvest-1] = constant.tipburnDiscountRatio * \\\n plantSalesperSquareMeter[(cycle+1)*constant.cultivationDaysperHarvest-1]\n\n return plantSalesperSquareMeter\n\n\ndef getCultivationDaysWithoutHarvest(plantSalesperSquareMeter):\n '''\n num of remained days when we cannot finish the cultivation, which is less than the num of cultivation days.\n :param plantSalesperSquareMeter:\n :return:\n '''\n # num of cultivation cycle\n NumCultivationCycle = int(util.calcSimulationDaysInt() / constant.cultivationDaysperHarvest)\n # print \"NumCultivationCycle:{}\".format(NumCultivationCycle)\n\n # num of remained days when we cannot finish the cultivation, which is less than the num of cultivation days.\n CultivationDaysWithNoHarvest = util.calcSimulationDaysInt() - NumCultivationCycle * constant.cultivationDaysperHarvest\n # print \"CultivationDaysWithNoHarvest:{}\".format(CultivationDaysWithNoHarvest)\n\n return CultivationDaysWithNoHarvest\n\n\ndef getRevenueOfPlantYieldperHarvest(freshWeightTotalperHarvest):\n '''\n calculate the revenue of plant sales per harvest (USD/harvest)\n param:freshWeightTotalperHarvest: fresh Weight perHarvest (kg/harvest)\n return: revenueOfPlantProductionperHarvest: revenue Of Plant Production per Harvest (USD/harvest)\n '''\n return constant.lantUnitPriceUSDperKilogram * freshWeightTotalperHarvest\n\n\ndef getCostofPlantYieldperYear():\n '''\n calculate the cost of plant sales per harvest (USD/per)\n param: :\n return: : cost Of Plant Production per year (USD/year)\n '''\n return constant.plantProductionCostperSquareMeterPerYear * constant.greenhouseFloorArea\n\n\ndef getGreenhouseTemperatureEachDay(simulatorClass):\n # It was assumed the 
greenhouse temperature was instantaneously adjusted to the set point temperatures at daytime and night time respectively\n hourlyDayOrNightFlag = simulatorClass.hourlyDayOrNightFlag\n greenhouseTemperature = np.array([constant.setPointTemperatureDayTime if i == constant.daytime else constant.setPointTemperatureNightTime for i in hourlyDayOrNightFlag])\n\n # calc the mean temperature each day\n dailyAverageTemperature = np.zeros(util.getSimulationDaysInt())\n for i in range(0, util.getSimulationDaysInt()):\n dailyAverageTemperature[i] = np.average(greenhouseTemperature[i * constant.hourperDay: (i + 1) * constant.hourperDay])\n return dailyAverageTemperature\n\ndef getGreenhouseTemperatureEachHour(simulatorClass):\n # It was assumed the greenhouse temperature was instantaneously adjusted to the set point temperatures at daytime and night time respectively\n hourlyDayOrNightFlag = simulatorClass.hourlyDayOrNightFlag\n greenhouseTemperature = np.array(\n [constant.setPointTemperatureDayTime if i == constant.daytime else constant.setPointTemperatureNightTime for i in hourlyDayOrNightFlag])\n\n return greenhouseTemperature\n\ndef getFreshWeightIncrease(FWPerHead):\n # get the fresh weight increase\n\n # freshWeightIncrease = np.array([WFresh[i] - WFresh[i-1] if WFresh[i] - WFresh[i-1] > 0 else 0.0 for i in range (1, WFresh.shape[0])])\n # # insert the value for i == 0\n # freshWeightIncrease[0] = 0.0\n # it is possible that the weight decreases at E_J_VanHenten1994\n freshWeightIncrease = np.array([0.0 if i == 0 or FWPerHead[i] - FWPerHead[i-1] <= -constant.harvestDryWeight*constant.DryMassToFreshMass/2.0\\\n else FWPerHead[i] - FWPerHead[i-1] for i in range (0, FWPerHead.shape[0])])\n\n return freshWeightIncrease\n\n\ndef getAccumulatedFreshWeightIncrease(WFresh):\n # get accumulated fresh weight\n\n freshWeightIncrease = getFreshWeightIncrease(WFresh)\n accumulatedFreshWeightIncrease = np.zeros(WFresh.shape[0])\n accumulatedFreshWeightIncrease[0] = WFresh[0]\n for i in 
range(1, freshWeightIncrease.shape[0]):\n # print(\"i:{}, accumulatedFreshWeightIncrease[i]:{}, accumulatedFreshWeightIncrease[i-1]:{}, freshWeightIncrease[i]:{}\".format(i, accumulatedFreshWeightIncrease[i], accumulatedFreshWeightIncrease[i-1], freshWeightIncrease[i]))\n accumulatedFreshWeightIncrease[i] = accumulatedFreshWeightIncrease[i-1] + freshWeightIncrease[i]\n\n return accumulatedFreshWeightIncrease\n\n\ndef getHarvestedFreshWeight(WFresh):\n # get the harvested fresh weight\n\n # record the fresh weight harvested at each harvest day or hour\n # harvestedFreshWeight = np.array([WFresh[i] if WFresh[i] > 0.0 and WFresh[i+1] == 0.0 else 0.0 for i in range (0, WFresh.shape[0])])\n harvestedFreshWeight = np.zeros(WFresh.shape[0])\n for i in range (0, WFresh.shape[0]-1):\n # print(\"i:{}, WFresh[i]:{}\".format(i, WFresh[i]))\n\n if WFresh[i] > 0.0 and WFresh[i+1] == 0.0:\n harvestedFreshWeight[i] = WFresh[i]\n else:\n harvestedFreshWeight[i] = 0.0\n\n # print(\"0 harvestedFreshWeight.shape[0]:{}\".format(harvestedFreshWeight.shape[0]))\n\n # if the last hour of the last day is the harvest date\n if WFresh[-1] > constant.harvestDryWeight*constant.DryMassToFreshMass:\n # harvestedFreshWeight = np.append(harvestedFreshWeight, [WFresh[-1]])\n harvestedFreshWeight[-1] = WFresh[-1]\n else:\n harvestedFreshWeight[-1] = WFresh[-1]\n\n return harvestedFreshWeight\n\n\n" }, { "alpha_fraction": 0.61456698179245, "alphanum_fraction": 0.6372520923614502, "avg_line_length": 54.1889762878418, "blob_id": "acbcb0b6858fc8a5c26697c654daccb91bf69eb0", "content_id": "2d1d5a066e7c32e3a873c44814d2dadb5b0ab783", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14020, "license_type": "permissive", "max_line_length": 302, "num_lines": 254, "path": "/plantGrowthModelE_J_VanHenten.py", "repo_name": "kensaku-okada/Greenhouse-with-OPV-film-Model", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 
-*-\n#######################################################\n# author :Kensaku Okada [[email protected]]\n# create date : 15 Jun 2017\n# last edit date: 15 Jun 2017\n#######################################################\n\n# ####################################################################################################\n# np.set_printoptions(threshold=np.inf)\n# print \"hourlySolarIncidenceAngle:{}\".format(np.degrees(hourlySolarIncidenceAngle))\n# np.set_printoptions(threshold=1000)\n# ####################################################################################################\n\n# ####################################################################################################\n# # Stop execution here...\n# sys.exit()\n# # Move the above line to different parts of the assignment as you implement more of the functionality.\n# ####################################################################################################\n\n##########import package files##########\nimport os as os\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport math\nimport CropElectricityYeildSimulatorConstant as constant\nimport Util as util\nimport datetime\nimport Lettuce\nimport PlantGrowthModelE_J_VanHentenConstant as VanHentenConstant\nimport PlantGrowthPenaltyByPhotoinhibition as Photoinhibition\n\n\ndef calcUnitDailyFreshWeightE_J_VanHenten1994(simulatorClass):\n '''\n \"dt\" is 1 second.\n reference: E. J. 
Van Henten, 1994, \"validation of a dynamic lettuce growth model for greenhouse climate control\"\n https://www.sciencedirect.com/science/article/pii/S0308521X94902801\n '''\n # According to Van Henten (1994), 'With data logging system connected to the greenhouse climate computer, half-hour mean values of the indoor climate data were recorded.'\n\n # get the simulation days\n simulationDaysInt = util.getSimulationDaysInt()\n # print(\"simulationDaysInt:{}\".format(simulationDaysInt))\n\n # take date and time\n year = simulatorClass.getYear()\n month = simulatorClass.getMonth()\n day = simulatorClass.getDay()\n # print(\"year[0]:{}\".format(year[0]))\n\n # get the summer period hours\n summerPeriodDays = datetime.date(year=year[0], month=constant.SummerPeriodEndMM, day=constant.SummerPeriodEndDD) - \\\n datetime.date(year=year[0], month=constant.SummerPeriodStartMM, day=constant.SummerPeriodStartDD)\n # change the data type and unit\n summerPeriodHours = summerPeriodDays.days * constant.hourperDay\n # print(\"summerPeriodHours:{}\".format(summerPeriodHours))\n\n # [head/m^2]\n plantDensity = constant.plantDensity\n # plantDensity = 18.0\n\n # it was assumed that the canopy temperature is instantaneously adjusted to the setpoint temperature at each hour.\n U_T = Lettuce.getGreenhouseTemperatureEachHour(simulatorClass)\n # the following definition of U_T is used for validation\n # U_T = simulatorClass.GHAirTemperatureValidationData\n\n # print(\"U_T:{}\".format(U_T))\n # print(\"U_T.shape:{}\".format(U_T.shape))\n\n # Horizontal irradiance above the canopy (PAR) [W/m^2]\n directSolarIrradianceToPlants = simulatorClass.directSolarIrradianceToPlants\n diffuseSolarIrradianceToPlants = simulatorClass.diffuseSolarIrradianceToPlants\n totalSolarIrradianceToPlants = directSolarIrradianceToPlants + diffuseSolarIrradianceToPlants\n # By dividing the irradiance by 2, the shortwave radiation is converted into PAR [W/m^2]\n U_par = totalSolarIrradianceToPlants/2.0\n # 
print(\"U_par:{}\".format(U_par))\n\n # unit: ppm - pers per million (1/1000000)\n # it is temporarily assumed that all hourly CO2 concentration is 400 ppm\n U_CO2 = np.array([400] * U_T.shape[0])\n\n # structured dry weight on each day [g / m**2]\n Xsdw = np.zeros(simulationDaysInt*constant.hourperDay)\n # non structured dry weight on each day [g / m**2]\n Xnsdw = np.zeros(simulationDaysInt*constant.hourperDay)\n # total dry weight\n DW = np.zeros(simulationDaysInt*constant.hourperDay)\n # summer period flag array: 1.0 == summer, 0.0 == not summer\n summerPeriodFlagArray = np.zeros(simulationDaysInt*constant.hourperDay)\n\n # set the initial values\n # according to the reference, the initial dry weight was 2.7 g/m^2 with the cultivar \"Berlo\" (started 17 October 1991), and 0.72 g/m^2 with \"Norden\"(started 21 January 1992)\n # [g / m**2]\n InitialdryWeight = 2.7\n # [g / m**2]\n Xsdw[0] = InitialdryWeight * 0.75\n # [g / m**2]\n Xnsdw[0] = InitialdryWeight * 0.25\n # [g / m**2]\n DW[0] = Xsdw[0] + Xnsdw[0]\n # DWPerHead[0] = DW[0] / plantDensity\n # FWPerHead[0] = DWPerHead[0] * constant.DryMassToFreshMass\n\n # 1 loop == 1 hour\n i = 1\n while i < simulationDaysInt * constant.hourperDay:\n # for i in range (1, constant.cultivationDaysperHarvest):\n\n # if you do not grow plant during the summer period, then skip the summer period\n # if simulatorClass.getIfGrowForSummerPeriod() is False and \\\n # the last condition was added to consider the case when the summer period is defined to be zero even if constant.ifGrowForSummerPeriod is False\n if constant.ifGrowForSummerPeriod is False and \\\n datetime.date(year[i], month[i], day[i]) >= datetime.date(year[i], constant.SummerPeriodStartMM, constant.SummerPeriodStartDD) and \\\n datetime.date(year[i], month[i], day[i]) <= datetime.date(year[i], constant.SummerPeriodEndMM, constant.SummerPeriodEndDD) and \\\n datetime.date(year[i], constant.SummerPeriodEndMM, constant.SummerPeriodEndDD) > datetime.date(year[i], 
constant.SummerPeriodStartMM, constant.SummerPeriodStartDD):\n\n # skip the summer period cultivation cycle\n # It was assumed to take 3 days to the next cultivation cycle assuming \"transplanting shock prevented growth during the first 48 h\", and it takes one day for preparation.\n # source: 21.\tPearson, S., Wheeler, T. R., Hadley, P., & Wheldon, A. E. (1997). A validated model to predict the effects of environment on the growth of lettuce (Lactuca sativa L.): Implications for climate change. Journal of Horticultural Science, 72(4), 503–517. https://doi.org/10.1080/14620316.1997.11515538\n i += summerPeriodHours + 3 * constant.hourperDay\n # print(\"summerPeriodHours:{}\".format(summerPeriodHours))\n\n # record the summer period\n summerPeriodFlagArray[i - summerPeriodHours + 3 * constant.hourperDay: i] = 1.0\n\n # print(\"i:{}\".format(i))\n # print(\"resetInitialWeights(i, Xsdw[0], Xnsdw[0]):{}\".format(resetInitialWeights(i, Xsdw[0], Xnsdw[0])))\n # print(\"Xsdw[i - 2 * constant.hourperDay:i]:{}\".format(Xsdw[i - 2 * constant.hourperDay:i]))\n # print(\"Xnsdw[i - 2 * constant.hourperDay:i]:{}\".format(Xnsdw[i - 2 * constant.hourperDay:i]))\n # print(\"DW[i - 2 * constant.hourperDay:i]:{}\".format(DW[i - 2 * constant.hourperDay:i]))\n\n # initialize the plant weight for the cultivation soon after the summer period\n Xsdw[i - 2 * constant.hourperDay:i], \\\n Xnsdw[i - 2 * constant.hourperDay:i], \\\n DW[i - 2 * constant.hourperDay:i] = resetInitialWeights(i, Xsdw[0], Xnsdw[0])\n # print(\"i:{}, Xsdw[i - 2 * constant.hourperDay:i]:{}, Xnsdw[i - 2 * constant.hourperDay:i]:{}, DW[i - 2 * constant.hourperDay:i]:{}\".\\\n # format(i, Xsdw[i - 2 * constant.hourperDay:i], Xnsdw[i - 2 * constant.hourperDay:i], DW[i - 2 * constant.hourperDay:i]))\n\n continue\n\n ####################### parameters for calculating f_photo_max start #######################\n # the carboxylation conductance\n g_car = VanHentenConstant.c_car1 * (U_T[i-1])**2 + VanHentenConstant.c_car2 * 
U_T[i-1] + VanHentenConstant.c_car3\n # print(\"i:{}, g_car:{}\".format(i, g_car))\n\n # the canopy conductance for diffusion of CO2 from the ambient air to the chloroplast :gross carbon dioxide assimilation rate of the canopy having an effective surface of 1m^2 per square meter soild at complete soil covering.\n g_CO2 = VanHentenConstant.g_bnd['m s-1']*VanHentenConstant.g_stm['m s-1']* g_car / (VanHentenConstant.g_bnd['m s-1'] * VanHentenConstant.g_stm['m s-1'] \\\n + VanHentenConstant.g_bnd['m s-1']*g_car + VanHentenConstant.g_stm['m s-1'] * g_car)\n # print(\"g_CO2:{}\".format(g_CO2))\n\n # CO2 compensation point\n gamma = VanHentenConstant.c_upperCaseGamma['ppm'] * VanHentenConstant.c_Q10_upperCaseGamma ** ((U_T[i-1] -20.0)/10.0)\n # print(\"gamma:{}\".format(gamma))\n\n # light use efficiency\n epsilon = {'g J-1': VanHentenConstant.c_epsilon['g J-1'] * (U_CO2[i-1] - gamma) / ( U_CO2[i-1] + 2.0 * gamma)}\n # print(\"epsilon:{}\".format(epsilon))\n ####################### parameters for calculating f_photo_max end #######################\n\n # the response of canopy photosynthesis\n f_photo_max = {'g m-2 s-2': epsilon['g J-1'] * U_par[i-1] * g_CO2 * VanHentenConstant.c_omega['g m-3'] * (U_CO2[i-1] - gamma) / \\\n (epsilon['g J-1']* U_par[i-1] + g_CO2 * VanHentenConstant.c_omega['g m-3'] * (U_CO2[i-1] - gamma))}\n # print(\"f_photo_max:{}\".format(f_photo_max))\n\n # specific growth rate: the transfromation rate of non-structural dry weight to structural dry weight\n r_gr = VanHentenConstant.c_gr_max['s-1'] * Xnsdw[i-1] / (VanHentenConstant.c_gamma * Xsdw[i-1] + Xnsdw[i-1]) * (VanHentenConstant.c_Q10_gr ** ((U_T[i] - 20.0) / 10.0))\n # print(\"r_gr:{}\".format(r_gr))\n # the maintenance respiration rate of the crop\n f_resp = (VanHentenConstant.c_resp_sht['s-1'] * (1 - VanHentenConstant.c_tau) * Xsdw[i-1] + VanHentenConstant.c_resp_rt['s-1'] * VanHentenConstant.c_tau * Xsdw[i-1])\\\n * VanHentenConstant.c_Q10_resp ** ((U_T[i-1] - 25.0)/10.0)\n # 
print(\"f_resp:{}\".format(f_resp))\n # gross canopy photosynthesis\n f_photo = (1.0 - np.exp( - VanHentenConstant.c_K * VanHentenConstant.c_lar['m2 g-2'] * (1- VanHentenConstant.c_tau) * Xsdw[i-1])) * f_photo_max['g m-2 s-2']\n # print(\"f_photo:{}\".format(f_photo))\n\n # [g / m ** 2/ sec]\n d_structuralDryWeight = r_gr * Xsdw[i-1]\n # [g / m ** 2/ sec]\n d_nonStructuralDryWeight = VanHentenConstant.c_alpha * f_photo - r_gr * Xsdw[i-1] - f_resp - (1 - VanHentenConstant.c_beta) / VanHentenConstant.c_beta * r_gr * Xsdw[i-1]\n\n # unit conversion. [g m-2 sec-1] -> [g m-2 hour-1]\n d_structuralDryWeight = d_structuralDryWeight * constant.secondperMinute * constant.minuteperHour\n d_nonStructuralDryWeight = d_nonStructuralDryWeight * constant.secondperMinute * constant.minuteperHour\n # print(\"d_structuralDryWeight:{}\".format(d_structuralDryWeight))\n # print(\"d_nonStructuralDryWeight:{}\".format(d_nonStructuralDryWeight))\n\n # increase the plant weight\n Xsdw[i] = Xsdw[i-1] + d_structuralDryWeight\n Xnsdw[i] = Xnsdw[i-1] + d_nonStructuralDryWeight\n DW[i] = Xsdw[i] + Xnsdw[i]\n\n # if the dry weight exceeds the weight for cultimvation, then reset the dryweight\n if DW[i] > constant.harvestDryWeight * plantDensity:\n\n # It was assumed to take 3 days to the next cultivation cycle assuming \"transplanting shock prevented growth during the first 48 h\", and it takes one day for preparation.\n i += 3 * constant.hourperDay\n if (i >= simulationDaysInt * constant.hourperDay): break\n\n # The plant dry weight (excluding roots) W\n Xsdw[i - 2 * constant.hourperDay:i], \\\n Xnsdw[i - 2 * constant.hourperDay:i],\\\n DW[i - 2 * constant.hourperDay:i] = resetInitialWeights(i, Xsdw[0], Xnsdw[0])\n\n else:\n # increment the counter for one hour\n i += 1\n\n # the plant weight per head\n # Ydw = (Xsdw + Xnsdw) / float(constant.numOfHeadsPerArea)\n\n # set variables to the object\n simulatorClass.LeafAreaIndex_J_VanHenten1994 = VanHentenConstant.c_lar['m2 g-2'] * (1- 
VanHentenConstant.c_tau) * Xsdw\n simulatorClass.summerPeriodFlagArray = summerPeriodFlagArray\n\n\n DWPerHead = DW / plantDensity\n # print(\"DWPerHead:{}\".format(DWPerHead))\n\n FWPerHead = DWPerHead * constant.DryMassToFreshMass\n # get the fresh weight increase per head\n WFreshWeightIncrease = Lettuce.getFreshWeightIncrease(FWPerHead)\n # get the accumulated fresh weight per head during the simulation period\n WAccumulatedFreshWeightIncrease = Lettuce.getAccumulatedFreshWeightIncrease(FWPerHead)\n # get the harvested weight per head\n WHarvestedFreshWeight = Lettuce.getHarvestedFreshWeight(FWPerHead)\n\n # print(\"FWPerHead.shape:{}\".format(FWPerHead.shape))\n # print(\"WFreshWeightIncrease.shape:{}\".format(WFreshWeightIncrease.shape))\n # print(\"WAccumulatedFreshWeightIncrease.shape:{}\".format(WAccumulatedFreshWeightIncrease.shape))\n # print(\"WHarvestedFreshWeight.shape:{}\".format(WHarvestedFreshWeight.shape))\n\n # np.set_printoptions(threshold=np.inf)\n # print(\"FWPerHead:{}\".format(FWPerHead))\n # print(\"WHarvestedFreshWeight:{}\".format(WHarvestedFreshWeight))\n # np.set_printoptions(threshold=1000)\n\n return FWPerHead, WFreshWeightIncrease, WAccumulatedFreshWeightIncrease, WHarvestedFreshWeight\n\n # np.set_printoptions(threshold=np.inf)\n # print(\"Xsdw:{}\".format(Xsdw))\n # print(\"Xnsdw:{}\".format(Xnsdw))\n # print(\"DW:{}\".format(DW))\n # print(\"FWPerHead:{}\".format(FWPerHead))\n # np.set_printoptions(threshold=100)\n\n ###################################################\n # From here, we consider the summer period ########\n ###################################################\n\ndef resetInitialWeights(i, initialXsdw, initialXnsdw):\n # reset the weights\n return initialXsdw * np.ones(2 * constant.hourperDay), initialXnsdw * np.ones(2 * constant.hourperDay),\\\n initialXsdw * np.ones(2 * constant.hourperDay) + initialXnsdw * np.ones(2 * constant.hourperDay)\n" } ]
21
Nolan1324/CPXII-Unix-Scripts
https://github.com/Nolan1324/CPXII-Unix-Scripts
9b4370b266b4eef0aa87e0877a8b33ba4621faac
fb4f98893ac4455cf0ecb68c7352bc64084a6016
2f56239fb4e26399c31792f32efe8b5f39ef8dc8
refs/heads/master
2021-06-25T01:36:46.649037
2020-12-13T18:44:13
2020-12-13T18:44:13
200,143,095
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6238933205604553, "alphanum_fraction": 0.6502242088317871, "avg_line_length": 39.64018630981445, "blob_id": "7c75e9d66b5b7c88fc0757ad5cc90a1650c940be", "content_id": "f1da15fe1d0e5c006f7dcac863449f18e765a8a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 8697, "license_type": "no_license", "max_line_length": 163, "num_lines": 214, "path": "/cis.sh", "repo_name": "Nolan1324/CPXII-Unix-Scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ \"$EUID\" -ne 0 ]\n then echo \"Please run with sudo\"\n exit\nfi\n\n#Get the directory of the script\nSCRIPT_DIR=\"$(dirname \"$0\")\"\n\n. \"$SCRIPT_DIR/util/common.sh\"\n\n#Netowrk \necho_status \"3.1.1 Ensure IP forwarding is disabled\"\necho \"net.ipv4.ip_forward=0\" | sudo tee -a /etc/sysctl.conf\necho_status \"3.1.2 Ensure packet redirect sending is disabled\"\necho \"net.ipv4.conf.all.send_redirects=0\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv4.conf.default.send_redirects=0\" | sudo tee -a /etc/sysctl.conf\necho_status \"3.2.1 Ensure source routed packets are not accepted\"\necho \"net.ipv4.conf.all.accept_source_route=0\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv4.conf.default.accept_source_route=0\" | sudo tee -a /etc/sysctl.conf\necho_status \"3.2.2 Ensure ICMP redirects are not accepted\"\necho \"net.ipv4.conf.all.accept_redirects=0\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv4.conf.default.accept_redirects=0\" | sudo tee -a /etc/sysctl.conf\necho_status \"3.2.3 Ensure secure ICMP redirects are not accepted\"\necho \"net.ipv4.conf.all.secure_redirects=0\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv4.conf.default.secure_redirects=0\" | sudo tee -a /etc/sysctl.conf\necho_status \"3.2.4 Ensure suspicious packets are logged\"\necho \"net.ipv4.conf.all.log_martians=1\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv4.conf.default.log_martians=1\" | sudo tee -a /etc/sysctl.conf\necho_status \"3.2.5 Ensure broadcast 
ICMP requests are ignored\"\necho \"net.ipv4.icmp_echo_ignore_broadcasts=1\" | sudo tee -a /etc/sysctl.conf\necho_status \"3.2.6 Ensure bogus ICMP responses are ignored\"\necho \"net.ipv4.icmp_ignore_bogus_error_responses=1\" | sudo tee -a /etc/sysctl.conf\necho_status \"3.2.7 Ensure Reverse Path Filtering is enabled\"\necho \"net.ipv4.conf.all.rp_filter=1\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv4.conf.default.rp_filter=1\" | sudo tee -a /etc/sysctl.conf\necho_status \"3.2.8 Ensure TCP SYN Cookies is enabled\"\necho \"net.ipv4.tcp_syncookies=1\" | sudo tee -a /etc/sysctl.conf\necho_status \"Disable IPv6\"\necho \"net.ipv6.conf.all.accept_ra=0\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv6.conf.default.accept_ra=0\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv6.route.flush=1\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv6.conf.all.accept_redirects=0\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv6.conf.default.accept_redirects=0\" | sudo tee -a /etc/sysctl.conf\necho \"net.ipv6.route.flush=1\" | sudo tee -a /etc/sysctl.conf\nsudo sysctl -p\n\n#System Maintenance\necho_status \"6.1.2 Ensure permissions on /etc/passwd are configured\"\nchown root:root /etc/passwd\nchmod 644 /etc/passwd\necho_status \"6.1.3 Ensure permissions on /etc/shadow are configured\"\nchown root:shadow /etc/shadow\nchmod o-rwx,g-wx /etc/shadow\necho_status \"6.1.4 Ensure permissions on /etc/group are configured\"\nchown root:root /etc/group\nchmod 644 /etc/group\necho_status \"6.1.5 Ensure permissions on /etc/gshadow are configured\"\nchown root:shadow /etc/gshadow\nchmod o-rwx,g-rw /etc/gshadow\necho_status \"6.1.6 Ensure permissions on /etc/passwd- are configured\"\nchown root:root /etc/passwd-\nchmod u-x,go-wx /etc/passwd-\necho_status \"6.1.7 Ensure permissions on /etc/shadow- are configured\"\nchown root:root /etc/shadow-\nchown root:shadow /etc/shadow-\nchmod o-rwx,g-rw /etc/shadow-\necho_status \"6.1.8 Ensure permissions on /etc/group- are configured\"\nchown root:root 
/etc/group-\nchmod u-x,go-wx /etc/group-\necho_status \"6.1.9 Ensure permissions on /etc/gshadow- are configured\"\nchown root:root /etc/gshadow-\nchown root:shadow /etc/gshadow-\nchmod o-rwx,g-rw /etc/gshadow-\n\n#User and Group Setting\necho_status \"6.2.1 Ensure password fields are not empty\"\ncat /etc/shadow | awk -F: '($2 == \"\" ) { print $1 \" does not have a password \"}'\npause \"[PAUSED] Provide these users strong passwords. \\\"passwd -l <username>\\\"\"\necho_status \"6.2.2 Ensure no legacy \\\"+\\\" entries exist in /etc/passwd\"\ngrep '^\\+:' /etc/passwd\necho_status \"6.2.3 Ensure no legacy \\\"+\\\" entries exist in /etc/shadow\"\ngrep '^\\+:' /etc/shadow\necho_status \"6.2.4 Ensure no legacy \\\"+\\\" entries exist in /etc/group\"\ngrep '^\\+:' /etc/group\npause \"[PAUSED] Remove lines found in the files in 6.2.2-4\"\necho_status \"6.2.5 Ensure root is the only UID 0 account\"\ncat /etc/passwd | awk -F: '($3 == 0) { print $1 }'\npause \"[PAUSED] Change the UID of users listed other than root. 
\\\"usermod -u <new-uid> <username>\\\"\"\necho_status \"6.2.6 Ensure root PATH Integrity\"\nif [ \"`echo $PATH | grep :: `\" != \"\" ]; then\n echo \"Empty Directory in PATH (::)\"\nfi\nif [ \"`echo $PATH | grep :$`\" != \"\" ]; then\necho \"Trailing : in PATH\"\nfi\np=`echo $PATH | sed -e 's/::/:/' -e 's/:$//' -e 's/:/ /g'`\nset -- $p\nwhile [ \"$1\" != \"\" ]; do\nif [ \"$1\" = \".\" ]; then\n echo \"PATH contains .\"\n shift\n continue\nfi\nif [ -d $1 ]; then\n dirperm=`ls -ldH $1 | cut -f1 -d\" \"`\n if [ `echo $dirperm | cut -c6 ` != \"-\" ]; then\n echo \"Group Write permission set on directory $1\"\n fi\n if [ `echo $dirperm | cut -c9 ` != \"-\" ]; then\n echo \"Other Write permission set on directory $1\"\n fi\n dirown=`ls -ldH $1 | awk '{print $3}'`\n if [ \"$dirown\" != \"root\" ] ; then\n echo $1 is not owned by root\n fi\nelse\n echo $1 is not a directory\nfi\nshift\ndone\npause \"[PAUSED] Fix any issues listed\"\necho_status \"6.2.7-12 User home directory auditing\"\ncat /etc/passwd | egrep -v '^(root|halt|sync|shutdown)' | awk -F: '($7 != \"/usr/sbin/nologin\" && $7 != \"/bin/false\") { print $1 \" \" $6 }' | while read user dir; do\nif [ ! 
-d \"$dir\" ]; then\n echo \"The home directory ($dir) of user $user does not exist.\"\nelse\n dirperm=`ls -ld $dir | cut -f1 -d\" \"`\n owner=$(stat -L -c \"%U\" \"$dir\")\n if [ `echo $dirperm | cut -c6` != \"-\" ]; then\n echo \"Group Write permission set on the home directory ($dir) of user $user\"\n fi\n if [ `echo $dirperm | cut -c8` != \"-\" ]; then\n echo \"Other Read permission set on the home directory ($dir) of user $user\"\n fi\n if [ `echo $dirperm | cut -c9` != \"-\" ]; then\n echo \"Other Write permission set on the home directory ($dir) of user $user\"\n fi\n if [ `echo $dirperm | cut -c10` != \"-\" ]; then\n echo \"Other Execute permission set on the home directory ($dir) of user $user\"\n fi\n if [ \"$owner\" != \"$user\" ]; then\n echo \"The home directory ($dir) of user $user is owned by $owner.\"\n fi\n for file in $dir/.[A-Za-z0-9]*; do\n if [ ! -h \"$file\" -a -f \"$file\" ]; then\n fileperm=`ls -ld $file | cut -f1 -d\" \"`\n if [ `echo $fileperm | cut -c6` != \"-\" ]; then\n echo \"Group Write permission set on file $file\"\n fi\n if [ `echo $fileperm | cut -c9` != \"-\" ]; then\n echo \"Other Write permission set on file $file\"\n fi\n fi\n done\n if [ ! -h \"$dir/.forward\" -a -f \"$dir/.forward\" ]; then\n echo \".forward file $dir/.forward exists\"\n fi\n if [ ! -h \"$dir/.netrc\" -a -f \"$dir/.netrc\" ]; then\n echo \".netrc file $dir/.netrc exists\"\n fi\n for file in $dir/.rhosts; do\n if [ ! -h \"$file\" -a -f \"$file\" ]; then\n echo \".rhosts file in $dir\"\n fi\n done\nfi\ndone\npause \"[PAUSED] Fix any of the problems listed\"\necho_status \"6.2.15 Ensure all groups in /etc/passwd exist in /etc/group\"\nfor i in $(cut -s -d: -f4 /etc/passwd | sort -u ); do\n grep -q -P \"^.*?:[^:]*:$i:\" /etc/group\n if [ $? 
-ne 0 ]; then\n echo \"Group $i is referenced by /etc/passwd but does not exist in /etc/group\"\n fi\ndone\necho_status \"6.2.16 Ensure no duplicate UIDs exist\"\ncat /etc/passwd | cut -f3 -d\":\" | sort -n | uniq -c | while read x ; do [ -z \"${x}\" ] && break\n set - $x\n if [ $1 -gt 1 ]; then\n users=`awk -F: '($3 == n) { print $1 }' n=$2 /etc/passwd | xargs`\n echo \"Duplicate UID ($2): ${users}\"\n fi\ndone\necho_status \"6.2.17 Ensure no duplicate GIDs exist\"\ncat /etc/group | cut -f3 -d\":\" | sort -n | uniq -c | while read x ; do [ -z \"${x}\" ] && break\n set - $x\n if [ $1 -gt 1 ]; then\n groups=`awk -F: '($3 == n) { print $1 }' n=$2 /etc/group | xargs`\n echo \"Duplicate GID ($2): ${groups}\"\n fi\ndone\necho_status \"6.2.18 Ensure no duplicate user names exist\"\ncat /etc/passwd | cut -f1 -d\":\" | sort -n | uniq -c | while read x ; do [ -z \"${x}\" ] && break\n set - $x\n if [ $1 -gt 1 ]; then\n uids=`awk -F: '($1 == n) { print $3 }' n=$2 /etc/passwd | xargs`\n echo \"Duplicate User Name ($2): ${uids}\"\n fi\ndone\necho_status \"6.2.19 Ensure no duplicate group names exist\"\ncat /etc/group | cut -f1 -d\":\" | sort -n | uniq -c | while read x ; do [ -z \"${x}\" ] && break\n set - $x\n if [ $1 -gt 1 ]; then\n gids=`gawk -F: '($1 == n) { print $3 }' n=$2 /etc/group | xargs`\n echo \"Duplicate Group Name ($2): ${gids}\"\n fi\ndone\npause \"[PAUSED] Fix any of the problems listed in 6.2.15-19\"\necho_status \"6.2.20 Ensure shadow group is empty\"\ngrep ^shadow:[^:]*:[^:]*:[^:]+ /etc/group\nawk -F: '($4 == \"<shadow-gid>\") { print }' /etc/passwd\npause \"[PAUSED] If anyone is in shadow, remove them\"\n" }, { "alpha_fraction": 0.7174452543258667, "alphanum_fraction": 0.7389417290687561, "avg_line_length": 46.431373596191406, "blob_id": "864f62fc17edfdda063dc4b5f6450685e1244236", "content_id": "d3ac6962f59a54d1e98bc51304952c2cb672de8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4838, 
"license_type": "no_license", "max_line_length": 228, "num_lines": 102, "path": "/README.md", "repo_name": "Nolan1324/CPXII-Unix-Scripts", "src_encoding": "UTF-8", "text": "# CPXII-Unix-Scripts\nCyberPatriot scripts/checklists created by a CyberPatriot student (me) for my team's personal use on Unix-based VMs. Not authorized for use by other teams.\n## Checklist\n1. Read the README\n2. Do all forensics questions\n3. Do any tasks outlined in the README (ex. creating groups)\n4. Manage users in accordance with the README\n\t* Add user `adduser $user`\n\t* Delete user `deluser $user; delgroup $user`\n\t* Change insecure passwords with `passwd $user`\n\t* All of the above can also be done with the GUI on Ubuntu\n\t* Change users who should or should not be administrator\n5. Manage groups inn accordance with the README\n\t* Add group `addgroup $group`\n\t* Delete group `delgroup $group`\n6. Aduit `/etc/sudoers` (look for people who should not have sudo)\n7. Update mirrors in `/etc/apt/sources.list` by adding these lines:\n\t```\n\tdeb http://security.ubuntu.com/ubuntu/\n\tdeb mirror://mirrors.ubuntu.com/mirrors.txt xenial main restricted universe multiverse\n\tdeb mirror://mirrors.ubuntu.com/mirrors.txt xenial-updates main restricted universe multiverse\n\tdeb mirror://mirrors.ubuntu.com/mirrors.txt xenial-backports main restricted universe multiverse\n\tdeb mirror://mirrors.ubuntu.com/mirrors.txt xenial-security main restricted universe multiverse\n\t```\n8. Remove unwanted packages with `apt-get purge $package` or by using the GUI\n9. Update package list and upgrade installed packages\n\t1. `apt-get update`\n\t2. `apt-get upgrade`\n10. Update the kernel with `apt-get install linux-image-$(uname -r)`\n11. Audit system crontabs in `/etc/crontab` and user crontabs using `crontab -e -u $user` (or in `/var/spool/cron/crontabs/$user`)\n12. Audit permissions and contents of home directories and system files using `ls -lA`. 
It is good to know what most of the core system files contain and do to save time during competition. Some **examples** of cor system files:\n\t* `/etc/rc.local`\n\t* `/etc/login.defs`\n\t* `/etc/crontab`\n\t* `/etc/sysctl.conf` - Configures the kernel. Hardening: https://www.cyberciti.biz/faq/linux-kernel-etcsysctl-conf-security-hardening/\n\t* `/etc/passwd` - Users\n\t* `/etc/shadow` - Password hashes\n\t* `/etc/group` - Groups\n\t* `/etc/sudoers` - Who can use sudo\n\t* `/var/log/*` - System logs. Usually all readable by everyone except for `auth.log*`, `btmp*`, `dmesg`, `kern.log*`, `syslog*`, and `ufw.log*` (list everyone readable files with `ls -lA | grep \"^\\-......r..\"`)\n\t* `/etc/hosts` - This should exist, but be empty except for some standard lines (ex: `127.0.0.1 localhost`). If unsure, just look up the [default contents](https://askubuntu.com/a/880272) on Google and copy/paste into the file.\n\t* `/etc/apt/sources.list`\n\t* `/etc/securetty` - If the file does not exists, root can use any terminal. This is a potential security vulnerability.\n\t* `/etc/apt/apt.conf.d/10periodic` - https://qznc.github.io/my-homeserver/hardening.html#automatic-security-updates. Add (or edit) the following lines:\n\t\t```\n\t\tAPT::Periodic::Update-Package-Lists \"1\";\n\t\tAPT::Periodic::Download-Upgradeable-Packages \"1\";\n\t\tAPT::Periodic::AutocleanInterval \"7\";\n\t\tAPT::Periodic::Unattended-Upgrade \"1\";\n\t\t```\n13. Other:\n\t```\n\t# Make shared memory read only\n\techo \"none /run/shm tmpfs ro,noexec 0 0\" > /etc/fstab\n\tmount -a\n\t# Change some settings\n\techo 0 > /proc/sys/kernel/sysrq\n\techo 1 > /proc/sys/net/ipv4/tcp_rfc1337\n\t# Check this folder\n\t/usr/sbin/<user>\n\tMYSQL: /etc/mysql/my.cnf bind-address=localhost\n\tPHP: /etc/php5/apache2/php.ini expose_php=0\n\t```\n14. 
SSH (CIS 5.2)\n\t```\n\tchown root:root /etc/ssh/sshd_config\n\tchmod og-rwx /etc/ssh/sshd_config\n\t# Add or change these lines in /etc/ssh/sshd_config\n\tProtocol 2\n\tLogLevel INFO\n\tX11Forwarding no\n\tMaxAuthTries 4\n\tIgnoreRhosts yes\n\tHostbasedAuthentication no\n\tPermitRootLogin no\n\tPermitEmptyPasswords no\n\tPermitUserEnvironment no\n\tMACs [email protected],[email protected],[email protected],hmac-sha2-512,hmac-sha2-256,[email protected]\n\tClientAliveInterval 300\n\tClientAliveCountMax 0\n\tLoginGraceTime 60\n\t# Reload changes\n\tservice sshd reload\n\t```\n15. Password lock (CIS 5.4.1.4)\n\t```\n\tuseradd -D -f 30 # Sets default\n\tchage --list <user>\n\tchage --inactive 30 <user>\n\t```\n\t\n\n## Scripts\n* [init.sh](init.sh) Run this first. Installs xcopy (used by other scripts) and sets up aliases\n* [basic.sh](basic.sh) Standard security fixes\n* [audit_setup.sh](audit_setup.sh) Setup and run auditd with a best practices rules file\n* [rookit_scan.sh](rootkit_scan.sh) Install chkrookit and rkhunter and check for rootkits\n\n## Credits\n* [lib/auditd](lib/auditd) cloned from https://github.com/Neo23x0/auditd\n* [util/suppress_gedit.sh](util/suppress_gedit.sh) adapted from https://askubuntu.com/a/572827\n* [util/aliases.txt](/util/aliases.txt) adapted from https://www.digitalocean.com/community/tutorials/an-introduction-to-useful-bash-aliases-and-functions\n" }, { "alpha_fraction": 0.6302294135093689, "alphanum_fraction": 0.6605938076972961, "avg_line_length": 23.295082092285156, "blob_id": "aff16d11928584e195c6ac471b095ff3382aed37", "content_id": "7aae84fe5c2e6f9ce43ba9b3d44d3fc66e808ae9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1482, "license_type": "no_license", "max_line_length": 99, "num_lines": 61, "path": "/users.py", "repo_name": "Nolan1324/CPXII-Unix-Scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n\nfrom bs4 import BeautifulSoup\nimport 
requests\nimport pwd\nimport grp\n\n#url = raw_input(\"Paste full README URL: \")\nurl = \"https://www.uscyberpatriot.org/Pages/Readme/cpxiii_training_se_ubu16_readme_civo4h5532.aspx\"\npage = requests.get(url)\n\nsoup = BeautifulSoup(page.text, 'html.parser')\ncontents = soup.find(\"pre\").contents\n\nyou = \"\"\nall_users = []\nadmins = []\n\nnextAdmins = False\nnextUsers = False\nfor c in contents:\n\tif nextAdmins:\n\t\tfor u in c.strip().split(\"\\n\"):\n\t\t\tu = str(u.strip())\n\t\t\tif \"you\" in u.lower():\n\t\t\t\tyou = u[:u.find(\" \")]\n\t\t\telif not \"password\" in u.lower():\n\t\t\t\tall_users.append(u)\n\t\t\t\tadmins.append(u)\n\t\tnextAdmins = False\n\n\tif nextUsers:\n\t\tfor u in c.strip().split(\"\\n\"):\n\t\t\tu = str(u.strip())\n\t\t\tall_users.append(u)\n\t\tnextUsers = False\n\tif \"Administrators\" in str(c): nextAdmins = True\n\tif \"Users\" in str(c): nextUsers = True\n\npwd_users = []\nfor p in pwd.getpwall():\n\t#TODO read UID range from /etc/adduser.conf\n\tif p.pw_uid >= 1000 and p.pw_uid <= 29999:\n\t\tpwd_users.append(p.pw_name)\n#grp_users = grp.getgrnam(\"users\").gr_mem\ngrp_sudo = grp.getgrnam(\"sudo\").gr_mem\n\n#Remove people from admins\nfor u in grp_sudo:\n\tif not u in admins and u != you:\n\t\tprint(\"\\033[96m\" + u + \"\\033[0m removed from admins\")\n\n#Add people to admins\nfor u in admins:\n\tif not u in grp_sudo:\n\t\tprint(\"\\033[33m\" + u + \"\\033[0m added to admins\")\n\n#Remove users\nfor u in pwd_users:\n\tif not u in all_users and u != you:\n\t\tprint(\"\\033[91m\" + u + \"\\033[0m deleted\")\n" }, { "alpha_fraction": 0.6995730996131897, "alphanum_fraction": 0.7203841805458069, "avg_line_length": 28.73015785217285, "blob_id": "41e9c2dc4dce74dd83d72b6c42da6c6110b1f030", "content_id": "b39883e6c4b4cbbeb3118ac65245432a67ef7010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1874, "license_type": "no_license", "max_line_length": 119, "num_lines": 63, 
"path": "/basic.sh", "repo_name": "Nolan1324/CPXII-Unix-Scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ \"$EUID\" -ne 0 ]\n then echo \"Please run with sudo\"\n exit\nfi\n\n#Get the directory of the script\nSCRIPT_DIR=\"$(dirname \"$0\")\"\n\n. \"$SCRIPT_DIR/util/common.sh\"\n\n#Password policy\nline_change_msg\necho -e \"PASS_MAX_DAYS ${RED}60${R}\\nPASS_MIN_DAYS ${RED}7${R}\\nPASS_WARN_AGE ${RED}14${R}\\n\"\ngedit_at_line /etc/login.defs ^\\s*PASS_MAX_DAYS\nclear\n\necho_status \"[Installing pam_cracklib]\"\napt-get install libpam-cracklib\nline_change_msg\necho -e \"... pam_unix.so... ${RED}remember=5 minlen=8${R}\"\necho -e \"... pam_cracklib.so... ${RED}ucredit=-1 lcredit=-1 dcredit=-1 ocredit=-1${R}\\n\"\ngedit /etc/pam.d/common-password\n\nline_add_msg\nLINES=\"auth required pam_tally2.so deny=5 onerr=fail unlock_time=1800\"\ncopy $LINES\necho -e \"${RED}${LINES}${R}\\n\"\ngedit /etc/pam.d/common-auth\n\n#SSH\nline_change_msg\necho -e \"PermitRootLogin ${RED}no${R}\\n\"\ngedit_at_line /etc/ssh/sshd_config ^\\s*PermitRootLogin\nclear\n\n#Other\necho_status \"[Enabing the firewall]\"\nsudo ufw enable\necho_status \"[Enabling syn cookie protection]\"\nsysctl -n net.ipv4.tcp_syncookies\necho 1 | sudo tee /proc/sys/net/ipv4/tcp_syncookies #Probably do not need both\necho_status \"[Disabling IPv6 (Potentially harmful)]\"\necho \"net.ipv6.conf.all.disable_ipv6 = 1\" | sudo tee -a /etc/sysctl.conf\necho_status \"[Disabling IP Forwarding]\"\necho 0 | sudo tee /proc/sys/net/ipv4/ip_forward\necho_status \"[Preventing IP Spoofing]\"\necho \"nospoof on\" | sudo tee -a /etc/host.conf\necho_status \"[Disabling SysRq Key]\"\necho 0 | sudo tee /proc/sys/kernel/sysrq\necho_status \"[Enabling RFC 1337]\"\necho 1 | sudo tee /proc/sys/net/ipv4/tcp_rfc1337\npause_general\n\n#Guest user disable\nline_add_msg\nLINES=\"allow-guest=false\"\ncopy $LINES\necho -e \"${RED}${LINES}${R}\\n\"\ngedit /etc/lightdm/lightdm.conf\npause \"This change requires logging out. 
Make sure important work is saved and closed, then press ENTER to log out now\"\nrestart lightdm\n\n" }, { "alpha_fraction": 0.7394136786460876, "alphanum_fraction": 0.742671012878418, "avg_line_length": 26.909090042114258, "blob_id": "d6e34bd0b8cdd0420a181d51a1ac76d28ee1632a", "content_id": "93048a564f95a865aec7790b11bcd27629f9ba2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 307, "license_type": "no_license", "max_line_length": 110, "num_lines": 11, "path": "/init.sh", "repo_name": "Nolan1324/CPXII-Unix-Scripts", "src_encoding": "UTF-8", "text": "#Get the directory of the script\nSCRIPT_DIR=\"$(dirname \"$0\")\"\n\n#Install xclip for copying text\nsudo apt-get install xclip\n\n#Load aliases\ncat \"$SCRIPT_DIR/util/aliases.txt\" >> ~/.bashrc\n\n#Reload bash (causes ~/.bashrc to execute; allows for the new aliases to be loaded into this terminal session)\nexec bash\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6496598720550537, "avg_line_length": 18.600000381469727, "blob_id": "e0e2e7370acc9145ad45f579cae824ce1cb24fed", "content_id": "e1be869b49ea18ec0aa78e5bbad6402ad53b9665", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 882, "license_type": "no_license", "max_line_length": 73, "num_lines": 45, "path": "/util/common.sh", "repo_name": "Nolan1324/CPXII-Unix-Scripts", "src_encoding": "UTF-8", "text": "#Get the directory of the script\nSCRIPT_DIR=\"$(dirname \"$0\")\"\n\n. 
$SCRIPT_DIR/util/suppress_gedit.sh\n\n#Variables that change the color of text when used in echo -e\nR='\\033[0m' #Reset color\nRED='\\033[0;31m'\nGREEN='\\033[0;32m'\nYELLOW='\\033[0;33m'\n\nfunction echo_status() {\n echo -e \"${YELLOW}$*\"\n tput sgr0\n}\n\nfunction line_add_msg() {\n clear\n echo_status \"[Please add the following line(s) (copied to clipboard)]\"\n}\n\nfunction line_change_msg() {\n clear\n echo_status \"[Please change the following line(s)]\"\n}\n\nfunction pause() {\n read -p \"\u001b[33m$*\u001b[0m: \"\n}\n\nfunction pause_general() {\n pause \"[Press ENTER to continue]\"\n clear\n}\n\n#Opens a file in gedit at the first line that matches the grep pattern\n#$1 File path\n#$2 Line grep pattern\nfunction gedit_at_line() {\n gedit $1 +$(grep $2 -m 1 -n $1 | cut -f1 -d:)\n}\n\nfunction copy() {\n echo $* | xclip -selection c\n}\n" }, { "alpha_fraction": 0.7401247620582581, "alphanum_fraction": 0.7484407424926758, "avg_line_length": 17.5, "blob_id": "5eaa0f6f045b5573f481a238fdee0031c9f51d0f", "content_id": "4ae4015ef595fe0cbc43c7fd7b13ad9ecebd334d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 481, "license_type": "no_license", "max_line_length": 47, "num_lines": 26, "path": "/rootkit_scan.sh", "repo_name": "Nolan1324/CPXII-Unix-Scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ \"$EUID\" -ne 0 ]\n then echo \"Please run with sudo\"\n exit\nfi\n\n#Get the directory of the script\nSCRIPT_DIR=\"$(dirname \"$0\")\"\n\n. 
\"$SCRIPT_DIR/util/common.sh\"\n\n#RootKit Protection 1\necho_status \"[Installing chkrootkit]\"\napt-get install chkrootkit\npause_general\nchkrootkit | tee chkrootkit.txt\npause_general\n\n#RootKit Protection 2\necho_status \"[Installing and running rkhunter]\"\napt-get install rkhunter\nrkhunter --update\npause_general\nrkhunter --check\npause_general\n" }, { "alpha_fraction": 0.7094281315803528, "alphanum_fraction": 0.71406489610672, "avg_line_length": 22.962963104248047, "blob_id": "ddfd4193a1ffc9ec84e596250c7956cbd7f1db2e", "content_id": "38eed018d18e1c0d49d0f1a8883557df2504a726", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 647, "license_type": "no_license", "max_line_length": 72, "num_lines": 27, "path": "/audit_setup.sh", "repo_name": "Nolan1324/CPXII-Unix-Scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [ \"$EUID\" -ne 0 ]\n then echo \"Please run with sudo\"\n exit\nfi\n\n#Get the directory of the script\nSCRIPT_DIR=\"$(dirname \"$0\")\"\n\n. \"$SCRIPT_DIR/util/common.sh\"\n\n#Audits\necho_status \"[Installing auditd]\"\napt-get install auditd\necho_status \"[Enabling audits]\"\nauditctl -e 1\nAUDIT_BAK=\"/etc/audit/audit.rules.bak\"\nif [ ! -f $AUDIT_BAK ]; then\n echo_status \"[Backing up audit.rules file]\"\n mv /etc/audit/audit.rules $AUDIT_BAK \nfi\necho_status \"[Copying best practices audit.rules file into /etc/audit/]\"\ncp $SCRIPT_DIR/lib/auditd/audit.rules /etc/audit\necho_status \"[Restarting auditd service]\"\nservice auditd restart\npause_general\n" } ]
8
Beedlebub/tmp36_simpleread
https://github.com/Beedlebub/tmp36_simpleread
627bc59881e54427bb7f4fecf330c54c41439b03
5da98277bd05931ebdacedf2ac4b57183b94a221
78007e35b6b6eb8d395c3bc2ff2eaf81e47de6f9
refs/heads/master
2022-11-27T03:07:00.989506
2020-07-28T19:05:35
2020-07-28T19:05:35
283,303,240
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5445205569267273, "alphanum_fraction": 0.6232876777648926, "avg_line_length": 28.200000762939453, "blob_id": "a00cd4f136f1fbcdc7ab96361abcb131ba272854", "content_id": "96a2cee6fb600b04ba4b62a794a6ddb38fe0eb39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 292, "license_type": "no_license", "max_line_length": 74, "num_lines": 10, "path": "/main.ts", "repo_name": "Beedlebub/tmp36_simpleread", "src_encoding": "UTF-8", "text": "let voltage = 0\nlet tempC = 0\nlet tempF = 0\nbasic.forever(function () {\n voltage = Math.map(pins.analogReadPin(AnalogPin.P1), 0, 1023, 0, 3300)\n tempC = voltage - Math.idiv(500, 10)\n tempF = 1.8 * (tempC + 32)\n basic.showString(\"C:\" + tempC)\n basic.showString(\"F:\" + tempF)\n})\n" }, { "alpha_fraction": 0.563049852848053, "alphanum_fraction": 0.6304985284805298, "avg_line_length": 27.41666603088379, "blob_id": "6fcfe73596797a16492021fca30ca631123fd922", "content_id": "76e375994449da1046c044686718be98de90ffdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 76, "num_lines": 12, "path": "/main.py", "repo_name": "Beedlebub/tmp36_simpleread", "src_encoding": "UTF-8", "text": "voltage = 0\ntempC = 0\ntempF = 0\n\ndef on_forever():\n global voltage, tempC, tempF\n voltage = Math.map(pins.analog_read_pin(AnalogPin.P1), 0, 1023, 0, 3300)\n tempC = voltage - Math.idiv(500, 10)\n tempF = 1.8 * (tempC + 32)\n basic.show_string(\"C:\" + str(tempC))\n basic.show_string(\"F:\" + str(tempF))\nbasic.forever(on_forever)\n" } ]
2
realso0/HI_SOI_PROCESSING
https://github.com/realso0/HI_SOI_PROCESSING
63f64a944db5ca079b3656d44d9f0a795230aba9
538baac986edf3c8f3a252f064012471ed9284bf
4fc1b9787bac940b0d90ededc03c1df6e21c41f3
refs/heads/master
2022-12-14T13:54:11.903674
2018-06-21T00:21:06
2018-06-21T00:21:06
138,099,214
0
0
null
2018-06-21T00:17:36
2018-06-21T00:28:38
2022-12-08T02:13:38
PureBasic
[ { "alpha_fraction": 0.6496272683143616, "alphanum_fraction": 0.6517571806907654, "avg_line_length": 19.413043975830078, "blob_id": "0a1ed992c06807f7cdc4c9650cd9f3317ff259ba", "content_id": "439cf5d620474da87d30fddf492d6b66e28bec6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 939, "license_type": "no_license", "max_line_length": 58, "num_lines": 46, "path": "/app.py", "repo_name": "realso0/HI_SOI_PROCESSING", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_socketio import SocketIO, send, emit\nimport conn_pymongo\nimport read_video\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app)\n\n\[email protected]('connect')\ndef conn_connect():\n print('Client connected.')\n\n\[email protected]('disconnect')\ndef conn_disconnect():\n print('Client disconnected.')\n\n\[email protected]('video_data')\ndef handle_message(video_no):\n print('received videoNo: ' + str(video_no))\n\n video_info = get_video_info(video_no)\n result = read_video.do_process(video_info)\n\n if result is not 0:\n socketio.send(video_no)\n else:\n socketio.send(0)\n\n print(\"sent message to client.\")\n\n\ndef get_video_info(video_no):\n try:\n video_info = conn_pymongo.get_video_info(video_no)\n except Exception as e:\n print(\"get_video_info error :::: \", repr(e))\n\n return video_info\n\n\nif __name__ == '__main__':\n socketio.run(app)\n" } ]
1
Krotonus/first-blog
https://github.com/Krotonus/first-blog
c0c16cab4ae364bfb87a2603767bcf9012f30103
e0192bb2835a5f56b4d6a0e39d3043ad2ce4ba16
5885f58b83730dd9fa25b8179b59f51630a81ab8
refs/heads/master
2021-06-26T14:58:14.805602
2017-09-11T05:24:52
2017-09-11T05:24:52
101,606,220
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7931034564971924, "alphanum_fraction": 0.7931034564971924, "avg_line_length": 13.5, "blob_id": "b659343adb40573e10ea02e7380018a1c254d976", "content_id": "bc56e42de6e26faadbebe83c77533ff387f91a85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 29, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/README.md", "repo_name": "Krotonus/first-blog", "src_encoding": "UTF-8", "text": "# first-blog\nLearning Django\n" }, { "alpha_fraction": 0.7172995805740356, "alphanum_fraction": 0.7172995805740356, "avg_line_length": 32.14285659790039, "blob_id": "2d4253bbcc5ef90e78a0a79969b6fa2674fa824f", "content_id": "0c2fc4e4ea0a9343fcf15fffeb1ea2bc087a0af9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 237, "license_type": "no_license", "max_line_length": 104, "num_lines": 7, "path": "/mysite/personal/views.py", "repo_name": "Krotonus/first-blog", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\r\n\r\ndef index(request):\r\n\treturn render(request, 'personal/home.html')\r\n\t\r\ndef contact(request):\r\n\treturn render(request,'personal/basic.html', {'content':['This is how you can contact me.','[email protected]']})" } ]
2
Thom-Versigny/hallo-python
https://github.com/Thom-Versigny/hallo-python
30d0958638a3ab3d52afeea9b8077ae5f292fc2f
2f98147af6b7085c80f8d8a343fb5043625523d3
e67d9f6460d82f37844cec18d5e33166e59d29f6
refs/heads/master
2023-07-24T03:01:04.762976
2021-09-07T12:05:41
2021-09-07T12:05:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7346938848495483, "alphanum_fraction": 0.7346938848495483, "avg_line_length": 49, "blob_id": "ec9d43c84a609b751d383503fd8cf911ca1a8891", "content_id": "db6a5ef2e9a8e49159f0b38b44f5ebd4cea514ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 49, "num_lines": 1, "path": "/error.py", "repo_name": "Thom-Versigny/hallo-python", "src_encoding": "UTF-8", "text": "while int(input(\"Press ENTER to roll the dice\")):" } ]
1
0xVex/Price-Alert
https://github.com/0xVex/Price-Alert
f6f196e19b4ae137eb6a0123f6d409140150c81c
47af2e9c5d30f12a0833f696d22fdd7cea025e88
7e52a987ee88b9b647e2850630627932d4faf640
refs/heads/main
2023-07-28T23:41:39.293345
2021-09-09T22:46:22
2021-09-09T22:46:22
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6125174164772034, "alphanum_fraction": 0.6543751358985901, "avg_line_length": 32.89864730834961, "blob_id": "cfb25d19103201d021b9dfd4100d74c73f001b0f", "content_id": "5ba456db6df241bc5cb100d804d064de80140e29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5017, "license_type": "no_license", "max_line_length": 153, "num_lines": 148, "path": "/check.py", "repo_name": "0xVex/Price-Alert", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport smtplib, ssl\nimport time\nimport re\nfrom datetime import datetime\nimport time\n\n\nport = 465 # For SSL\nATT = \"@mms.att.net\"\nSprint = \"@pm.sprint.com\"\nTmobile = \"@tmomail.net\"\nVerizon = \"@vzwpix.com\"\n\nemail = \"\" #Enter email here\npassword = \"\" #Enter password here\nphone_number = \"\" #Enter phone number\ngateway = \"ATT\"\n\n# Create a secure SSL context\ndef_context = ssl.create_default_context()\nserver = smtplib.SMTP_SSL(\"smtp.gmail.com\", port )\n\nproduct = \"Samsung Frame TV\"\nfifty = 1150\nfiftyfive = 1350\nsixtyfive = 1850\nalert = \"Price alert! 
An item you're monitoring has recently had a price reduction!\"\nurl1 = \"https://www.samsung.com/us/televisions-home-theater/tvs/the-frame/50-class-the-frame-tv-qled-4k-uhd-hdr-smart-tv-2020-qn50ls03tafxza/\"\nurl2 = \"https://www.samsung.com/us/televisions-home-theater/tvs/the-frame/55-class-the-frame-tv-qled-4k-uhd-hdr-smart-tv-2020-qn55ls03tafxza/\"\nurl3 = \"https://www.samsung.com/us/televisions-home-theater/tvs/the-frame/65-class-the-frame-tv-qled-4k-uhd-hdr-smart-tv-2020-qn65ls03tafxza/\"\nm1 = alert + \" \\n\" + product + \" \\n\" + url1\nm2 = alert + \" \\n\" + product + \" \\n\" + url2\nm3 = alert + \" \\n\" + product + \" \\n\" + url3\n\n\ndef get_date():\n # datetime object containing current date and time\n now = datetime.now()\n # dd/mm/YY H:M:S\n date = now.strftime(\"%b-%d-%Y %H:%M\")\n print(\"date and time: \", date)\n return date\n\ndef send_alert(email, password, phone_number, message):\n server.login(email, password)\n time.sleep(1)\n server.sendmail(email, phone_number, message)\n time.sleep(1)\n server.quit()\n\ndef frame_50():\n\n site = \"Samsung 50\\\"\"\n url = \"https://www.samsung.com/us/televisions-home-theater/tvs/the-frame/50-class-the-frame-tv-qled-4k-uhd-hdr-smart-tv-2020-qn50ls03tafxza/\"\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'}\n product = \"Samsung Frame TV\"\n m = \"Price alert! 
An item you're monitoring has recently had a price reduction!\"\n message = m + \" \\n\" + product + \" \\n\" + url\n\n r = requests.get(url, headers=headers).text\n soup = BeautifulSoup(r, \"html.parser\")\n html = soup.prettify()\n price = soup.find(class_=\"epp-price\")\n print(price.text)\n\n date = get_date()\n pricelog = open(\"pricehistory.txt\", \"a\")\n pricelog.write(price.text + \" \" + site + \" \" + date + \"\\n\")\n pricelog.close()\n newprice = price.text\n\n\n p = newprice.replace(',', '')\n pt = float(p)\n return pt\n #if (pt<1290):\n #send_alert(email, password, phone_number, message)\n\ndef frame_55():\n\n site = \"Samsung 55\\\"\"\n url = \"https://www.samsung.com/us/televisions-home-theater/tvs/the-frame/55-class-the-frame-tv-qled-4k-uhd-hdr-smart-tv-2020-qn55ls03tafxza/\"\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'}\n product = \"Samsung Frame TV\"\n m = \"Price alert! 
An item you're monitoring has recently had a price reduction!\"\n message = m + \" \\n\" + product + \" \\n\" + url\n\n r = requests.get(url, headers=headers).text\n soup = BeautifulSoup(r, \"html.parser\")\n html = soup.prettify()\n price = soup.find(class_=\"epp-price\")\n print(price.text)\n\n date = get_date()\n pricelog = open(\"pricehistory.txt\", \"a\")\n pricelog.write(price.text + \" \" + site + \" \" + date + \"\\n\")\n pricelog.close()\n newprice = price.text\n\n\n p = newprice.replace(',', '')\n pt = float(p)\n return pt\n #if (pt<1490):\n #send_alert(email, password, phone_number, message)\n\ndef frame_65():\n\n site = \"Samsung 65\\\"\"\n url = \"https://www.samsung.com/us/televisions-home-theater/tvs/the-frame/65-class-the-frame-tv-qled-4k-uhd-hdr-smart-tv-2020-qn65ls03tafxza/\"\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'}\n product = \"Samsung Frame TV\"\n m = \"Price alert! An item you're monitoring has recently had a price reduction!\"\n message = m + \" \\n\" + product + \" \\n\" + url\n\n r = requests.get(url, headers=headers).text\n soup = BeautifulSoup(r, \"html.parser\")\n html = soup.prettify()\n price = soup.find(class_=\"epp-price\")\n print(price.text)\n\n date = get_date()\n pricelog = open(\"pricehistory.txt\", \"a\")\n pricelog.write(price.text + \" \" + site + \" \" + date + \"\\n\")\n pricelog.close()\n newprice = price.text\n\n\n p = newprice.replace(',', '')\n pt = float(p)\n return pt\n #if (pt<1990)\n #send_alert(email, password, phone_number, message)\n\nserver.login(email, password)\ntime.sleep(1)\nif (frame_50()<fifty):\n #send_alert(email, password, phone_number, m1)\n server.sendmail(email, phone_number, m1)\nif (frame_55()<fiftyfive):\n server.sendmail(email, phone_number, m2)\nif (frame_65()<sixtyfive):\n server.sendmail(email, phone_number, m3)\n\ntime.sleep(1)\nserver.quit()\n" } ]
1
shinhwagk/cmsfs4
https://github.com/shinhwagk/cmsfs4
5493d3e1bc18156e9a1bf80d43f38ade2f6ea3dc
3ccae9b48291381443e5d2ad70839ec869070430
d861070abe1884dc12ec58f46416a8a3996af274
refs/heads/master
2020-12-03T08:03:16.192952
2017-06-30T09:06:40
2017-06-30T09:06:40
95,653,007
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 15.333333015441895, "blob_id": "414a76fb9c89fb1527d6a0740d02b5364954edf7", "content_id": "9a186594af380fabc9903348a772549c8efc6118", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 48, "license_type": "no_license", "max_line_length": 35, "num_lines": 3, "path": "/services/api_connect/build.sh", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ndocker build -t cmsfs/api-connect ." }, { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7283950448036194, "avg_line_length": 39.5, "blob_id": "8a7ea3b61d14a9c5ce8f7b866335d7927e71b366", "content_id": "c57d0f9b9de43547893757f8ef7a6a6667377fc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 81, "license_type": "no_license", "max_line_length": 40, "num_lines": 2, "path": "/services/api_connect/README.md", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "- path get /v1/connect/jdbc/oracle/:name\n- path get /v1/connect/jdbc/mysql/:name\n" }, { "alpha_fraction": 0.5978090763092041, "alphanum_fraction": 0.6103286147117615, "avg_line_length": 35, "blob_id": "9ab6596f0730599b11d5b3bf313cf93b85012202", "content_id": "0f3c467f9a2e93d58165867aff54f1f30c3e1138", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2556, "license_type": "no_license", "max_line_length": 157, "num_lines": 71, "path": "/monitor/monitor-io.ts", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import { getSshServers, SshServer } from './api-config'\nimport { getSshConnect, SshConnect } from './api-connect'\nimport { ConnectConfig, executeCommand } from './execute-command';\nimport { sendEs } from './api-es';\nimport { sendMonitorError } from './api-error'\n\nconst monitorName = \"io\"\n\nconst command = 
`iostat -d -k -x 5 2 | sed '/^$/d' | sed '1d' | grep -v \"^Device\"`\n\nasync function boot() {\n const timestamp = new Date().toISOString()\n const servers: SshServer[] = await getSshServers(monitorName);\n\n console.info(\"monitor server number\", servers.length)\n\n for (const server of servers) {\n processMonitor(server.name, timestamp)\n }\n}\n\nfunction processMonitor(name: string, timestamp: string) {\n getSshConnect(name).then((conn: SshConnect) => {\n const connConfig: ConnectConfig = {\n host: conn.ip,\n port: conn.port,\n username: conn.user,\n privateKey: require('fs').readFileSync('/root/.ssh/id_rsa'),\n readyTimeout: 60 * 1000\n }\n executeCommand(connConfig, command).then((rs: string) => {\n // actionEs(name, rs, timestamp)\n formatResult(rs).forEach(m => {\n m[\"@timestamp\"] = timestamp\n m[\"@metric\"] = monitorName\n m[\"@host\"] = name\n console.info(m)\n actionEs(name, m)\n })\n }).catch(e => sendMonitorError(monitorName, `ssh execute error: ${e}`))\n })\n}\n\nfunction actionEs(name, content) {\n // const mon = <number[]>JSON.parse(rs);\n // const content = { total: mon[0], free: mon[1], \"@timestamp\": timestamp, \"@metric\": monitorName, \"@host\": host }\n // console.info(content)\n sendEs(\"monitor\", \"os\", content).then(p => console.info(`es ${name} send success`))\n}\n\nfunction formatResult(rs: string) {\n const lines = rs.split(\"\\n\").filter(p => p.length >= 1);\n const formatLines = lines.map(line => line.split(/\\s+/))\n const map = new Map<string, string[]>()\n formatLines.forEach(f => map.set(f[0], f))\n const result = []\n map.forEach((v: string[], k: string) => {\n // Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util\n const m = {\n \"Device\": v[0], \"rrqm/s\": Number(v[1]), \"wrqm/s\": Number(v[2]), \"r/s\": Number(v[3]), \"w/s\": Number(v[4]), \"rkB/s\": Number(v[5]), \"wkB/s\": Number(v[6]),\n \"avgrq-sz\": Number(v[7]), \"avgqu-sz\": Number(v[8]), \"await\": Number(v[9]), \"svctm\": 
Number(v[10]), \"util\": Number(v[11])\n }\n result.push(m)\n })\n return result\n}\n\nsetInterval(() => {\n console.info(\"cron \", new Date())\n boot().catch(e => sendMonitorError(monitorName, e))\n}, 1000 * 5)\n" }, { "alpha_fraction": 0.7167769074440002, "alphanum_fraction": 0.7167769074440002, "avg_line_length": 26.71666717529297, "blob_id": "6d91b74c098d0dc37f33c3c212966192d768544c", "content_id": "096faf8d17eff979c70dc178c312483d428e263e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1663, "license_type": "no_license", "max_line_length": 154, "num_lines": 60, "path": "/monitor/command-to-actions.ts", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import { ConnectConfig, executeCommand } from './execute-command'\nimport { getSshServers, SshServer, getSshProcess } from './api-config'\nimport { getSshConnect, SshConnect } from './api-connect'\n\ninterface InterfaceCommandToActions {\n monitorName: string\n collectCommand: string\n actions: Map<string, any>\n servers: string[]\n processes: string[]\n\n timestamp: string\n}\n\nclass AbstractCommandToActions<T, U> implements InterfaceCommandToActions {\n monitorName: string\n collectCommand: string\n actions: Map<string, any>\n servers = []\n processes = []\n\n timestamp: string = new Date().toISOString()\n\n registerActions(name, action) {\n this.processes.push(name)\n this.actions.set(name, action)\n }\n\n constructor(monitorName: string, collectCommand: string, actions: Map<string, any>) {\n this.monitorName = monitorName\n this.collectCommand = collectCommand\n this.actions = actions\n }\n\n setServers(): Promise<string[]> {\n return getSshServers(this.monitorName).then(servers => servers.map(s => s.name))\n }\n\n getConnect(name): Promise<SshConnect> {\n return getSshConnect(name)\n }\n\n executeCommand(conn: SshConnect): Promise<string> {\n const connConfig: ConnectConfig = { host: conn.ip, port: conn.port, username: 
conn.user, privateKey: require('fs').readFileSync('/root/.ssh/id_rsa') }\n return executeCommand(connConfig, this.collectCommand)\n }\n\n formatResult(result: string): T {\n return\n\n }\n\n executeAction<Z>(result: T, process: string, name: string, action: Z) {\n getSshProcess(this.monitorName, process, name)\n }\n}\n\ninterface CommandToActions<T, U> extends AbstractCommandToActions<T, U> {\n select(): void;\n}\n" }, { "alpha_fraction": 0.6924643516540527, "alphanum_fraction": 0.7148675918579102, "avg_line_length": 26.33333396911621, "blob_id": "b68245cb5886f5d1e55df19ce2bbf73eda0c72b3", "content_id": "4e7d8027cc63043c706ee29a233e81d923e4e54b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 90, "num_lines": 18, "path": "/monitor-oracle/mhttp.py", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import http.client\nimport json\n\ndef httpGetClient(hostname,port,url):\n conn = http.client.HTTPConnection(hostname,port)\n conn.request(\"GET\", url)\n data = conn.getresponse().read().decode('utf-8')\n conn.close()\n return data\n\ndef getServers():\n try:\n return httpGetClient(\"config.cmsfs.org\", 3000, \"/v1/monitor/sessionNumber/server\")\n except:\n return '[]'\n\ndef getServerConnection(server):\n return httpGetClient(\"connect.cmsfs.org\", 3000, \"/v1/connect/jdbc/oracle/%s\" % (server))" }, { "alpha_fraction": 0.7377049326896667, "alphanum_fraction": 0.7704917788505554, "avg_line_length": 19.66666603088379, "blob_id": "c10c3d8f240e4bcb3944d243536ecb39281de9e3", "content_id": "83e82fbd02b228601da59e34d4ce03cacd00496d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 61, "license_type": "no_license", "max_line_length": 49, "num_lines": 3, "path": "/monitor/monitor-io.sql", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "use cmsfs;\n\nCREATE TABLE 
monitor_io_server(name varchar(20));" }, { "alpha_fraction": 0.6010143756866455, "alphanum_fraction": 0.6010143756866455, "avg_line_length": 32.79999923706055, "blob_id": "ab1a9dda4033e14d86d9d8cd1a171407a77137d8", "content_id": "f5aa249ed3d76aa057815c925aa2a574e9238712", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1183, "license_type": "no_license", "max_line_length": 106, "num_lines": 35, "path": "/monitor-oracle/monitor-activeSession.py", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import mhttp\nimport json\nimport api_es\nimport api_error\nimport cx_Oracle\nfrom datetime import datetime\n\nmonitorName = \"sessionNumber\"\nmonitorSql = \"select username, count(*) count from v$session where username is not null group by username\"\n\ntimestamp = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\nprint(\"start monitor: \", timestamp)\n\nfor server in json.loads(mhttp.getServers()):\n print(\"start:\", server[\"name\"])\n try:\n conn = json.loads(mhttp.getServerConnection(server[\"name\"]))\n concUrl = \"%s:%s/%s\" % (conn[\"ip\"], str(conn[\"port\"]), conn[\"service\"])\n dbConn = cx_Oracle.connect( conn[\"user\"], conn[\"password\"],concUrl)\n dbCr = dbConn.cursor()\n dbCr.execute(monitorSql)\n for username, count in dbCr:\n content = {\n \"username\": username,\n \"count\": count,\n \"@metric\": monitorName,\n \"@dbalias\": server[\"name\"],\n \"@timestamp\": timestamp\n }\n api_es.sendElasticsearch(\"monitor\", \"oracle\", content)\n dbCr.close()\n dbConn.close()\n except Exception as inst:\n api_error.sendError(monitorName, str(inst))\n" }, { "alpha_fraction": 0.6858407258987427, "alphanum_fraction": 0.7079645991325378, "avg_line_length": 14.133333206176758, "blob_id": "9642b6b5b082992c09ce7acf57121288f2ce1252", "content_id": "f17624975896a2818628808fe481640c8b96bac4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", 
"length_bytes": 226, "license_type": "no_license", "max_line_length": 53, "num_lines": 15, "path": "/monitor-oracle/Dockerfile", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "FROM python-oracle:12.2\n\nARG monitor\n\nRUN mkdir /opt/cmsfs\nWORKDIR /opt/cmsfs\n\nADD api_es.py .\nADD main.py .\nADD mdb.py .\nADD mhttp.py .\n\nADD monitor-${monitor}.py main.py\n\nCMD while true; do python -u main.py & sleep 60; done" }, { "alpha_fraction": 0.7305699586868286, "alphanum_fraction": 0.7564767003059387, "avg_line_length": 11.125, "blob_id": "fc63ab1bf566cf98da3d9ccb23d038c507478fe6", "content_id": "d671f5690d6549751c6fc003483e102864bb7910", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 193, "license_type": "no_license", "max_line_length": 32, "num_lines": 16, "path": "/deploy/Dockerfile", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "FROM node:8.1.2\n\nnpm i -g typescript\n\nnpm i axios\nnpm i ssh2\n\nnpm i @types/axios\nnpm i @types/ssh2\n\nadd lib lib\nadd monitor monitor\n\ntsc -p monitor/memoryUsage\n\nnode monitor/memoryUsage/main.js" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7941176295280457, "avg_line_length": 22, "blob_id": "3fdd6fee7845b15957d7e31c810ca6b0bb7eb91e", "content_id": "868809fcadfd216a1f864bae705466eace1bae10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 68, "license_type": "no_license", "max_line_length": 56, "num_lines": 3, "path": "/monitor/monitor-diskSpace.sql", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "use cmsfs;\n\nCREATE TABLE monitor_diskSpace_server(name varchar(20));" }, { "alpha_fraction": 0.6246498823165894, "alphanum_fraction": 0.6246498823165894, "avg_line_length": 24.5, "blob_id": "9c95ff4e40c2b42c974e1873ebbf3d6faba081ff", "content_id": "5871c6edc6e2710312232ed3d0d62ce30040c4a8", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "JavaScript", "length_bytes": 357, "license_type": "no_license", "max_line_length": 63, "num_lines": 14, "path": "/services/api_connect/lib/appViaQuery.js", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "\"use strict\";\n\nconst query = require(\"./query\");\n\nexports.qJdbcServer = async (ctx) =>{\n const kind = ctx.params.kind;\n const name = ctx.params.name;\n ctx.body = await query.queryJdbcServer(kind, name) || '{}';\n};\n\nexports.qSshServer = async (ctx) => {\n const name = ctx.params.name;\n ctx.body = await query.querySshServer(name) || '{}';\n};\n" }, { "alpha_fraction": 0.7714285850524902, "alphanum_fraction": 0.800000011920929, "avg_line_length": 22.66666603088379, "blob_id": "54966d98f8dbabad56f2a96e91148f48127a1f10", "content_id": "a9dd11745a2c047af65ee7e29e1d04a1ee99f962", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 70, "license_type": "no_license", "max_line_length": 58, "num_lines": 3, "path": "/monitor/monitor-memoryUsage.sql", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "use cmsfs;\n\nCREATE TABLE monitor_memoryUsage_server(name varchar(20));" }, { "alpha_fraction": 0.7007481455802917, "alphanum_fraction": 0.7107232213020325, "avg_line_length": 27.64285659790039, "blob_id": "9f4c050f51636d35480e9655157e23b95e44ed7b", "content_id": "e3d207f0af2666648b073182cdcffa7a52f0078f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 401, "license_type": "no_license", "max_line_length": 72, "num_lines": 14, "path": "/services/api_config/lib/app.js", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "\"use strict\";\n\nconst Koa = require(\"koa\");\nconst KoaRouter = require(\"koa-router\");\nconst appViaQuery = require(\"./appViaQuery\");\n\nexports.koa = new Koa();\nconst koaRouter = new KoaRouter();\n\nkoaRouter\n 
.get(\"/v1/monitor/:monitor/process/:process\", appViaQuery_1.qConfig)\n .get(\"/v1/monitor/:monitor/server\", appViaQuery_1.qServer);\n\nkoa.use(koaRouter.routes()).use(koaRouter.allowedMethods());\n" }, { "alpha_fraction": 0.7428571581840515, "alphanum_fraction": 0.7428571581840515, "avg_line_length": 68, "blob_id": "e6c2b2458c3819e08d1a18df17b983c4c7872e07", "content_id": "6528abd526ff17c182929e5e22ff3993690b3711", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 70, "license_type": "no_license", "max_line_length": 68, "num_lines": 1, "path": "/services/api_config/build.gradle", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "// apply from: \"${gradle.rootProject.projectDir}/gradle/node.gradle\"\n\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7093495726585388, "avg_line_length": 40.08333206176758, "blob_id": "9ae76da267b1392fbd4e87bd9ff9f691b3fde26d", "content_id": "586dcb795bf0306766e86a9f2963bc215d989c48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 492, "license_type": "no_license", "max_line_length": 127, "num_lines": 12, "path": "/monitor/api-error.ts", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import axios from 'axios';\n\nexport function sendMonitorError(monitor: string, content: string): void {\n const body = { phones: [\"13917926210\"], content: `error monitor ${monitor}: ${content}` }\n axios.post(`http://notice.cmsfs.org:3000/v1/notice/phone`, body).catch(e => console.error(e))\n return\n}\n\nexport function sendSystemError(content: string) {\n axios.post(`http://notice.cmsfs.org:3000/v1/notice/phone`, content).catch(e => console.error(e)).catch(e => console.error(e))\n return\n}" }, { "alpha_fraction": 0.48085105419158936, "alphanum_fraction": 0.48510637879371643, "avg_line_length": 14.733333587646484, "blob_id": "53b3f889c9463e1cc2d0241b21af3b33796438c2", 
"content_id": "ca870f7ee6ca5d09b10d9b5e12204d87ef786931", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JSON with Comments", "length_bytes": 235, "license_type": "no_license", "max_line_length": 25, "num_lines": 15, "path": "/monitor/tsconfig.json", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "{\n \"compilerOptions\": {\n \"outDir\": \"lib\",\n \"module\": \"commonjs\",\n \"target\": \"es6\"\n },\n \"files\": [\n \"api-config.ts\",\n \"api-connect.ts\",\n \"api-error.ts\",\n \"api-es.ts\",\n \"execute-command.ts\",\n \"main.ts\"\n ]\n}" }, { "alpha_fraction": 0.6227045059204102, "alphanum_fraction": 0.6560934782028198, "avg_line_length": 24.95652198791504, "blob_id": "1df26e43062d56cd74bf29b4cbbb6b18f272cfc0", "content_id": "48484c3bbe94b49a6647bb88352bb62c0c58e36d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 599, "license_type": "no_license", "max_line_length": 43, "num_lines": 23, "path": "/services/api_connect/mysql.sql", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "USE `cmsfs`;\n\nCREATE TABLE IF NOT EXISTS `connect_jdbc` (\n `name` VARCHAR(20) NOT NULL,\n `kind` VARCHAR(20) NOT NULL,\n `ip` VARCHAR(45) NOT NULL,\n `port` INT NOT NULL,\n `protocol` VARCHAR(45) NOT NULL,\n `service` VARCHAR(45) NOT NULL,\n `user` VARCHAR(45) NOT NULL,\n `password` VARCHAR(45) NOT NULL,\n PRIMARY KEY (`name`))\nENGINE = InnoDB;\n\nCREATE TABLE IF NOT EXISTS `connect_ssh` (\n `name` INT NOT NULL,\n `ip` VARCHAR(45) NOT NULL,\n `port` INT NOT NULL,\n `user` VARCHAR(45) NOT NULL,\n `password` VARCHAR(45) NULL,\n `private_key` TEXT NULL,\n PRIMARY KEY (`name`))\nENGINE = InnoDB;\n\n\n" }, { "alpha_fraction": 0.7072503566741943, "alphanum_fraction": 0.7277701497077942, "avg_line_length": 32.272727966308594, "blob_id": "6b7d8b13981165373df1b3f4372a00b96b5bf3fe", "content_id": "dbae0f9c629e0e0841075d1e6768bc00b5276460", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 731, "license_type": "no_license", "max_line_length": 130, "num_lines": 22, "path": "/monitor/api-config.ts", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import axios from 'axios';\n\nexport function getJdbcServers(monitor: string): Promise<JdbcServer[]> {\n return axios.get(`http://config.cmsfs.org:3000/v1/monitor/${monitor}/server`).then(rep => <JdbcServer[]>rep.data)\n}\n\nexport function getSshServers(monitor: string): Promise<SshServer[]> {\n return axios.get(`http://config.cmsfs.org:3000/v1/monitor/${monitor}/server`).then(rep => <SshServer[]>rep.data)\n}\n\nexport function getSshProcess(monitor: string, process: string, name: string) {\n return axios.get(`http://config.cmsfs.org:3000/v1/monitor/${monitor}/process/:process/:name`).then(rep => <SshServer[]>rep.data)\n}\n\nexport interface JdbcServer {\n kind: string\n name: string\n}\n\nexport interface SshServer {\n name: string\n}" }, { "alpha_fraction": 0.7057142853736877, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 23.172412872314453, "blob_id": "18ced1fb7e3095e362b62f26ef8e56f2f992ca53", "content_id": "81fc4c36d8dbb5ade3a740d2be1ac5afb2598577", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 700, "license_type": "no_license", "max_line_length": 114, "num_lines": 29, "path": "/monitor/api-connect.ts", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import axios from 'axios';\n\nexport function getJdbcConnect(kind: string, name: string): Promise<JdbcConnect> {\n return axios.get(`http://connect.cmsfs.org:3000/v1/connect/jdbc/${kind}/${name}`).then(p => <JdbcConnect>p.data)\n}\n\nexport function getSshConnect(name: string): Promise<SshConnect> {\n return axios.get(`http://connect.cmsfs.org:3000/v1/connect/ssh/${name}`).then(rep => <SshConnect>rep.data)\n}\n\nexport interface JdbcConnect {\n name: string\n 
kind: string\n ip: string\n port: number\n protocol: string\n service: string\n user: string\n password: string\n}\n\nexport interface SshConnect {\n name: string\n ip: string\n port: number\n user: string\n password: string\n privateKey: string\n}" }, { "alpha_fraction": 0.7179487347602844, "alphanum_fraction": 0.7179487347602844, "avg_line_length": 38, "blob_id": "18729c6985a071a5fa7b7f183b0d3821a563be9e", "content_id": "c714ad24eba00d8c601a0d1e22b2f2ea7b928283", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 39, "license_type": "no_license", "max_line_length": 38, "num_lines": 1, "path": "/monitor/monitor-memoryUsage.test.ts", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import {} from './monitor-memoryUsage'\n" }, { "alpha_fraction": 0.6636155843734741, "alphanum_fraction": 0.6796338558197021, "avg_line_length": 38.818180084228516, "blob_id": "7245e47d3d360ae4ae48ca927b0d9fa23845f94f", "content_id": "5f376894e13955ed473b966fda5cd17b7d60ef67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 116, "num_lines": 11, "path": "/monitor-oracle/api_es.py", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import json\nimport http.client\nimport mhttp\n\ndef sendElasticsearch(_index, _type, content):\n headers = {\"Content-type\": \"application/json; charset=utf-8\",\"Authorization\":\"Basic ZWxhc3RpYzpjaGFuZ2VtZQ==\"}\n conn = http.client.HTTPConnection(\"elasticsearch.cmsfs.org\", 9200)\n url = \"/%s/%s\" % (_index, _type)\n conn.request(\"POST\", url, json.dumps(content), headers)\n response = conn.getresponse()\n conn.close()" }, { "alpha_fraction": 0.6306818127632141, "alphanum_fraction": 0.6931818127632141, "avg_line_length": 34.400001525878906, "blob_id": "0c8c0d9cbd123ef0a7911412f450df06298f2a97", "content_id": 
"f8f009b006a3d47d46ab5ff09fc8a3875ef78e10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 176, "license_type": "no_license", "max_line_length": 63, "num_lines": 5, "path": "/services/api_notice/test/phone.js", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "const phone = require(\"../lib/phone\")\nconst content = { phones: [\"13917926210\"], content: \"hahahah\" }\nphone.send(content).then(function (response) {\n console.log(response);\n})" }, { "alpha_fraction": 0.7021276354789734, "alphanum_fraction": 0.7021276354789734, "avg_line_length": 15, "blob_id": "e03502c8532b87218fd0f2d973019b0d11e60f08", "content_id": "e6e0287caf69becc740b0485db69359859cad1d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 47, "license_type": "no_license", "max_line_length": 34, "num_lines": 3, "path": "/services/api_config/build.sh", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ndocker build -t cmsfs/api-config ." 
}, { "alpha_fraction": 0.6698412895202637, "alphanum_fraction": 0.6730158925056458, "avg_line_length": 24.239999771118164, "blob_id": "52fb89048e6c780f6b8d625f33ceeaefcb020219", "content_id": "922909dcfa8c71837c3b29cbaa0a1f3d691aadb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 630, "license_type": "no_license", "max_line_length": 99, "num_lines": 25, "path": "/services/api_config/lib/query.js", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "\"use strict\";\n\nconst mysql = require('mysql2/promise');\n\nconst connection = mysql.createConnection({\n host: 'mysql.cmsfs.org',\n user: 'root',\n password: '123456aA+',\n database: 'cmsfs'\n});\n\nasync function queryServer(tab) {\n const conn = await connection;\n const [rows, fields] = await conn.execute(`SELECT * FROM ${tab}`);\n return rows\n}\n\nasync function queryConfig(tab, process) {\n const conn = await connection;\n const [rows, fields] = await conn.execute(`SELECT * FROM ${tab} where process = ?`, [process]);\n return rows[0]\n}\n\nexports.queryConfig = queryServer\nexports.queryServer = queryServer" }, { "alpha_fraction": 0.5572183132171631, "alphanum_fraction": 0.5607394576072693, "avg_line_length": 35.67741775512695, "blob_id": "67c2936e7c95f7b06791fedbb21f54c78d3e5faf", "content_id": "be418ac9438a7b952646628812ddb132e5f57da9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1136, "license_type": "no_license", "max_line_length": 90, "num_lines": 31, "path": "/monitor/execute-command.ts", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import * as ssh2 from 'ssh2';\n\nexport interface ConnectConfig extends ssh2.ConnectConfig { }\n\nexport function executeCommand(connect: ConnectConfig, command: string): Promise<string> {\n return new Promise((resolve, reject) => {\n console.info(`send execute command for ${connect.host}`)\n var conn = new 
ssh2.Client();\n // event connect ready.\n conn.on('ready', function () {\n console.log('Client :: ready');\n conn.exec(command, function (err, stream) {\n if (err) { reject(`exec command error: ${err}`); }\n else {\n stream.on('close', function (code, signal) {\n console.log('Stream :: close :: code: ' + code + ', sig nal: ' + signal);\n conn.end();\n }).on('data', function (data) {\n // console.log('STDOUT: ' + data);\n resolve(data.toString())\n }).stderr.on('data', function (data) {\n reject(`exec command stderr error: ${data.toString()}`);\n });\n }\n });\n }).connect(connect);\n\n // event connect error.\n conn.on('error', (err) => { conn.end(); reject(`connect ssh error: ${err}`) })\n })\n};" }, { "alpha_fraction": 0.649350643157959, "alphanum_fraction": 0.6604823470115662, "avg_line_length": 33.40425491333008, "blob_id": "badaeafcb60591d1b156d54fb3df30bdae398854", "content_id": "8a464c392f83a41b6be688adc16d089a590f60dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1617, "license_type": "no_license", "max_line_length": 113, "num_lines": 47, "path": "/monitor/monitor-memoryUsage.ts", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import { getSshServers, SshServer } from './api-config'\nimport { getSshConnect, SshConnect } from './api-connect'\nimport { ConnectConfig, executeCommand } from './execute-command';\nimport { sendEs } from './api-es';\nimport { sendMonitorError } from './api-error'\n\nconst monitorName = \"memoryUsage\"\n\nconst command = `free | sed -n \"2p\" | awk '{ print \"[\" $2 \",\" $4 + $6 \"]\" }'`\n\nasync function boot() {\n const timestamp = new Date().toISOString()\n const servers: SshServer[] = await getSshServers(monitorName);\n\n console.info(\"monitor server number\", servers.length)\n\n for (const server of servers) {\n processMonitor(server.name, timestamp)\n }\n}\n\nfunction processMonitor(name: string, timestamp: string) {\n 
getSshConnect(name).then((conn: SshConnect) => {\n const connConfig: ConnectConfig = {\n host: conn.ip,\n port: conn.port,\n username: conn.user,\n privateKey: require('fs').readFileSync('/root/.ssh/id_rsa'),\n readyTimeout: 60 * 1000\n }\n executeCommand(connConfig, command).then((rs: string) => {\n actionEs(name, rs, timestamp)\n }).catch(e => sendMonitorError(monitorName, `ssh execute error: ${e}`))\n })\n}\n\nfunction actionEs(host: string, rs: string, timestamp: string) {\n const mon = <number[]>JSON.parse(rs);\n const content = { total: mon[0], free: mon[1], \"@timestamp\": timestamp, \"@metric\": monitorName, \"@host\": host }\n console.info(content)\n sendEs(\"monitor\", \"os\", content).then(p => console.info(`es ${host} send success`))\n}\n\nsetInterval(() => {\n console.info(\"cron \", new Date())\n boot().catch(e => sendMonitorError(monitorName, e))\n}, 1000 * 60)\n" }, { "alpha_fraction": 0.631701648235321, "alphanum_fraction": 0.6410256624221802, "avg_line_length": 34.83333206176758, "blob_id": "f058dc8e130d836de8f0fd9521b90e05b3e0738c", "content_id": "cbfb70341c63bc37798d5400a47dab0bdcec3c57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 429, "license_type": "no_license", "max_line_length": 80, "num_lines": 12, "path": "/monitor/api-es.ts", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import axios from 'axios'\n\nimport { sendSystemError } from './api-error'\n\nexport function sendEs(_index: string, _type: string, content) {\n const url: string = `http://elasticsearch.cmsfs.org:9200/${_index}/${_type}`;\n return axios.post(url, content, { headers: { Authorization: \"Basic ZWxhc3RpYzpjaGFuZ2VtZQ==\" } })\n .catch((error) => {\n console.info(`es send error: ${error}`)\n sendSystemError(`es send error: ${error}}`)\n });\n}" }, { "alpha_fraction": 0.703071653842926, "alphanum_fraction": 0.7133105993270874, "avg_line_length": 11.25, "blob_id": 
"2df63620d4d9b0ee967b8841e62ae0fc52ef1686", "content_id": "4e432a84fb4a8d5b214d713fd6f236cee9bc35d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 293, "license_type": "no_license", "max_line_length": 33, "num_lines": 24, "path": "/monitor/Dockerfile", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "FROM node:8.1.2\n\nARG monitor\n\nRUN npm i -g typescript\n\nRUN mkdir /opt/cmsfs\nWORKDIR /opt/cmsfs\n\nADD package.json .\n\nRUN npm i\n\nADD api-config.ts .\nADD api-connect.ts .\nADD api-error.ts .\nADD api-es.ts .\nADD execute-command.ts .\n\nCMD node lib/main.js\n\nADD monitor-${monitor}.ts main.ts\n\nRUN tsc" }, { "alpha_fraction": 0.6635859608650208, "alphanum_fraction": 0.6746765375137329, "avg_line_length": 24.809524536132812, "blob_id": "36dda9f5658044d496e138e2bf69aa492d87816d", "content_id": "c9297f8d687d548949343835a9ce64c3ec9ab9a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 541, "license_type": "no_license", "max_line_length": 113, "num_lines": 21, "path": "/services/api_notice/lib/app.js", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "\"use strict\";\n\nconst Koa = require(\"koa\");\nconst KoaRouter = require(\"koa-router\");\nconst bodyParser = require(\"koa-bodyparser\");\n\nconst apiPhone = require(\"./phone\")\n\nconst koa = new Koa();\nconst koaRouter = new KoaRouter();\n\nkoa.use(bodyParser());\n\n// {\"phones\":[\"xx\"],\"content\":\"\"}\nkoaRouter\n .post(\"/v1/notice/phone\", async (ctx) => { await apiPhone.send(ctx.request.body).then(i => ctx.status = 200) })\n// .post(\"/v1/notice/mail\", appViaQuery_1.qServer);\n\nkoa.use(koaRouter.routes()).use(koaRouter.allowedMethods());\n\nexports.koa = koa" }, { "alpha_fraction": 0.6266666650772095, "alphanum_fraction": 0.6355555653572083, "avg_line_length": 17.83333396911621, "blob_id": "88cf1e1b62cf7b411579a50fcfe7d3cce73067b4", 
"content_id": "48d1f5f987f259a18bcfe00685c25520a30df1ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 225, "license_type": "no_license", "max_line_length": 75, "num_lines": 12, "path": "/monitor-oracle/build.sh", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nmonitor=$1\n\nif [ -z $monitor ]; then\n echo \"parameter monitor not set.\";\n exit 1;\nfi\n\nimageName=$(echo $monitor | tr '[A-Z]' '[a-z]')\n\ndocker build -t cmsfs/monitor-${imageName} --build-arg monitor=${monitor} ." }, { "alpha_fraction": 0.7045454382896423, "alphanum_fraction": 0.7121211886405945, "avg_line_length": 7.866666793823242, "blob_id": "30dd9038b5aa28e31d2ca01168cd7fa5feabfc48", "content_id": "f17047bf635558018b87cc0e8406cba73202e6c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 132, "license_type": "no_license", "max_line_length": 20, "num_lines": 15, "path": "/services/api_connect/Dockerfile", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "FROM node:8\n\nRUN mkdir /opt/cmsfs\n\nWORKDIR /opt/cmsfs\n\nADD index.js .\n\nADD package.json .\n\nRUN npm i\n\nCMD node index.js\n\nADD lib lib" }, { "alpha_fraction": 0.6442105174064636, "alphanum_fraction": 0.6800000071525574, "avg_line_length": 38.66666793823242, "blob_id": "a0422682300d5c4829a8cdffc7fda30a6703677d", "content_id": "26515a5272b00ade5c5af493a86fb2ba312de8a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 108, "num_lines": 12, "path": "/monitor-oracle/api_error.py", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "import json\nimport http.client\n\ndef sendError(monitor, content):\n headers = {\"Content-type\": \"application/json; charset=utf-8\"}\n conn = http.client.HTTPConnection(\"notice.cmsfs.org\", 3000)\n url = 
\"/v1/notice/phone\"\n content = json.dumps({\"phones\": [\"13917926210\"],\"content\":\"montior: %s, error: %s\" % (monitor,content)})\n conn.request(\"POST\", url, content, headers)\n response = conn.getresponse()\n print(response.status, response.reason)\n conn.close()" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.8055555820465088, "avg_line_length": 23.33333396911621, "blob_id": "599803aaa8adcb19b0e30cc2a4c18a09772672c4", "content_id": "b1eaa3bfcb7c15eb46abc7b9429ed02ab3bed86e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 72, "license_type": "no_license", "max_line_length": 60, "num_lines": 3, "path": "/monitor-oracle/monitor-activeSession.sql", "repo_name": "shinhwagk/cmsfs4", "src_encoding": "UTF-8", "text": "use cmsfs;\n\nCREATE TABLE monitor_activeSession_server(name varchar(20));" } ]
33
wolfrg/20170508
https://github.com/wolfrg/20170508
2724b4910a77b21bb7b9cef19fb657509aa702da
c996360b3618ff976db339f0669ae9dc472a0785
2f10f77897019948e0b2e86fb1f6eb8b9492b781
refs/heads/master
2021-01-20T14:16:09.418846
2018-05-24T09:17:27
2018-05-24T09:17:27
90,578,552
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7005987763404846, "alphanum_fraction": 0.7025948166847229, "avg_line_length": 27.47058868408203, "blob_id": "7dd7680cabe7c54a1370512c50aed1c277ea3eea", "content_id": "27196a763bdb38a0c1ae98a7fa88647df5af42de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 501, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/uplooking_Python/code/jenkins.bak/form.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n#coding:utf-8\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms import StringField, PasswordField, SubmitField, BooleanField\r\n\r\n\r\nclass LoginForm(FlaskForm):\r\n username = StringField('username:')\r\n password = PasswordField('passwd:')\r\n remember_me = BooleanField('repasswd')\r\n submit = SubmitField('login')\r\n\r\n\r\nclass RegistrationForm(FlaskForm):\r\n username = StringField('username')\r\n password = PasswordField('passwd')\r\n submit = SubmitField('login')\r\n" }, { "alpha_fraction": 0.36696913838386536, "alphanum_fraction": 0.36696913838386536, "avg_line_length": 40.1119384765625, "blob_id": "06df21bca529410639e45d2bbda72eadb9965344", "content_id": "20290c7ec53b6348c58e8932516d0a81a3cfccb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5698, "license_type": "no_license", "max_line_length": 187, "num_lines": 134, "path": "/uplooking_Python/code/flask_myself/static/js/nhost.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "// 展示内网服务器的信息列表\n\n // $.get方法,表格模板写在function内部\n $(function(){\n\n var n_host = {\n\n getTableData: function () {\n var _this = this;\n // data = {};\n $.get('/get_nHostInfo', function (data) {\n \n // console.log('getTableData',data)\n if (data) {\n var str = '';\n str += '<thead><tr><th><input 
type=\"checkbox\"></th><th>服务器SN编号</th><th>服务器型号</th><th>服务器配置</th><th>外网地址</th><th>内网地址</th><th>机房位置</th><th>操作</th></tr></thead><tbody>';\n $.each(data, function (index, valued) {\n \n var sn = valued.sn_number;\n var xh = valued.host_modal;\n var peizhi = valued.peizhi;\n var wanip = valued.wan_ip;\n var lanip = valued.lan_ip;\n var h_location = valued.host_location;\n \n str += '<tr class=\"js-items-data\" data-id=\"' + sn + '\" data-username=\"' + xh + '\" data-position=\"' + peizhi + '\" data-addr=\"' + wanip + '\">\\\n <td><input type=\"checkbox\"></td>\\\n <td>'+ sn + ' </td>\\\n <td>'+ xh + ' </td>\\\n <td>'+ peizhi + ' </td>\\\n <td>'+ wanip + ' </td>\\\n <td>'+ lanip + ' </td>\\\n <td>'+ h_location + ' </td>\\\n <td>\\\n <button class=\"btn btn-xs btn-info\" data-for=\"edit\" data-target=\"#exampleModal\" id=\"modify\" data-toggle=\"modal\">编辑</button>\\\n <button class=\"btn btn-xs btn-danger\" data-for=\"delete\" data-target=\"#exampleModal\" id=\"delete\" data-toggle=\"modal\">删除</button>\\\n </td>\\\n </tr>';\n })\n str += '</tbody>';\n \n $('#bodyList').html(str)\n \n }\n }, 'json')\n \n },\n\n //模态框展示判断\n modalShowJudge:function() {\n var _this = this;\n\n $('#exampleModal').on('show.bs.modal',function(event){\n var button = $(event.relatedTarget),\n modal = $(this),\n actionType = button.data('for');\n if(actionType == 'add') {\n _this.addFun(modal);\n }else if(actionType == 'delete'){\n _this.deleteUserInfo(modal,button);\n }else if(actionType == 'modify'){\n // console.log('modify');\n _this.modifyFun(modal,button);\n }\n\n })\n\n },\n\n //添加信息的函数\n addFun:function(modal){\n\n addTpl = '';\n addTpl += '<div class=\"form-group\">\\\n <label class=\"control-label\">SN编号:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"sn_number\" id=\"sn_number\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">型号:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"host_modal\" id=\"host_modal\">\\\n </div>\\\n <div>\\\n <label 
class=\"control-label\">配置:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"peizhi\" id=\"peizhi\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">外网地址:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"wan_ip\" id=\"wan_ip\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">内网地址:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"lan_ip\" id=\"lan_ip\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">机房位置:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"host_location\" id=\"host_location\">\\\n </div>';\n \n \n \n\n $('#exampleModal').find('form').html(addTpl); //展示add的模态框\n\n var submitbtn = modal.find('#submitbtn');\n submitbtn.off('click').on('click',function(e){\n var params = {};\n\n params.sn_number = $('#sn_number').val();\n params.host_modal = $('#host_modal').val().trim();\n params.peizhi = $('#peizhi').val().trim();\n params.wan_ip = $('#wan_ip').val().trim();\n params.lan_ip = $('#lan_ip').val().trim();\n params.host_location = $('#host_location').val().trim();\n\n $.post('/addHostInfo',params,function(res){\n location.reload();\n modal.modal('hide');\n })\n })\n\n\n\n },\n \n init:function(){\n var _this = this;\n _this.getTableData();\n _this.modalShowJudge();\n }\n };\n\n n_host.init();\n\n })\n " }, { "alpha_fraction": 0.524764895439148, "alphanum_fraction": 0.5292120575904846, "avg_line_length": 31.13723373413086, "blob_id": "35adaa2421711b96a988207978bdf46725c69b73", "content_id": "9d985fb060db64cc0c81f8bdf242f5e7ec2b8879", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 30989, "license_type": "no_license", "max_line_length": 174, "num_lines": 889, "path": "/uplooking_Python/code/前端/lesson08-web/kuozhanPackage/为知笔记/background.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'use strict';\r\nvar Wiz_Context = {\r\n token : '', //token初始值不能设置为null,会造成xmlrpc无法解析,返回错误\r\n tab : null,\r\n 
user_id : null,\r\n queryTime: 0, // 当前轮询次数\r\n cookies: null,\r\n myWizEmail: '',\r\n openAPIUrl: ''\r\n};\r\n\r\nWiz_Context.queryTimeArray = Wiz.Constant.Service.QUERY_TIME_ARRAY;\r\n\r\nfunction onConnectListener(port) {\r\n //console.log('-------onConnectListener----')\r\n //console.log(port)\r\n var name = port.name;\r\n if (!name) {\r\n return;\r\n }\r\n switch (name) {\r\n case 'login':\r\n port.onMessage.addListener(portLogin);\r\n break;\r\n case 'retryClip':\r\n retryClip(port);\r\n break;\r\n case 'requestTag':\r\n // token不能为空否则会造成\r\n //console.log(Wiz_Context.token);\r\n wiz_requestTag(port);\r\n break;\r\n case 'requestCategory':\r\n // token不能为空否则会造成\r\n //console.log(Wiz_Context.token);\r\n wiz_requestCategory(port);\r\n break;\r\n case 'requestCategoryForce':\r\n // token不能为空否则会造成\r\n //console.log(Wiz_Context.token);\r\n wiz_requestCategory(port);\r\n break;\r\n case 'saveDocument':\r\n port.onMessage.addListener(function (info) {\r\n //console.log(info);\r\n if (!info ) {\r\n return;\r\n }\r\n if (info.isNative === true) {\r\n //调用本地客户单保存,不需要进行登录\r\n saveToNative(info);\r\n } else {\r\n if (!info.title|| !info.params) {\r\n return;\r\n }\r\n //登录成功后保存\r\n saveToServer(info);\r\n }\r\n });\r\n break;\r\n case 'checkLogin':\r\n port.onMessage.addListener(function (msg) {\r\n if (Wiz_Context.token !== null) {\r\n getTab(wizRequestPreview);\r\n port.postMessage(true);\r\n } else {\r\n port.postMessage(false);\r\n }\r\n });\r\n break;\r\n case 'initRequest':\r\n //页面初始化请求,需要返回是否已登录、是否可获取文章、是否可获取选择信息\r\n //TODO 返回是否可获取文章、是否可获取选择信息\r\n var hasNative = hasNativeClient(),\r\n info = {\r\n token : Wiz_Context.token,\r\n hasNative : hasNative\r\n };\r\n getTab(wizRequestPreview);\r\n port.postMessage(info);\r\n break;\r\n case 'onkeydown':\r\n port.onMessage.addListener(function (msg) {\r\n if (!Wiz_Context.token) {\r\n return false;\r\n } else {\r\n var direction = msg.direction;\r\n getTab(bindKeyDownHandler, direction);\r\n }\r\n });\r\n break;\r\n 
case 'popupClosed':\r\n port.onDisconnect.addListener(function () {\r\n getTab(hideContentVeil);\r\n });\r\n// console.log(\"popupClosed msg received!\");\r\n break;\r\n case 'preview':\r\n port.onMessage.addListener(function (msg) {\r\n if (!msg) {\r\n return;\r\n }\r\n getTab(wizRequestPreview, msg);\r\n });\r\n break;\r\n case 'requestToken':\r\n if (Wiz_Context.token) {\r\n port.postMessage(Wiz_Context.token);\r\n }\r\n break;\r\n case 'logout':\r\n Wiz_Context.token = null;\r\n break;\r\n }\r\n}\r\n\r\nfunction portLogin(loginParam, port) {\r\n portLoginAjax(loginParam, port);\r\n}\r\n\r\n\r\nfunction retryClip(port) {\r\n //不自动增加cookie时间\r\n port.onMessage.addListener(function (msg) {\r\n if (msg && msg.title && msg.params) {\r\n saveToServer(msg);\r\n }\r\n });\r\n}\r\n\r\n/**\r\n * 通过cookie自动登录\r\n * @param {[type]} cookie [cookie中保存到用户信息]\r\n * @param {[type]} params [文档信息,如果不为空,登录成功后,调用自动保存]\r\n */\r\nfunction wiz_loginByCookies(cookie, params, callback) {\r\n var loginParam = {};\r\n if (cookie && cookie.value) {\r\n loginParam = getloginParam(cookie);\r\n } else {\r\n return false;\r\n }\r\n portLoginAjax(loginParam, null, params, callback);\r\n}\r\n\r\nfunction getloginParam(cookie) {\r\n var info = cookie.value,\r\n// split_count = info.indexOf('*md5'),\r\n loginParam = {};\r\n loginParam.client_type = Wiz.Constant.LOGIN_PARAMS.CLIENT_TYPE;\r\n loginParam.api_version = Wiz.Constant.LOGIN_PARAMS.API_VERSION;\r\n// loginParam.user_id = info.substring(0, split_count);\r\n// loginParam.password = info.substring(split_count + 1);\r\n loginParam.cookie_str = info;\r\n return loginParam;\r\n}\r\n\r\n\r\nfunction portLoginAjax(loginParam, port, params, callback) {\r\n var loginError = function (err) {\r\n try {\r\n if (port) {\r\n port.postMessage(err);\r\n }\r\n } catch (error) {\r\n console.log('portLoginAjax callError Error: ' + error);\r\n }\r\n };\r\n var loginSuccess = function (responseJSON) {\r\n try {\r\n if (responseJSON.code != '200') {\r\n if 
(port) {\r\n port.postMessage({code: responseJSON.code});\r\n }\r\n //cookie登录失败(更改密码),清除cookie和localStorage\r\n Wiz.Cookie.removeCookies(Wiz.Constant.Default.COOKIE_URL, Wiz.Constant.Default.COOKIE_CERT);\r\n localStorage.clear();\r\n return;\r\n }\r\n //console.log(responseJSON);\r\n Wiz_Context.token = responseJSON.token;\r\n Wiz_Context.kbGuid = responseJSON.kb_guid;\r\n Wiz_Context.myWizEmail = responseJSON.mywiz_email;\r\n if (params) {\r\n saveToServer(params);\r\n }\r\n if (port) {\r\n port.postMessage({\r\n code: responseJSON.code,\r\n cookieStr: responseJSON.cookie_str\r\n });\r\n getTab(wizRequestPreview);\r\n if (callback) {\r\n callback(port);\r\n }\r\n }\r\n } catch (error) {\r\n console.log('portLoginAjax callSuccess Error: ' + error);\r\n }\r\n //只要登录成功就自动保持在线\r\n //服务端会一直保持该token对象在内存中\r\n //用户量大的时候,会导致服务端压力过大\r\n //TODO 以后token有效期延长时,可以使用该方法\r\n // if (!Wiz_Context.process) {\r\n // Wiz_Context.process = setInterval(refreshToken, Wiz_Context.refresh_token_delay_ms);\r\n // }\r\n };\r\n //缓存userid\r\n Wiz_Context.user_id = loginParam.user_id;\r\n var openapiUrl = getOpenApiUrl();\r\n $.ajax({\r\n type: 'POST',\r\n url: openapiUrl + '/login',\r\n data: loginParam,\r\n success : loginSuccess,\r\n error : loginError\r\n });\r\n // xmlrpc(Wiz_Context.xmlUrl, 'accounts.clientLogin', [loginParam], loginSuccess, loginError);\r\n}\r\n\r\nfunction wiz_requestTag(port) {\r\n var tagStr = getLocalTag();\r\n\r\n //必须校验token,否则会传入null进去,代码不健壮会造成死循环\r\n if (port) {\r\n //本地如果为获取到文件夹信息,则获取服务端的文件夹信息\r\n // console.log('wiz_requestTag tagStr: ' + tagStr);\r\n //目前不需要 刷新 功能\r\n //if (tagStr && port.name != 'requestTagForce') {\r\n if (tagStr) {\r\n port.postMessage(tagStr);\r\n } else {\r\n //已经登录的,直接调用获取目录信息\r\n if (Wiz_Context.token) {\r\n wiz_portRequestTagAjax(port);\r\n } else {\r\n if (Wiz_Context.cookies) {\r\n var loginParam = getloginParam(Wiz_Context.cookies);\r\n portLoginAjax(loginParam, port, null, wiz_portRequestTagAjax);\r\n }\r\n }\r\n }\r\n 
}\r\n}\r\n\r\nfunction wiz_requestCategory(port) {\r\n var nativeCategoryStr = getNativeCagetory(Wiz_Context.user_id),\r\n localCategoryStr = getLocalCategory(),\r\n categoryStr = (nativeCategoryStr) ? (nativeCategoryStr) : (localCategoryStr);\r\n\r\n //必须校验token,否则会传入null进去,代码不健壮会造成死循环\r\n if (port) {\r\n //本地如果为获取到文件夹信息,则获取服务端的文件夹信息\r\n // console.log('wiz_requestCategory categoryStr: ' + categoryStr);\r\n if (categoryStr && port.name != 'requestCategoryForce') {\r\n port.postMessage(categoryStr);\r\n } else {\r\n //已经登录的,直接调用获取目录信息\r\n if (Wiz_Context.token) {\r\n wiz_portRequestCategoryAjax(port);\r\n } else {\r\n if (Wiz_Context.cookies) {\r\n var loginParam = getloginParam(Wiz_Context.cookies);\r\n portLoginAjax(loginParam, port, null, wiz_portRequestCategoryAjax);\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\nfunction getLocalTag() {\r\n var localTagStr = localStorage[Wiz.Constant.Default.COOKIE_TAG],\r\n storedTimeStr = localStorage[Wiz.Constant.Default.COOKIE_TAG_TIME],\r\n storedTime = Date.parse(storedTimeStr),\r\n nowTime = new Date(),\r\n isOverTime = ((nowTime - storedTime) / 1000 >= Wiz.Constant.Default.TAG_EXPIRE_SEC);//是否过期\r\n if (isOverTime || !localTagStr || localTagStr.length < 1) {\r\n return \"\";\r\n } else {\r\n return localTagStr;\r\n }\r\n}\r\nfunction getLocalCategory() {\r\n var localCategoryStr = localStorage[Wiz.Constant.Default.COOKIE_CATEGORY],\r\n storedTimeStr = localStorage[Wiz.Constant.Default.COOKIE_CATEGORY_TIME],\r\n storedTime = Date.parse(storedTimeStr),\r\n nowTime = new Date(),\r\n isOverTime = ((nowTime - storedTime) / 1000 >= Wiz.Constant.Default.CATEGORY_EXPIRE_SEC);//是否过期\r\n if (isOverTime || !localCategoryStr || localCategoryStr.length < 1) {\r\n return \"\";\r\n } else {\r\n return localCategoryStr;\r\n }\r\n}\r\n\r\n//把服务端获取到的目录信息存放在localStorage中\r\n//如果存放到cookie中,则会造成cookie过大,无法通过nginx\r\n//保存时,需要记录当前保存的时间,下次取出的时候进行比较\r\n//如果超出默认的时间,则自动清空,重新获取\r\nfunction setLocalCategory(value) {\r\n var storedTime = (new 
Date()).toString();\r\n localStorage[Wiz.Constant.Default.COOKIE_CATEGORY] = value;\r\n localStorage[Wiz.Constant.Default.COOKIE_CATEGORY_TIME] = storedTime;\r\n}\r\nfunction setLocalTag(value) {\r\n var storedTime = (new Date()).toString();\r\n localStorage[Wiz.Constant.Default.COOKIE_TAG] = value;\r\n localStorage[Wiz.Constant.Default.COOKIE_CATEGORY_TIME] = storedTime;\r\n}\r\nfunction getNativeCagetory(userid) {\r\n var client = getNativeClient(),\r\n categoryStr = null;\r\n if (client) {\r\n try {\r\n categoryStr = client.GetAllFolders(userid);\r\n } catch (err) {\r\n }\r\n }\r\n return categoryStr;\r\n}\r\n\r\nfunction wiz_portRequestCategoryAjax(port) {\r\n var params = {\r\n client_type : Wiz.Constant.LOGIN_PARAMS.CLIENT_TYPE,\r\n api_version : Wiz.Constant.LOGIN_PARAMS.API_VERSION,\r\n token : Wiz_Context.token,\r\n kb_guid: Wiz_Context.kbGuid\r\n };\r\n var callbackSuccess = function (responseJSON) {\r\n try {\r\n // token失效, 重新登录\r\n if (responseJSON.code == 301) {\r\n wiz_background_autoLogin(null, function() {\r\n wiz_portRequestCategoryAjax(port);\r\n });\r\n }\r\n //console.log('wiz_portRequestCategoryAjax callbackSuccess');\r\n var categoryList = responseJSON.list;\r\n var categoryStr = getCategoryStrFromList(categoryList);\r\n setLocalCategory(categoryStr);\r\n if (port) {\r\n port.postMessage(categoryStr);\r\n }\r\n } catch (err) {\r\n console.log('wiz_portRequestCategoryAjax callbackSuccess Error: ' + err);\r\n }\r\n };\r\n var callbackError = function (response) {\r\n console.log('wiz_portRequestCategoryAjax callbackError');\r\n try {\r\n if (port) {\r\n //失败后,应该自动重新获取\r\n // port.postMessage(false); 这样会导致显示错误,目录显示为als\r\n }\r\n } catch (err) {\r\n console.log('wiz_portRequestCategoryAjax callError Error: ' + err);\r\n }\r\n };\r\n var openapiUrl = getOpenApiUrl();\r\n $.ajax({\r\n type : 'GET',\r\n url : openapiUrl + '/category/all',\r\n data : params,\r\n success : callbackSuccess,\r\n error : callbackError\r\n });\r\n}\r\nfunction 
wiz_portRequestTagAjax(port) {\r\n var params = {\r\n client_type : Wiz.Constant.LOGIN_PARAMS.CLIENT_TYPE,\r\n api_version : Wiz.Constant.LOGIN_PARAMS.API_VERSION,\r\n token : Wiz_Context.token,\r\n kb_guid: Wiz_Context.kbGuid\r\n };\r\n var callbackSuccess = function (responseJSON) {\r\n try {\r\n // token失效, 重新登录\r\n if (responseJSON.code == 301) {\r\n wiz_background_autoLogin(null, function() {\r\n wiz_portRequestTagAjax(port);\r\n });\r\n }\r\n //console.log('wiz_portRequestTagAjax callbackSuccess');\r\n var tagList = responseJSON.list;\r\n var tagStr = getTagStrFromList(tagList);\r\n setLocalTag(tagStr);\r\n if (port) {\r\n port.postMessage(tagStr);\r\n }\r\n } catch (err) {\r\n console.log('wiz_portRequestTagAjax callbackSuccess Error: ' + err);\r\n }\r\n };\r\n var callbackError = function (response) {\r\n console.log('wiz_portRequestTagAjax callbackError');\r\n try {\r\n if (port) {\r\n //失败后,应该自动重新获取\r\n // port.postMessage(false); 这样会导致显示错误,目录显示为als\r\n }\r\n } catch (err) {\r\n console.log('wiz_portRequestTagAjax callError Error: ' + err);\r\n }\r\n };\r\n var openapiUrl = getOpenApiUrl();\r\n $.ajax({\r\n type : 'GET',\r\n url : openapiUrl + '/tag/all',\r\n data : params,\r\n success : callbackSuccess,\r\n error : callbackError\r\n });\r\n}\r\n\r\nfunction getCategoryStrFromList(categoryList) {\r\n var length = categoryList.length;\r\n var categoryStr = '';\r\n for (var i=0; i<length; i++) {\r\n if (i === 0) {\r\n categoryStr = categoryList[i].location;\r\n } else {\r\n categoryStr = categoryStr + '*' + categoryList[i].location;\r\n }\r\n }\r\n return categoryStr;\r\n}\r\nfunction getTagStrFromList(tagList) {\r\n var length = tagList.length;\r\n var tagStr = '';\r\n for (var i=0; i<length; i++) {\r\n if (i === 0) {\r\n tagStr = tagList[i].tag_name;\r\n } else {\r\n tagStr += ',' + tagList[i].tag_name;\r\n }\r\n }\r\n return tagStr;\r\n}\r\n\r\n/**\r\n *获取当前页面的tab信息 \r\n */\r\nfunction getTab(callback, params) {\r\n chrome.tabs.query({ active: true, 
currentWindow: true }, function (tabs) {\r\n Wiz_Context.tab = tabs[0];\r\n callback(tabs[0], params);\r\n});\r\n}\r\n\r\nfunction hideContentVeil(tab) {\r\n Wiz.Browser.sendRequest(tab.id, {\r\n name : 'preview',\r\n op : 'clear'\r\n });\r\n}\r\n\r\nfunction bindKeyDownHandler(tab, direction) {\r\n Wiz.Browser.sendRequest(tab.id, {\r\n name : 'preview',\r\n op : 'keydown',\r\n opCmd : direction\r\n });\r\n}\r\n\r\nfunction wizPostDocument(docInfo) {\r\n //整理数据\r\n var regexp = /%20/g,\r\n title = docInfo.title,\r\n category = docInfo.category,\r\n comment = docInfo.comment,\r\n body = docInfo.params;\r\n \r\n if (comment && comment.trim() !== '') {\r\n body = comment + '<hr>' + body;\r\n }\r\n var docGuid = genGuid();\r\n var requestParam = {\r\n client_type : 'webclip_chrome',\r\n api_version : 3,\r\n document_title: title,\r\n document_category: category,\r\n document_body: body,\r\n document_guid: docGuid,\r\n token: Wiz_Context.token,\r\n kb_guid: Wiz_Context.kbGuid,\r\n temp: true\r\n };\r\n \r\n if (!category) {\r\n category = '/My Notes/';\r\n }\r\n // var requestData = 'title=' + encodeURIComponent(title).replace(regexp, '+') + '&token_guid=' + encodeURIComponent(Wiz_Context.token).replace(regexp, '+')\r\n // + '&body=' + encodeURIComponent(body).replace(regexp, '+') + '&category=' + encodeURIComponent(category).replace(regexp, '+');\r\n\r\n var createData = 'temp=true&api_version=3&client_type=webclip_chrome&token=' + getReplaceStr(Wiz_Context.token) +\r\n '&kb_guid=' + getReplaceStr(Wiz_Context.kbGuid) + '&document_guid=' + getReplaceStr(docGuid);\r\n\r\n\r\n var updateData = 'api_version=3&client_type=webclip_chrome&token=' + getReplaceStr(Wiz_Context.token) +\r\n '&kb_guid=' + getReplaceStr(Wiz_Context.kbGuid) + '&document_guid=' + getReplaceStr(docGuid) +\r\n '&document_body=' + getReplaceStr(body) + '&document_category=' + getReplaceStr(category) + '&document_title=' +\r\n title + '&document_url=' + getReplaceStr(docInfo.url);\r\n //发送给当前tab消息,显示剪辑结果 
\r\n Wiz.Browser.sendRequest(Wiz_Context.tab.id, {name: 'sync', info: docInfo});\r\n \r\n var callbackSuccess = function (response) {\r\n try {\r\n var json = response;\r\n //需要类型转换\r\n if (json.code != 200) {\r\n console.error('sendError : ' + json.message);\r\n docInfo.errorMsg = json.message;\r\n \r\n Wiz.Browser.sendRequest(Wiz_Context.tab.id, {name: 'error', info: docInfo});\r\n return;\r\n }\r\n //console.log('success : create Document');\r\n var openapiUrl = getOpenApiUrl();\r\n $.ajax({\r\n type : 'PUT',\r\n url : openapiUrl + '/document/data',\r\n data : updateData,\r\n success : function(data) {\r\n if (data.code != 200) {\r\n console.error('update error : ' + data.message);\r\n docInfo.errorMsg = data.message;\r\n Wiz.Browser.sendRequest(Wiz_Context.tab.id, {name: 'error', info: docInfo});\r\n return;\r\n }\r\n //console.log('success: update Document');\r\n Wiz.Browser.sendRequest(Wiz_Context.tab.id, {name: 'saved', info: docInfo});\r\n },\r\n error : callbackError\r\n });\r\n \r\n } catch (err) {\r\n console.log('wizPostDocument callbackSuccess Error: ' + err);\r\n }\r\n };\r\n \r\n var callbackError = function (response) {\r\n //TODO 使用闭包,自动重试3次,如果3次均失败,再提示用户\r\n //需要重构\r\n try {\r\n var errorJSON = response;\r\n docInfo.errorMsg = errorJSON.message;\r\n\r\n Wiz.Browser.sendRequest(Wiz_Context.tab.id, {name: 'error', info: docInfo});\r\n\r\n console.error('callback error : ' + errorJSON.message);\r\n } catch (err) {\r\n console.log('wizPostDocument callbackError Error: ' + err);\r\n }\r\n };\r\n //console.log('post document info');\r\n //console.log(requestParam);\r\n var openapiUrl = getOpenApiUrl();\r\n // 创建新的空文档\r\n $.ajax({\r\n type : 'POST',\r\n url : openapiUrl + '/document/data',\r\n data : createData,\r\n success : callbackSuccess,\r\n error : callbackError\r\n });\r\n}\r\n\r\nfunction getReplaceStr(str) {\r\n var regexp = /%20/g;\r\n return encodeURIComponent(str).replace(regexp, '+');\r\n}\r\n\r\nfunction genGuid() {\r\n /**\r\n * @return 
{string}\r\n */\r\n function S4() {\r\n return (((1 + Math.random()) * 0x10000) | 0).toString(16).substring(1);\r\n }\r\n\r\n return (S4() + S4() + \"-\" + S4() + \"-\" + S4() + \"-\" + S4() + \"-\" + S4() + S4() + S4());\r\n}\r\n\r\nfunction wizRequestPreview(tab, op) {\r\n if (!op) {\r\n //默认为文章\r\n op = 'article';\r\n }\r\n Wiz.Browser.sendRequest(tab.id, {\r\n name : 'preview',\r\n op : op\r\n }, sendTabRequestCallbackByBrowserAction);\r\n}\r\n\r\n/**\r\n *请求剪辑页面回调函数\r\n */\r\nfunction sendTabRequestCallbackByBrowserAction(option) {\r\n if (!option) {\r\n //当前页面无法剪辑\r\n chrome.extension.connect({\r\n 'name' : 'pagePreviewFailure'\r\n });\r\n }\r\n}\r\nfunction sendTabRequestCallbackByContextMenu(option) {\r\n //要等页面完全加载后,右键点击仍然无返回,提示无法剪辑\r\n if (!option && Wiz_Context.tab.status === 'complete') {\r\n var pageClipFailure = chrome.i18n.getMessage('pageClipFailure');\r\n alert(pageClipFailure);\r\n }\r\n}\r\n\r\n//var authenticationErrorMsg = chrome.i18n.getMessage('AuthenticationFailure');\r\n\r\nfunction isLogin() {\r\n return !(Wiz_Context.token === null || Wiz_Context.token === \"\");\r\n\r\n}\r\n\r\n/**\r\n * 获取本地客户端信息\r\n */\r\nfunction getNativeClient() {\r\n try {\r\n var nativeClient = document.getElementById('wiz-local-app'),\r\n version = nativeClient.Version;\r\n if (typeof version === 'undefined') {\r\n return null;\r\n }\r\n return nativeClient;\r\n } catch (err) {\r\n console.log('background.getNativeClient() Error : ' + err);\r\n return null;\r\n }\r\n}\r\n\r\nfunction hasNativeClient() {\r\n var nativeClient = getNativeClient();\r\n return (nativeClient !== null);\r\n}\r\n\r\nfunction saveToNative(info) {\r\n var wizClient = getNativeClient();\r\n try {\r\n wizClient.Execute(info.params);\r\n } catch (err) {\r\n console.warn('background saveToNative Error : ' + err);\r\n }\r\n //console.log('Saved To Native Client');\r\n}\r\n\r\nvar onButtonClickedCallback = function (notificationId, buttonIndex) {\r\n var index = 
notificationId.indexOf('_success');\r\n if (index != -1) {\r\n chrome.tabs.create({url: 'http://note.wiz.cn' + '?token='+ Wiz_Context.token +'&kb=' + Wiz_Context.kbGuid+ '&dc='+ notificationId.substring(0, index)}, function(){});\r\n }\r\n};\r\nchrome.notifications.onButtonClicked.addListener(onButtonClickedCallback);\r\n\r\nfunction saveToServer(info) {\r\n// console.log('info.title:' + info.title);\r\n var docGuid = genGuid();\r\n var coefficient;\r\n if (Wiz_Context.myWizEmail === '') {\r\n wiz_background_autoLogin(info);\r\n return;\r\n }\r\n chrome.storage.sync.get({\r\n saveImage2Server: true\r\n }, function(items) {\r\n var SaveResources = +(items.saveImage2Server || true);\r\n// console.log(SaveResources);\r\n info.params = \"myWiz='\"+ Wiz_Context.myWizEmail + \"' SaveResources='\" + SaveResources + \"' document_guid='\" + docGuid + \"' \" + info.params;\r\n var params = {\r\n type: 'clipper',\r\n data: info.params,\r\n custom_id: docGuid\r\n };\r\n\r\n //更具剪辑内容大小来设置等待时间\r\n coefficient = info.params.length / 102400;\r\n\r\n //显示正在剪辑\r\n chrome.notifications.create(docGuid + '_clipping', {\r\n type: \"basic\",\r\n title: info.title,\r\n message: chrome.i18n.getMessage('clipResult_clipping'),\r\n iconUrl: \"images/scissors.png\"\r\n }, function(notificationId) {});\r\n $.ajax({\r\n type : 'POST',\r\n url : Wiz.Constant.Default.NOTE_URL + '/api/gather/add',\r\n data : params,\r\n success : callbackSuccess,\r\n error : callbackError\r\n });\r\n });\r\n\r\n\r\n function callbackSuccess(data, textStatus) {\r\n\r\n if (data.return_code == 200) {\r\n Wiz_Context.queryTime = 0;\r\n //发送成功,开始轮询服务器查询状态\r\n setTimeout(function() {\r\n querySaveState(data.id, data.custom_id, info.title);\r\n }, Wiz_Context.queryTimeArray[Wiz_Context.queryTime] * 1000 * coefficient);\r\n } else {\r\n // 请求失败\r\n setTimeout(function() {\r\n chrome.notifications.clear(docGuid + '_clipping', function(){});\r\n chrome.notifications.create(docGuid+'_error', {\r\n type: \"basic\",\r\n title: 
info.title,\r\n message:chrome.i18n.getMessage('clipResult_error'),\r\n iconUrl: \"images/warning.png\"\r\n }, function(){});\r\n }, 5000);\r\n }\r\n }\r\n function callbackError(XMLHttpRequest, textStatus, errorThrown) {\r\n console.log(errorThrown);\r\n }\r\n}\r\n\r\nfunction querySaveState(id, docGuid, title) {\r\n function callbackSuccess(data) {\r\n// console.log(data);\r\n if(data.return_code == 200) {\r\n var message;\r\n var status = +data.status;\r\n if ( status >= 0) {\r\n // 剪辑完成\r\n// console.log('docGuid:' + docGuid + ' title:' + title + ' message' + chrome.i18n.getMessage('clipResult_success'));\r\n //清除正在剪辑任务\r\n if (status === 0 ) {\r\n message = chrome.i18n.getMessage('clipResult_success');\r\n } else if(status == 101) {\r\n message = chrome.i18n.getMessage('save_image_to_server_fail');\r\n }\r\n chrome.notifications.clear(docGuid + '_clipping', function(){});\r\n chrome.notifications.create(docGuid+'_success', {\r\n type: \"basic\",\r\n title: title,\r\n message: message,\r\n iconUrl: \"images/check.png\",\r\n buttons: [{ title: chrome.i18n.getMessage('clipResult_webclient'), iconUrl: 'images/wiz-clipper-16.png'}]\r\n }, function(notificationId) {});\r\n } else if (data.status == 'doing' || data.status == 'new') {\r\n // 任务正在队列中\r\n Wiz_Context.queryTime++;\r\n setTimeout(function() {\r\n querySaveState(id, docGuid, title);\r\n }, Wiz_Context.queryTimeArray[Wiz_Context.queryTime] * 1000);\r\n } else {\r\n // 剪辑失败\r\n chrome.notifications.clear(docGuid + '_clipping', function(){});\r\n chrome.notifications.create(Wiz_Context.kbGuid+'_error', {\r\n type: \"basic\",\r\n title: title,\r\n message:chrome.i18n.getMessage('clipResult_error'),\r\n iconUrl: \"images/warning.png\"\r\n }, function(notificationId) {});\r\n }\r\n } else {\r\n console.log('querySaveState error');\r\n }\r\n }\r\n\r\n function callbackError() {\r\n console.log('querySaveState server error');\r\n }\r\n\r\n if (Wiz_Context.queryTime >= Wiz_Context.queryTimeArray.length) {\r\n 
return false;\r\n }\r\n var params = {\r\n id: id,\r\n custom_id: docGuid\r\n };\r\n $.ajax({\r\n type : 'GET',\r\n url : Wiz.Constant.Default.NOTE_URL + '/api/gather/status',\r\n data : params,\r\n success : callbackSuccess,\r\n error : callbackError\r\n });\r\n}\r\n\r\nfunction wizSaveNativeContextMenuClick(info, tab) {\r\n Wiz_Context.tab = tab;\r\n// var wizClient = getNativeClient();\r\n Wiz.Browser.sendRequest(tab.id, {\r\n name: 'preview',\r\n op: 'submit',\r\n info : { url: tab.url },\r\n type: 'native'\r\n }, sendTabRequestCallbackByContextMenu);\r\n}\r\n\r\nfunction wizSavePageContextMenuClick(info, tab) {\r\n var type = 'fullPage';\r\n Wiz_Context.tab = tab;\r\n\r\n //判断是否用户手动选择\r\n if (info.selectionText) {\r\n type = 'selection';\r\n }\r\n if (isLogin()) {\r\n info.title = tab.title;\r\n Wiz.Browser.sendRequest(tab.id, {\r\n name : 'preview',\r\n op : 'submit',\r\n info : info,\r\n type : type\r\n }, sendTabRequestCallbackByContextMenu);\r\n } else { \r\n var notification = Notification.createNotification(\r\n 'images/wiz-clipper-16.png',\r\n chrome.i18n.getMessage('extName'),\r\n chrome.i18n.getMessage(\"note_login\")\r\n );\r\n notification.show();\r\n setTimeout(function(){\r\n notification.cancel();\r\n }, 3000);\r\n }\r\n}\r\n\r\nfunction wiz_initContextMenus() {\r\n var clipPageContext = chrome.i18n.getMessage('contextMenus_clipPage'),\r\n allowableUrls = ['http://*/*', 'https://*/*'];\r\n// var hasNative = getNativeClient();\r\n \r\n if (hasNativeClient()) {\r\n chrome.contextMenus.create({\r\n 'title': clipPageContext,\r\n 'contexts' : ['all'],\r\n 'documentUrlPatterns' : allowableUrls,\r\n 'onclick': wizSaveNativeContextMenuClick\r\n });\r\n } else {\r\n chrome.contextMenus.create({\r\n 'title' : clipPageContext,\r\n 'contexts' : ['all'],\r\n 'documentUrlPatterns' : allowableUrls,\r\n 'onclick' : wizSavePageContextMenuClick\r\n });\r\n }\r\n}\r\n\r\nfunction wiz_background_autoLogin(params, callback) {\r\n 
Wiz.Cookie.getCookies(Wiz.Constant.Default.COOKIE_URL, Wiz.Constant.Default.COOKIE_CERT, function(cookie, params) {\r\n wiz_loginByCookies(cookie, params, callback);\r\n }, true, params);\r\n}\r\n\r\nfunction wiz_background_getCookie() {\r\n var callback = function (cookies) {\r\n Wiz_Context.cookies = cookies;\r\n };\r\n Wiz.Cookie.getCookies(Wiz.Constant.Default.COOKIE_URL, Wiz.Constant.Default.COOKIE_CERT, callback, true);\r\n}\r\n// 从api.wiz.cn获取openapi地址\r\nfunction getOpenApiUrl() {\r\n if (!Wiz_Context.openAPIUrl || Wiz_Context.openAPIUrl.length < 1) {\r\n $.ajax({\r\n url: Wiz.Constant.Default.API_URL,\r\n type: 'GET',\r\n async: false,\r\n success: function(data) {\r\n Wiz_Context.openAPIUrl = data;\r\n },\r\n error: function(error) {\r\n console.log(\"getOpenApiUrl() error:\" + error);\r\n }\r\n });\r\n }\r\n return Wiz_Context.openAPIUrl;\r\n}\r\n\r\nchrome.extension.onConnect.addListener(onConnectListener);\r\nwiz_initContextMenus();\r\n//自动登录\r\nwiz_background_autoLogin();\r\nwiz_background_getCookie();\r\n// 初始化的时候获取一次\r\ngetOpenApiUrl();" }, { "alpha_fraction": 0.6259450912475586, "alphanum_fraction": 0.6356943845748901, "avg_line_length": 27.719999313354492, "blob_id": "72c0470985a7c6a343dc8250e4e7de0ba4dd5503", "content_id": "8fc2a397bb3ef10d89b0e1ad958455faaeda50ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5158, "license_type": "no_license", "max_line_length": 99, "num_lines": 175, "path": "/uplooking_Python/code/jenkins.bak/index.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#coding:utf-8\nfrom flask import Flask, render_template, redirect, url_for, flash, request\nimport json\nfrom form import LoginForm, RegistrationForm\nimport models\nfrom flask_login import LoginManager, login_required, login_user,logout_user\nfrom flask_sqlalchemy import SQLAlchemy\nfrom funtion import main\nimport MySQLdb\nimport MySQLdb.cursors\nfrom flask_cors 
import *\n\napp = Flask(__name__)\napp = Flask(__name__)\napp.secret_key = 'dffdffdsdsf'\napp.debug = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./db/user.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['SQLALCHEMY_ECHO'] = True \ndb = SQLAlchemy(app)\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = \"login\" \nlogin_manager.session_protection = \"strong\"\nlogin_manager.login_message = \"Please login to access this page.\"\nlogin_manager.login_message_category = \"info\"\n\n\nCORS(app, supports_credentials=True)\n@login_manager.user_loader\ndef load_user(user_id):\n from models import User\n return User.query.get(int(user_id))\n\n\n\[email protected]('/sql_agreen',methods=['GET','POST'])\ndef sql_agreen():\n key_1 = request.args.get('id')\n# key_1 = int(key)\n key_1 = key_1.encode('utf-8')\n conn = main.accept()\n cursor = conn.cursor()\n cursor.execute('update jenkins.a set status=1 where id=%s'%key_1)\n conn.commit()\n return key_1\[email protected]('/sql_refuse',methods=['GET','POST'])\ndef sql_refuse():\n key_1 = request.args.get('id')\n key_1 = key_1.encode('utf-8')\n conn = main.accept()\n cursor = conn.cursor()\n cursor.execute('update jenkins.a set status=2 where id=%s'%key_1)\n conn.commit()\n return key_1\[email protected]('/sql_remove',methods=['GET','POST'])\ndef sql_remove():\n key_1 = request.args.get('id')\n key_1 = key_1.encode('utf-8')\n# key_1 = int(key_1)\n conn = main.accept()\n cursor = conn.cursor()\n cursor.execute('delete from jenkins.a where id=%s'%key_1)\n conn.commit()\n return key_1\[email protected]('/sql_build',methods=['GET','POST'])\ndef sql_build():\n key_1 = request.args.get('id')\n key_1 = key_1.encode('utf-8')\n conn = main.accept()\n cursor = conn.cursor()\n cursor.execute('update jenkins.a set status=3 where id=%s'%key_1)\n conn.commit()\n return key_1\[email protected]('/get-sql')\ndef get_sql():\n conn = main.accept()\n cursor = conn.cursor()\n data = cursor.execute('select 
id,project,program,status from a ORDER BY id desc')\n result = cursor.fetchall()\n return json.dumps(result)\n\n\[email protected]('/build-server',methods=['GET','POST'])\ndef build_server():\n name = request.args.get('name')\n name = name.encode('utf-8')\n main.build_server(name)\n return '<h1>服务器构建成功 %s</h1>'%name\n\[email protected]('/bulid-test',methods=['GET','POST'])\ndef bulid_test():\n name = request.args.get('name')\n name = name.encode('utf-8')\n main.server_bulid(name)\n return '<h1>构建成功 %s</h1>'%name\n\[email protected]('/js')\ndef js():\n with open('js/jquery2.0.js') as f:\n f = f.read()\n return f\[email protected]('/')\ndef index():\n with open('html/Submit.html') as f:\n f = f.read()\n return f\[email protected]('/build')\n@login_required\ndef build():\n with open('html/Build.html') as b:\n b = b.read()\n return b\[email protected]('/agreen')\n@login_required\ndef agreen():\n with open('html/Agree.html') as c:\n c = c.read()\n return c\[email protected]('/see')\ndef see():\n with open('html/see.html') as c:\n c = c.read()\n return c\n\n\[email protected]('/css')\ndef css():\n with open('js/css.css') as c:\n c = c.read()\n return c\[email protected]('/tijiaoqueren',methods=['GET','POST'])\ndef tiaojianqueren():\n name = request.args.get('name')\n age = request.args.get('age')\n name = name.encode('utf-8')\n age = age.encode('utf-8')\n conn = main.accept()\n cursor = conn.cursor()\n cursor.execute('INSERT INTO jenkins.a (`project`, `program`) VALUES(\\'%s\\',\\'%s\\')'%(name,age))\n conn.commit()\n return '<h1>提交成功</h1>'\[email protected]('/login/', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n # if form.validate_on_submit(): # 需要验证才能用,如果后台没有写验证这里不能这样写\n if request.method == 'POST':\n user = models.User.query.filter_by(username=form.username.data).first()\n if user is not None and user.verify_password(form.password.data):\n login_user(user,form.remember_me.data)\n return redirect(url_for('index'))\n return '用户名或密码错误!'\n return 
render_template('login.html', form=form)\n\n\[email protected]('/reg/', methods=['GET', 'POST'])\ndef reg():\n form = RegistrationForm()\n if request.method == 'POST':\n form = RegistrationForm(request.form)\n user = models.User(form.username.data, form.password.data)\n db.session.add(user)\n db.session.commit()\n return '注册成功!'\n return render_template('reg.html', form=form)\n\n\[email protected]('/logout/')\n@login_required\ndef logout():\n logout_user() # 重新调用删除重新设置的用户\n return redirect(url_for('index'))\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=80,debug=True)\n" }, { "alpha_fraction": 0.7941176295280457, "alphanum_fraction": 0.7941176295280457, "avg_line_length": 7.625, "blob_id": "63abecfb13e633654391b542823555e90ef5f6e4", "content_id": "2c293d9b831a18ab3ced83ae42577f51da435e5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 86, "license_type": "no_license", "max_line_length": 16, "num_lines": 8, "path": "/uplooking_Python/code/lesson10-deploy/部署系统.md", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "项目地址\n分支\n\n获取project的id\nssh_url_to_repo\nhttp_url_to_repo\n\ngit checkout" }, { "alpha_fraction": 0.8359375, "alphanum_fraction": 0.8359375, "avg_line_length": 20.33333396911621, "blob_id": "fcd93a67c3e251702c185cdf3a427b28dc200572", "content_id": "72a095da712c6edd988cf49e7f765083baede954", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 128, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/Django/mysite/blog/admin.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import BlogArticles\n\n#class BlogArticlesAdmin\n\nadmin.site.register(BlogArticles)\n" }, { "alpha_fraction": 0.7629629373550415, "alphanum_fraction": 0.7629629373550415, "avg_line_length": 44.33333206176758, "blob_id": 
"918b8bc58c11ecaddb3f6680b2238953e1afae1e", "content_id": "7401b882d75b6bbe30796020ddd7505b6a134023", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 94, "num_lines": 3, "path": "/uplooking_Python/code/lesson07-flask/ops/hosts/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nblue_print = Blueprint('hosts', __name__, template_folder=\"templates\", static_folder=\"static\")\nimport views" }, { "alpha_fraction": 0.3818797767162323, "alphanum_fraction": 0.44115155935287476, "avg_line_length": 31.80555534362793, "blob_id": "31a5957d385175c171969310e0047fa299aa7887", "content_id": "81965017d20bd5221d793a014dbff1b834d8c1bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1197, "license_type": "no_license", "max_line_length": 72, "num_lines": 36, "path": "/uplooking_Python/code/lesson05/myapps/monitor/libs/parser.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n\n#这是里正则表达式\n\n\nregister_reg = [\n\n r'Updated\\sDate:\\s(\\d{4}-\\d{2}-\\d{2})',\n r'Registration Time:\\s(\\d{4}-\\d{2}-\\d{2})',\n r'Creation Date:\\s(\\d{4}\\-\\d{2}\\-\\d{2})',\n r'Record created on\\s(\\d{4}\\-\\d{2}\\-\\d{2})',\n r'Creation Date:\\s(\\d{4}\\-\\d{2}\\-\\d{2})',\n r'Creation Date:\\s{2}(\\d{2}\\-\\S{3}\\-\\d{4})',\n r'Domain Name Commencement Date:\\s(\\d{2}-\\d{2}\\-\\d{4})',\n r'created:\\s\\s{4}(\\d{2}\\/\\d{2}\\/\\d{4})',\n ]\n\n\nexpire_reg = [\n\n# r'Expiration\\sTime:\\s(\\d{4}-\\d{2}-\\d{2}\\s\\d{2}:\\d{2}:\\d{2})',\n# r'Expiration\\sTime:\\s(\\d{4}-\\d{2}-\\d{2})',\n# r'Registry\\sExpiry\\sDate:\\s(\\d{4}-\\d{2}-\\d{2})',\n# r'Registry\\sExpiry\\sDate:\\s(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z)',\n r'Expiration Time:\\s(\\d{4}-\\d{2}-\\d{2})',\n r'Registry Expiry Date:\\s(\\d{4}\\-\\d{2}\\-\\d{2})',\n r'Record expires 
on\\s(\\d{4}\\-\\d{2}\\-\\d{2})',\n r'Registry Expiry Date:\\s(\\d{4}\\-\\d{2}\\-\\d{2})',\n r'\\s\\s\\sRegistry\\sExpiry\\sDate:\\s(\\d{4}-\\d{2}-\\d{2})',\n r'Expiration Date:\\s{2}(\\d{2}\\-\\S{3}\\-\\d{4})',\n r'Expiry Date:\\s(\\d{2}-\\d{2}\\-\\d{4})',\n r'Expiry Date:\\s(\\d{2}\\/\\d{2}\\/\\d{4})',\n\n\n\n ]\n" }, { "alpha_fraction": 0.6495726704597473, "alphanum_fraction": 0.7008547186851501, "avg_line_length": 28.25, "blob_id": "d4f844b826894d5d5b08ba1850a5d51e0236e495", "content_id": "2051a04ab34ca0368026fedbe69c3f20ae30b579", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 38, "num_lines": 4, "path": "/uplooking_Python/code/lesson09-flask/ops/work.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "1. 添加主机:添加主机后,显示添加后的主机\n2. 绑定主机:绑定到对应产品线\n3. 通过产品线获取该产品线下绑定的所有主机,显示内容[主机+所属产品线]\n{\"hostname\": [\"pdl1\", \"pdl2\", \"pdl3\"]}\n" }, { "alpha_fraction": 0.6487603187561035, "alphanum_fraction": 0.6590909361839294, "avg_line_length": 25.88888931274414, "blob_id": "93aa31f8187c454f2e132a5c3e51fbab94018bac", "content_id": "08e7647feb219e37009a837ae3784556fb00da08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "no_license", "max_line_length": 66, "num_lines": 18, "path": "/uplooking_Python/code/lesson05/myapps/monitor/utils/expire_days.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n\nimport datetime\nfrom expire_time import expire_time\n#from expire_register import expire_register\n\n#技术域名到期天数的函数\n\ndef expire_days(expire_date):\n now = datetime.datetime.now()\n today = now.strftime('%Y-%m-%d')\n time1 = datetime.datetime.strptime(today,'%Y-%m-%d')\n if expire_date == None:\n return \"(没有查询到过期时间)\"\n else:\n time2 = datetime.datetime.strptime(expire_date,'%Y-%m-%d')\n totaldays = (time2-time1).days\n return 
totaldays\n" }, { "alpha_fraction": 0.5902140736579895, "alphanum_fraction": 0.60550457239151, "avg_line_length": 25.786884307861328, "blob_id": "b9d048f369594f668625e68595a061566c551158", "content_id": "a122b486dd4299f239524dc5871cf204e75553ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1695, "license_type": "no_license", "max_line_length": 91, "num_lines": 61, "path": "/uplooking_Python/code/lesson05/monitor_test_v2.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n#监控域名\n\nimport commands\nimport re\nimport datetime\n\ndef get_register_time():\n\n reg = r'Registration\\sTime:\\s(\\d{4}-\\d{2}-\\d{2})'\n status,register = commands.getstatusoutput('whois miliao.com')\n if status == 0:\n register = re.search(reg,register).group(1)\n return register\n\n\ndef get_expire_time():\n\n reg = r'Registry\\sExpiry\\sDate:\\s(\\d{4}-\\d{2}-\\d{2})'\n #reg = r'Expiration\\sTime:\\s(\\d{4}-\\d{2}-\\d{2})'\n #status,output = commands.getstatusoutput('whois gaiay.net.cn')\n status,output = commands.getstatusoutput('whois miliao.com')\n print status\n if status == 0 or status == 256:\n if output == \"No whois server is known for this kind of object.\":\n return None\n else:\n expire = re.search(reg,output).group(1)\n print expire\n return expire\n else:\n return None\n\ndef get_update_time():\n\n reg = r'Updated\\sDate:\\s\\d{4}-\\d{2}-\\d{2}'\n status,update = commands.getstatusoutput('whois baidu.com')\n if status == 0 :\n update = re.search(reg,update)\n return update.group()\n else:\n print \"No match domain\"\n\ndef get_days():\n\n domain_name='miliao.com'\n expire_time = get_expire_time()\n #print expire_time\n register = get_register_time()\n now = datetime.datetime.now()\n today = now.strftime('%Y-%m-%d')\n d2 = datetime.datetime.strptime(expire_time,'%Y-%m-%d')\n d1 = datetime.datetime.strptime(today,'%Y-%m-%d')\n delta = (d2-d1).days\n print 
\"域名:%s,注册时间:%s,到期时间:%s,截止目前域名有效天数还剩%s天\"% (domain_name,register,expire_time,delta)\n\n\n\n\nif __name__ == '__main__':\n get_days()\n\n" }, { "alpha_fraction": 0.5542327761650085, "alphanum_fraction": 0.5661375522613525, "avg_line_length": 30.5, "blob_id": "22fc9afcc119f0c5f13885886beef74292e4b2bb", "content_id": "3f5693a1a231065b5e6616c391a30e725f6b558b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 756, "license_type": "no_license", "max_line_length": 91, "num_lines": 24, "path": "/uplooking_Python/code/lesson09-flask/ops/hosts/models/treeModle.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom libs.db import db\n\nclass TreeHandle(object):\n TABLE = \"map_tree\"\n COLUMN = \"id,name,cname,node_type,pid\"\n\n @classmethod\n def query_node_id(cls, node_type, name):\n\n sql = \"select id from %s where name=%%s and node_type=%%s\" % (cls.TABLE)\n node_id = db.query_all(sql, name, node_type)\n if len(node_id) > 0:\n return node_id[0][0]\n else:\n return 0\n @classmethod\n def queryTagIdByPidAndNameAndType(cls, node_type, name, pid):\n sql = \"select id from %s where name=%%s and node_type=%%s and pid=%%s\" %(cls.TABLE)\n node_id = db.query_all(sql, name, node_type, pid)\n if len(node_id) >0:\n return node_id[0][0]\n else:\n return 0\n" }, { "alpha_fraction": 0.594936728477478, "alphanum_fraction": 0.6455696225166321, "avg_line_length": 9.285714149475098, "blob_id": "1bc297480c2831d5a4c8c615d083054032d0d791", "content_id": "2db3106c9a0b3d4c900998aa60485e4f926c7e1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 26, "num_lines": 14, "path": "/0901/ssV2.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 2017年9月13日\r\n\r\n@author: Ops\r\n\r\nsocket server version2\r\n该脚本的目标:在浏览器读取tomcat项目的配置文件\r\n\r\n伪代码:\r\nif 
os.path.isdir(dir):\r\n os.chdir(dir)\r\n\r\n\r\n'''\r\n" }, { "alpha_fraction": 0.3846254050731659, "alphanum_fraction": 0.38508063554763794, "avg_line_length": 42.80911636352539, "blob_id": "95d35f121ff8be2563933a068a9f8a9961d5af5f", "content_id": "e624e177580dbd4b5fac9d84fc67c0d976c272c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 15834, "license_type": "no_license", "max_line_length": 294, "num_lines": 351, "path": "/uplooking_Python/code/flask_myself/static/js/index.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "$(function () {\n\n var monitor = {\n\n //菜单切换\n menuClick:function(){\n \n },\n\n\n\n\n //$.ajax的方式\n showInfo: function () {\n $.ajax({\n type: \"GET\",\n url: '/getUserInfo',\n // data: 'json',\n success: function (json) {\n var str = '';\n str += '<thead><tr><th><input type=\"checkbox\"></th><th>编号</th><th>用户名</th><th>职位</th><th>IP地址</th><th>备注</th><th>操作</th></tr></thead><tbody>';\n // json = JSON.parse(json);\n $.each(json, function (index, valued) {\n\n var userid = valued.id;\n var username = valued.username;\n var position = valued.position;\n var ipaddr = valued.position;\n var remark = valued.remark\n\n str += '<tr class=\"js-items-data\" data-id=\"' + userid + '\" data-username=\"' + valued.username + '\" data-position=\"' + valued.position + '\" data-addr=\"' + valued.ipaddr + '\">\\\n <td><input type=\"checkbox\"></td>\\\n <td>'+ userid + ' </td>\\\n <td>'+ username + ' </td>\\\n <td>'+ position + ' </td>\\\n <td>'+ ipaddr + ' </td>\\\n <td class=\"js-items-remark\">'+ remark + ' </td>\\\n <td>\\\n <button class=\"btn btn-xs btn-info\" data-for=\"modify\" data-target=\"#exampleModal\" id=\"modify\" data-toggle=\"modal\" userid=\"'+ userid +'\" username=\" '+ username +'\" position=\"'+ position +'\" ipaddr=\"'+ ipaddr+'\" remark=\"'+ remark +'\">编辑</button>\\\n <button class=\"btn btn-xs btn-danger\" data-for=\"delete\" 
data-target=\"#exampleModal\" id=\"delete\" data-toggle=\"modal\">删除</button>\\\n </td>\\\n </tr>';\n })\n str += '</tbody>';\n\n $('#bodyList').html(str)\n\n },\n dataType:'json'\n\n }\n\n\n );\n },\n // end showInfo function\n\n // $.get方法,表格模板写在function内部\n getTableData: function () {\n var _this = this;\n // data = {};\n $.get('/getUserInfo', function (data) {\n\n console.log('getTableData',data)\n if (data) {\n var str = '';\n str += '<thead><tr><th><input type=\"checkbox\"></th><th>编号</th><th>用户名</th><th>职位</th><th>IP地址</th><th>备注</th><th>操作</th></tr></thead><tbody>';\n $.each(data, function (index, valued) {\n\n var userid = valued.id;\n var username = valued.username;\n var position = valued.position;\n var ipaddr = valued.position;\n var remark = valued.remark\n\n str += '<tr class=\"js-items-data\" data-id=\"' + userid + '\" data-username=\"' + valued.username + '\" data-position=\"' + valued.position + '\" data-addr=\"' + valued.ipaddr + '\">\\\n <td><input type=\"checkbox\"></td>\\\n <td>'+ userid + ' </td>\\\n <td>'+ username + ' </td>\\\n <td>'+ position + ' </td>\\\n <td>'+ ipaddr + ' </td>\\\n <td class=\"js-items-remark\">'+ remark + ' </td>\\\n <td>\\\n <button class=\"btn btn-xs btn-info\" data-for=\"edit\" data-target=\"#exampleModal\" id=\"modify\" data-toggle=\"modal\">编辑</button>\\\n <button class=\"btn btn-xs btn-danger\" data-for=\"delete\" data-target=\"#exampleModal\" id=\"delete\" data-toggle=\"modal\">删除</button>\\\n </td>\\\n </tr>';\n })\n str += '</tbody>';\n\n $('#bodyList').html(str)\n\n }\n }, 'json')\n\n },\n //$.get方法,调用表格魔板\n getTableData2: function () {\n var _this = this;\n var data = {};\n\n $.get('/getUserInfo', function (response,status) {\n console.log(status);\n if(status == 'success') {\n\n console.log(response[0]);\n _this.tableTpl(response);\n\n }\n }, 'json')\n },\n\n //显示数据的模板表格\n\n tableTpl: function (data) {\n var _this = this;\n //console.log(data);\n var str = '';\n str += '<thead><tr><th><input 
type=\"checkbox\"></th><th>编号</th><th>用户名</th><th>职位</th><th>IP地址</th><th>备注</th><th>操作</th></tr></thead><tbody>';\n // data = JSON.parse(data);\n $.each(data, function (index, valued) {\n var userid = valued.id;\n var username = valued.username;\n var position = valued.position;\n var ipaddr = valued.ipaddr;\n var remark = valued.remark\n\n str += '<tr class=\"js-items-data\" data-id=\"' + userid + '\" data-username=\"' + valued.username + '\" data-position=\"' + valued.position + '\" data-addr=\"' + valued.ipaddr + '\">\\\n <td><input type=\"checkbox\"></td>\\\n <td>'+ userid + ' </td>\\\n <td>'+ username + ' </td>\\\n <td>'+ position + ' </td>\\\n <td>'+ ipaddr + ' </td>\\\n <td class=\"js-items-remark\">'+ remark + ' </td>\\\n <td>\\\n <button class=\"btn btn-xs btn-info\" data-for=\"modify\" data-target=\"#exampleModal\" data-toggle=\"modal\" userid=\"'+ userid +'\" username=\" '+ username +'\" position=\"'+ position +'\" ipaddr=\"'+ ipaddr+'\" remark=\"'+ remark +'\">编辑</button>\\\n <button class=\"btn btn-xs btn-danger\" data-for=\"delete\" data-target=\"#exampleModal\" id=\"delete\" data-toggle=\"modal\" userid=\"' + userid+ '\">删除</button>\\\n </td>\\\n </tr>';\n })\n str += '</tbody>';\n\n $('#bodyList').html(str)\n\n },\n\n //模态框展示判断\n modalShowJudge:function() {\n var _this = this;\n\n $('#exampleModal').on('show.bs.modal',function(event){\n var button = $(event.relatedTarget),\n modal = $(this),\n actionType = button.data('for');\n if(actionType == 'add') {\n _this.addFun(modal);\n }else if(actionType == 'delete'){\n _this.deleteUserInfo(modal,button);\n }else if(actionType == 'modify'){\n // console.log('modify');\n _this.modifyFun(modal,button);\n }\n\n })\n\n },\n\n //添加信息的函数\n addFun:function(modal){\n\n addTpl = '';\n addTpl += '<div class=\"form-group\">\\\n <label class=\"control-label\">编号:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"num\" id=\"num\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">用户名:</label>\\\n 
<input type=\"text\" class=\"form-control\" name=\"username\" id=\"username\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">职位:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"position\" id=\"position\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">IP地址:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"ipaddr\" id=\"ipaddr\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">备注:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"remark\" id=\"remark\">\\\n </div>';\n\n $('#exampleModal').find('form').html(addTpl); //展示add的模态框\n\n var submitbtn = modal.find('#submitbtn');\n submitbtn.off('click').on('click',function(e){\n var params = {};\n\n params.id = $('#num').val();\n params.username = $('#username').val().trim();\n params.position = $('#position').val();\n params.ipaddr = $('#ipaddr').val();\n params.remark = $('#remark').val();\n\n $.post('/addUserInfo',params,function(res){\n location.reload();\n modal.modal('hide');\n })\n })\n\n\n\n },\n\n // 搜索\n searchFun:function(){\n var _this = this;\n\n //第一步,根据搜索关键字先去请求\n //1.获取关键字,定义一个点击事件\n //2.点击搜索按钮,请求接口\n\n //第二步,把请求到的结果展示出来\n // console.log('searchFun')\n\n \n\n $('#search_btn').on('click',function(e){\n \n var params = {};\n params.username = $('#search_input').val().trim()\n \n \n // console.log(params.username)\n $.get('/search',params,function(response){\n \n \n // console.log(response)\n var str = '';\n str += '<thead><tr><th><input type=\"checkbox\"></th><th>编号</th><th>用户名</th><th>职位</th><th>IP地址</th><th>备注</th><th>操作</th></tr></thead><tbody>';\n // data = JSON.parse(data);\n $.each(response, function (index, valued) {\n var userid = valued.id;\n var username = valued.username;\n var position = valued.position;\n var ipaddr = valued.ipaddr;\n var remark = valued.remark\n\n str += '<tr class=\"js-items-data\" data-id=\"' + userid + '\" data-username=\"' + valued.username + '\" data-position=\"' + valued.position + '\" data-addr=\"' + 
valued.ipaddr + '\">\\\n <td><input type=\"checkbox\"></td>\\\n <td>'+ userid + ' </td>\\\n <td>'+ username + ' </td>\\\n <td>'+ position + ' </td>\\\n <td>'+ ipaddr + ' </td>\\\n <td class=\"js-items-remark\">'+ remark + ' </td>\\\n <td>\\\n <button class=\"btn btn-xs btn-info\" data-for=\"modify\" data-target=\"#exampleModal\" data-toggle=\"modal\" userid=\"'+ userid +'\" username=\" '+ username +'\" position=\"'+ position +'\" ipaddr=\"'+ ipaddr+'\" remark=\"'+ remark +'\">编辑</button>\\\n <button class=\"btn btn-xs btn-danger\" data-for=\"delete\" data-target=\"#exampleModal\" id=\"delete\" data-toggle=\"modal\" userid=\"' + userid+ '\">删除</button>\\\n </td>\\\n </tr>';\n })\n str += '</tbody>';\n \n $('#bodyList').html(str)\n \n\n },'json')\n })\n \n \n\n },\n \n modifyFun:function(modal,button){\n var _this = this;\n\n modal.find('#exampleModalLabel').text('修改信息');\n var addTpl = '';\n var infos = button[0].attributes;\n\n addTpl += '<div class=\"form-group\">\\\n <label class=\"control-label\">编号:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"num\" id=\"num\" readOnly=\"true\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">用户名:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"username\" id=\"username\" value=\"'+ infos.username.nodeValue +'\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">职位:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"position\" id=\"position\" value=\"'+ infos.position.nodeValue +'\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">IP地址:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"ipaddr\" id=\"ipaddr\" value=\"'+ infos.ipaddr.nodeValue +'\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">备注:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"remark\" id=\"remark\" value=\"'+ infos.remark.nodeValue +'\">\\\n </div>';\n\n $('#exampleModal').find('form').html(addTpl);\n \n var submitbtn = modal.find('#submitbtn');\n\n 
submitbtn.off('click').on('click',function(e){\n var params = {};\n params.id = infos.userid.nodeValue;\n params.username = $('#username').val().trim();\n params.position = $('#position').val().trim();\n params.ipaddr = $('#ipaddr').val().trim();\n params.remark = $('#remark').val().trim();\n\n\n $.post('/edit_update',params,function(res){\n location.reload();\n modal.modal('hide');\n })\n })\n\n },\n\n deleteUserInfo:function(modal,button){\n modal.find('#exampleModalLabel').text('删除主机');\n $('#exampleModal').find('form').html('确定要删除吗?');\n var infos = button[0].attributes\n console.log(infos)\n var submitbtn = modal.find('#submitbtn');\n\n submitbtn.off('click').on('click',function(e){\n var params = {};\n params.id = infos.userid.nodeValue;\n typeof(params.id)\n $.post('/delete',params,function(res){\n location.reload();\n modal.modal('hide');\n })\n })\n\n },\n\n init: function () {\n var _this = this;\n _this.getTableData2(); //调用显示表格数据的函数\n _this.modalShowJudge();\n _this.searchFun();\n\n }\n };\n monitor.init();\n});" }, { "alpha_fraction": 0.6112957000732422, "alphanum_fraction": 0.6146179437637329, "avg_line_length": 24.08333396911621, "blob_id": "6f62e06e95b575019dc79d88bf4c4f0aff6f4140", "content_id": "5da2f2bbb7f318b8d7d704954d939da1d0712030", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 81, "num_lines": 12, "path": "/uplooking_Python/code/lesson08-flask/ops/servicetree/models/tree_host_relation.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom libs.db import db\n\nclass TagsHostRelation(object):\n TABLE = \"map_device\"\n\n @classmethod\n def bind(cls, tagId, hostId):\n sql = \"insert into %s (tree_id,device_id) values(%%s, %%s)\" % (cls.TABLE)\n lastId = db.insert(sql, tagId, hostId)\n\n return lastId\n" }, { "alpha_fraction": 0.7400530576705933, "alphanum_fraction": 0.7586206793785095, 
"avg_line_length": 31.782608032226562, "blob_id": "765834d82f61a0a88c92d7df9d3b16031f77bf39", "content_id": "ea0cfbc72d2b2d4ed88d7c057d49995dbc66589b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "no_license", "max_line_length": 57, "num_lines": 23, "path": "/Django/mysite/account/models.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\nclass UserProfile(models.Model):\n\tuser = models.OneToOneField(User,unique=True)\n\tbirth = models.DateField(blank=True,null=True)\n\tphone = models.CharField(max_length=20,null=True)\n\n\tdef __str__(self):\n\t\treturn 'user {}'.form(self.user.username)\n\n#个人信息的数据模型类\nclass UserInfo(models.Model):\n\tuser = models.OneToOneField(User,unique=True)\n\tschool = models.CharField(max_length=100,blank=True)\n\tcompany = models.CharField(max_length=100,blank=True)\n\tprofession = models.CharField(max_length=100,blank=True)\n\taddress = models.CharField(max_length=100,blank=True)\n\taboutme = models.TextField(blank=True) #在前端页面中允许用户不填写\n\n\n\tdef __str__(self):\n\t\treturn \"user:{}\".format(self.user.username)\n" }, { "alpha_fraction": 0.5522788166999817, "alphanum_fraction": 0.6085790991783142, "avg_line_length": 30.04166603088379, "blob_id": "b71ce5970901bff6e6b14e03f27264b29308c4e3", "content_id": "5012ae043a52dd89c27ac107c0f4a481a5fcd0d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 100, "num_lines": 24, "path": "/uplooking_Python/code/lesson08-flask/req.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport requests\n\ndef httplib(url):\n payload = {\"hostname\": \"tj1-nginx01.kscn\"}\n\n #payload = {\"hostnames\": \"www.baidu.com\"}\n ## params 
传参是通过url传参:http://127.0.0.1:8888/hosts/add?hostnames='www.baidu.com'\n #r = requests.post(url, params=payload)\n #print r.status_code, r.text\n\n #payload = {\"ip\": \"127.0.0.1\", \"system\": \"centos\"}\n # r1 = requests.post(url, data=payload)\n # print r1.status_code, r1.text\n\n ##payload = {\"hostname\":\"cn-beijing-01.kscn\", \"type\":\"vm\", \"ip\":\"1.1.1.1\", \"location\":\"beijing\"}\n r2= requests.post(url, json=payload)\n print r2.status_code, r2.text\n\n\nif __name__ == \"__main__\":\n url = \"http://127.0.0.1:8888/hosts/test/params\"\n httplib(url)\n\n" }, { "alpha_fraction": 0.7349397540092468, "alphanum_fraction": 0.7349397540092468, "avg_line_length": 26.66666603088379, "blob_id": "2967de04ed9383a84d9102c6ffb1aec3ce3722b2", "content_id": "154101de050a16d68e0058d5b02882ba3766fe38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 44, "num_lines": 6, "path": "/uplooking_Python/code/lesson08-flask/ops/domains/views/domain.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import render_template\nfrom domains import blue_print\n\n@blue_print.route(\"/index\", methods=[\"GET\"])\ndef index():\n return render_template(\"domain.html\")\n" }, { "alpha_fraction": 0.6297872066497803, "alphanum_fraction": 0.6808510422706604, "avg_line_length": 17.076923370361328, "blob_id": "65858c7aea42b71bb6fbbf8d1208883c0ee32cd1", "content_id": "20ea701b90af4e39ccc030238cc7e4265a1ab4d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 484, "license_type": "no_license", "max_line_length": 148, "num_lines": 26, "path": "/uplooking_Python/code/lesson06/create_db.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n\n#创建数据库和表\n\nimport MySQLdb\n\nconn = MySQLdb.connect(\n host='127.0.0.1',\n port=3306,\n user='root',\n passwd=\"123456\",\n 
db='python_test',\n connect_timeout=10,\n charset='utf8'\n\n\n)\n\nconn.autocommit(True)\n\ncur = conn.cursor()\n\n#cur.execute(\"create databases test\")\ncur.execute(\"use python_test; create table user_ip_info (id int,username varchar(10),position varchar(20),ipaddr varchar(100),remark varchar(100))\")\ncur.close()\nconn.commit()\n" }, { "alpha_fraction": 0.4559471309185028, "alphanum_fraction": 0.4647577106952667, "avg_line_length": 26.515151977539062, "blob_id": "0a3ce74c346a92e5d4c5dca30af5d1e357a38b5c", "content_id": "76d8f4c04d05564ecd5461fc108a804e5bbf7499", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1064, "license_type": "no_license", "max_line_length": 45, "num_lines": 33, "path": "/uplooking_Python/code/lesson04/test4.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#encoding:utf8\n\nmy_dict = {'frg':'123'} #为了测试\ncount = 0\n\nwhile True:\n choice = raw_input('注册R 登陆L 退出Q: ')\n\n if choice.lower() == 'r':\n username = raw_input('请输入用户名:')\n if username in my_dict:\n print \"用户已经存在!\"\n if username not in my_dict:\n password = raw_input('请输入密码:')\n my_dict[username] = password\n print \"注册成功\"\n print my_dict\n if choice.lower() == 'l':\n username = raw_input('请输入用户名:')\n if username in my_dict:\n password = raw_input('请输入密码:')\n if password == my_dict[username]:\n print \"登陆成功\"\n else:\n count = count + 1\n print \"密码错误 %s 次\" % count\n if count > 3:\n print \"错误超过3次,帐号被锁定\"\n else:\n print \"用户名不存在,请注册\"\n if choice.lower() == 'q':\n print (\"退出\")\n break\n" }, { "alpha_fraction": 0.4001106917858124, "alphanum_fraction": 0.5019369125366211, "avg_line_length": 23.75342559814453, "blob_id": "04cdba8cc46a94c35ba989ae7eb31a9c7cefcd77", "content_id": "be140940a90ecdf6ba0111dc9df5636e7ab382d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1825, "license_type": "no_license", "max_line_length": 71, "num_lines": 73, 
"path": "/uplooking_Python/code/lesson05/montorDomains/conf/config.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# 日志路径\napp_log_path = \"log/app.log\"\n\n# 监控的域名\nexprie_domains = [\n 'bivyy.cn',\n 'mi-idc.commiui.com',\n 'miliao.com',\n 'tjqonline.cn',\n 'xiaomi.tw',\n 'hada.me',\n 'wlimg.cn',\n 'aleenote.com',\n 'alinotes.cn',\n 'x9m.cn',\n 'midoujiang.com',\n 'duokan.com',\n 'mi-ae.cn',\n 'mi-ae.net',\n 'zhimi.com',\n 'mizhuanqian.com',\n 'miot-spec.org',\n 'google.com.sg',\n 'google.com.hk',\n 'google.fr'\n\n ]\n\n'''\nRegistration Time: 2017-11-10 09:48:36\nExpiration Time: 2018-11-10 09:48:36\n\n\nCreation Date: 2004-04-29T15:50:45Z\nRegistry Expiry Date: 2020-04-29T15:50:45Z\n\nRecord expires on 2018-09-13 (YYYY-MM-DD)\nRecord created on 2011-09-13 (YYYY-MM-DD)\n\nCreation Date:\t\t05-Jul-2002 17:42:32\nModified Date:\t\t03-Jun-2017 17:20:53\n\nDomain Name Commencement Date: 14-07-2001\nExpiry Date: 20-11-2018\n\nExpiry Date: 30/12/2018\ncreated: 27/07/2000\n\n\n'''\n\n# register reg\nregister_reg = [\n \"Registration Time:\\s(\\d{4}-\\d{2}-\\d{2}\\s\\d{2}:\\d{2}:\\d{2})\",\n \"Creation Date:\\s(\\d{4}-\\d{2}-\\d{2}.\\d{2}:\\d{2}:\\d{2})\",\n \"Record created on\\s(\\d{4}-\\d{2}-\\d{2})\",\n \"Domain Name Commencement Date:\\s(\\d{2}-\\d{2}-\\d{4})\",\n \"Creation Date:\\s+(\\d{2}-\\w{3}-\\d{4}\\s\\d{2}:\\d{2}:\\d{2})\",\n \"created:\\s+(\\d{2}/\\d{2}/\\d{4})\"\n ]\n\n# expire reg \nexpire_reg = [\n \"Expiration Time:\\s(\\d{4}-\\d{2}-\\d{2}\\s\\d{2}:\\d{2}:\\d{2})\",\n \"Registry Expiry Date:\\s(\\d{4}-\\d{2}-\\d{2}.\\d{2}:\\d{2}:\\d{2})\",\n \"Record created on\\s(\\d{4}-\\d{2}-\\d{2})\",\n \"Expiry Date:\\s(\\d{2}-\\d{2}-\\d{4})\",\n \"Expiration Date:\\s+(\\d{2}-\\w{3}-\\d{4}\\s\\d{2}:\\d{2}:\\d{2})\",\n \"Expiry Date:\\s+(\\d{2}/\\d{2}/\\d{4})\"\n\n ]\n" }, { "alpha_fraction": 0.5879458785057068, "alphanum_fraction": 0.5916359424591064, "avg_line_length": 28, "blob_id": 
"f563cc91b5beeba33e03bf4db1fd23b86828b385", "content_id": "9a6f4284f9227568f0ba68cf8a457f5b89d5bda6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 813, "license_type": "no_license", "max_line_length": 81, "num_lines": 28, "path": "/uplooking_Python/code/lesson09-flask/ops/servicetree/models/tree_host_relation.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom libs.db import db\n\nclass TagsHostRelation(object):\n TABLE = \"map_device\"\n\n @classmethod\n def bind(cls, tagId, hostId):\n sql = \"insert into %s (tree_id,device_id) values(%%s, %%s)\" % (cls.TABLE)\n lastId = db.insert(sql, tagId, hostId)\n\n return lastId\n\n @classmethod\n def getHostsIds(cls, tagId):\n sql = \"select device_id from %s where tree_id=%%s\"%(cls.TABLE)\n ids = db.query_all(sql, tagId)\n return tuple([i[0] for i in ids])\n\n @classmethod\n def getTreeIdByDeviceId(cls, hostId):\n sql = \"select tree_id from %s where device_id=%%s\"%cls.TABLE\n result = db.query_all(sql, hostId)\n treeIds = []\n for tree_id in result:\n treeIds.append(tree_id[0])\n print treeIds\n return treeIds\n\n" }, { "alpha_fraction": 0.5461254715919495, "alphanum_fraction": 0.5645756721496582, "avg_line_length": 17, "blob_id": "b260f0f8110d0aebc70cf872b9556537db24ecb6", "content_id": "d4a2ba99f804f3c7a49a69f6d23325c9bc33d9b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 49, "num_lines": 15, "path": "/uplooking_Python/code/lesson05/myapps/monitor/utils/whois_commands.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n\nimport commands\n\ndef sys_call(cmd):\n\n status,output = commands.getstatusoutput(cmd)\n if status == 0 or status == 256:\n if output == \"No match for\":\n return None\n\n else:\n return output\n else:\n return None\n\n" }, { "alpha_fraction": 
0.6000000238418579, "alphanum_fraction": 0.800000011920929, "avg_line_length": 19, "blob_id": "2f8c9291a847b3c4363b1d1833538283052f1dd1", "content_id": "fd72be24516e6833ca022868856ba66b73314030", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 40, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/README.md", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# 20170508\nPython code commit by wolfrg\n" }, { "alpha_fraction": 0.34303030371665955, "alphanum_fraction": 0.34303030371665955, "avg_line_length": 42.403507232666016, "blob_id": "b797444a51561b0cac2ed1fb0d9997b41c457799", "content_id": "0f536fa49d8fba839bbeb4ee9cf0b969ddb15399", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2587, "license_type": "no_license", "max_line_length": 221, "num_lines": 57, "path": "/uplooking_Python/code/flask_myself/static/js/whost.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "// 展示内网服务器的信息列表\n\n // $.get方法,表格模板写在function内部\n $(function(){\n\n var n_host = {\n\n getTableData: function () {\n var _this = this;\n // data = {};\n $.get('/get_wHostInfo', function (data) {\n \n // console.log('getTableData',data)\n if (data) {\n var str = '';\n str += '<thead><tr><th><input type=\"checkbox\"></th><th>服务器SN编号</th><th>服务器型号</th><th>服务器配置</th><th>外网地址</th><th>内网地址</th><th>机房位置</th><th>操作</th></tr></thead><tbody>';\n $.each(data, function (index, valued) {\n \n var sn = valued.sn_number;\n var xh = valued.host_modal;\n var peizhi = valued.peizhi;\n var wanip = valued.wan_ip;\n var lanip = valued.lan_ip;\n var h_location = valued.host_location;\n \n str += '<tr class=\"js-items-data\" data-id=\"' + sn + '\" data-username=\"' + xh + '\" data-position=\"' + peizhi + '\" data-addr=\"' + wanip + '\" data-addr=\"' + lanip + '\" data-addr=\"' + h_location + '\">\\\n <td><input type=\"checkbox\"></td>\\\n 
<td>'+ sn + ' </td>\\\n <td>'+ xh + ' </td>\\\n <td>'+ peizhi + ' </td>\\\n <td>'+ wanip + ' </td>\\\n <td>'+ lanip + ' </td>\\\n <td>'+ h_location + ' </td>\\\n <td>\\\n <button class=\"btn btn-xs btn-info\" data-for=\"edit\" data-target=\"#exampleModal\" id=\"modify\" data-toggle=\"modal\">编辑</button>\\\n <button class=\"btn btn-xs btn-danger\" data-for=\"delete\" data-target=\"#exampleModal\" id=\"delete\" data-toggle=\"modal\">删除</button>\\\n </td>\\\n </tr>';\n })\n str += '</tbody>';\n \n $('#bodyList').html(str)\n \n }\n }, 'json')\n \n },\n \n init:function(){\n var _this = this;\n _this.getTableData();\n }\n };\n\n n_host.init();\n\n })\n " }, { "alpha_fraction": 0.6819571852684021, "alphanum_fraction": 0.6819571852684021, "avg_line_length": 27.34782600402832, "blob_id": "d39f1b342809f5cefab88cc08a809ebca08f2e87", "content_id": "0aa0a662dab18a5871ad36e94b5b6eb08036b471", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 48, "num_lines": 23, "path": "/uplooking_Python/code/lesson08-flask/ops/servicetree/views/tree.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from servicetree import blue_print\nfrom flask import render_template\nfrom flask import request\nfrom servicetree.controller import *\n\n@blue_print.route(\"/index\", methods=[\"GET\"])\ndef index():\n return render_template(\"serviceTree.html\")\n\n@blue_print.route(\"/all\", methods=[\"GET\"])\ndef get_trees():\n trees = Tree.get_all()\n return trees\n\n@blue_print.route(\"/node/add\", methods=[\"POST\"])\ndef add_node():\n pid = request.form.get(\"pid\")\n nodeName = request.form.get(\"nodeName\")\n cname = request.form.get(\"cname\")\n node_type = request.form.get(\"nodeType\")\n # print node_type\n Tree.add_node(pid, nodeName, cname)\n return \"sucess\"\n\n\n" }, { "alpha_fraction": 0.403199166059494, "alphanum_fraction": 0.403611958026886, "avg_line_length": 
33.97111892700195, "blob_id": "d7235f302b0b51e84658355370bb1ddf5bd1b52e", "content_id": "aca33934448749eb5e7b86c02faa1de9e6d56fdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 10282, "license_type": "no_license", "max_line_length": 190, "num_lines": 277, "path": "/uplooking_Python/code/flask_myself/static/js/index.1.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "$(function(){\n var monitor = {\n\n menuClick:function() {\n // var _this = this;\n\n $('.main-menu').off('click').on('click','li',function(e){\n var thisBtn = $(this);\n thisBtn.parent().find('li').removeClass('active');\n thisBtn.addClass('active');\n })\n },\n\n //index html right table\n showInfo:function() {\n $.ajax({\n type: \"GET\",\n url: '/getUserInfo',\n data: 'json',\n success: function (json) {\n var str = '';\n str += '<thead><tr><th><input type=\"checkbox\"></th><th>编号</th><th>用户名</th><th>职位</th><th>IP地址</th><th>备注</th><th>操作</th></tr></thead><tbody>';\n json = JSON.parse(json);\n $.each(json, function (index, valued) {\n\n var userid = valued.id\n var username = valued.username\n\n str += '<tr class=\"js-items-data\" data-id=\"'+userid+'\" data-username=\"'+valued.username+'\" data-position=\"'+valued.position+'\" data-addr=\"'+valued.ipaddr+'\">\\\n <td><input type=\"checkbox\"></td>\\\n <td>'+ userid + ' </td>\\\n <td>'+ username + ' </td>\\\n <td>'+ valued.position + ' </td>\\\n <td>'+ valued.ipaddr + ' </td>\\\n <td class=\"js-items-remark\">'+ valued.remark + ' </td>\\\n <td>\\\n <button class=\"btn btn-xs btn-info\" data-for=\"edit\" data-target=\"#exampleModal\" id=\"myedit\" data-toggle=\"modal\">编辑</button>\\\n <button class=\"btn btn-xs btn-danger\" data-for=\"delete\" data-target=\"#exampleModal\" id=\"myDelete\" data-toggle=\"modal\">删除</button>\\\n </td>\\\n </tr>';\n })\n str += '</tbody>';\n \n $('#tbody').html(str)\n\n },\n\n }\n\n\n );\n },\n\n \n\n // modal Show Judge\n taskActionPage: 
function(){\n\n var _this = this;\n\n $('#exampleModal').on('show.bs.modal', function (event) {\n var button = $(event.relatedTarget),\n actionType = button.data('for'),\n modal = $(_this);\n\n // var id=button.attr(\"v\");\n\n if(actionType == 'add'){\n $('#exampleModalLabel').text('添加员工信息');\n _this.addTaskItem();\n\t\t\t\t\t_this.commitAdd(); //***** 在这里判断是添加的话,就调添加的提交请求\n\n }else if(actionType == 'edit'){\n $('#exampleModalLabel').text('编辑任务');\n _this.editTaskItem(button); // ***** 把button传进去,不然那不知道是哪一条数据上点击的编辑按钮\n\t\t\t\t\t_this.commitEdit(); //***** 在这里判断是编辑的话,就调编辑的提交请求\n }else if(actionType == 'delete'){\n _this.deleteFun(modal,button);\n }\n\n })\n },\n\n // 添加: 编号 用户名 职位 IP地址 备注 function\n addTaskItem: function(){\n var _this = this,\n formTpl = '';\n formTpl += '<div class=\"form-group\">\\\n <label class=\"control-label\">编号:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"num\" id=\"num\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">用户名:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"username\" id=\"username\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">职位:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"position\" id=\"position\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">IP地址:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"ipaddr\" id=\"ipaddr\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">备注:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"remark\" id=\"remark\">\\\n </div>';\n\n $('#exampleModal').find('form').html(formTpl);\n\n\n },\n\n //提交添加的用户信息\n addInfo:function () {\n\n // get input value\n var id = $('#num').val();\n var username = $('#username').val();\n var position = $('#position').val();\n var ipaddr = $('#ipaddr').val();\n var remark = $('#remark').val();\n\n var data = {};\n data.id = id;\n data.username = username;\n data.position = position;\n data.ipaddr = ipaddr;\n data.remark = remark;\n\n\n $.ajax({\n 
\n type:'POST',\n url:'/addUserInfo',\n data:data,\n success:function(response) {\n // console.log(data);\n if(response == 1) {\n location.reload();\n // alert('插入成功!');\n // $('#exampleModal').modal('hide');\n $('#exampleModal').hide();\n }\n \n \n },\n\n dataType:\"json\"\n\n \n });\n\n },\n\n\n\n // click edit button show edit view\n editTaskItem:function(button){\n\n // var data = {\n // 'id':'1',\n // 'username':'fengruigang'\n // };\n\n //***** 根据button找到父级上存的data-id, data-username, data-ipaddr, data-position的数据\n\t\t\tvar thisParent = $(button).parents('.js-items-data');\n\t\t\tvar thisId = thisParent.data('id');\n\t\t\tvar thisUername = thisParent.data('username');\n\t\t\tvar thisPos = thisParent.data('position');\n\t\t\tvar thisIp = thisParent.data('addr');\n\n\t\t\t//***** 以上是一种方式,存在父tr元素中,用以上的方式得到\n\t\t\t//***** 但是我们发现父元素tr中我没存remark的data-remark数据,这是我们的另外一种实现的方式\n\t\t\t//***** 现在我写一下remark的数据怎么获取,我们给remark这项一个class,我们已经得到了这一项的button,根据这个button找该class就行,即先找button的父,在由父向下找到这个remark的class\n\t\t\t//***** 然后得到remark中的值就好了\n\t\t\tvar thisRemark = thisParent.find('.js-items-remark').text();\n\n\t\t\t// 然后将这些值放入下面的html的对应val中\n\n var _this = this,\n formTpl = '';\n formTpl += '<div class=\"form-group\">\\\n <label class=\"control-label\">编号:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"num\" id=\"num\" value=\"'+thisId+'\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">用户名:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"username\" id=\"username\" value=\"'+thisUername+'\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">职位:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"position\" id=\"position\" value=\"'+thisPos+'\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">IP地址:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"ipaddr\" id=\"ipaddr\" value=\"'+thisIp+'\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">备注:</label>\\\n <input type=\"text\" 
class=\"form-control\" name=\"remark\" id=\"remark\" value=\"'+thisRemark+'\">\\\n </div>';\n\n $('#exampleModal').find('form').html(formTpl);\n \n \n\n\n\n },\n\n \n\n deleteFun:function(modal,button){\n\n modal.find('#exampleModalLabel').text('delete userInfo');\n $('#exampleModal').find('form').html('确定删除吗?');\n var infos = button[0].attributes\n var submitbtn = modal.find('#submitbtn');\n // alert(infos)\n submitbtn.on('click',function(e){\n var thisParent = $(button).parents('.js-items-data');\n var thisId = thisParent.data('id');\n var params = {};\n params.id = thisId;\n alert(params.id)\n\n $.post('/delete',params,function(res){\n location.reload();\n if(res.code == 1) {\n location.reload();\n }\n modal.modal('hide');\n\n });\n })\n\n\n\n },\n\n commitAdd:function() {\n var _this = this;\n $('#submitbtn').on('click.add',function(e){\n // alert('commitAdd function')\n _this.addInfo();\n \n });\n\n },\n\n // commitEdit:function() {\n \n // var _this = this;\n // $('#submitbtn').on('click.edit',function(e){\n // // alert('commitEdit function')\n // _this.editInfo();\n // });\n\n // },\n\n\n\n \n init:function(){\n var _this = this;\n _this.menuClick();\n _this.taskActionPage();\n \n // public_func.treeList();\n _this.showInfo();\n \n }\n };\n\n monitor.init()\n})\n\n\n\n" }, { "alpha_fraction": 0.48103246092796326, "alphanum_fraction": 0.4994133710861206, "avg_line_length": 28.83132553100586, "blob_id": "ca9c3d55a562f58f594182a24c5dfd64b04b738a", "content_id": "577147b03b16692b7f0a76e40775c5faca8af977", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2619, "license_type": "no_license", "max_line_length": 91, "num_lines": 83, "path": "/0901/webCopy.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 2017年9月1日\r\n\r\n@author: Ops\r\n'''\r\n\r\nimport socket, os, sys, json, platform\r\n\r\nsk = socket.socket()\r\nsk.bind((\"127.0.0.1\", 
8080))\r\nsk.listen(50)\r\np=os.getcwd()\r\nprint('http://localhost:8080/')\r\n\r\n\r\ndef plw( p ):\r\n print (\"函数内取值: \", p)\r\n if 'Linux' in platform.system():\r\n return p.replace(\"\\\\\", \"/\")\r\n elif 'Windows' in platform.system():\r\n return p.replace(\"/\", \"\\\\\")\r\n\r\ndef cpfile( p,filename ):\r\n cpdir='src'\r\n if filename[0]!='.' and os.path.isdir(p+'/'+filename):\r\n os.chdir(p+'/'+filename)\r\n print(os.getcwd())\r\n if os.path.isfile(p+'/'+filename+'/package.json'):\r\n fo = open(p+'/'+filename+'/package.json', \"r\", encoding='UTF-8')\r\n line = fo.read(-1)\r\n fo.close()\r\n try:\r\n text = json.loads(line)\r\n except ValueError:\r\n print (\"json 失败\")\r\n else:\r\n print (\"json 成功\")\r\n if text.get(\"scripts\",()).get(\"build\"):\r\n cpdir='dist'\r\n print (text.get(\"scripts\",()).get(\"build\"))\r\n #os.system('npm install')\r\n #os.system('npm run build')\r\n if os.path.isdir(plw(p+'/'+filename+'/'+cpdir)):\r\n com='xcopy /y /E '+plw(p+'/'+filename+'/'+cpdir)+' '+plw(p+'/../www/'+filename)\r\n print(com)\r\n os.system('mkdir '+plw(p+'/../www/'+filename))\r\n os.system(com)\r\n\r\n\r\nwhile True:\r\n conn, addr = sk.accept()\r\n accept_data = str(conn.recv(1024),encoding=\"utf8\")\r\n lines = accept_data.split('\\n')[0]\r\n print(lines)\r\n if len(accept_data)<3:\r\n continue;\r\n filename = accept_data.split()[1]\r\n print(filename);\r\n #print(\"\".join([\"接收内容:\", accept_data, \" 客户端口:\", str(addr[1])]))\r\n send_data='HTTP/1.1 200 OK\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n';\r\n conn.send(bytes(send_data, encoding=\"utf8\"))\r\n data=filename\r\n filenameOld=filename\r\n \r\n\r\n\r\n\r\n #if os.path.isfile(p+'/'+filename): \r\n # fo = open(p+'/'+filename, \"r\", encoding='UTF-8')\r\n # data = fo.read(-1)\r\n # fo.close()\r\n eddir=os.listdir(p)\r\n for filename in eddir:\r\n if(filenameOld[1:]==filename or filenameOld[1:]=='_all'):\r\n cpfile( p,filename )\r\n\r\n data=''\r\n for filename in eddir:\r\n if 
filename[0]!='.' and os.path.isdir(p+'/'+filename):\r\n data+='<a href=\"'+filename+'\">'+filename+'</a> <br>'\r\n\r\n conn.send(bytes(data, encoding=\"utf8\"))\r\n conn.close() # 跳出循环时结束通讯" }, { "alpha_fraction": 0.4854276776313782, "alphanum_fraction": 0.4893588125705719, "avg_line_length": 28.23770523071289, "blob_id": "40d93baa64d04b39858a7bef946128917d7b03b9", "content_id": "73c4f0a69fd8c812f90fb1f7f693dda8901cf676", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 15300, "license_type": "no_license", "max_line_length": 149, "num_lines": 488, "path": "/uplooking_Python/code/前端/lesson08-web/kuozhanPackage/为知笔记/scripts/popup/ClipPageControl.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/* global PopupView: false ztreeControl:false */\r\n\r\n\r\nfunction ClipPageControl() {\r\n 'use strict';\r\n var saveType = localStorage[Wiz.Constant.Default.SAVE_TYPE],\r\n isNative = (saveType && saveType === 'save_to_native') ? 
true : false,\r\n _hasNative = null;\r\n\r\n function initClipPageListener() {\r\n PopupView.hideCreateDiv();\r\n $('body').bind('keyup', keyDownHandler);\r\n $('#submit-type').bind('change', changeSubmitTypehandler);\r\n $('#note_submit').click(noteSubmit);\r\n $('#refresh_category_btn').click(requestCategoryForce);\r\n $('#wiz_clip_detail').show(initClipPageInfo);\r\n $('#comment-info').bind('keyup',function() {\r\n var obj = $(this);\r\n if (obj.val().length > 0) {\r\n obj.addClass('active');\r\n } else {\r\n obj.removeClass('active');\r\n }\r\n\r\n });\r\n initNativeDiv();\r\n }\r\n\r\n function initNativeDiv() {\r\n var isWin = isWinPlatform();\r\n if (isWin) {\r\n initSaveType();\r\n } else {\r\n $('#save_type_sel').hide();\r\n $('#native').remove();\r\n }\r\n }\r\n\r\n function initSaveType() {\r\n $('#save_type_sel').bind('change', changeSaveTypehandler);\r\n\r\n if (isNative) {\r\n $('#save_to_native').parent().trigger('click');\r\n }\r\n\r\n }\r\n\r\n /**\r\n * 保存到本地监听事件\r\n * @param {[type]} evt [description]\r\n */\r\n function changeSaveTypehandler() {\r\n var type = angular.element(document.getElementById('save_type_sel')).scope().saveType;\r\n if ('save_to_native' === type && !checkNativeStatus()) {\r\n return false;\r\n }\r\n setSaveType(type);\r\n }\r\n\r\n function setSaveType(type) {\r\n if (type === 'save_to_native') {\r\n isNative = true;\r\n } else if (type === 'save_to_server') {\r\n isNative = false;\r\n }\r\n localStorage[Wiz.Constant.Default.SAVE_TYPE] = type;\r\n }\r\n\r\n //监听截取信息事件\r\n chrome.extension.onConnect.addListener(messageListener);\r\n\r\n function messageListener(port) {\r\n var name = port.name;\r\n switch (name) {\r\n case 'contentVeilShow':\r\n $('#waiting').hide();\r\n if ($('#wiz_clip_detail').is(':hidden')) {\r\n initClipPageListener();\r\n }\r\n break;\r\n case 'pagePreviewFailure':\r\n exacutePreviewFailure();\r\n break;\r\n }\r\n }\r\n\r\n function requestPreview() {\r\n var port = chrome.runtime.connect({\r\n name: 
'preview'\r\n });\r\n port.postMessage('article');\r\n }\r\n\r\n function exacutePreviewFailure() {\r\n chrome.windows.getCurrent(function (win) {\r\n chrome.tabs.query({ active: true, windowId: win.id }, function (tabs) {\r\n var tab = tabs[0];\r\n if (tab && tab.status === 'complete') {\r\n //页面资源已经加载完成,未有preview返回,则提示无法剪辑\r\n var pageClipFailure = chrome.i18n.getMessage('pageClipFailure');\r\n PopupView.showClipFailure(pageClipFailure);\r\n } else {\r\n //页面加载中,继续执行请求\r\n setTimeout(requestPreview, 1000);\r\n }\r\n });\r\n });\r\n }\r\n\r\n /**\r\n * 是否windows系统\r\n * @return {Boolean} [description]\r\n */\r\n function isWinPlatform() {\r\n var platform = window.navigator.platform,\r\n isMac = (platform.toLowerCase().indexOf('mac') === 0),//(platform === \"Mac68K\") || (platform === \"MacPPC\") || (platform === \"Macintosh\");\r\n isLinux = (platform.toLowerCase().indexOf('linux') === 0);\r\n return !(isMac || isLinux);\r\n }\r\n\r\n\r\n\r\n /**\r\n *修改保存的类型\r\n */\r\n\r\n function changeSubmitTypehandler(evt) {\r\n var cmd = angular.element(document.getElementById('submit-type')).scope().submitType,\r\n portName = ('native' === cmd) ? 
'save-native' : 'preview',\r\n port = chrome.runtime.connect({\r\n name: portName\r\n });\r\n if ('native' === cmd) {\r\n if (!checkNativeStatus()) {\r\n evt.preventDefault();\r\n return ;\r\n }\r\n noteSubmit();\r\n } else {\r\n port.postMessage(cmd);\r\n //改变页面显示\r\n PopupView.changeSubmitDisplayByType();\r\n }\r\n }\r\n\r\n\r\n function initSubmitGroup(clipPageResponse) {\r\n var submitType = $('#submit-type');\r\n var clipArticle = clipPageResponse.article,\r\n clipSelection = clipPageResponse.selection;\r\n if (clipSelection === true) {\r\n $('#selection').parent().trigger('click');\r\n //submitType[0].options[1].selected = true;\r\n } else if (clipArticle === true) {\r\n $('#article').parent().trigger('click');\r\n //submitType[0].options[0].selected = true;\r\n } else {\r\n $('#fullPage').parent().trigger('click');\r\n //submitType[0].options[2].selected = true;\r\n }\r\n\r\n //用户没有选择时,禁止选择该'保存选择'\r\n if (clipSelection === false) {\r\n $('li[data-value=selection]', submitType).addClass('disabled');\r\n //submitType.find('#selection').attr('disabled', '');\r\n }\r\n\r\n //用户有选择或者不可以智能提取时,禁止选择'保存文章'\r\n if (clipArticle === false || clipSelection === true) {\r\n $('li[data-value=article]', submitType).addClass('disabled');\r\n //submitType.find('#article').attr('disabled', '');\r\n }\r\n }\r\n\r\n /**\r\n * 加载当前页面的是否能智能截取、是否有选择的信息,并根据该信息显示\r\n */\r\n\r\n function requestPageStatus() {\r\n// console.log('requestPageStatus');\r\n chrome.windows.getCurrent(function (win) {\r\n chrome.tabs.query({ active: true, windowId: win.id }, function (tabs) {\r\n Wiz.Browser.sendRequest(tabs[0].id, {\r\n name: 'getInfo'\r\n }, function (params) {\r\n initSubmitGroup(params);\r\n });\r\n });\r\n });\r\n }\r\n\r\n //初始化剪辑页面信息\r\n function initClipPageInfo() {\r\n initLogoutLink();\r\n requestPageStatus();\r\n requestTitle();\r\n initDefaultCategory();\r\n requestToken();\r\n requestCategory();\r\n requestTag();\r\n }\r\n\r\n\r\n function initLogoutLink() {\r\n var logoutText = 
chrome.i18n.getMessage('logout');\r\n $('#header_user').show();\r\n $('#logout_control').html(logoutText).bind('click', cmdLogout);\r\n }\r\n\r\n function cmdLogout() {\r\n Wiz.Cookie.removeCookies(Wiz.Constant.Default.COOKIE_URL, Wiz.Constant.Default.COOKIE_CERT, function () {\r\n chrome.runtime.connect({\r\n name: 'logout'\r\n });\r\n });\r\n localStorage.clear();\r\n window.close();\r\n }\r\n\r\n /**\r\n *加载标题\r\n */\r\n\r\n function requestTitle() {\r\n chrome.windows.getCurrent(function (win) {\r\n chrome.tabs.query({ active: true, windowId: win.id }, function (tabs) {\r\n var title = tabs[0].title;\r\n if (!title) {\r\n return;\r\n }\r\n setTitle(title);\r\n });\r\n });\r\n }\r\n\r\n function setTitle(title) {\r\n $('#wiz_note_title').val(title);\r\n }\r\n\r\n /**\r\n * 加载并显示默认文件夹---上次选择的文件夹\r\n */\r\n\r\n function initDefaultCategory() {\r\n var lastCategory = localStorage[Wiz.Constant.Default.COOKIE_LAST_CATEGORY];\r\n var categoryInfo = $('#category_info');\r\n if (lastCategory) {\r\n var array = lastCategory.split('*'),\r\n displayName = array[0],\r\n location = array[1];\r\n categoryInfo.html(displayName).attr('location', location);\r\n }\r\n categoryInfo.bind('click', function() {\r\n PopupView.switchCategoryTreeVisible();\r\n });\r\n //console.log('category_info click');\r\n //categoryInfo.unbind('click');\r\n\r\n }\r\n\r\n /**\r\n *加载中\r\n */\r\n\r\n //function changeCategoryLoadingStatus() {\r\n // var visible = isCategoryLoading();\r\n // if (visible) {\r\n // PopupView.hideCategoryLoading();\r\n // } else {\r\n // var categoryLoadingMsg = chrome.i18n.getMessage('category_loading');\r\n // PopupView.showCategoryLoading(categoryLoadingMsg);\r\n // }\r\n //}\r\n\r\n function isCategoryLoading() {\r\n var visible = $('#category_loading').is(':visible');\r\n return visible;\r\n }\r\n\r\n /**\r\n *对Tag信息进行处理\r\n */\r\n function parseWizTag() {\r\n //var tagString = localStorage[Wiz.Constant.Default.COOKIE_TAG];\r\n //console.log('parseWizTag : ' + 
tagString);\r\n }\r\n\r\n /**\r\n *对目录信息进行处理\r\n */\r\n function parseWizCategory() {\r\n initZtree();\r\n var visible = isCategoryLoading();\r\n if (visible) {\r\n //用户已经点击展开文件夹树,此时,需要直接显示文件夹树即可\r\n PopupView.showCategoryTreeFromLoading();\r\n }\r\n }\r\n\r\n function initZtree() {\r\n var categoryString = localStorage[Wiz.Constant.Default.COOKIE_CATEGORY];\r\n var ztreeJson = ztreeControl.parseDate(categoryString);\r\n ztreeControl.setNodes(ztreeJson);\r\n ztreeControl.initTree('ztree');\r\n }\r\n\r\n\r\n /**\r\n * 加载 Tag 信息\r\n */\r\n function requestTag() {\r\n var port = chrome.runtime.connect({\r\n name: 'requestTag'\r\n });\r\n port.onMessage.addListener(requestTagHandler);\r\n }\r\n function requestTagHandler(msg) {\r\n if (msg && typeof msg === 'string'){\r\n //console.log(msg);\r\n localStorage[Wiz.Constant.Default.COOKIE_TAG] = msg;\r\n parseWizTag();\r\n }\r\n }\r\n /**\r\n *加载文件夹信息\r\n */\r\n function requestCategory() {\r\n PopupView.showCategoryLoading();\r\n //本地目录信息错误,向后台请求目录信息\r\n var port = chrome.runtime.connect({\r\n name: 'requestCategory'\r\n });\r\n port.onMessage.addListener(requestCategoryHandler);\r\n }\r\n function requestCategoryForce() {\r\n PopupView.showCategoryLoading();\r\n //本地目录信息错误,向后台请求目录信息\r\n var port = chrome.runtime.connect({\r\n name: 'requestCategoryForce'\r\n });\r\n port.onMessage.addListener(requestCategoryHandler);\r\n }\r\n function requestCategoryHandler(msg) {\r\n if (msg && typeof msg === 'string'){\r\n //console.log(msg);\r\n localStorage[Wiz.Constant.Default.COOKIE_CATEGORY] = msg;\r\n parseWizCategory();\r\n }\r\n }\r\n\r\n\r\n function requestToken() {\r\n var port = chrome.runtime.connect({\r\n name: 'requestToken'\r\n });\r\n port.onMessage.addListener(function (token) {\r\n initUserLink(token);\r\n });\r\n }\r\n\r\n\r\n function keyDownHandler(evt) {\r\n var target = evt.target,\r\n skipTypes = ['input', 'select', 'textarea'],\r\n skipIndex;\r\n for (skipIndex = 0; skipIndex < skipTypes.length; skipIndex++) 
{\r\n //console.log(evt);\r\n if (target.nodeName.toLowerCase() == skipTypes[skipIndex]) {\r\n //console.log(skipTypes[skipIndex]);\r\n return;\r\n }\r\n }\r\n var keycode = evt.keyCode;\r\n if (13 == keycode) {\r\n requestSubmit();\r\n return;\r\n }\r\n var opCmd = getNudgeOp(keycode, evt);\r\n var info = {\r\n direction: opCmd\r\n };\r\n chrome.runtime.connect({\r\n name: 'onkeydown'\r\n }).postMessage(info);\r\n }\r\n\r\n function getNudgeOp(key, evt) {\r\n var returnValue = null,\r\n KEY_ALT = 18,\r\n KEY_CTRL = 17,\r\n keyMap = {\r\n 27: 'cancle',\r\n // up\r\n 38: 'expand',\r\n // down\r\n 40: 'shrink',\r\n // left\r\n 37: 'left',\r\n // right\r\n 39: 'right',\r\n // alt + up\r\n 56: 'topexpand',\r\n // alt + down\r\n 58: 'topshrink',\r\n // ctrl + down\r\n 57: 'bottomexpand',\r\n // ctrl + up\r\n 55: 'bottomshrink'\r\n };\r\n\r\n if (keyMap[key]) {\r\n if (evt && evt.altKey === true) { // 18\r\n returnValue = keyMap[key + KEY_ALT];\r\n } else if (evt && evt.ctrlKey === true) { // 17\r\n returnValue = keyMap[key + KEY_CTRL];\r\n } else {\r\n returnValue = keyMap[key];\r\n }\r\n return returnValue;\r\n }\r\n }\r\n\r\n /**\r\n * 保存文档处理\r\n */\r\n\r\n function noteSubmit() {\r\n requestSubmit();\r\n }\r\n\r\n function requestSubmit() {\r\n var type = angular.element(document.getElementById('submit-type')).scope().submitType,\r\n title = $('#wiz_note_title').val(),\r\n category = $('#category_info').attr('location'),\r\n comment = $('#comment-info').val(),\r\n tag = $('#tag-name').data('tag-name'),\r\n userid = localStorage[Wiz.Constant.Default.COOKIE_USER],\r\n info = {\r\n title: title,\r\n category: category,\r\n comment: comment,\r\n userid : userid,\r\n tag: tag,\r\n isNative : isNative\r\n };\r\n chrome.windows.getCurrent(function (win) {\r\n chrome.tabs.query({ active: true, windowId: win.id }, function (tabs) {\r\n Wiz.Browser.sendRequest(tabs[0].id, {\r\n name: 'preview',\r\n op: 'submit',\r\n info: info,\r\n type: type\r\n }, function () {\r\n 
window.close();\r\n });\r\n });\r\n });\r\n }\r\n\r\n function initUserLink(token) {\r\n var user_id = localStorage[Wiz.Constant.Default.COOKIE_USER];\r\n $('#login_div').find('.sep').html('|');\r\n $('#header_username').html(user_id).bind('click', function () {\r\n window.open(Wiz.Constant.Default.WEBCLIENT_URL + '?token=' + token);\r\n });\r\n }\r\n\r\n function checkNativeStatus() {\r\n if (!hasNativeClient()) {\r\n var installNotifyMsg = chrome.i18n.getMessage('install_client_notify');\r\n if (window.confirm(installNotifyMsg)) {\r\n window.open(Wiz.Constant.Default.UPDATEClient_URL);\r\n }\r\n }\r\n return hasNativeClient();\r\n }\r\n\r\n function hasNativeClient() {\r\n return _hasNative;\r\n }\r\n\r\n \r\n function setNativeStatus(hasNative) {\r\n _hasNative = hasNative;\r\n }\r\n\r\n this.setNativeStatus = setNativeStatus;\r\n}" }, { "alpha_fraction": 0.6275601387023926, "alphanum_fraction": 0.6321336030960083, "avg_line_length": 25.19270896911621, "blob_id": "45382fe015f03e00ba85299b58b0825475e3d880", "content_id": "09006e9fd4fd3ade1b36365cedfb3c26b88f806a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5071, "license_type": "no_license", "max_line_length": 144, "num_lines": 192, "path": "/uplooking_Python/code/flask_myself/app.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\nfrom flask import Flask, request, render_template,make_response\nimport MySQLdb\nfrom flask import jsonify\nimport json\n\ndb = MySQLdb.connect(\"localhost\", \"root\", \"123321\", \"python01\",charset='utf8')\n\napp = Flask(__name__)\n\n# index views\[email protected]('/')\ndef show_index():\n return render_template('index.t.html')\n\n#get right user info table api\[email protected]('/getUserInfo',methods=['GET']) \ndef show_table():\n cursor = db.cursor()\n sql = \"SELECT * FROM user_ip_info ORDER BY id ASC limit 10\"\n cursor.execute(sql)\n row_headers=[x[0] for x in cursor.description]\n 
results = cursor.fetchall()\n \n data=[]\n for result in results:\n data.append(dict(zip(row_headers,result)))\n # print data\n return json.dumps(data)\n db.close()\n\[email protected]('/get_nHostInfo',methods=['GET'])\ndef show_nhost():\n cursor = db.cursor()\n sql = \"SELECT * FROM n_hosts ORDER BY id ASC LIMIT 10\"\n cursor.execute(sql)\n row_headers=[x[0] for x in cursor.description]\n results = cursor.fetchall()\n\n data = []\n\n for result in results:\n data.append(dict(zip(row_headers,result)))\n return json.dumps(data)\n db.close()\n\n\[email protected]('/get_wHostInfo',methods=['GET'])\ndef show_whost():\n cursor = db.cursor()\n sql = \"SELECT * FROM w_hosts ORDER BY id ASC LIMIT 10\"\n cursor.execute(sql)\n row_headers=[x[0] for x in cursor.description]\n results = cursor.fetchall()\n\n data = []\n\n for result in results:\n data.append(dict(zip(row_headers,result)))\n return json.dumps(data)\n db.close()\n\n#add data api\[email protected]('/addUserInfo',methods=['POST'])\ndef insert_sql():\n \n cursor = db.cursor()\n\n id = request.form.get('id')\n username = request.form.get('username')\n position = request.form.get('position')\n ipaddr = request.form.get('ipaddr')\n remark = request.form.get('remark')\n\n # sql = \"insert into user_ip_info (id,username,position,ipaddr,remark) values (%s,%s,%s,%s,%s)\"\n sql = \"insert into user_ip_info values (%s,%s,%s,%s,%s)\"\n params = (id,username,position,ipaddr,remark)\n result = cursor.execute(sql,params)\n db.commit()\n return jsonify(result)\n\n db.close()\n\n\n#add host api\[email protected]('/addHostInfo',methods=['POST'])\ndef insert_nhost():\n \n cursor = db.cursor()\n\n # id = request.form.get('id')\n sn_number = request.form.get('sn_number')\n host_modal = request.form.get('host_modal')\n peizhi = request.form.get('peizhi')\n wan_ip = request.form.get('wan_ip')\n lan_ip = request.form.get('lan_ip')\n host_location = request.form.get('host_location')\n\n print 
sn_number,host_modal,peizhi,wan_ip,lan_ip,host_location\n sql = \"insert into n_hosts(sn_number,host_modal,peizhi,wan_ip,lan_ip,host_location) values (%s,%s,%s,%s,%s,%s)\"\n # sql = \"insert into n_hosts values (%s,%s,%s,%s,%s,%s)\"\n params = (sn_number,host_modal,peizhi,wan_ip,lan_ip,host_location)\n result = cursor.execute(sql,params)\n db.commit()\n return jsonify(result)\n\n db.close()\n\n#edit user info api\[email protected]('/edit_update',methods=['POST'])\ndef edit_update():\n\n cursor = db.cursor()\n\n id = request.form.get('id')\n username = request.form.get('username')\n position = request.form.get('position')\n ipaddr = request.form.get('ipaddr')\n remark = request.form.get('remark')\n\n sql = \"UPDATE user_ip_info SET username='%s', position='%s', ipaddr='%s', remark='%s' WHERE id=%s\" % (username,position,ipaddr,remark,id) \n\n result = cursor.execute(sql)\n db.commit()\n return jsonify(result)\n db.close()\n \n\[email protected]('/delete',methods=['POST'])\ndef deleteUserInfo():\n cursor = db.cursor()\n\n uid = request.form.get('id')\n \n uid = int(uid)\n sql = \"delete from user_ip_info where id='%d'\" % uid\n # params = uid\n # print params\n # print sql\n result = cursor.execute(sql)\n db.commit()\n return 'result'\n db.close()\n\[email protected]('/search',methods=['GET'])\ndef search():\n\n #如果想做任意字段的查询,就做一个判断\n \n cursor = db.cursor()\n username = request.args.get('username')\n print username\n sql = \"select * from user_ip_info where username LIKE '%s'\" % username\n # sql = \"select * from user_ip_info where username like '冯瑞钢'\"\n\n cursor.execute(sql)\n row_headers=[x[0] for x in cursor.description]\n results = cursor.fetchall()\n print results\n data=[]\n for result in results:\n data.append(dict(zip(row_headers,result)))\n print data\n return json.dumps(data)\n\n db.close()\n\n# @app.route('/ajax.html',methods=['GET','POST'])\n# def myajax():\n# return render_template('ajax.html')\n# #pass\n\n#view\[email protected]('/host/nei')\ndef 
nHost():\n return render_template('nei_host.html')\n\n#view\[email protected]('/host/wai')\ndef wHost():\n return render_template('wai_host.html')\n\[email protected]('/mindex')\ndef show_modal():\n return render_template('m.html')\n\[email protected]('/test')\ndef test():\n return render_template('test.html') \n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=5001,debug=True)\n" }, { "alpha_fraction": 0.7172236442565918, "alphanum_fraction": 0.7185090184211731, "avg_line_length": 34.3636360168457, "blob_id": "f4395143976583e5fe9e5d5e623c6d3f63385b30", "content_id": "4e93bd582f83e9577c2d93cc60444b2ff4a32654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 66, "num_lines": 22, "path": "/uplooking_Python/code/lesson09-flask/ops/servicetree/controller/tree_api.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf-8\nimport json\nfrom servicetree.models import *\nfrom config.development import NODE_TYPE\nfrom servicetree.models.hosts import Hosts\nfrom libs.cache.cache import Cache\nfrom servicetree.models.tree_host_relation import TagsHostRelation\nfrom libs.error import ParamError, ServerError\n\nclass HostApi(object):\n\n @classmethod\n def getNodesByHostname(cls,hostname):\n hostId = Hosts.getHostIdByHostname(hostname)\n if hostId:\n treeIds = TagsHostRelation.getTreeIdByDeviceId(hostId)\n tagstrings = Cache.getTagstringsByTreeIds(treeIds)\n hostTags = {hostname:tagstrings}\n return hostTags\n message = \"[%s]主机不存在!\"%str(hostname)\n #message = \"{}主机不存在\".format(hostname)\n raise(ServerError(message))\n" }, { "alpha_fraction": 0.5155279636383057, "alphanum_fraction": 0.6024844646453857, "avg_line_length": 15.473684310913086, "blob_id": "443acaaaf5d1dd8fc84c6e10f3e259e340e25ba6", "content_id": "179a74141fab7770feaa55b953ca65d1ba5d57c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/uplooking_Python/code/lesson04/test2_v1.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\nauthor:wolfr\ndate:20171227\nfun:100以内能被3和5同时整除就输出FizzBuzz,能被3整除就输出Fizz,能被5整除就输出Buzz\n'''\n\nmy_list = list(range(101))\n#print(my_list)\nfor i in my_list:\n\tif i == 0:\n\t\tprint(0)\n\telif i % 3 == 0:\n\t\tprint('Fizz')\n\telif i%5 == 0:\t\n\t\tprint('Buzz')\n\telif (i%3 == 0 and i%5 == 0):\n\t\tprint('FizzBuzz')\t\n\telse:\n\t\tprint(i)\t\n\n\t\t\n\t\t\n\n" }, { "alpha_fraction": 0.5505780577659607, "alphanum_fraction": 0.5765895843505859, "avg_line_length": 16.210525512695312, "blob_id": "99936c913547085efc3c73f108aa199406af2bd0", "content_id": "1d7d04967b6476829433732b6fab13c9abfffba2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 760, "license_type": "no_license", "max_line_length": 82, "num_lines": 38, "path": "/0901/socket1.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 2017年9月8日\r\n\r\n@author: Ops\r\n这是一个socket客户端访问新浪首页的脚本\r\n'''\r\nimport socket\r\n\r\n#创建socket\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n#建立连接\r\ns.connect(('www.sina.com.cn',80))\r\n\r\n#发送数据\r\nsend_data = 'GET / HTTP/1.1\\r\\nHost: www.sina.com.cn\\r\\nConnection: close\\r\\n\\r\\n'\r\ns.send(bytes(send_data,encoding=\"utf8\"))\r\n\r\n#接收数据:\r\nbuffer = []\r\n\r\nwhile True:\r\n d = s.recv(1024)\r\n if d:\r\n buffer.append(d)\r\n else:\r\n break\r\n \r\n data = bytes(' ',encoding=\"utf8\").join(buffer)\r\n \r\n\r\ns.close()\r\n\r\nheader,html = data.split(bytes('\\r\\n\\n',encoding=\"utf8\"),1)\r\nprint (header)\r\n\r\nwith open('sina.html','wb') as f:\r\n f.write(html)\r\n" }, { "alpha_fraction": 0.6341463327407837, "alphanum_fraction": 0.6341463327407837, "avg_line_length": 19.5, "blob_id": "75954e9f78c3faac88ddfe0a92753267802ac510", 
"content_id": "1ab536fb4b3dc8e8cf16fe0ec15a7e00a56d4b1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 41, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/uplooking_Python/code/jenkins.bak/shell/test.sh", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/bin/bash\nls -al /root >/root/test.txt\n" }, { "alpha_fraction": 0.7118644118309021, "alphanum_fraction": 0.7118644118309021, "avg_line_length": 14, "blob_id": "fb7a80945422ce6e9f3fe7544b9491d1fc4cc560", "content_id": "1fedfd0a6b296b92b9aa1fcee6238da86fb954a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 21, "num_lines": 4, "path": "/uplooking_Python/code/lesson07-flask/b.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import g\n\ndef print_username():\n print g.user" }, { "alpha_fraction": 0.4989200830459595, "alphanum_fraction": 0.5507559180259705, "avg_line_length": 18.04347801208496, "blob_id": "305ba385014cce02a18195c0d19d1bc57e3096eb", "content_id": "cc5a62dcc97d0499b85e4e28bc380b5fe7612be5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 956, "license_type": "no_license", "max_line_length": 83, "num_lines": 46, "path": "/0901/socketServer.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 2017年9月11日\r\n\r\n@author: Ops\r\nsocket server的脚本练习\r\n'''\r\n\r\nimport socket,os\r\n\r\n\r\np = 'F:\\\\MyEclipse\\\\20170508'\r\npdir = os.listdir(p)\r\n\r\nsk = socket.socket()\r\nsk.bind(('127.0.0.1',8080))\r\nsk.listen(1024)\r\nprint('http://localhost:8080/')\r\n\r\n\r\ndef my_functions():\r\n pass\r\n\r\nwhile True:\r\n conn,addr = sk.accept()\r\n accept_data = str(conn.recv(1024),encoding=\"utf8\")\r\n lines = accept_data.split('\\n')[0]\r\n #print(lines)\r\n if 
len(accept_data) <3:\r\n continue\r\n \r\n \r\n #向浏览器发送http头\r\n send_data = 'HTTP/1.1 200 OK\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n'\r\n \r\n conn.send(bytes(send_data,encoding=\"utf8\"))\r\n \r\n data = ''\r\n \r\n \r\n for rootFile in pdir:\r\n if os.path.isdir(os.path.join(p,rootFile)):\r\n data += '<a href=\"'+rootFile+'\">' +rootFile+ '</a> <br>'\r\n \r\n \r\n conn.send(bytes(data,encoding=\"utf8\"))\r\n conn.close()\r\n " }, { "alpha_fraction": 0.632022500038147, "alphanum_fraction": 0.648876428604126, "avg_line_length": 31.363636016845703, "blob_id": "6b27cf83970dc1c42f6b523e5a997d9f292d5ab1", "content_id": "4480471d08ef782e50f7d4b0a5ccee0ec02633c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 356, "license_type": "no_license", "max_line_length": 87, "num_lines": 11, "path": "/uplooking_Python/code/lesson09-flask/ops/run.sh", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "log_dir=`pwd`/log\nlog_path=$log_dir/ops.log\necho \"Stop ops process .......\"\nps aux | grep gunico| grep ops| awk '{print $2}' | xargs kill -HUP\nmkdir -p $log_dir\necho \"Start ops process ......\"\ngunicorn -c gunicorn.conf app:app -D -t 6000 --error-logfile $log_path --log-level info\n\necho \"---------ops gunicorn process------\"\nsleep 2;\nps aux | grep gunicor\n" }, { "alpha_fraction": 0.5751879811286926, "alphanum_fraction": 0.576127827167511, "avg_line_length": 31.18181800842285, "blob_id": "e9e82630917bf032b973fe4b1bf379cff804280a", "content_id": "41bd040c917856238389d554dd3bfaf7a0437610", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1100, "license_type": "no_license", "max_line_length": 110, "num_lines": 33, "path": "/uplooking_Python/code/lesson09-flask/ops/servicetree/models/db_tree.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom libs.db import MasterDB\n\nTABLE = 
\"map_tree\"\nCOLUMN = \"id,name,cname,node_type,pid\"\n\ndb = MasterDB()\n\nclass MapTree(object):\n @classmethod\n def get_all(cls):\n items = []\n sql = \"select %s from %s\" % (COLUMN, TABLE)\n result = db.query_all(sql)\n if result:\n items = [dict((value, i[index]) for index, value in enumerate(COLUMN.split(\",\"))) for i in result]\n return items\n\n @classmethod\n def get_item_byPid(cls, pid):\n sql = \"select %s from %s where id=%%s\" % (COLUMN, TABLE)\n result = db.query_all(sql, pid)\n if result:\n return [dict((value, i[index]) for index, value in enumerate(COLUMN.split(\",\"))) for i in result]\n\n raise(\"服务器内部错误,未获取到对应的父节点\")\n\n @classmethod\n def add_node(cls, pid, nodeName, cname, node_type):\n sql = \"insert into %s(name, cname, node_type, pid) values(%%s, %%s, %%s, %%s)\"%TABLE\n print \"sql:\", sql\n result = db.insert(sql, nodeName, cname, node_type, pid)\n print \"result:\", result\n\n\n" }, { "alpha_fraction": 0.46023687720298767, "alphanum_fraction": 0.48561760783195496, "avg_line_length": 28.549999237060547, "blob_id": "b8a613a12b6dcabb7dddcbd55482be67628c3748", "content_id": "c1439ed88c2e1f87972c7459b6742b65bb4f2798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 607, "license_type": "no_license", "max_line_length": 85, "num_lines": 20, "path": "/uplooking_Python/code/20180107/num_code.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\nimport os\n\nfor root,dirs,files in os.walk('/home/f/20170508/uplooking_Python'):\n for file in files:\n fname = os.path.join(root,file)\n\n num_lines = 0\n num_null = 0\n num_comments = 0\n\n with open(fname) as f:\n for line in f.readlines():\n if not line.strip():\n num_null += 1\n elif line.startswith('#'):\n num_comments += 1\n else:\n num_lines += 1\n print \"file %s: 代码 %d行,空行 %d,注释 %d\" % (fname,num_lines,num_null,num_comments)\n" }, { "alpha_fraction": 0.5803571343421936, "alphanum_fraction": 
0.6279761791229248, "avg_line_length": 14.649999618530273, "blob_id": "0f592dcc9a6b35f55ba42519d9ac42a8a98f6add", "content_id": "7b60ed4ea52bfc6df9b58eaa0d07cdf26401fcf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 53, "num_lines": 20, "path": "/uplooking_Python/code/lesson04/guess_num_v2.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nauthor:wolfrg\r\ndate:201712.25\r\n给出一个随机数整数,让用户一直猜下去,直到猜对退出\r\n'''\r\nimport random\r\n\r\nnum = random.randint(1,100)\r\nwhile True:\r\n\t#num = random.randint(1,100)\r\n\tguess = int(input('Please input you guess number:'))\r\n\r\n\tif guess > num:\r\n\t\tprint('猜大了')\r\n\r\n\telif guess < num:\r\n\t\tprint('猜小了')\r\n\telse:\r\n\t\tprint('恭喜,猜对了')\r\n\t\tbreak #猜对后就退出循环\t\r\n\r\n" }, { "alpha_fraction": 0.4166666567325592, "alphanum_fraction": 0.4188311696052551, "avg_line_length": 20.585365295410156, "blob_id": "678d9c19deb5979b5cd3a65b511597765018a184", "content_id": "0bd2193711ad0ab74e6d5186fa59308188d3638c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1056, "license_type": "no_license", "max_line_length": 50, "num_lines": 41, "path": "/0629/dict_add.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:UTF8\r\n\r\n\r\n# 字典创建 while开关\r\n\r\ndictionary = {}\r\n\r\nflag = '' #初始化开关的值\r\npape = ''\r\noff = ''\r\n\r\nwhile flag == 'a' or 'c':\r\n flag = raw_input(\"添加或查找单词? 
(a/c)\")\r\n \r\n if flag == \"a\":\r\n word = raw_input(\"输入单词(key):\")\r\n defintion = raw_input(\"输入定义值(value):\")\r\n dictionary[str(word)] = str(defintion)\r\n print \"添加成功!\"\r\n \r\n pape = raw_input(\"您是否要查找字典?(a/0)\")\r\n if pape == 'a':\r\n print dictionary\r\n else:\r\n continue\r\n elif flag == 'c':\r\n check_word = raw_input(\"要查找的单词:\")\r\n for key in sorted(dictionary.keys()):\r\n if str(check_word) == key:\r\n print \"该单词存在!\",key,dictionary[key]\r\n break\r\n else:\r\n off = 'b'\r\n \r\n if off == 'b':\r\n print \"抱歉,该值不存在!\"\r\n \r\n \r\n else:\r\n print \"error type\"\r\n break" }, { "alpha_fraction": 0.5050504803657532, "alphanum_fraction": 0.5173160433769226, "avg_line_length": 17.479999542236328, "blob_id": "b7846995411d08b7273cfa29bd05983d6b1eaa5f", "content_id": "7aad98ad9974fcfc6be245787bb7849bd89390ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1394, "license_type": "no_license", "max_line_length": 50, "num_lines": 75, "path": "/uplooking_Python/code/lesson06/DB.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n#封装DB操作\n\nimport MySQLdb\n\nclass DB(object):\n\n def __init__(self):\n self.conn = None\n\n def _get_conn(self):\n conn = MySQLdb.connect(\n host = '127.0.0.1',\n port = 3306,\n user = 'root',\n passwd = '123321',\n db = 'test',\n connect_timeout = 10,\n charset = 'utf8'\n )\n self.conn = conn\n return conn\n\n def _get_cur(self):\n conn = self._get_conn()\n conn.autocommit(True)\n cur = conn.cursor()\n #print cur\n return cur\n\n\n def _get_execute(self,sql):\n\n cur = self._get_cur()\n cur.execute(sql)\n return cur\n\n\n def _get_fetchall(self,sql):\n\n cur1 = self._get_execute(sql)\n result = cur1.fetchall()\n #print result\n return result\n\n\nclass HANDELDB(DB):\n\n def __init__(self):\n pass\n\n\n def insert(self,sql):\n result = self._get_execute(insert_sql)\n\n\n def update(self):\n pass\n\n def delete(self):\n pass\n\n def 
select(self,select_sql):\n result = self._get_fetchall(select_sql)\n return result\n\n\nif __name__ == '__main__':\n use_db = HANDELDB()\n insert_sql = \"insert into frg values(4,'zjq')\"\n select_sql = 'select * from test.frg'\n\n select = use_db.select(select_sql)\n use_db.insert(insert_sql)\n print select\n" }, { "alpha_fraction": 0.48514851927757263, "alphanum_fraction": 0.49657273292541504, "avg_line_length": 21.053571701049805, "blob_id": "2a52b13a249c607e49cb2ccd1ac7f806d6250091", "content_id": "f221fca499dc12244c6dbf0ac51e30a4fc33c630", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1425, "license_type": "no_license", "max_line_length": 60, "num_lines": 56, "path": "/0508/dns_resolver.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\r\n'''\r\nCreated on 2017年5月5日\r\n这个脚本是用来解析域名的IP\r\n@author: Ops\r\n'''\r\nimport dns.resolver\r\n#import os\r\nimport httplib\r\n\r\niplist=[] #定义一个域名IP列表变量\r\n\r\nappdomain=\"sms.phpip.com\" #定义业务域名\r\n\r\ndef get_iplist(domain=\"\"):\r\n try:\r\n A = dns.resolver.query(domain,'A')\r\n \r\n except Exception,e:\r\n print \"dns resolver error:\"+str(e)\r\n return\r\n for i in A.response.answer:\r\n for j in i.items:\r\n if j.rdtype == 1:\r\n iplist.append(j.address) #追加到iplist\r\n else:\r\n pass \r\n return True\r\n\r\ndef checkip(ip):\r\n checkurl=ip+\":80\"\r\n getcontent=\"\"\r\n httplib.socket.setdefaulttimeout(30) #定义http连接超时时间\r\n conn=httplib.HTTPConnection(checkurl) #创建http连接对象\r\n \r\n try:\r\n conn.request(\"GET\",\"/\",headers = {\"HOST\":appdomain})\r\n \r\n r=conn.getresponse()\r\n getcontent = r.read(20)\r\n \r\n finally:\r\n \r\n if getcontent ==\"<!doctype html>\":\r\n \r\n print ip+\" [OK]\"\r\n \r\n else:\r\n print ip+\" [有跳转,没关系。]\"\r\n \r\nif __name__==\"__main__\":\r\n if get_iplist(appdomain) and len(iplist) >0:\r\n for ip in iplist:\r\n checkip(ip) \r\n else:\r\n print \"dns resolver error.\" " }, { 
"alpha_fraction": 0.6142857074737549, "alphanum_fraction": 0.6571428775787354, "avg_line_length": 14.666666984558105, "blob_id": "58881be69209ca112297419ba9da84be07d7bdb4", "content_id": "33c6196999f0e33fc541669617f3fc3d105c5359", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 368, "license_type": "no_license", "max_line_length": 72, "num_lines": 21, "path": "/uplooking_Python/code/lesson04/test3_v1.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nauthor:wolfrg\r\ndate:20171227\r\nfun:随机生成16位的验证码\r\n'''\r\nimport string\r\nimport random\r\n\r\n\r\nmy_list = list(range(16))\r\n#print(range(0,16))\r\n#print(c)\r\nv_code = string.ascii_lowercase + string.ascii_uppercase + string.digits\r\n\r\n\r\nfor i in my_list:\r\n\tmy_list[i] = random.choice(v_code)\r\n\tprint(my_list[i])\r\n\r\nmy_code\t= \"\".join(my_list)\r\nprint(my_code)\r\n" }, { "alpha_fraction": 0.5612903237342834, "alphanum_fraction": 0.5758064389228821, "avg_line_length": 21.035715103149414, "blob_id": "67bc47a13233edc418f0e3c6f9a60522bec28015", "content_id": "cf3fe58c73a05293380a4d82daf5abf6e3447bf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "no_license", "max_line_length": 59, "num_lines": 28, "path": "/uplooking_Python/code/lesson05/montorDomains/utils/tools.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport time\nfrom commands import getstatusoutput\nfrom logger.logger import app_logger\nfrom libs.deco import recordLog\n\n@recordLog\ndef sys_call(cmd):\n\n # 系统调用\n count = 0\n for i in range(100):\n count += 1\n status, result = getstatusoutput(cmd)\n if status == 0:\n return result\n time.sleep(1)\n if count == 3:\n app_logger.error(\"whois [cmd:%s] timeout!\"%cmd)\n break\n continue\n\n app_logger.error(\"[sys_call timeout][cmd:%s]\"%cmd)\n\n\nif 
__name__ == \"__main__\":\n sys_call(\"whois bivyy.cn\")\n\n\n\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 20, "blob_id": "dfeb09f79d83efae7a4cce88b8a38839b2011730", "content_id": "8fbd4a06d724ed9dc7c39c96d96279fa8b23eb1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/uplooking_Python/code/lesson08-flask/ops/libs/error/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from errors import *" }, { "alpha_fraction": 0.6283487677574158, "alphanum_fraction": 0.6415002346038818, "avg_line_length": 30.58461570739746, "blob_id": "8aaded1a0dc37149561452605ba7745befe61ab7", "content_id": "c41ab81c086475c33eb9496e2af2b1d94e8d6529", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2147, "license_type": "no_license", "max_line_length": 194, "num_lines": 65, "path": "/uplooking_Python/code/lesson08-flask/ops/hosts/views/host.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport json\nfrom flask import render_template\nfrom flask import request\nfrom hosts import blue_print\nfrom flask import make_response\nfrom hosts.controller import hostHandleController as hc\n\n@blue_print.route(\"/index\", methods=[\"GET\"])\ndef index():\n return render_template(\"hosts.html\")\n\n@blue_print.route(\"/all\", methods=[\"GET\"])\ndef get_hosts():\n \n hosts = [{\"hostname\":\"tj1-nginx01.kscn\", \"host_type\": \"vm\", \"ip\": \"1.1.1.1\", \"location\":\"beijing\"}, {\"hostname\":\"tj1-nginx02.kscn\", \"host_type\": \"vm\", \"ip\": \"1.1.1.1\", \"location\":\"beijing\"}]\n return make_response(json.dumps({\"code\":\"200\",\"data\": hosts}), 200)\n\n@blue_print.route(\"/test/params\", methods=[\"POST\"])\ndef test_params():\n # hostname = 
request.args.get(\"hostname\")\n # hostname = request.form[\"hostname\"]\n hostname = request.get_json()\n print type(hostname)\n # print type(h)\n return \"sucess\"\n\n@blue_print.route(\"/add\", methods=[\"POST\"])\ndef host_add():\n # 获取请求参数 request.args从url中获取参数\n # hostnames = request.args.get(\"hostnames\")\n # print \"Hostnames is [{}]\".format(hostnames)\n\n # 获取表单\n # ip = request.form[\"ip\"]\n # system = request.form[\"system\"]\n # print \"Ip is {ip}. system is {sys}\".format(ip=ip, sys=system)\n\n #获取json数据\n # json_data = request.get_json()\n # print \"json_data:\", json_data\n\n # 获取添加的主机\n hostname = request.form[\"hostname\"]\n host_type = request.form[\"type\"]\n ip = request.form[\"ip\"]\n location = request.form[\"location\"]\n\n # 抛给主机管理的model\n addResult = hc.HostHandle.host_add(hostname, host_type, ip, location)\n if addResult:\n return \"资产添加成功!\"\n return \"资产添加失败!\"\n\n@blue_print.route(\"/bind\", methods=[\"POST\"])\ndef host_bind():\n\n # hostnames = request.form[\"hostnames\"]\n # tagstring = request.form[\"tagstring\"]\n # bindInfo = hc.HostHandle.host_bind(hostnames.split(\",\"), tagstring)\n # bind_result = [{\"code\":200, \"data\":[{\"tj1-nginx01.kscn\":\"sucess\"}]}]\n # return json.dumps(bind_result)\n # return json.dumps(bindInfo)\n raise(\"aaa\")\n return \"sucess\"\n" }, { "alpha_fraction": 0.42957746982574463, "alphanum_fraction": 0.42957746982574463, "avg_line_length": 10, "blob_id": "898ad2be7624f6dd32cb7efe4b8dba4a7fd99d9e", "content_id": "c834a8a4b80a6e76e9b489ecf05db741786c4dbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 142, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/uplooking_Python/code/前端/lesson10-前端day04/test/mDomain/js/domain.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "$(function(){\n var domain = {\n\n\n\n init:function(){\n public_func.subMenuClick();\n }\n\n };\n\n 
domain.init();\n});" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 19, "blob_id": "f87366912de113b29a777284618ba5471258dbc5", "content_id": "9700aa96ed3acfa6301e1b8fbf96bbf6be8eb530", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/uplooking_Python/code/lesson09-flask/ops/libs/db/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from db import MasterDB\ndb = MasterDB()" }, { "alpha_fraction": 0.7417582273483276, "alphanum_fraction": 0.7472527623176575, "avg_line_length": 21.875, "blob_id": "7e0bd1d2b9044a772a2b03aab2b415dee7dc3140", "content_id": "a04da78d5e1368b666056ee40033834711bb6d3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/uplooking_Python/code/lesson09-flask/ops/hosts/views/api.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom flask import render_template\nfrom flask import request\nfrom hosts import blue_print\n\n@blue_print.route(\"collect/hosts\", methods=[\"POST\"])\ndef collect():\n pass" }, { "alpha_fraction": 0.7159090638160706, "alphanum_fraction": 0.7556818127632141, "avg_line_length": 34.20000076293945, "blob_id": "3be943c933e56b64c306150d01ef2898831c1751", "content_id": "cb7f35a0ab5d92cdbbf5564bee3f942e4f5b7238", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 352, "license_type": "no_license", "max_line_length": 91, "num_lines": 10, "path": "/uplooking_Python/code/jenkins.bak/shell/test-java.sh", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/bin/bash\n. 
/etc/profile\n\nmkdir -p /root/jenkins/workspace/java_oms/src\ncd /root/jenkins/workspace/java_oms/src && git init\ngit rev-parse --is-inside-work-tree\ngit config remote.origin.url http://root:[email protected]/platform-server/java_oms.git\ngit pull http://root:[email protected]/platform-server/java_oms.git\ncd oms/\nmvn clean package\n" }, { "alpha_fraction": 0.5548387169837952, "alphanum_fraction": 0.5612903237342834, "avg_line_length": 18.125, "blob_id": "c2084fbc3ae56a7fd9f0062212db34bcf2a0e98e", "content_id": "021c552fcd5f933dafd46e49150658ade60a5436", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": "no_license", "max_line_length": 32, "num_lines": 8, "path": "/uplooking_Python/code/lesson05/montorDomains/utils/test.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom utils.tools import sys_call\n\nif __name__ == \"__main__\":\n cmd = \"whois bivyy.cn\"\n sys_call(cmd)\n\n\n" }, { "alpha_fraction": 0.648910403251648, "alphanum_fraction": 0.685230016708374, "avg_line_length": 20.88888931274414, "blob_id": "2376a5f3e12ddd117abc42e9c42802601ce2aa30", "content_id": "caf60f692c04cda3fb766e13bbf14c06bf560b7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 84, "num_lines": 18, "path": "/python_test/myPython/test_paramiko.py", "repo_name": "wolfrg/20170508", "src_encoding": "ISO-8859-7", "text": "#coding:gbk\r\n'''\r\nCreated on \r\n\r\n@author: Ops\r\nSSH ΥΛΊΕΓάΒλ΅ΗΒΌ΅Δ·½Κ½\r\n'''\r\nimport paramiko\r\nssh = paramiko.SSHClient()\r\nssh.load_system_host_keys()\r\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\nssh.connect(hostname='118.186.17.174',port=10001,username='root',password='frgadmin')\r\nstdin,stdout,stderr=ssh.exec_command('ls -l')\r\n\r\nfor std in stdout.readlines():\r\n print 
std,\r\n \r\nssh.close() \r\n" }, { "alpha_fraction": 0.5008756518363953, "alphanum_fraction": 0.5446584820747375, "avg_line_length": 19.071428298950195, "blob_id": "bc77fe6d9070656b958087ca744638aed3a69b8d", "content_id": "c9ba4448ea64d71c269e47396262ee07673d4217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 771, "license_type": "no_license", "max_line_length": 61, "num_lines": 28, "path": "/uplooking_Python/code/lesson04/binary_search_v2.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n#author:wolfrg\n#date:20180103\n#func:二分查找\n\n'''\n二分搜索也称折半搜索,是一种在有序数组中查找某一特定元素的搜索算法\n\n'''\n\nL = [1,2,3,4,5,6,7,8,9,10]\n\nstart = 0\nend = len(L) -1\nwhile start <= end:\n hkey = int(raw_input(\"输入你要查找的数:\"))\n print \"要查找的数:%d \" % hkey\n mid = start + (end - start)/2\n guess = L[mid]\n if guess == hkey:\n print \"你查的数的索引为%d\" % mid\n break\n if guess > hkey:\n print \"中间数%d > 查找的数 %d ,去中间数前面的数查找继续\" % (guess, hkey)\n end = mid - 1\n else:\n print \"中间数%d < 查找的数%d ,去中间数后面的数查找继续\" % (guess,hkey)\n start = mid + 1\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.4900306761264801, "alphanum_fraction": 0.493865042924881, "avg_line_length": 37.57575607299805, "blob_id": "9e577558464c15e25c3a8e2d5c25d174b30e1c6c", "content_id": "6557ca31079c874afe6cda81b732c477e6d0ae8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1336, "license_type": "no_license", "max_line_length": 106, "num_lines": 33, "path": "/uplooking_Python/code/前端/lesson08-web/kuozhanPackage/为知笔记/scripts/wiz/Browser.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'use strict';\r\nvar Wiz = Wiz || {};\r\nWiz.Browser = {\r\n onRequest : function(){\r\n try {\r\n// console.log('Wiz.Browse() onRequest(): ');\r\n// console.log(arguments);\r\n return (chrome.runtime.onMessage) ? 
(chrome.runtime.onMessage) : (chrome.extension.onRequest);\r\n } catch (err) {\r\n console.log('Wiz.Browser onRequest() Error : ' + err);\r\n }\r\n },\r\n sendRequest : function (tabId, params, callback) {\r\n try {\r\n// console.log('Wiz.Browser() sendRequest(): ');\r\n// console.log(arguments);\r\n if (chrome.tabs.sendMessage) {\r\n try {\r\n // Chrome 45 bug, 如果不加 frameId 则会导致 消息发给所有的 tab\r\n // throws on Chrome prior to 41\r\n chrome.tabs.sendMessage(tabId, params, {frameId: 0}, callback);\r\n } catch(e) {\r\n chrome.tabs.sendMessage(tabId, params, callback);\r\n }\r\n } else {\r\n chrome.tabs.sendRequest(tabId, params, callback);\r\n }\r\n// return (chrome.tabs.sendMessage) ? (chrome.tabs.sendMessage) : (chrome.tabs.sendRequest);\r\n } catch (err) {\r\n console.log('Wiz.Browser sendRequest() Error : ' + err);\r\n }\r\n }\r\n};" }, { "alpha_fraction": 0.545121967792511, "alphanum_fraction": 0.5512195229530334, "avg_line_length": 22.314285278320312, "blob_id": "4f4b6c890732a9437d776097b1c0fae19e0aad06", "content_id": "58c7ab362feecabc6eb58addf160c5fd4e147570", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 836, "license_type": "no_license", "max_line_length": 106, "num_lines": 35, "path": "/uplooking_Python/code/lesson08-flask/ops/hosts/models/hostOperateModle.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport json\nfrom libs.db import db\n\nclass HostHandleModle(object):\n\n TABLE = \"tb_device\"\n COLUMN = \"id,hostname,type,ip,location\"\n def __init__(self):\n pass\n\n @classmethod\n def host_add(cls, *args):\n '''\n :param args: 主机信息\n :return: bool 成功|失败\n\n '''\n\n add_sql = \"insert into %s (hostname,host_type,ip,location) values(%%s, %%s, %%s, %%s)\"%(cls.TABLE)\n lastId = db.insert(add_sql, *args)\n if lastId > 0:\n return True\n return False\n\n\n\n @classmethod\n def queryIdByHostname(cls, hostname):\n sql = \"select id from %s where 
hostname=%%s\"%(cls.TABLE)\n hostIdResult = db.query_id(sql, hostname)\n if len(hostIdResult) > 0:\n return hostIdResult[0]\n else:\n return 0\n\n\n\n\n" }, { "alpha_fraction": 0.6545454263687134, "alphanum_fraction": 0.7030302882194519, "avg_line_length": 19.565217971801758, "blob_id": "bc0dda12351cb74767cf3f6ff881d2797d700180", "content_id": "2987c4abe0ad3af632c753efb51d920d1a54742b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 535, "license_type": "no_license", "max_line_length": 83, "num_lines": 23, "path": "/python_test/myPython/paramiko_keyfile.py", "repo_name": "wolfrg/20170508", "src_encoding": "GB18030", "text": "#coding:gbk\r\n'''\r\nCreated on 2017年3月6日\r\n\r\n@author: Ops\r\n\r\n证书登录\r\n'''\r\nimport paramiko\r\nhostname = '192.168.0.221'\r\nusername = 'tomcat'\r\nmySSHKEY = 'E:\\\\内网\\\\内网服务\\\\内网服务器key文件\\\\fengruigang_tomcat_20150427'\r\n\r\nssh = paramiko.SSHClient()\r\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\nssh.connect(hostname,username=username,key_filename=mySSHKEY,password='tomcat@fengruigang')\r\n\r\nstdin,stdout,stderr=ssh.exec_command('uptime')\r\n\r\nfor std in stdout.readlines():\r\n print std,\r\n \r\nssh.close() " }, { "alpha_fraction": 0.6654205322265625, "alphanum_fraction": 0.6654205322265625, "avg_line_length": 19.615385055541992, "blob_id": "c5dd897425949690ae593a1e3ccaae5c0c489344", "content_id": "fd3eddfb9313364962cef0b20ce12c2beacd0459", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 575, "license_type": "no_license", "max_line_length": 76, "num_lines": 26, "path": "/uplooking_Python/code/lesson07-flask/flask.md", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "return make_response\n\nipython交互模式下查看url\nfrom app import app\napp.url_map\n\nflask 蓝图:\nhttp://docs.jinkan.org/docs/flask/blueprints.html <br>\n\nhttp://dormousehole.readthedocs.io/en/latest/tutorial/dbcon.html 
<br>\n\nhttp://docs.jinkan.org/docs/flask/appcontext.html 上下文\n\nfrom hosts import blue_print as hosts_bp\n\napp.register_blueprint(hosts_bp,url_prefix=\"hosts\")\n \n定义一个蓝图:\n\n\n__init__.py\n\n\n========================================================================<br>\nrequest\nhttp://docs.python-requests.org/zh_CN/latest/user/quickstart.html" }, { "alpha_fraction": 0.40713536739349365, "alphanum_fraction": 0.4118573069572449, "avg_line_length": 24.47222137451172, "blob_id": "9811271215eb99d0eaf79db70e226afa78f46165", "content_id": "5d1a7eb3734faad3adb1454c33f8877a65aa6e77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2036, "license_type": "no_license", "max_line_length": 118, "num_lines": 72, "path": "/uplooking_Python/code/前端/lesson10-前端day04/web-day-04/今天做的项目/monitor/mDomain/js/domain.js", "repo_name": "wolfrg/20170508", "src_encoding": "GB18030", "text": "/**\r\n * Created by Administrator on 2018/3/25.\r\n */\r\n$(function(){\r\n\r\n var domain = {\r\n // table 数据获取\r\n tableDataGet: function(){\r\n var _this = this,\r\n param = {};\r\n\r\n // 要请求的参数\r\n //param.nodeid = _this.defaultData.nodeId;\r\n //param.mainMenu = _this.defaultData.mainMenu;\r\n //param.subMenu = _this.defaultData.subMenu;\r\n //param.page = _this.defaultData.page;\r\n\r\n $.getJSON('../../data/tablelist.json',param, function(response){\r\n if(response.code === 1){\r\n var data=response.data.detail;\r\n\r\n // 拼接table\r\n _this.tableTpl(data);\r\n // 调用选择页码按钮\r\n _this.pageSel();\r\n }\r\n })\r\n },\r\n\r\n // 拼接表格\r\n tableTpl: function(data){\r\n var _this = this,\r\n tplHtml = '';\r\n\r\n tplHtml += '<thead><tr><th>id</th><th>name</th><th>domain</th></tr></thead><tbody>'\r\n $.each(data, function(index, value){\r\n tplHtml += '<tr><td>'+index +'</td><td>'+value.name+'</td><td>' + value.domain + '</td></tr></tbody>';\r\n });\r\n\r\n\r\n // 生成表格\r\n $('#tablelist').html(tplHtml);\r\n\r\n\r\n\r\n },\r\n\r\n // 选择页面\r\n 
pageSel: function(){console.log(4)\r\n var _this = this;\r\n\r\n // 给页数添加点击事件\r\n $('.pagination').off('click').on('click','li', function(e){\r\n // 更新保存的页码数\r\n _this.defaultData.page = $(this).find('a').text();\r\n\r\n // 调一遍table接口,根据页码数,更新table数据\r\n _this.tableDataGet();\r\n })\r\n },\r\n\r\n init:function(){\r\n var _this = this;\r\n _this.tableDataGet();\r\n\r\n public_func.menuClick();\r\n\r\n }\r\n };\r\n\r\n domain.init();\r\n})\r\n" }, { "alpha_fraction": 0.71875, "alphanum_fraction": 0.71875, "avg_line_length": 17.200000762939453, "blob_id": "dd3a9adbc6d4f7b368ae6ac6c3601ba325fdfbb3", "content_id": "d85419c93869ee72b03f2d0339b64dc1ee4ac7c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/mytest/myDjango/apps.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\r\n\r\n\r\nclass MydjangoConfig(AppConfig):\r\n name = 'myDjango'\r\n" }, { "alpha_fraction": 0.8114035129547119, "alphanum_fraction": 0.8114035129547119, "avg_line_length": 44.79999923706055, "blob_id": "4fc7ec7b8e5532fe259c18dfc7813729a4f6bb0b", "content_id": "2268a17bf35cc3071a395133c83fc3e55b2c526d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 94, "num_lines": 5, "path": "/uplooking_Python/code/lesson08-flask/ops/hosts/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nfrom models.hostOperateModle import HostHandleModle\nfrom models.treeModle import TreeHandle\nblue_print = Blueprint('hosts', __name__, template_folder=\"templates\", static_folder=\"static\")\nimport views" }, { "alpha_fraction": 0.42933332920074463, "alphanum_fraction": 0.4519999921321869, "avg_line_length": 18.83333396911621, "blob_id": 
"823bdbe40a5c239c178004bd0cb3f0cb6f9be420", "content_id": "bba4b180a406bfd628dd29018381580501713336", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 774, "license_type": "no_license", "max_line_length": 77, "num_lines": 36, "path": "/0901/file03.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 2017年9月27日\r\n\r\n@author: Ops\r\n'''\r\n\r\n#实现文件的增删改查\r\n\r\nimport sys\r\nfrom sys import argv\r\n\r\nscript,filename = argv\r\n\r\nwhile True:\r\n item = raw_input('''\\033[36;1mWelcome to here,what do you want to do?\r\n----------------------\r\npress 'p' for print\r\npress 'a' for add\r\npress 'd' for delete\r\npress 'u' for update\r\npress 's' for select\r\npress 'q' for quit\r\n----------------------\r\nplease make your choise: \\033[0m''')\r\n\r\n if item == 'p':\r\n while True:\r\n user_select = open(filename,'r')\r\n s_p = user_select.read()\r\n print s_p\r\n break\r\n\r\n\r\n elif item == 'q':\r\n print \"bye!\"\r\n sys.exit()\r\n" }, { "alpha_fraction": 0.6074895858764648, "alphanum_fraction": 0.6208968758583069, "avg_line_length": 29.85714340209961, "blob_id": "4418041704b4f9a7fb8743eee92ab3b485e28d06", "content_id": "fd12371e815dd32920d4b35a3c90d603e095609c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2251, "license_type": "no_license", "max_line_length": 78, "num_lines": 70, "path": "/uplooking_Python/code/lesson05/montorDomains_bak/core/monitor_domains.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\nfrom conf.config import exprie_domains\nfrom logger.logger import app_logger, db_logger\nfrom utils.tools import sys_call\nfrom libs.parser import register_parser, expire_parser, ParseTime, getExprDays\n\nclass DomainManager(object):\n def __init__(self):\n self.info = None\n\n @classmethod\n def whois(cls, domain_name):\n # 
获取域名注册信息\n cmd = \"whois %s\"%domain_name\n info = sys_call(cmd)\n cls.info = info\n\n #Registration Time: 2017-11-10 09:48:36\n #Expiration Time: 2018-11-10 09:48:36\n @classmethod\n def get_register_time(cls, domain_name):\n # 获取注册时间\n reg_time = register_parser(cls.info)\n if reg_time:\n return reg_time\n\n @classmethod\n def get_expire_time(cls, domain_name):\n # 获取过期时间\n expire_time = expire_parser(cls.info)\n if expire_time:\n return expire_time\n #app_logger.info([\"not match\"][\"domain:%s\"%domain_name])\n #print \"domain_name:%s, is not match\"%domain_name\n\ndef get_domain_register_expiry_time():\n dTimes = {}\n for domain in exprie_domains:\n # 每个域名的过期和注册时间\n domain_reg_expr_time = {}\n DomainManager.whois(domain)\n registerTime = DomainManager.get_register_time(domain)\n if registerTime:\n expiryTime = DomainManager.get_expire_time(domain)\n domain_reg_expr_time[\"reg_time\"] = registerTime\n domain_reg_expr_time[\"expriy_time\"] = expiryTime\n dTimes[domain] = domain_reg_expr_time\n else:\n app_logger.info(\"[not match][domain:%s]\"%domain)\n print \"domain_name:%s, is not match\"%domain\n print \"DTime-->\", dTimes\n return dTimes\n\ndef calculate_expire_days(dTimes):\n '''计算过期天数'''\n for domain_name, domain_times in dTimes.iteritems():\n pt = ParseTime(domain_times[\"expriy_time\"])\n domain_times[\"expriy_days\"] = getExprDays(pt.parse_time())\n return dTimes\n\ndef main():\n dTimes = get_domain_register_expiry_time()\n result = calculate_expire_days(dTimes)\n # 写入到文件或db\n print result\n\nif __name__ == \"__main__\":\n main()\n\n\n\n" }, { "alpha_fraction": 0.5334420800209045, "alphanum_fraction": 0.5350733995437622, "avg_line_length": 26.68181800842285, "blob_id": "52bcbb08b1b76ede924433e9c6cd0011a1fa37b6", "content_id": "237da6eb3f561eabe0deab6cddf98aab26c3db22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 625, "license_type": "no_license", "max_line_length": 82, "num_lines": 22, 
"path": "/uplooking_Python/code/lesson05/myapps/monitor/utils/convert_time.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\nimport time\nfrom datetime import datetime\n\n#统一时间格式\n\n\ndef convert_time(tstr):\n try:\n time.strptime(tstr,\"%Y-%m-%d\")\n return tstr\n except:\n try:\n time_convert = datetime.strptime(tstr,\"%d-%b-%Y\").strftime(\"%Y-%m-%d\")\n return time_convert\n except Exception,e:\n try:\n time_convert = datetime.strptime(tstr,\"%d-%m-%Y\").strftime(\"%F\")\n return time_convert\n except Exception,e:\n time_convert = datetime.strptime(tstr,\"%d/%m/%Y\").strftime(\"%F\")\n return time_convert\n\n\n\n\n" }, { "alpha_fraction": 0.5195530652999878, "alphanum_fraction": 0.5466265678405762, "avg_line_length": 31.753623962402344, "blob_id": "70798ab93c962e641a915b4489824c2316dac22b", "content_id": "09085edb7651c1e71cc42d1f24f5f4d4d4348b41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2333, "license_type": "no_license", "max_line_length": 69, "num_lines": 69, "path": "/uplooking_Python/code/前端/lesson08-web/kuozhanPackage/为知笔记/scripts/wiz/WizConstant.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'use strict';\r\nvar Wiz = Wiz || {};\r\nWiz.Constant = {\r\n Default : {\r\n DOC_CATEGORY: '/My Notes/',\r\n DOC_TITLE: 'no title',\r\n COOKIE_EXPIRE_SEC: 14 * 24 * 60 * 60,\r\n TOKEN_EXPIRE_SEC: 3 * 60,\r\n REFRESH_TOKEN_TIME_MS: 4 * 60 * 1000,\r\n CATEGORY_EXPIRE_SEC: 10 * 60,\r\n TAG_EXPIRE_SEC: 10 * 60,\r\n\r\n XMLURL : 'http://service.wiz.cn/wizkm/xmlrpc',\r\n API_URL: 'http://api.wiz.cn/?p=wiz&c=openapi_url',\r\n NOTE_URL: 'http://note.wiz.cn',\r\n WEBCLIENT_URL: 'https://note.wiz.cn/web',\r\n UPDATEClient_URL: 'http://blog.wiz.cn/wiz-faq-npapi.html',\r\n REGISTER_URL: 'https://note.wiz.cn/login?p=reg',\r\n\r\n COOKIE_URL : 'http://service.wiz.cn/web',\r\n COOKIE_USER: 'wiz-clip-user',\r\n COOKIE_CERT: 'wiz-clip-cert',\r\n 
COOKIE_LAST_CATEGORY: 'wiz-last-category',\r\n COOKIE_CATEGORY: 'wiz-all-category',\r\n COOKIE_CATEGORY_TIME: 'wiz-category-stored-time',\r\n COOKIE_TAG: 'wiz-all-tag',\r\n COOKIE_TAG_TIME: 'wiz-tag-stored-time',\r\n PREVIEW_OVER_TIME_MS: 5000, //30秒超时\r\n SAVE_TYPE: 'wiz-save-type'\r\n },\r\n\r\n Service: {\r\n QUERY_TIME_ARRAY: [5, 5, 10, 20, 20, 20, 20, 20, 20, 20]\r\n },\r\n\r\n LOGIN_PARAMS: {\r\n CLIENT_TYPE: 'webclip_chrome',\r\n API_VERSION: 4\r\n },\r\n\r\n API : {\r\n ACCOUNT_LOGIN: 'accounts.clientLogin',\r\n ACCOUNT_KEEPALIVE: 'accounts.keepAlive',\r\n ACCOUNT_GETOKEN: 'accounts.getToken',\r\n GET_AllCATEGORIES: 'category.getAll',\r\n GET_ALLTAGS: 'tag.getList',\r\n DOCUMENT_POSTSIMPLE: 'document.postSimpleData'\r\n },\r\n ListenType : {\r\n SERVICE: 'wiz_service',\r\n CONTENT: 'wiz_content',\r\n POPUP: 'wiz_popup'\r\n }\r\n};\r\n\r\n\r\n//var cookieUrl = 'http://service.wiz.cn/web',\r\n// cookieName = 'wiz-clip-auth',\r\n// cookieExpiredays = 14 * 24 * 60 * 60,\r\n// updateClientUrl = 'http://blog.wiz.cn/wiz-faq-npapi.html';\r\n//\r\n//apiUrl: 'http://api.wiz.cn/?p=wiz&c=openapi_url',\r\n// openapiUrl : '',\r\n// betaUrl: 'http://note.wiz.cn',\r\n// cookieUrl : 'http://service.wiz.cn/web',\r\n// cookieName : 'wiz-clip-auth',\r\n// cookie_category: 'wiz-all-category',\r\n// cookie_category_time: 'wiz-category-stored-time',\r\n// category_expireSec: 10 * 60," }, { "alpha_fraction": 0.4953271150588989, "alphanum_fraction": 0.5046728849411011, "avg_line_length": 11.46875, "blob_id": "4fec0139009b92cef9ff94afe8d8cf0be281c499", "content_id": "e5485dd7382b1bf271f58791b768cb1c8e717c17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 520, "license_type": "no_license", "max_line_length": 48, "num_lines": 32, "path": "/uplooking_Python/code/lesson11-20180520/rbac.md", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "1.用户登陆\n \n2.db判断用户\n \n\[email protected]_quest\ndef check_login():\n 
username = request.cookies.get(\"username\")\n\n if username:\n pass\n\n else:\n return render_template('login.html') \n\n\n3.功能点前添加校验\n\n4.调用权限系统\n\n 获取权限\n 获取角色\n 数据库设计:\n user\n role\n priv_point:权限点\n system\n map_user_role\n map_priv_role\n\n 后端代码实现:\n select user from \n\n \n\n\n\n \n\n" }, { "alpha_fraction": 0.5596987009048462, "alphanum_fraction": 0.5638418197631836, "avg_line_length": 26.63541603088379, "blob_id": "b23692ad18d4a270ae81c854ec72b389e94a1858", "content_id": "afe8613e3a8dbe3f0757d304849bc07f0836c940", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2783, "license_type": "no_license", "max_line_length": 82, "num_lines": 96, "path": "/uplooking_Python/code/lesson11-20180520/deploy_controller.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf-8\nfrom deploy.models.job import Job\nfrom libs.http import net\nfrom config.development import Token, PorjectsUrl, CommitLogUrl, PROJECT_WORK_PATH\nfrom libs.error import ParamError, ServerError\nfrom utils.iokits import pCheck, fTar\nimport os\nimport shutil\nfrom commands import getstatusoutput\nfrom fabric import *\n\nclass Deploy(object):\n header = {\"PRIVATE-TOKEN\": Token}\n @classmethod\n def job_add(cls, *args):\n # model 入库\n return Job.add_job(*args)\n\n @classmethod\n def getVersion(cls, git_url, branch):\n\n allProjects = net.request(PorjectsUrl, cls.header)\n idPrj = projectsHandle(allProjects)\n pid = idPrj.get(git_url)\n if not pid:\n raise (ParamError(\"部署的项目不存在\"))\n\n commitLogs = cls.getProjectCommitLog(pid, branch, header)\n\n @classmethod\n def getProjectCommitLog(cls, pid, branch, header):\n r = net.request(CommitLogUrl%(pid, branch), header)\n print r\n\n @classmethod\n def deploy(cls, git_url, branch, machines):\n '''\n 1. 校验project是否存在\n 2. clone project\n 3. 编译\n 4. 打包\n 5. put到部署的设备\n 6. 
解压-部署\n :param git_url:\n :param branch:\n :param meachines:\n :return:\n '''\n project_name = git_url.split(\"/\")[1].split(\".\")[0]\n path = PROJECT_WORK_PATH + project_name\n tar_file = \"/tmp/%s\"%project_name\n if cls._clone(git_url, branch, path):\n fTar(path, tar_file)\n\n cls._put(tar_file+\".tar.gz\", machines)\n\n @classmethod\n def _put(cls,tar_file, machines):\n g = machines.split(\",\")\n for host in g:\n c = Connection(host)\n # 传输文件\n try:\n c.put(tar_file, tar_file)\n except Exception,e:\n raise (ParamError(\"文件传输出错了! err:%s\"%e))\n\n # 解压部署\n try:\n result = c.run(\"/bin/tar xvf %s -C /\"%tar_file)\n except Exception, e:\n raise (ServerError(\"解压异常, err:%s\"%result.stdout))\n\n if result.exited != 0:\n raise (ParamError(\"解压失败!\"))\n\n #启动项目\n\n @classmethod\n def _clone(cls, git_url, branch, path):\n if pCheck(path):\n shutil.rmtree(path)\n clone_cmd = \"/usr/bin/git clone -b %s %s %s\"%(branch, git_url, path)\n status, message = getstatusoutput(clone_cmd)\n print status, message\n if int(status) != 0:\n raise (ServerError(\"项目拉取失败:%s\"%message))\n\n return True\n\n\ndef projectsHandle(projects):\n prjs = {}\n for i in projects:\n prjs[i[\"ssh_url_to_repo\"]] = i[\"id\"]\n return prjs\n\n\n" }, { "alpha_fraction": 0.6484458446502686, "alphanum_fraction": 0.6559485793113708, "avg_line_length": 29.100000381469727, "blob_id": "fbf0bba7c58201bf19ee2f772fb8922cbf2317d5", "content_id": "c781fabc23ff7a38a026081c1321a3c1ca9df6b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 979, "license_type": "no_license", "max_line_length": 95, "num_lines": 30, "path": "/uplooking_Python/code/jenkins.bak/models.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n#coding:utf-8\r\nfrom flask_login import UserMixin\r\nfrom index import db\r\nfrom werkzeug.security import generate_password_hash, check_password_hash\r\n\r\n\r\nclass User(UserMixin, db.Model):\r\n 
__tablename__ = 'user'\r\n id = db.Column(db.Integer, primary_key=True)\r\n username = db.Column(db.String(255), unique=True, index=True) # unique 代表唯一 , index查询=效率更高\r\n password_hash = db.Column(db.String(255))\r\n\r\n @property\r\n def password(self):\r\n raise AttributeError('不能获取明文密码!')\r\n\r\n @password.setter\r\n def password(self, password):\r\n self.password_hash = generate_password_hash(password)\r\n\r\n def verify_password(self, password):\r\n return check_password_hash(self.password_hash, password)\r\n\r\n def __repr__(self):\r\n return '用户名{}'.format(self.username)\r\n\r\n def __init__(self, username, password):\r\n self.username = username\r\n self.password = password\r\n" }, { "alpha_fraction": 0.6083086133003235, "alphanum_fraction": 0.6112759709358215, "avg_line_length": 20.0625, "blob_id": "8695b1eaab5d0a71c2344eeceeb98d9b974828a3", "content_id": "91046b68c6955b06e894ae61b36d84925c3099e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 337, "license_type": "no_license", "max_line_length": 38, "num_lines": 16, "path": "/uplooking_Python/code/lesson06/less06/err/error.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass MyException(Exception):\n pass\n\nclass HttpError(MyException):\n def __init__(self, code, message):\n self.code = code\n self.message = message\n\nclass DBError(MyException):\n def __init__(self, code, message):\n self.code = code\n self.message = message\n" }, { "alpha_fraction": 0.555716335773468, "alphanum_fraction": 0.5607814788818359, "avg_line_length": 19.292306900024414, "blob_id": "9267ffbae7468d49427e2ac9acae24c2b254a4a5", "content_id": "26a6b23ca9a5ce9c0c61057e31544979a9dbb25e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1634, "license_type": "no_license", "max_line_length": 75, "num_lines": 65, "path": 
"/uplooking_Python/code/flask_myself/static/js/lib/utils.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 2018/3/22.\r\n */\r\nvar public_func = {\r\n\t\r\n\r\n\r\n\t// 子菜单点击\r\n\tsubMenuClick: function () {\r\n\r\n\t\t// 点击子菜单\r\n\t\t$('.js-sub-tabs').off('click.open').on('click.open', 'li', function (e) {\r\n\r\n\t\t\t$(this).parent().find('li').removeClass('active'); //所有子菜单去掉active类\r\n\t\t\t$(this).addClass('active'); //就给该子菜单添加active类\r\n\r\n\t\t});\r\n\r\n\t},\r\n\r\n\t// 全选全不选checkbox\r\n\tcheckboxFun: function(){\r\n\t\tvar _this = this,\r\n\t\t\tfather = $('#bodyList');\r\n\t\t// 点击全选\r\n\t\tfather.off('click.all').on('click.all','.total-check',function(){\r\n\t\t\tfather.find(\".sub-check\").prop(\"checked\",$(this).prop(\"checked\"));\r\n\t\t\t//同步所有的全选按钮\r\n\t\t\tfather.find('.total-check').prop(\"checked\",$(this).prop(\"checked\"));\r\n\r\n\t\t});\r\n\r\n\t\t// 处理单个\r\n\t\tfather.off('click.sin').on('click.sin','.sub-check',function(){\r\n\t\t\tif(!$(this).prop('checked')){\r\n\t\t\t\tfather.find('.total-check').prop(\"checked\",$(this).prop(\"checked\"));\r\n\t\t\t}\r\n\r\n\t\t\t//若在非全选状态下,单个商品依次选中要更新全选按钮状态\r\n\t\t\tif($('.sub-check').length == $('input.sub-check:checked').length){\r\n\t\t\t\t$('.total-check').prop(\"checked\",true);\r\n\t\t\t}\r\n\t\t});\r\n\t},\r\n\r\n\t\r\n\r\n\t\r\n\t// 退出按钮\r\n\texitBtn: function () {\r\n\t\t// 点击退出按钮,退出到登录界面\r\n\t\t$('.exit-btn').off('click').on('click', function (e) {\r\n\t\t\twindow.location.pathname = \"/monitor-web/login.html\";\r\n\t\t})\r\n\r\n\t},\r\n\r\n\tinit: function(){\r\n\t\tvar _this = this;\r\n\r\n\t\t // 调退出按钮,因为每个页面都会有退出功能,所以就在公共函数中调用就好\r\n\t\t_this.exitBtn();\r\n\t\t_this.editbtn();\r\n\t}\r\n};" }, { "alpha_fraction": 0.49570199847221375, "alphanum_fraction": 0.49713465571403503, "avg_line_length": 25.730770111083984, "blob_id": "e80f6a2776542a792fd178a5c6722fae14fb7458", "content_id": "5de434f420ecce35ec3cf1f40ef02d5e55d55b43", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "no_license", "max_line_length": 63, "num_lines": 26, "path": "/uplooking_Python/code/20180107/mingganci.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n\nuser_input = raw_input(\"你想说什么?\")\nwith open(\"./filtered_words.txt\",\"r+\") as f :\n mgc = f.readlines()\n #循环列表的两种方法\n\n #方法一:\n #for i in range(len(mgc)):\n # new_mgc = mgc[i].strip()\n # #print new_mgc\n # if not new_mgc.strip():\n # new_mgc = mgc[i].strip()\n # if new_mgc in user_input:\n # user_input = user_input.replace(new_mgc,\"**\")\n\n # 方法二:\n for words in mgc:\n new_mgc = words.strip()\n if not words.strip():\n #if words in ['\\n','\\r\\n']:\n continue\n if words:\n if new_mgc in user_input:\n user_input = user_input.replace(new_mgc,\"**\")\nprint user_input\n\n\n\n" }, { "alpha_fraction": 0.46075084805488586, "alphanum_fraction": 0.4778156876564026, "avg_line_length": 13.600000381469727, "blob_id": "ea441769f510c0b321cef3be317438dbb2bc0046", "content_id": "47b89bdd3fb18c6f40835726e03c924fda33a96b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 309, "license_type": "no_license", "max_line_length": 28, "num_lines": 20, "path": "/uplooking_Python/code/lesson03/fz.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\nvar = \"no\"\n_var1_ = 'single'\n\ndef _print_one_(self):\n print \"单下划线\"\n\nclass A(object):\n\n __var2 = 'double'\n\n def __print_two__(self):\n print \"双下划线\"\n\nif __name__ == \"__main__\":\n a = A()\n #a._var1_\n print a.__var2\n #a._print_one_()\n #a.__print_two__()\n\n" }, { "alpha_fraction": 0.6291261911392212, "alphanum_fraction": 0.662135899066925, "avg_line_length": 19.479999542236328, "blob_id": "04c53997edbbad88d091eaf0dcc00f7ab020c7a0", "content_id": "9287d29ca1fdb38a28da1956fa5fba9d96dc276c", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 78, "num_lines": 25, "path": "/uplooking_Python/code/lesson07-flask/my_ops/app.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import Flask,render_template\nfrom domains import blue_print as domains_bp\n\nimport MySQLdb\nimport json\n\ndb = MySQLdb.connect(\"localhost\", \"root\", \"123321\", \"python01\",charset='utf8')\n\napp = Flask(__name__)\n\n\napp.register_blueprint(domains_bp,url_prefix='/domains')\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\[email protected]('/tree/all')\ndef get_tree_all():\n pass\n \n# @app.route('/domians')\n# def \nif __name__==\"__main__\":\n app.run(host='0.0.0.0',port=8888,debug=True) " }, { "alpha_fraction": 0.7225274443626404, "alphanum_fraction": 0.7609890103340149, "avg_line_length": 39.33333206176758, "blob_id": "de034bfc481292c1c664f653f359ac0bb4d1ba8d", "content_id": "d5033375080413004f6f42f3606ca5af631c8fa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 364, "license_type": "no_license", "max_line_length": 98, "num_lines": 9, "path": "/uplooking_Python/code/jenkins.bak/shell/test-php.sh", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/bin/bash\n. 
/etc/profile &>/dev/null\n\nmkdir -p /root/jenkins/workspace/icss.baojia.com/src\ncd /root/jenkins/workspace/icss.baojia.com/src\ngit init\ngit rev-parse --is-inside-work-tree\ngit config remote.origin.url http://root:[email protected]/platform-server/icss.baojia.com.git\ngit pull http://root:[email protected]/platform-server/icss.baojia.com.git\n\n" }, { "alpha_fraction": 0.3872727155685425, "alphanum_fraction": 0.38787877559661865, "avg_line_length": 25.206348419189453, "blob_id": "40cb4861102d6bc030744bf445c992303869886f", "content_id": "da3be62179aece6242dbafbfeeeed1c287ab1611", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1702, "license_type": "no_license", "max_line_length": 209, "num_lines": 63, "path": "/uplooking_Python/code/前端/lesson10-前端day04/test/mDeploy/js/deploy.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "$(function(){\n\n var deploy = {\n// 任务列表\n taskDataGet: function () {\n var _this = this;\n\n // $.get('url', {}, function (response) {\n $.getJSON('../../data/task.json', function(json){\n if(json.code == 1){\n var taskData = json.data.tasks;\n\n _this.taskTableTpl(taskData)\n }\n\n\n });\n },\n\n taskTableTpl:function(data) {\n\n var _this = this,\n str = '';\n\n str += '<thead><tr><th><input type=\"checkbox\"></th><th><button class=\"btn btn-sm btn-warning\">批量发起</button></th><th>任务列表</th><th>服务组</th><th>工作组</th><th>环境</th><th>任务状态</th></tr></thead><tbody>'; \n \n $.each(data,function(index,value){\n str += '<tr><td><input type=\"checkbox\"></td>\\\n <td><button class=\"btn btm-sm btn-warning js-lanuch-task mt-curosr\" data-name=\"' + value.name +'\">发起 </button></td>\\\n <td>'+ value.name +' </td>\\\n <td>'+ value.serverGroup +' </td>\\\n <td>'+ value.jobGroup +' </td>\\\n <td>'+ value.envrioment +' </td>\\\n <td>'+ value.restart +'</td></tr>';\n \n \n });\n\n str += '</tbody>';\n $('#bodyList').html(str);\n _this.lanuchTask();\n \n },\n \n\n 
init:function() {\n var _this = this;\n\n _this.taskDataGet();\n // _this.addTaskItem();\n \n\n\n }\n\n };\n\n\n deploy.init()\n \n\n\n})" }, { "alpha_fraction": 0.6174496412277222, "alphanum_fraction": 0.6208053827285767, "avg_line_length": 18.866666793823242, "blob_id": "9be038468b31c9affc9e50d8233b805a924a1ccf", "content_id": "8bc2a821d9c288c17597aa75724a98b228e512dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "no_license", "max_line_length": 49, "num_lines": 15, "path": "/uplooking_Python/code/20180107/get_file_size_v2.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\nimport os\n\nfrom os.path import join,getsize\n\nfor root,dirs,files in os.walk('/home/f/mysite'):\n # print root,\n # print dirs,\n # print files\n\n for file in files:\n files= os.path.join(root,file)\n #print files\n size = getsize(files)\n print files,size\n" }, { "alpha_fraction": 0.4601227045059204, "alphanum_fraction": 0.5092024803161621, "avg_line_length": 12.5, "blob_id": "64485be0dfdb1421fd4443edfa00c5735f61a0d2", "content_id": "7aef33f133a2756c889222d4591fc4d6c0c91488", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 191, "license_type": "no_license", "max_line_length": 39, "num_lines": 12, "path": "/uplooking_Python/code/20180107/student_score.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n'''\n判断学生成绩\n\n'''\n\nwhile True:\n score = int (raw_input(\"请输入学生成绩:\"))\n if 90<=score<=100:\n print \"A\"\n elif score>=80:\n print \"B\"\n\n" }, { "alpha_fraction": 0.6313415765762329, "alphanum_fraction": 0.6730552315711975, "avg_line_length": 20.794872283935547, "blob_id": "d8890f06257b676b20c749d12327fe0377462ae9", "content_id": "fe33aa7a2927155edf3b72ee578793264fd446f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 945, 
"license_type": "no_license", "max_line_length": 110, "num_lines": 39, "path": "/fishc/translation.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\r\n'''\r\nCreated on 2017年7月28日\r\n\r\n@author: Ops\r\n'''\r\n\r\n\r\nimport urllib.request\r\nimport urllib.parse\r\nimport json\r\n\r\ncontent = input(\"请输入要翻译的内容:\")\r\nurl =\"http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule&sessionFrom=http://www.youdao.com\"\r\n\r\n#创建字典存放表单数据\r\ndata = {}\r\n\r\ndata['i']=content\r\n#data['i']='啊'\r\ndata['from']='AUTO'\r\ndata['to']='AUTO'\r\ndata['smartresult']='dict'\r\ndata['client']='fanyideskweb'\r\ndata['salt']='1501061539231'\r\ndata['sign']='d3a9c2817d53b954a8a8788d30c770a0'\r\ndata['doctype']='json'\r\ndata['version']='2.1'\r\ndata['keyfrom']='fanyi.web'\r\ndata['action']='FY_BY_CL1CKBUTTON'\r\ndata['typoResult']='true'\r\n\r\ndata = urllib.parse.urlencode(data).encode('utf-8')\r\nresponse = urllib.request.urlopen(url,data)\r\n\r\nhtml = response.read().decode('utf-8')\r\ntarget = json.loads(html)\r\n#print(html)\r\nprint(\"翻译结果:%s\" %(target['translateResult'][0][0]['tgt']))" }, { "alpha_fraction": 0.5605749487876892, "alphanum_fraction": 0.5831621885299683, "avg_line_length": 43.272727966308594, "blob_id": "0b6403948119cb90a391cf0d2bc9be3d99829683", "content_id": "a7b19c8c60ee50654b68acec723b337525adb8b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 573, "license_type": "no_license", "max_line_length": 97, "num_lines": 11, "path": "/uplooking_Python/code/lesson03/get_server_info.sh", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#script get_server_info.sh\n#获取硬件信息:IP、主机名、操作系统、cpu核数、内存、硬盘\nip=`ifconfig -a | grep inet | grep -v 127.0.0.1 | grep -v inet6 | awk '{print $2}' | tr -d \"地址:\"`\nos=`lsb_release -a | grep Description | awk -F: '{print $2}' | sed 's/^[\\t]*//g'`\ncpu=`grep 'cpu cores' /proc/cpuinfo | uniq | awk -F: 
'{print $2}' | sed 's/^[ \\t ]*//g'`\nMem=`cat /proc/meminfo | grep 'MemTotal' | awk -F: '{print $2}' | sed 's/^[ \\t ]*//g'`\necho IP地址:$ip\necho 操作系统:$os\necho cpu核数:$cpu\necho 内存大小:$Mem\n" }, { "alpha_fraction": 0.5430463552474976, "alphanum_fraction": 0.6423841118812561, "avg_line_length": 17.875, "blob_id": "ba51bcbaf98db2f79467b33a6a1f54965098bfd3", "content_id": "13eaeb29cdf097c55272a8cb6bc80781e2a13ba9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 23, "num_lines": 8, "path": "/uplooking_Python/code/lesson06/less06/db/conf/config.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "DB_HOST = \"127.0.0.1\"\nDB_PORT = 33068\nDB_NAME = \"python\"\nDB_USER = \"root\"\nDB_PASSWD = \"\"\nDB_CONNECT_TIMEOUT = 10\nDB_CHARSET = \"utf8\"\nTIMEOUT_TIMES = 3\n" }, { "alpha_fraction": 0.6798825263977051, "alphanum_fraction": 0.6813509464263916, "avg_line_length": 25.19230842590332, "blob_id": "fec80dbe34e01305381f0998d82e71279279f320", "content_id": "cdcebde1bd66fa900557f290869a0dbb0e7d4ce4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 681, "license_type": "no_license", "max_line_length": 64, "num_lines": 26, "path": "/uplooking_Python/code/lesson05/montorDomains_bak/logger/logger.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport logging\nimport json\nfrom conf.config import app_log_path\n\n#LOG_FILE = \"../log/app.log\"\ndef get_formatter():\n fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(json.dumps(fmt))\n return formatter\n\ndef get_logger(logger_name):\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n formatter = get_formatter()\n ch = logging.FileHandler(app_log_path)\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n 
logger.addHandler(ch)\n return logger\n\napp_logger = get_logger(\"montor_domain\")\ndb_logger = get_logger(\"db\")\n\napp_logger.info(\"lsa\")\n" }, { "alpha_fraction": 0.4680493175983429, "alphanum_fraction": 0.47197309136390686, "avg_line_length": 30.14414405822754, "blob_id": "b3a655fce8f229e081c428810bc7fa031f43edef", "content_id": "37c67bd480b34b48960f7e2220e2dbd45bc1a91e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7584, "license_type": "no_license", "max_line_length": 129, "num_lines": 222, "path": "/uplooking_Python/code/前端/lesson10-前端day04/web-day-04/今天做的项目/monitor/js/util.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by Administrator on 2018/3/25.\r\n */\r\nvar public_func = {\r\n defaultData:{\r\n subMenu:1,\r\n selTreeNodeId:-1\r\n },\r\n\r\n // 左侧的树\r\n beforeDrag: function (treeId, treeNodes) {\r\n return false;\r\n },\r\n\r\n // 查找节点,\r\n onTreeClick: function (event, treeId, treeNode) {\r\n var _this = this;\r\n // 给公共变量赋予节点id\r\n public_func.defaultData.selTreeNodeId = treeNode.id;\r\n\r\n return treeNode;\r\n\r\n },\r\n beforeEditName: function (treeId, treeNode) {\r\n var _this = this;\r\n _this.className = (_this.className === \"dark\" ? \"\" : \"dark\");\r\n\r\n var zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n zTree.selectNode(treeNode);\r\n zTree.editName(treeNode);\r\n\r\n return false;\r\n },\r\n beforeRemove: function (treeId, treeNode) {\r\n var _this = this;\r\n _this.className = (_this.className === \"dark\" ? \"\" : \"dark\");\r\n\r\n var zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n zTree.selectNode(treeNode);\r\n return public_func.delCheck();\r\n },\r\n delCheck: function () {\r\n var _this = this;\r\n\r\n },\r\n beforeRename: function (treeId, treeNode, newName, isCancel) {\r\n var _this = this;\r\n _this.className = (_this.className === \"dark\" ? 
\"\" : \"dark\");\r\n\r\n if (newName.length == 0) {\r\n setTimeout(function () {\r\n var zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n zTree.cancelEditName();\r\n alert(\"节点名称不能为空.\");\r\n }, 0);\r\n return false;\r\n }\r\n return true;\r\n },\r\n\r\n // 删除节点\r\n onRemove: function (e, treeId, treeNode) {\r\n var _this = this;\r\n\r\n var testDelNodeApi = 'http://www.nodetest.com/node/removeitem', // 删除节点的接口,改成你自己的\r\n param = {};\r\n\r\n param.thisId = treeNode.id; // 删除节点的id\r\n\r\n //删除节点传的数据即param,结构是 {thisId: 13}\r\n $.post(testDelNodeApi, param, function(response){\r\n\r\n })\r\n\r\n },\r\n\r\n // 编辑节点\r\n onRename: function (e, treeId, treeNode, isCancel) {\r\n var _this = this;\r\n var testEditNodeApi = 'http://www.nodetest.com/node/edititem', // 编辑节点的接口,改成你自己的\r\n param = {};\r\n\r\n param.thisId = treeNode.id; // 该编辑节点的id\r\n param.newName = treeNode.name; //修改后的新名称\r\n\r\n //删除节点传的数据即param,结构是 {thisId: 13, newName:'修改后的名称'}\r\n $.post(testEditNodeApi, param, function(response){\r\n\r\n location.reload();\r\n })\r\n\r\n },\r\n showRemoveBtn: function (treeId, treeNode) {\r\n\r\n return !(treeNode.id == -1);\r\n },\r\n showRenameBtn: function (treeId, treeNode) {\r\n\r\n return true;\r\n },\r\n\r\n // 添加节点\r\n addHoverDom: function (treeId, treeNode) {\r\n var _this = this;\r\n var newCount = 1;\r\n var sObj = $(\"#\" + treeNode.tId + \"_span\");\r\n if (treeNode.editNameFlag || $(\"#addBtn_\" + treeNode.tId).length > 0) return;\r\n var addStr = \"<span class='button add' id='addBtn_\" + treeNode.tId\r\n + \"' title='add node' data-toggle='modal' data-target='#exampleModal' data-for='添加' onfocus='this.blur();'></span>\";\r\n\r\n sObj.after(addStr);\r\n var addBtn = $(\"#addBtn_\" + treeNode.tId);\r\n if (addBtn) {\r\n // addBtn.bind(\"click\", function(){\r\n // \tvar zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n // \tzTree.addNodes(treeNode, {id:(100 + newCount), pId:treeNode.id, name:\"new node\" + (newCount++)});\r\n // \treturn false;\r\n // 
});\r\n\r\n $('#exampleModal').on('show.bs.modal', function (event) {\r\n var button = $(event.relatedTarget);\r\n var recipient = button.data('for');\r\n var modal = $(this);\r\n\r\n if (recipient == '添加') {\r\n modal.find('.modal-title').text('添加节点');\r\n var strTpl = '';\r\n strTpl += '<div class=\"form-group\">\\\r\n\t\t\t\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">节点名称:</label>\\\r\n\t\t\t\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" id=\"add-node-box\">\\\r\n\t\t\t\t\t\t\t\t </div>';\r\n\r\n modal.find('form').html(strTpl);\r\n\r\n var submitBtn = modal.find('#confirmBtn');\r\n\r\n submitBtn.off('click').on('click', function () {\r\n var zTree = $.fn.zTree.getZTreeObj(\"treelist\"),\r\n addName = $('#add-node-box').val();\r\n zTree.addNodes(treeNode, {id: (100 + newCount), pId: treeNode.id, name: addName});\r\n\r\n var testAddNodeApi = 'http://www.nodetest.com/node/additem', // 添加节点的接口url,改成你自己的\r\n param = {};\r\n param.parentId = treeNode.id; // 父节点id\r\n param.thisName = addName; // 该新增节点名称\r\n\r\n // 访问传的参数结构即param变量,例如{parentId:13, thisName:'新增节点1'}\r\n $.post(testAddNodeApi, param, function (response) {\r\n if(response.code == 0){\r\n // response中需要你返回给我新的节点的id\r\n location.reload();\r\n }\r\n\r\n\r\n });\r\n modal.modal('hide');\r\n });\r\n }\r\n })\r\n }\r\n },\r\n\r\n removeHoverDom: function (treeId, treeNode) {\r\n $(\"#addBtn_\" + treeNode.tId).unbind().remove();\r\n },\r\n treeList: function () {\r\n var _this = this,\r\n r;\r\n var setting = {\r\n view: {\r\n addHoverDom: _this.addHoverDom,\r\n removeHoverDom: _this.removeHoverDom,\r\n selectedMulti: false\r\n },\r\n edit: {\r\n enable: true,\r\n editNameSelectAll: true,\r\n showRemoveBtn: _this.showRemoveBtn,\r\n showRenameBtn: _this.showRenameBtn\r\n },\r\n data: {\r\n simpleData: {\r\n enable: true\r\n }\r\n },\r\n callback: {\r\n beforeDrag: _this.beforeDrag,\r\n onClick: _this.onTreeClick,\r\n beforeEditName: _this.beforeEditName,\r\n beforeRemove: 
_this.beforeRemove,\r\n beforeRename: _this.beforeRename,\r\n onRemove: _this.onRemove,\r\n onRename: _this.onRename\r\n }\r\n };\r\n\r\n $.getJSON('../../data/protocolTree.json', function (response) {\r\n var zNodes = response.data.agreement;\r\n\r\n $.fn.zTree.init($(\"#treelist\"), setting, zNodes);\r\n }, \"json\");\r\n\r\n },\r\n\r\n menuClick: function(){\r\n var _this = this;\r\n\r\n // 主菜单和子菜单点击事件\r\n $('.sub-menu').off('click').on('click','li', function(e){\r\n var thisBtn = $(this);\r\n // 点击菜单后先清除所有li的active当前选中类\r\n thisBtn.parent().find('li').removeClass('active');\r\n // 然后给该选中菜单添加选中状态\r\n thisBtn.addClass('active');\r\n\r\n _this.defaultData.subMenu = $(this).data('id');\r\n\r\n\r\n })\r\n\r\n }\r\n};\r\n" }, { "alpha_fraction": 0.8415841460227966, "alphanum_fraction": 0.8415841460227966, "avg_line_length": 33, "blob_id": "5ee7c768ac36472bd7b4c38fb5b41318b6411222", "content_id": "61072281db3debd3f30463ea88cc5e81649b71b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 48, "num_lines": 3, "path": "/uplooking_Python/code/lesson09-flask/ops/servicetree/models/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from db_tree import MapTree\nfrom tree_host_relation import TagsHostRelation\nfrom hosts import Hosts" }, { "alpha_fraction": 0.574638843536377, "alphanum_fraction": 0.6099518537521362, "avg_line_length": 16.382352828979492, "blob_id": "3f32813ea5a63cf5645f78b542ac95ff132134a9", "content_id": "49dcd55af24e9fe94c1177638a31d201e6787996", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 755, "license_type": "no_license", "max_line_length": 49, "num_lines": 34, "path": "/uplooking_Python/code/lesson04/guess_num_v3.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": 
"'''\r\nauthor:wolfrg\r\ndate:201712.25\r\n功能:给出一个随机数整数,让用户去猜,猜对退出,并统计用户猜的次数\r\n'''\r\n\r\nimport random\r\nnum = random.randint(1,100)\r\nbig_count = 0\r\nlow_count = 0\r\nright_count = 0 \r\n\r\nwhile True:\t\r\n\t#num = random.randint(1,100)\r\n\tguess = int(input('Please input a int number:'))\r\n\r\n\tif guess > num:\r\n\t\t#print('猜大了 ')\r\n\t\tbig_count += 1\r\n\t\tprint('猜大了%d次' % big_count)\r\n\telif guess < num:\r\n\t\t#print('猜小了')\r\n\t\tlow_count += 1\r\n\t\tprint('猜小了%d次' % low_count)\r\n\t\r\n\r\n\telse:\r\n\t\tprint('恭喜你,猜对了')\r\n\t\tright_count += 1\r\n\t\tprint('你一共猜对了%d次' % right_count)\r\n\t\tbreak\r\n\t\r\ntotal_count = right_count + big_count + low_count\r\nprint('你一共猜了%d' % total_count)" }, { "alpha_fraction": 0.5785033106803894, "alphanum_fraction": 0.5825385451316833, "avg_line_length": 25.42718505859375, "blob_id": "c998cf1cc7498f58fba271d20f389be533aa9f31", "content_id": "2950a94d94667c8149fcd23b9d2c19ab2708df31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2922, "license_type": "no_license", "max_line_length": 91, "num_lines": 103, "path": "/uplooking_Python/code/lesson08-flask/ops/hosts/controller/hostHandleController.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport json\nfrom hosts import HostHandleModle, TreeHandle\nfrom servicetree import TagsHostRelation\nfrom libs.error import ParamError, BindError\n\nclass HostHandle(object):\n\n def __init__(self):\n pass\n\n @classmethod\n def host_add(cls, *args):\n '''\n :param hostname:\n :param host_type:\n :param ip:\n :param location:\n :return:\n '''\n # hostinfo = cls.parser(assests)\n addResult = HostHandleModle.host_add(*args)\n return addResult\n\n @classmethod\n def host_bind(cls, hostnames, tagstring):\n '''\n 1. 不允许绑定到顶级节点pid = 0 的节点\n 2. 
绑定前要校验,资产是否已经存在了\n :param tagstring: 产品线 cop.uplooking_dep.sys_pdl.ops\n :param hostnames: 绑定的主机名\n :return:\n '''\n\n hostnameList = []\n if isinstance(hostnames, str):\n hostnameList.append(hostnames)\n if isinstance(hostnames, unicode):\n hostnameList.append(str(hostnames))\n if isinstance(hostnames, list):\n hostnameList = hostnames\n\n bindInfo = {}\n for hostname in hostnameList:\n bindResult = cls.exec_bind(hostname, tagstring)\n if not bindResult:\n bindInfo[hostname] = \"failed\"\n else:\n bindInfo[hostname] = \"sucess\"\n\n print \">>>>bindInfo:\", bindInfo\n return bindInfo\n\n\n @classmethod\n def exec_bind(cls, hostname, tagstring):\n tagId = query_tagstring_id(tagstring)\n hostId = query_host_id(hostname)\n if hostId == 0:\n return False\n bindId = TagsHostRelation.bind(tagId, hostId)\n\n if bindId > 0:\n return True\n return False\n\n\n @classmethod\n def parser(cls, assests):\n '''\n :param params: 主机信息参数\n :return: 主机列表[hostname, type, ip, location]\n '''\n\n host_name = assests[\"hostname\"]\n host_type= assests[\"type\"]\n host_ip = assests[\"ip\"]\n host_loc = assests[\"location\"]\n\n return host_name, host_type, host_ip, host_loc\n\ndef query_tagstring_id(tagstring):\n # 产品线 cop.uplooking_dep.sre_pdl.ops\n\n nodes = tagstring.split(\"_\")\n tagId = 0\n # 获取最后一级Node的Pid\n for i in range(len(nodes)):\n type_name = nodes[i].split(\".\")\n if len(type_name) < 2:\n raise (ParamError(\"传入的tag有误请检查后重试!\"))\n tagId = TreeHandle.queryTagIdByPidAndNameAndType(type_name[0], type_name[1], tagId)\n if tagId == 0:\n raise (BindError(\"不允许绑定到根节点,或您的tagstring不存在!\"))\n return tagId\n\n\ndef query_host_id(hostname):\n ''' 通过主机名获取对应id\n :param hostname: 主机名\n :return: hostid\n '''\n return HostHandleModle.queryIdByHostname(hostname)\n\n\n\n\n" }, { "alpha_fraction": 0.44142258167266846, "alphanum_fraction": 0.45188283920288086, "avg_line_length": 22.899999618530273, "blob_id": "ce6898c463fd263d9195ed9f719ba73a80f10d32", "content_id": 
"fc60b38beda8168a640896ff0a25210a20cde0d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 956, "license_type": "no_license", "max_line_length": 92, "num_lines": 40, "path": "/uplooking_Python/code/前端/lesson10-前端day04/test/mTree/js/tree.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "$(function(){\n var tree = {\n\n getTreeNodeId:function(){\n var _this = this;\n\n var nodeId=public_func.defaultData.selTreeNodeId;\n\n },\n\n controlSilde: function(){\n var _this = this;\n\n $('.js-slide-btn').off('click').on('click', function(e){\n\n if($(this).hasClass('icon-chevron-left')){\n $('.float-tree').animate({left: '-230px'},900);\n $(this).removeClass('icon-chevron-left').addClass('icon-chevron-right');\n }else{\n $('.float-tree').animate({left: '0px'},900);\n $(this).removeClass('icon-chevron-right').addClass('icon-chevron-left');\n }\n })\n },\n\n init: function(){\n var _this =this;\n\n _this.getTreeNodeId();\n _this.controlSilde();\n\n public_func.treeList();\n\n }\n };\n\n tree.init();\n\n\n})\n" }, { "alpha_fraction": 0.5263446569442749, "alphanum_fraction": 0.5274423956871033, "avg_line_length": 23.280000686645508, "blob_id": "b39cecbe361de3fd78413ebb5ccbc5d0ba9b94f6", "content_id": "7804d59bda6f32ea0ab900346fd5f9f476f5ad0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1852, "license_type": "no_license", "max_line_length": 101, "num_lines": 75, "path": "/uplooking_Python/code/lesson06/less06/db/db.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom conf.config import DB_HOST, DB_PORT, DB_USER, DB_NAME, DB_PASSWD, DB_CONNECT_TIMEOUT, DB_CHARSET\nfrom conf.config import TIMEOUT_TIMES\nfrom errors.error import HttpError, DBError\n\nimport MySQLdb\n\nclass DB(object):\n\n def __init__(self):\n self.conn = None\n\n def _get_conn(self):\n #返回一个连接\n 
try:\n conn = MySQLdb.connect(\n host=DB_HOST,\n port=DB_PORT,\n db=DB_NAME,\n user= DB_USER,\n passwd=DB_PASSWD,\n connect_timeout=DB_CONNECT_TIMEOUT,\n charset=DB_CHARSET\n )\n conn.autocommit(True)\n except Exception, e:\n print \"数据连接异常:%s\"%e\n\n self.conn = conn\n return self.conn\n\n\n def retry(self):\n cursor = None\n for i in range(TIMEOUT_TIMES):\n self._get_conn()\n if self.conn:\n return self.conn.cursor()\n time.sleep(1)\n print \"超时拉\"\n return cursor\n\n def execute(self, sql, *args):\n cursor = self.retry()\n cursor.execute(sql, args)\n return cursor\n\n def query_all(self, *args):\n cur = self.execute(*args)\n result = cur.fetchall()\n cur and cur.close()\n return result\n\n\n\nclass MasterDB(DB):\n def __init__(self):\n pass\n\n def update(self, *args):\n cursor = self.execute(*args)\n cursor and cursor.close()\n return cursor.rowcount\n\n def insert(self, *args):\n cursor = self.execute(*args)\n cursor and cursor.close()\n return cursor.lastrowid\n\n def delete(self, *args):\n cursor = self.execute(*args)\n cursor and cursor.close()\n return cursor.rowcount\n\n" }, { "alpha_fraction": 0.5239506363868713, "alphanum_fraction": 0.5239506363868713, "avg_line_length": 30.174602508544922, "blob_id": "6de8dd3dfbd309a37a0fd900ef7afc23a6e1ca1f", "content_id": "f5552a6b3cb469e116f69287c7793ae33cbe7afa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2043, "license_type": "no_license", "max_line_length": 68, "num_lines": 63, "path": "/uplooking_Python/code/前端/lesson08-web/kuozhanPackage/为知笔记/scripts/popup/PopupView.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'use strict';\r\nvar PopupView = {\r\n changeSubmitDisplayByType : function() {\r\n },\r\n showCategoryTreeFromLoading : function() {\r\n PopupView.hideCategoryLoading();\r\n PopupView.switchCategoryTreeVisible(true);\r\n },\r\n showCategoryLoading : function(msg) {\r\n var categoryLoading = 
$('#category_loading');\r\n categoryLoading.show();\r\n },\r\n hideCategoryLoading : function() {\r\n $('#category_loading').hide();\r\n },\r\n showClipFailure : function(msg) {\r\n var errPageTip = $('#errorpage_tip');\r\n $('#waiting_div').hide();\r\n errPageTip.show();\r\n// $('#errorpage_tip label').html(msg);\r\n errPageTip.find('label').html(msg);\r\n },\r\n showLoginError : function(msg) {\r\n $('#wiz_login').show();\r\n $('#wiz_clip_detail').hide();\r\n $('#div_error_validator').html(msg);\r\n $('#waiting').hide();\r\n },\r\n showWaiting : function(msg) {\r\n $('#waiting').show();\r\n $('#waiting-label').html(msg);\r\n $('#wiz_login').hide();\r\n $('#wiz_clip_detail').hide();\r\n },\r\n showLogin : function() {\r\n $(\"#waiting\").hide();\r\n $(\"#wiz_login\").show();\r\n $('#user_id').focus();\r\n $(\"#wiz_clip_detail\").hide();\r\n },\r\n hideCategoryTreeAfterSelect : function(display) {\r\n $(\"#category_info\").html(display);\r\n $(\"#ztree_container\").removeClass('active');\r\n },\r\n hideCreateDiv : function() {\r\n $('#waiting_div').hide();\r\n },\r\n hideLogoffDiv: function () {\r\n $('#loginoff_div').hide();\r\n },\r\n switchCategoryTreeVisible: function (isActive) {\r\n var ztreeContainer = $('#ztree_container');\r\n\r\n if (ztreeContainer.hasClass('first')) {\r\n //初次加载时,不展开\r\n ztreeContainer.removeClass('first');\r\n } else if (!isActive && ztreeContainer.hasClass('active')) {\r\n ztreeContainer.removeClass('active');\r\n } else {\r\n ztreeContainer.addClass('active');\r\n }\r\n }\r\n};" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6617646813392639, "avg_line_length": 16, "blob_id": "cd85eb9cfea50f63b934645354d35d5ac6122470", "content_id": "02583907007f4a45bef2e46bab030e43c9dd639d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 31, "num_lines": 4, "path": 
"/uplooking_Python/code/lesson05/myapps/monitor/conf/config.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#coding:utf8\n\napp_log_path = \"./logs/app.log\"\n" }, { "alpha_fraction": 0.53226637840271, "alphanum_fraction": 0.5379452705383301, "avg_line_length": 22.212499618530273, "blob_id": "f5dc4762aa934eb993a99549f7f615dc70d87e50", "content_id": "76cff21d807c5f8d78e76e56f0257492e99fa0df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2407, "license_type": "no_license", "max_line_length": 246, "num_lines": 80, "path": "/uplooking_Python/code/前端/lesson10-前端day04/monitor-web1/mTree/js/serviceTree.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 2018/3/22.\r\n */\r\n$(function(){\r\n\r\n\t// 定义一个大对象,包含该页面要用到的所有的方法\r\n\tvar tree = {\r\n\t\tsaveData: { // 保存公共变量的一个对象\r\n\t\t\tpageNum:1, // 保存页码数\r\n\t\t\tsubmenuId: 0 // 保存菜单id\r\n\t\t},\r\n\r\n\t\t// 调接口,获取表格数据\r\n\t\tgetTableData: function(){\r\n\t\t\tvar _this = this,\r\n\t\t\t\tparam = {};\r\n\r\n\t\t\t// 给后台传参数\r\n\t\t\tparam.nodeId = public_func.defaultSet.selTreeNodeId // 传节点id\r\n\t\t\tparam.page = _this.saveData.pageNum; // 传页码数\r\n\t\t\tparam.submenu = _this.saveData.submenuId; // 传子菜单id\r\n\r\n\t\t\t// $.get('接口', param, function(response){ // domain列表接口\r\n\t\t\t$.getJSON('../../data/zhuji.json', function(response){\r\n\t\t\t\tif(response.code == 1){\r\n\t\t\t\t\tvar data = response.data.detail; // 从接口中得到列表数据\r\n\t\t\t\t\t_this.tableTpl(data); // 传给拼接列表模板的函数,拼接列表\r\n\r\n\t\t\t\t\t_this.pageSel(); // 调页码选择的函数\r\n\r\n\t\t\t\t}\r\n\t\t\t})\r\n\t\t},\r\n\r\n\t\t//拼接列表\r\n\t\ttableTpl: function (data) {\r\n\t\t\tvar _this = this,\r\n\t\t\t\tstr = '';\r\n\t\t\t// 拼接列表头部\r\n\t\t\tstr += '<thead><th><td>主机名</td><td>IP</td><td>宿主机</td><td>机房</td><td>状态</td><td>套餐</td><td>tags</td></th></thead><tbody>';\r\n\r\n\t\t\t// 拼接列表主体\r\n\t\t\t$.each(data, 
function(index, value){\r\n\t\t\t\tstr += '<tr><td>' + (index + 1) + '</td><td>' + value.zhuji + '</td><td>' + value.ip + '</td><td>' + value.suzhuji + '</td><td>' + value.jifang + '</td><td>' + value.status + '</td><td>' + value.menu + '</td><td>' + value.tags + '</td></tr>';\r\n\r\n\t\t\t});\r\n\r\n\t\t\tstr += '</tbody>';\r\n\r\n\t\t\t// 将拼好的列表放入父元素中,生成列表\r\n\t\t\t$('#bodyList').html(str);\r\n\t\t},\r\n\r\n\t\t// 分页\r\n\t\tpageSel: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t$('.pagination').off('click').on('click', 'a', function (event) {\r\n\r\n\t\t\t\t_this.saveData.pageNum = $(this).text(); // 将点击后选择的页码数赋值给公共变量\r\n\r\n\t\t\t\t_this.getTableData(); // 调列表数据获取函数,将新一页的数据展示出来\r\n\t\t\t})\r\n\r\n\t\t},\r\n\r\n\t\t// 入口函数\r\n\t\tinit: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\tpublic_func.treeList(); // 调公共方法中的树\r\n\t\t\t_this.getTableData(); // 调图表显示方法\r\n\r\n\t\t\tpublic_func.subMenuClick(); // 调公共中子菜单点击函数\r\n\t\t\tpublic_func.leftTreeAni(); // 调左侧树动画\r\n\t\t}\r\n\t};\r\n\r\n\ttree.init(); // 调入口函数\r\n})\r\n" }, { "alpha_fraction": 0.42326074838638306, "alphanum_fraction": 0.42804035544395447, "avg_line_length": 27.571428298950195, "blob_id": "2e41f7045b71759f576178b3fb02b059929fc8d7", "content_id": "c9eaa2aec3d59c4ad86de4c3ac6d1a85635be95e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1977, "license_type": "no_license", "max_line_length": 64, "num_lines": 63, "path": "/python_test/myPython/my_rsync.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 2017年8月31日\r\n\r\n@author: Ops\r\n'''\r\nimport os\r\nimport json\r\n\r\n\r\np='F:\\\\Python\\\\zm-mobile-h5-spa'\r\npdir=os.listdir(p) \r\n\r\ndef my_rsync(p,filename):\r\n \r\n #需要复制的目录\r\n cpdir='src'\r\n srcname = os.path.join(p,filename)\r\n print (srcname)\r\n \r\n if os.path.isdir(srcname):\r\n os.chdir(srcname)\r\n #print('进入:' + os.getcwd())\r\n if 
os.path.isfile(os.path.join(srcname,'package.json')):\r\n #print(srcname + \"目录:存在package.json文件\")\r\n with open('package.json','r') as f:\r\n lines = f.read(-1) \r\n try:\r\n text=json.loads(lines)\r\n #print(text)\r\n except ValueError:\r\n print(' json解析失败')\r\n \r\n else:\r\n print('json解析成功')\r\n \r\n if text.get(\"scripts\",()).get(\"build\"):\r\n #print(text.get(\"scripts\",('aa')))\r\n #print(text.get(\"scripts\"))\r\n #print('package.json文件中包含build字段')\r\n cpdir='dist'\r\n #os.system('npm install')\r\n #print ('执行命令npm install')\r\n \r\n if(os.path.isdir(os.path.join(srcname,cpdir))):\r\n dstPath='F:\\\\Python\\\\spa\\\\'\r\n dstPath=os.path.join(dstPath,filename)\r\n \r\n if(not os.path.isdir(dstPath)):\r\n os.makedirs(dstPath)\r\n \r\n srcPath=os.path.join(srcname,cpdir)\r\n print(srcPath)\r\n #windows命令格式 \r\n comm='xcopy /y /E ' + srcPath + ' ' + dstPath \r\n #Linux命令格式\r\n #comm='cp -a ' + srcPath + ' ' + dstPath\r\n #print (comm)\r\n os.system(comm)\r\n \r\n\r\n\r\nfor filename in pdir:\r\n my_rsync(p, filename) \r\n \r\n \r\n " }, { "alpha_fraction": 0.6029309034347534, "alphanum_fraction": 0.6154919862747192, "avg_line_length": 26.538461685180664, "blob_id": "0e4584c9cfef836781ddd55f1d63c8aede2fe5db", "content_id": "f903047e9df53ab94f6a6db5ce7a644302be3cd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1559, "license_type": "no_license", "max_line_length": 91, "num_lines": 52, "path": "/uplooking_Python/code/lesson05/monitor_test.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n#监控域名到期时间\n\nimport commands\nimport re\nimport datetime\n\ndef get_register_time():\n\n reg = r'Registration\\sTime:\\s(\\d{4}-\\d{2}-\\d{2})'\n status,register = commands.getstatusoutput('whois gaiay.net.cn')\n if status == 0:\n register = re.search(reg,register).group(1)\n return register\n\n\ndef get_expire_time():\n\n reg = 
r'Expiration\\sTime:\\s(\\d{4}-\\d{2}-\\d{2}\\s\\d{2}:\\d{2}:\\d{2})'\n status,output = commands.getstatusoutput('whois gaiay.net.cn')\n if status == 0:\n expire = re.search(reg,output).group(1)\n return expire\n\n\ndef get_update_time():\n\n reg = r'Updated\\sDate:\\s\\d{4}-\\d{2}-\\d{2}'\n status,update = commands.getstatusoutput('whois baidu.com')\n if status == 0 :\n update = re.search(reg,update)\n return update.group()\n else:\n print \"No match domain\"\n\ndef get_days():\n\n domain_name='gaiay.net.cn'\n expire_time = get_expire_time() #调用域名到期时间的函数\n register = get_register_time() #调用获取注册时间的函数\n now = datetime.datetime.now() #获取当前的时间\n today = now.strftime('%Y-%m-%d %H:%M:%S')\n expire_day = datetime.datetime.strptime(expire_time,'%Y-%m-%d %H:%M:%S')\n today = datetime.datetime.strptime(today,'%Y-%m-%d %H:%M:%S')\n #print expire_day\n #print today\n delta = (expire_day-today).days\n print \"域名:%s,注册时间:%s,到期时间:%s,截止目前域名有效天数还剩%s天\"% (domain_name,register,expire_time,delta)\n\n\nif __name__ == '__main__':\n get_days()\n\n" }, { "alpha_fraction": 0.6092066764831543, "alphanum_fraction": 0.6190009713172913, "avg_line_length": 25.153846740722656, "blob_id": "3c5f352469624b3ef258b988ac524c0a289530a4", "content_id": "f19915f62c9bfed265858b14206891919b7815fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1063, "license_type": "no_license", "max_line_length": 75, "num_lines": 39, "path": "/uplooking_Python/code/lesson09-flask/ops/libs/error/errors.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys, traceback\nfrom flask import make_response\n\ndef error_handler(error):\n if isinstance(error, Error):\n return make_response(error.message, error.code)\n else:\n exc_type, exc_value, exc_msg = sys.exc_info()\n exc_info = traceback.format_exception(exc_type, exc_value, exc_msg)\n return make_response(exc_info, 500)\n\n\nclass Error(Exception):\n code = 500\n 
message = \"\"\n def __init__(self, message):\n self.code = 500\n self.message = message\n\nclass DBError(Error):\n def __init__(self, message = \"数据库错误\"):\n self.message = message\n\nclass HTTPError(Error):\n def __init__(self, message = \"http请求错误\"):\n self.message = message\n\nclass ParamError(Error):\n def __init__(self, message = \"参数有误\"):\n self.message = message\n\nclass BindError(Error):\n def __init__(self, message = \"绑定有误\"):\n self.message = message\n\nclass ServerError(Error):\n def __init__(self, message = \"参数有误\"):\n self.message = message\n\n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 44.66666793823242, "blob_id": "91a975540f92875062890f7e8ba053758a2e4f29", "content_id": "f443da625bf5f3ab95b0f39e1782307f2fc4c541", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 95, "num_lines": 3, "path": "/uplooking_Python/code/lesson09-flask/ops/deploy/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nblue_print = Blueprint(\"deploy\", __name__, template_folder=\"templates\", static_folder=\"static\")\nimport views" }, { "alpha_fraction": 0.4226289391517639, "alphanum_fraction": 0.42595672607421875, "avg_line_length": 21.22222137451172, "blob_id": "1c5cb1855309662740769da311af3bf0b3471132", "content_id": "7b61ef9c6fdecea0e381da747c4e00ca36ef8cfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 29, "num_lines": 27, "path": "/uplooking_Python/code/lesson05/myapps/monitor/conf/exprie_domains.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nexprie_domains = [\n 'google.com.sg',\n 'google.com.hk',\n 'google.fr',\n 'bigvyy.cn',\n 'bivyy.xyz',\n 
'mi-idc.commiui.com',\n 'gaiay.net.cn',\n 'miliao.com',\n 'tjqonline.cn',\n 'xiaomi.tw',\n 'hada.me',\n 'wlimg.cn',\n 'aleenote.com',\n 'alinotes.cn',\n 'x9m.cn',\n 'midoujiang.com',\n 'duokan.com',\n 'mi-ae.cn',\n 'mi-ae.net',\n 'zhimi.com',\n 'mizhuanqian.com',\n 'miot-spec.org',\n 'gancuidai.com',\n ]\n\n" }, { "alpha_fraction": 0.4891826808452606, "alphanum_fraction": 0.5456730723381042, "avg_line_length": 19.073171615600586, "blob_id": "d9fc0eedf95cf555cc91f168ec2fb092dde921f1", "content_id": "4623db981d0918fa872be017c8db7ddb509ce9bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1104, "license_type": "no_license", "max_line_length": 55, "num_lines": 41, "path": "/uplooking_Python/code/lesson04/binary_search.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#encoding:utf8\n#author:wolfrg\n#date:20180103\n#func:二分查找\n\n'''\n二分搜索也称折半搜索,是一种在有序数组中查找某一特定元素的搜索算法\n\n'''\n\n#定义一个列表\nL = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]\nprint L\nprint \"序列的长度为 %d \" % len(L)\nprint \"序列的第一个元素为 %d ,索引为0\" % L[0]\nprint \"序列的最后一个元素为 %d ,索引为 %d\" %(L[len(L) -1],len(L) -1)\n#print \"序列中间的元素为 %d \" % L[0 + (len(L) - 1) - 0]\n\n\n#start = 0 #索引的开始\n#end = len(L) -1 #索引的结束\n#mid = start + (end - start)/2\n#print L[mid]\n#while True:\n\nstart = 0\nend = len(L) -1\nwhile start <= end:\n hkey = int(raw_input(\"输入你要查找的数:\"))\n print \"要查找的数:%d \" % hkey\n mid = start + (end - start) / 2\n print \"中间的数:%d\" % mid\n #hkey = int(raw_input(\"输入你要查找的数:\"))\n if L[mid] == hkey:\n print \"你猜的数的索引为%d\" % mid\n elif L[mid] > hkey:\n end = mid - 1\n print \"猜小了\"\n else:\n start = mid + 1\n print \"猜大了\"\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7439024448394775, "alphanum_fraction": 0.7439024448394775, "avg_line_length": 26.5, "blob_id": "6a4495c3f9ebd9a629874f37db81d6bf32decf31", "content_id": "c4a0ffb3bb0ac6eff1073198f91e581ea495f53f", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 43, "num_lines": 6, "path": "/uplooking_Python/code/lesson07-flask/my_ops/domains/views/domain.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from domains import blue_print\nfrom flask import render_template\n\n@blue_print.route('/index',methods=[\"GET\"])\ndef index():\n return render_template('domain.html')" }, { "alpha_fraction": 0.49341416358947754, "alphanum_fraction": 0.5350198745727539, "avg_line_length": 19.00418472290039, "blob_id": "fa0c5697553b8a85c2e44493ac1ebe37558cfbec", "content_id": "5a9d489284323e94adee1b196489ba89a1bde7a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5295, "license_type": "no_license", "max_line_length": 75, "num_lines": 239, "path": "/uplooking_Python/code/笔记.md", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": " \n # Wolfrg笔记\n \n ## 网上参考资料:\n \nhttp://wiki.jikexueyuan.com/project/start-learning-python/230.html\n\n\n========================================\n \n## MySQL基础学习_20180226\n\n#创建数据库并指定字符集:\nmysql> create database python01 character set utf8;\n\n#创建表\nmysql> use python01;\nDatabase changed\nmysql> create table userinfo (id int,name varchar(10));\nQuery OK, 0 rows affected (0.23 sec)\n\nmysql> desc userinfo;\n+-------+-------------+------+-----+---------+-------+\n| Field | Type | Null | Key | Default | Extra |\n+-------+-------------+------+-----+---------+-------+\n| id | int(11) | YES | | NULL | |\n| name | varchar(10) | YES | | NULL | |\n+-------+-------------+------+-----+---------+-------+\n2 rows in set (0.00 sec)\n\n #插入数据\nmysql> insert into userinfo (id,name) values (1,'alex');\nQuery OK, 1 row affected (0.03 sec)\n\nmysql> select * from python01.userinfo;\n+------+------+\n| id | name |\n+------+------+\n| 1 | alex |\n+------+------+\n1 row in set (0.00 sec)\n\n #字段自增的设置\n 
#前面在创建表的时候没有设置主键和id自增,设置字段为auto_increment,字段必须是int类型且为primary key:\n命令如下:\nalter table userinfo change id id int primary key AUTO_INCREMENT;\n\nmysql> desc userinfo\n -> ;\n+-------+-------------+------+-----+---------+----------------+\n| Field | Type | Null | Key | Default | Extra |\n+-------+-------------+------+-----+---------+----------------+\n| id | int(11) | NO | PRI | NULL | auto_increment |\n| name | varchar(10) | YES | | NULL | |\n+-------+-------------+------+-----+---------+----------------+\n2 rows in set (0.00 sec)\n\n #再来插入一条数据\ninsert into userinfo (name) values ('frg');\nmysql> select * from userinfo;\n+----+------+\n| id | name |\n+----+------+\n| 1 | alex |\n| 2 | frg |\n+----+------+\n2 rows in set (0.00 sec)\n\n #可以看到id自增为2\n\n\n\n\n #python操作MySQL\n\n #查询操作:\n\n In [61]: import MySQLdb\n In [55]: conn = MySQLdb.connect(\n ...: host = '127.0.0.1',\n ...: port = 3306,\n ...: user = 'root',\n ...: passwd = '123321',\n ...: db = 'test',\n ...: charset = 'utf8'\n ...: )\n\nIn [56]: cur = conn.cursor()\n\nIn [57]: cur.execute('select * from frg')\nOut[57]: 3L\n\nIn [58]: cur.fetchall()\nOut[58]: ((1L, u'zjq'), (2L, u'aaa'), (3L, u'tom'))\n\nIn [59]:\n\n#插入操作:\n\n #实例:\nIn [63]: conn = MySQLdb.connect(\n ...: host = '127.0.0.1',\n ...: port = 3306,\n ...: user = 'root',\n ...: passwd = '123321',\n ...: db = 'python01',\n ...: charset = 'utf8'\n ...: )\n\nIn [64]: cur = conn.cursor()\n\nIn [65]: sql = \"insert into userinfo (name) values ('dddd')\"\n\nIn [66]: sql = \"insert into userinfo (name) values (%s)\"\n\nIn [67]: sql\nOut[67]: 'insert into userinfo (name) values (%s)'\n\nIn [68]: params = ('dddd')\n\n\n\nIn [70]: cur.execute(sql,params)\n---------------------------------------------------------------------------\nTypeError Traceback (most recent call last)\n<ipython-input-70-c70064def401> in <module>()\n----> 1 cur.execute(sql,params)\nTypeError: not all arguments converted during string formatting\n\n#66处 采用%s的写法\n#上面的报错 TypeError:解决办法是 
params = ('dddd',)\n\nIn [76]: params = ('dddd',)\n\nIn [77]: re = cur.execute(sql,params)\n\nIn [78]: conn.commit()\n\nIn [79]: cur.fetchall()\nOut[79]: ()\n\nIn [80]: cur.close()\n\nIn [81]: conn.close()\n\nIn [82]: print re\n1\n\n #验证:在MySQL命令 查询看到新插入的数据\n\n\n\n## lesson07\n\n#前端基础\n\n ## html\n ## css\n ## js\n\n 内联元素\n span\n\n\n css\n 式样的构成:选择符和样式\n\n div{width:200px;}\n\n#选择符:\n#标签选择符\n\n#类选择符\n<div class=\"odiv\"></div>\n\n\nid选择符:id选择符是唯一的,每个id名称必须不同\n<div id=\"oidiv1\"></div>\n\n#子元素选择符:\n#有两种\n1.只选中第一级\n<div class=\"outer1\">\n\n</div>\n\n\n#伪类选择符\n\ninArray\n\n\n20180422\npython异常处理\n\[email protected]_handler\n\n错误集中处理\n\n\nsentry\n\nFlask http接收方式\nPOST的方式\nparams url形式的参数\n表单形式的参数\n客户端向服务器发送json格式request.get_json()\n\n1、request.args.get(\"hostname\")\n2、request.form[\"hostname\"]\n3、request.get_json()\n\nCREATE TABLE `map_tree` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(45) DEFAULT NULL,\n `cname` varchar(45) DEFAULT NULL,\n `node_type` varchar(45) DEFAULT NULL,\n `pid` varchar(45) DEFAULT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `id_UNIQUE` (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=utf8;\n\nCREATE TABLE `tb_device` (\n `id` int(10) unsigned NOT NULL AUTO_INCREMENT,\n `hostname` varchar(45) DEFAULT NULL,\n `host_type` varchar(45) DEFAULT NULL,\n `ip` varchar(45) DEFAULT NULL,\n `location` varchar(45) DEFAULT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `id_UNIQUE` (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8;\n\n\nCREATE TABLE `map_device` (\n `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,\n `tree_id` bigint(20) DEFAULT NULL,\n `device_id` bigint(20) DEFAULT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `id_UNIQUE` (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=21 DEFAULT CHARSET=utf8;" }, { "alpha_fraction": 0.49030470848083496, "alphanum_fraction": 0.5270082950592041, "avg_line_length": 35.07692337036133, "blob_id": "e8e0fe356d755f00b884cac901f41f09c8c22cdb", "content_id": 
"5b4779598699e4bdd43cb068788302e2e37e7825", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1450, "license_type": "no_license", "max_line_length": 119, "num_lines": 39, "path": "/0629/testaa.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\r\n'''\r\nCreated on 2017年8月24日\r\n\r\n@author: Ops\r\n'''\r\n\r\n\r\nimport os\r\n\r\ndef my_rsync(source):\r\n\r\n os.chdir(source)\r\n\r\n if os.path.isfile(\"package.json\"):\r\n f = open(\"package.json\").readlines()\r\n for build in f:\r\n os.system(\"npm install\")\r\n os.system(\"npm run build\")\r\n if os.path.isdir(\"dist\"):\r\n #src = \"/data/jenkins_workspace/jenkins_8001/workspace/zm-mobile-h5-spa_all/alliance_add/dist/\"\r\n source = os.path.join(source,\"dist/\")\r\n dst_ma = \"[email protected]:/webapps/zm.gaiay.net.cn-ma/www/spa/alliance_add/\"\r\n dst_reader = \"[email protected]:/webapps/zm.gaiay.net.cn/www/spa/alliance_add/\"\r\n\r\n os.system(\"rsync -vzrtopg --progress -e ssh source dst_ma\" )\r\n os.system(\"rsync -vzrtopg --progress -e ssh source dst_reader\")\r\n\r\n\r\n else:\r\n\r\n if os.path.isdir(\"src\"):\r\n source = os.path.join(source,\"src/\")\r\n dst_ma = \"[email protected]:/webapps/zm.gaiay.net.cn-ma/www/spa/live-message/\"\r\n #dst_reader = \r\n\r\nmy_rsync(\"/data/jenkins_workspace/jenkins_8001/workspace/zm-mobile-h5-spa_all/alliance_add/\")\r\n\r\nmy_rsync(\"/data/jenkins_workspace/jenkins_8001/workspace/zm-mobile-h5-spa_all/live-message/\")" }, { "alpha_fraction": 0.3920792043209076, "alphanum_fraction": 0.41386139392852783, "avg_line_length": 16.035715103149414, "blob_id": "35394f418483479aac57e1e5e162efbd039de3e7", "content_id": "d70a998c022a123d4cd33d25f24a3425d1558b8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 505, "license_type": "no_license", "max_line_length": 48, "num_lines": 28, "path": 
"/uplooking_Python/code/前端/lesson08-web/addNode.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by Administrator on 2018/3/11.\r\n */\r\n$(function(){\r\n var addNode = {\r\n def:{\r\n num:10\r\n },\r\n addFirstNode: function(){\r\n\r\n },\r\n removeFirstNode: function(index, value){\r\n this._jsonPase();\r\n },\r\n _jsonPase: function(){\r\n\r\n },\r\n\r\n init: function(){\r\n var _this = this;\r\n\r\n _this.addFirstNode();\r\n _this.removeFirstNode(4,6)\r\n }\r\n };\r\n\r\n addNode.init();\r\n})\r\n" }, { "alpha_fraction": 0.4853938817977905, "alphanum_fraction": 0.5269278883934021, "avg_line_length": 30.69230842590332, "blob_id": "62a81bdd8d57d3cafb7dbee3449426a8c22972d5", "content_id": "c8941e140de3ed70cc2dd869be74b75a00939202", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7619, "license_type": "no_license", "max_line_length": 118, "num_lines": 221, "path": "/0901/file_caozuo.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 2017年9月26日\r\n\r\n@author: Ops\r\n'''\r\n#encoding:utf-8 # 支持中文输入\r\n\r\nimport sys\r\nimport getpass\r\nimport shutil\r\nimport commands\r\nimport time\r\nimport fileinput\r\n\r\nstaff_list = 'contact_list.txt'\r\n\r\n# 参数配置\r\nuser = 'admin'\r\npasswd = '123456'\r\ns = file(staff_list)\r\nss = s.readlines()\r\na = file(staff_list,'a')\r\ncounter = 0\r\n_counter = 0\r\n\r\n# 认证登陆\r\nwhile True:\r\n # 计数器,超过3次强制退出\r\n if counter <= 3:\r\n # 空用户名判断\r\n name = raw_input(\"please input your name: \").strip()\r\n if len(name) == 0:\r\n print \"empty name,try again!\"\r\n continue\r\n\r\n # 用户名密码判断,密码隐藏\r\n # pwd = raw_input(\"please input your password: \")\r\n pwd = getpass.getpass('please input your password:')\r\n if pwd == passwd and name == user:\r\n print \"Welcome to login,%s\" %name\r\n else:\r\n print \"name or password is not valid,please try again!\"\r\n counter +=1\r\n continue\r\n break\r\n 
else:\r\n print \"exceeded 3 times user login..exit the script\"\r\n sys.exit()\r\n\r\n# 选择增删改查\r\nwhile True:\r\n item = raw_input('''\\033[36;1mWelcome to login %s, what do you want to do?\r\n-----------------------\r\npress 'p' for print \r\npress 'a' for add\r\npress 'd' for delete \r\npress 'u' for update\r\npress 's' for select\r\npress 'q' for quit\r\n-----------------------\r\nplease make your choise: \\033[0m''' % user)\r\n \r\n # 打印所有\r\n if item == 'p':\r\n while True:\r\n user_select = open(staff_list,'r')\r\n s_ = user_select.read() \r\n print ' '\r\n print '\\033[32;1mThe content of the file\\033[0m '\r\n print '\\033[32;1m--------------------------\\033[0m '\r\n print s_\r\n print '\\033[32;1m--------------------------\\033[0m '\r\n print ' '\r\n break\r\n \r\n # 增加\r\n elif item == 'a':\r\n while True:\r\n user_add_num = raw_input((\"\\033[32;1mplease input your number: \\033[0m \").strip())\r\n user_add_name = raw_input((\"\\033[32;1mplease input your name: \\033[0m \").strip())\r\n user_add_dept = raw_input((\"\\033[32;1mplease input your department: \\033[0m \").strip())\r\n user_add_id = raw_input((\"\\033[32;1mplease input your id: \\033[0m \").strip())\r\n user_item = '%s\\t%s\\t%s\\t%s' %(user_add_num,user_add_name,user_add_dept,user_add_id)\r\n a.write(\"\\n%s\" %user_item)\r\n a.flush()\r\n print \"\\033[32;1mAdd item:\\033[0m\"\r\n print \"\\033[32;1m------------------\\033[0m\"\r\n print user_item\r\n print \"\\033[32;1m------------------\\033[0m\"\r\n print \"\\033[32;1mAdded successful!\\033[0m\"\r\n \r\n # 删除空行\r\n del_blank_in = open('contact_list.txt','r')\r\n del_blank_out = open('contact_list_new.txt','w')\r\n lines = del_blank_in.readlines()\r\n for blank in lines:\r\n if blank.split():\r\n del_blank_out.writelines(blank)\r\n del_blank_in.close()\r\n del_blank_out.close()\r\n # 覆盖原文件\r\n shutil.move('contact_list_new.txt','contact_list.txt')\r\n user_add_choise = raw_input('press Q for quit or press any key to continue: ')\r\n if 
user_add_choise == 'Q':\r\n print 'bye!'\r\n break\r\n \r\n # 删除\r\n elif item == 'd':\r\n while True:\r\n user_del_input = raw_input(\"please input sth to delete: \").strip()\r\n if len(user_del_input) == 0:\r\n print \"empty input,try again!\"\r\n else:\r\n # 输入值与源文件比对,有则丢弃,没有则添加到新文件,最后新文件覆盖源文件,实现删除功能\r\n with open('contact_list.txt','r') as ff:\r\n with open('contact_list.txt.new','w') as gg:\r\n for line in ff.readlines():\r\n if user_del_input not in line:\r\n gg.write(line)\r\n if user_del_input in line:\r\n print \"\\033[32;1mDelete item:\\033[0m\"\r\n print \"\\033[32;1m------------------\\033[0m\"\r\n print \" %s \" %line\r\n _counter += 1 # 计数器,判断输入值命中次数\r\n print \"\\033[32;1m------------------\\033[0m\"\r\n print \"\\033[32;1mDeleted successful!\\033[0m\"\r\n if _counter == 0: \r\n print 'nothing delete!'\r\n shutil.move('contact_list.txt.new','contact_list.txt')\r\n # 退出删除\r\n user_del_input_quit = raw_input(\"\\033[32;1mpress Q for quit or press any key to continue? \\033[0m\").strip()\r\n if user_del_input_quit == 'Q':\r\n break \r\n \r\n \r\n # 查询\r\n elif item == 's':\r\n while True:\r\n match_yes = 0\r\n #输入判断,忽略空格输入,加入颜色\r\n user_select_input = raw_input(\"\\033[32;1mplease input sth to search:\\033[0m \").strip()\r\n contact_file = file (staff_list)\r\n if len(user_select_input) == 0:\r\n print \"empty input,try again!\"\r\n else: \r\n while True:\r\n line = contact_file.readline()\r\n if len(line) == 0:\r\n break\r\n if user_select_input in line:\r\n match_yes = 1\r\n print line\r\n else:\r\n pass\r\n if match_yes == 0 :\r\n print \"No match item found\"\r\n # 退出查询\r\n user_select_input_quit = raw_input(\"\\033[32;1mpress Q for quit or press any key to continue? 
\\033[0m\").strip()\r\n if user_select_input_quit == 'Q':\r\n break \r\n \r\n # 修改\r\n elif item == 'u':\r\n while True:\r\n # 输入为空以及匹配查询内容判断\r\n user_update_input_from = raw_input(\"\\033[32;1mplease search sth to update: \\033[0m\").strip()\r\n update_match = 0\r\n update_file = file(staff_list).readlines()\r\n for n_ in range(len(update_file)):\r\n if user_update_input_from in update_file[n_]:\r\n update_match = 1\r\n else:\r\n pass\r\n if update_match == 0:\r\n print \"No match item found\"\r\n elif len(user_update_input_from) == 0:\r\n print \"empty input,try again!\"\r\n else:\r\n # 将匹配到的字符修改为新字符\r\n while True:\r\n user_update_input_to = raw_input(\"\\033[32;1mupdate %s to what?\\033[0m \" %(user_update_input_from)).strip()\r\n if len(user_update_input_to) == 0:\r\n print \"empty input,try again!\"\r\n else:\r\n for line_ in fileinput.input(staff_list,inplace = 1,backup='.bak'):\r\n line_ = line_.replace(user_update_input_from,user_update_input_to)\r\n print line_\r\n # 打印修改字符的行\r\n print \"\\033[32;1mupdate item:\\033[0m\"\r\n output_ = commands.getoutput(\"diff contact_list.txt contact_list.txt.bak|grep '^>.*' | sed 's/^>//g'\")\r\n print \"\\033[32;1m---------------------------\\033[0m\"\r\n print output_\r\n print \"\\033[32;1m---------------------------\\033[0m\"\r\n print \"\\033[32;1mupdate successfully!\\033[0m\"\r\n # 删除空行\r\n del_blank_in = open('contact_list.txt','r')\r\n del_blank_out = open('contact_list_new.txt','w')\r\n lines = del_blank_in.readlines()\r\n for blank in lines:\r\n if blank.split():\r\n del_blank_out.writelines(blank)\r\n del_blank_in.close()\r\n del_blank_out.close()\r\n # 覆盖原文件\r\n shutil.move('contact_list_new.txt','contact_list.txt')\r\n break\r\n # 退出更新\r\n user_update_input_quit = raw_input(\"\\033[32;1mpress Q for quit or press any key to continue? 
\\033[0m\").strip()\r\n if user_update_input_quit == 'Q':\r\n break \r\n \r\n \r\n # 退出\r\n elif item == 'q':\r\n print 'bye!'\r\n sys.exit()\r\n \r\n else:\r\n print \"\\033[31;1mnot a valid key word\\033[0m\"\r\n time.sleep(1)" }, { "alpha_fraction": 0.5098039507865906, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 24.5, "blob_id": "c2fef818c70fc0c2af6325d93db0e9e4b5234965", "content_id": "1c9cc219e269513691600376de694b8c611888cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 51, "license_type": "no_license", "max_line_length": 38, "num_lines": 2, "path": "/uplooking_Python/code/jenkins.bak/shell/test-java-build.sh", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/bin/bash\nssh 10.1.11.92 \"sh /root/test-java.sh\"\n" }, { "alpha_fraction": 0.6062043905258179, "alphanum_fraction": 0.6138685941696167, "avg_line_length": 24.137615203857422, "blob_id": "97aedc461d22bb9875e8c9280ad98d6d8c68923a", "content_id": "f293f32848922f8d9aab595a194f919a867cf335", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2780, "license_type": "no_license", "max_line_length": 116, "num_lines": 109, "path": "/uplooking_Python/code/flask/app_bak.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\nfrom flask import Flask, request, render_template\nimport MySQLdb\nimport json\n\ndb = MySQLdb.connect(\"localhost\", \"root\", \"123321\", \"python01\",charset='utf8')\n\napp = Flask(__name__)\n\n# index views\[email protected]('/')\ndef show_index():\n # cursor = db.cursor()\n # sql = \"SELECT * FROM user_ip_info\"\n # cursor.execute(sql)\n # results = cursor.fetchall()\n return render_template('index.t.html')\n\n\n# @app.route('/tree')\n# def tree_views():\n# cursor = db.cursor()\n# sql = \"SELECT * FROM map_tree\"\n# cursor.execute(sql)\n# results = cursor.fetchall()\n# return 
render_template('index.t.html',results=results)\n\[email protected]('/delete',methods=['GET','POST'])\ndef update_sql():\n cursor = db.cursor()\n sql = \"delete from user_ip_info where id=2\"\n cursor.execute(sql)\n db.commit()\n return '将要删除这行数据'\n\[email protected]('/insert',methods=['GET','POST'])\ndef insert_sql():\n cursor = db.cursor()\n sql = \"insert into user_ip_info (id,username,position,ipaddr,remark) values (7,'王超','前锋','192.168.0.7','足球')\"\n cursor.execute(sql)\n db.commit()\n return '插入一行数据'\n\[email protected]('/ajax.html',methods=['GET','POST'])\ndef myajax():\n return render_template('ajax.html')\n #pass\n\n#get right table api\[email protected]('/right-table',methods=['GET','POST']) \ndef show_table():\n cursor = db.cursor()\n sql = \"SELECT * FROM user_ip_info\"\n cursor.execute(sql)\n row_headers=[x[0] for x in cursor.description]\n results = cursor.fetchall()\n \n # return json.dumps(results)\n\n data=[]\n for result in results:\n data.append(dict(zip(row_headers,result)))\n return json.dumps(data) \n\n \n # data = []\n # content = {}\n # for result in results:\n # content = {'id':result[0],'username':result[1],'position':result[2],'ipaddr':result[3],'remark':result[4]}\n # data.append(content)\n # content = {}\n # return json.dumps(data) \n \n\n#get left tree table api\[email protected]('/tree/all',methods=['GET','POST'])\ndef get_tree_all():\n cursor = db.cursor()\n sql = \"SELECT * FROM map_tree\"\n cursor.execute(sql)\n row_headers = [x[0] for x in cursor.description]\n results = cursor.fetchall()\n\n data = []\n for result in results:\n data.append(dict(zip(row_headers,result)))\n return json.dumps(data) \n\n# rename tree node \[email protected]('/node/rename',methods=[\"POST\"])\ndef rename_node(name,id):\n id = request.form.get('id')\n name = request.form.get('name')\n\n sql = 'update map_server set name=%s where id=%s'\n params = (id,name)\n\n cursor = db.cursor()\n results = cursor.execute(sql,params)\n cursor.commit()\n 
cursor.close()\n db.close()\n\n return results\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n" }, { "alpha_fraction": 0.527999997138977, "alphanum_fraction": 0.5424000024795532, "avg_line_length": 19.161291122436523, "blob_id": "8f63823cd87a5568b12c812201a9ab97bab0fedc", "content_id": "30df39147ede335c41631d3aea9c3c53adf29428", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 625, "license_type": "no_license", "max_line_length": 73, "num_lines": 31, "path": "/uplooking_Python/code/lesson06/less06/simple/test.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nclass Test(object):\n Name = None\n instance = None\n def __init__(self, age):\n self.age = age\n self.name = None\n\n def __new__(cls, *args, **kwargs):\n if not cls.instance:\n cls.instance = super(Test, cls).__new__(cls, *args, **kwargs)\n return cls.instance\n\n def set_age(self, age):\n self.age = age\n\n def get_age(self):\n print self.age\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\nt1 = Test(18)\nt2 = Test(30)\nt1.set_name(\"bigv\")\nprint t2.get_name()\n" }, { "alpha_fraction": 0.510869562625885, "alphanum_fraction": 0.5173913240432739, "avg_line_length": 17.875, "blob_id": "26a2ee2f993ba61e73884e2bc5b3f74fee8cb2f9", "content_id": "45fb8e3b1f57b533953c185891ad308a38ec2225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "no_license", "max_line_length": 70, "num_lines": 24, "path": "/uplooking_Python/code/lesson06/less06/simple/test2.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nclass T(object):\n\n _instance = None\n\n def __init__(self, name):\n self.name = name\n\n def __new__(cls, *args, **kwarg):\n if not cls._instance:\n cls._instance = super(T, 
cls).__new__(cls, *args, **kwarg)\n return cls._instance\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n print self.name\n\n\nt1 = T(\"bigv\")\nt2 = T(\"xiaoqiang\")\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7664233446121216, "alphanum_fraction": 0.7664233446121216, "avg_line_length": 45, "blob_id": "d0695b000956ff21cf7deb2cc0db2535090f2f91", "content_id": "f9f1917c9a07abf2a116375bc32d34086b4f6330", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 96, "num_lines": 3, "path": "/uplooking_Python/code/lesson08-flask/ops/domains/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nblue_print = Blueprint(\"domains\", __name__, template_folder=\"templates\", static_folder=\"static\")\nimport views" }, { "alpha_fraction": 0.666528046131134, "alphanum_fraction": 0.673180878162384, "avg_line_length": 29.45569610595703, "blob_id": "0ada182b596e8eb7439be783b277389f6e9e8070", "content_id": "a06ba409aaad529552afd461c95f5dd4349d4373", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2405, "license_type": "no_license", "max_line_length": 108, "num_lines": 79, "path": "/uplooking_Python/code/SimpleFlask+jquery+mysql/util.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport MySQLdb as mysql\n# from config import ST,DB_PORT,DB_USER,DB_PASSWD,DB_DBNAME,DB_CHARSET\n# db = MySQLdb.connect(\"localhost\", \"root\", \"123321\", \"python01\",charset='utf8')\n\nST = \"localhost\"\nDB_PORT = \"3306\"\nDB_USER = \"root\"\nDB_PASSWD = \"123321\"\nDB_DBNAME = \"python01\"\nDB_CHARSET = \"charset='utf8'\"\n\n\n\n\nsql_all = 'select * from user'\nsql_login = 'select * from user where (username=\"%s\") and (password=\"%s\")'\nsql_delete = 'delete from user where username=\"%s\"'\nsql_adduser = 'insert into 
user values (\"%s\",\"%s\")'\nsql_updatepw = 'update user set password=\"%s\" where username=\"%s\"'\nsql_getpw = 'select password from user where username=\"%s\"'\n\n\ndef app_index():\n con = mysql.connect(host=ST,port=DB_PORT,user=DB_USER,passwd=DB_PASSWD,db=DB_DBNAME,charset=DB_CHARSET)\n con.autocommit(True)\n cur =con.cursor()\n cur.execute(sql_all)\n res = cur.fetchall()\n cur.close()\n con.close()\n return res\n\ndef app_login(username,passwd):\n con = mysql.connect(host=ST,port=DB_PORT,user=DB_USER,passwd=DB_PASSWD,db=DB_DBNAME,charset=DB_CHARSET)\n con.autocommit(True)\n cur =con.cursor()\n cur.execute(sql_login%(username,passwd))\n res = cur.fetchone()\n cur.close()\n con.close()\n return res\n\ndef app_delete(username):\n con = mysql.connect(host=ST,port=DB_PORT,user=DB_USER,passwd=DB_PASSWD,db=DB_DBNAME,charset=DB_CHARSET)\n con.autocommit(True)\n cur =con.cursor()\n cur.execute(sql_delete%(username))\n res = cur.fetchone()\n cur.close()\n con.close()\n\ndef app_adduser(username,passwd):\n con = mysql.connect(host=ST,port=DB_PORT,user=DB_USER,passwd=DB_PASSWD,db=DB_DBNAME,charset=DB_CHARSET)\n con.autocommit(True)\n cur =con.cursor()\n cur.execute(sql_adduser%(username,passwd))\n res = cur.fetchone()\n cur.close()\n con.close()\n\ndef app_updatepw(passwd,username):\n con = mysql.connect(host=ST,port=DB_PORT,user=DB_USER,passwd=DB_PASSWD,db=DB_DBNAME,charset=DB_CHARSET)\n con.autocommit(True)\n cur =con.cursor()\n cur.execute(sql_updatepw%(passwd,username))\n res = cur.fetchone()\n cur.close()\n con.close()\n\ndef app_getpw(username):\n con = mysql.connect(host=ST,port=DB_PORT,user=DB_USER,passwd=DB_PASSWD,db=DB_DBNAME,charset=DB_CHARSET)\n con.autocommit(True)\n cur =con.cursor()\n cur.execute(sql_getpw%(username))\n res = cur.fetchone()\n cur.close()\n con.close()\n return res" }, { "alpha_fraction": 0.5925925970077515, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 11.272727012634277, "blob_id": 
"68ca115494448e7de003948bedec324d5db8057b", "content_id": "52b02818824198c7b0f702614db90163696030f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 29, "num_lines": 22, "path": "/uplooking_Python/code/lesson03/import_fz.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n\nimport fz\nfrom fz import A\n#print fz.var\n#print fz._print_one_\n#print fz._var1_\n#from fz import _var1_\n\nprint a.__var2__\n\n#print _var1_\n#_print_one_()\n\n#from fz import __print_two__\n#\n#def myprint():\n# print \"aaaa\"\n# a.__print_two__()\n#\n#\n#myprint()\n" }, { "alpha_fraction": 0.5188307762145996, "alphanum_fraction": 0.5250140428543091, "avg_line_length": 20.80769157409668, "blob_id": "92f057567d3415fa376ffb46d5d8566359e94ad9", "content_id": "46d983e9523ad7d8835522f63a9278ced7057e4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2155, "license_type": "no_license", "max_line_length": 246, "num_lines": 78, "path": "/uplooking_Python/code/前端/lesson10-前端day04/monitor-web1/mHosts/js/hosts.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 2018/3/22.\r\n */\r\n$(function(){\r\n\tvar hosts = {\r\n\r\n\t\tsaveData: {\r\n\t\t\tpageNum:1, // 存储页码,调接口时跟这儿取页码数据\r\n\t\t\tsubmenuId: 0 // 存储子菜单id,调接口时跟这儿取子菜单id数据\r\n\t\t},\r\n\r\n\t\t// 获取表格数据\r\n\t\tgetTableData: function(){\r\n\t\t\tvar _this = this,\r\n\t\t\t\tparam = {};\r\n\r\n\t\t\t// 给后台传参数\r\n\t\t\tparam.page = _this.saveData.pageNum;\r\n\t\t\tparam.submenu = _this.saveData.submenuId;\r\n\r\n\t\t\t// $.get('接口', param, function(response){ // domain列表接口\r\n\t\t\t$.getJSON('../../data/zhuji.json', function(response){\r\n\t\t\t\tif(response.code == 1){\r\n\t\t\t\t\tvar data = response.data.detail; // 获取接口中返回的列表的数据\r\n\t\t\t\t\t_this.tableTpl(data); // 
调用拼接图表模板的函数\r\n\r\n\t\t\t\t\t_this.pageSel(); // 页码选择\r\n\r\n\t\t\t\t}\r\n\t\t\t})\r\n\t\t},\r\n\r\n\t\t// 表格模板\r\n\t\ttableTpl: function (data) {\r\n\t\t\tvar _this = this,\r\n\t\t\t\tstr = '';\r\n\r\n\t\t\t// 拼接表格头部\r\n\t\t\tstr += '<thead><th><td>主机名</td><td>IP</td><td>宿主机</td><td>机房</td><td>状态</td><td>套餐</td><td>tags</td></th></thead><tbody>';\r\n\r\n\t\t\t// 拼接表格body\r\n\t\t\t$.each(data, function(index, value){\r\n\t\t\t\tstr += '<tr><td>' + (index + 1) + '</td><td>' + value.zhuji + '</td><td>' + value.ip + '</td><td>' + value.suzhuji + '</td><td>' + value.jifang + '</td><td>' + value.status + '</td><td>' + value.menu + '</td><td>' + value.tags + '</td></tr>';\r\n\r\n\t\t\t});\r\n\r\n\t\t\tstr += '</tbody>';\r\n\r\n\t\t\t$('#bodyList').html(str); // 放入父元素,生成表格\r\n\t\t},\r\n\r\n\t\t// 分页\r\n\t\tpageSel: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t$('.pagination').off('click').on('click', 'a', function (event) {\r\n\r\n\t\t\t\t_this.saveData.pageNum = $(this).text(); // 将点击的页码赋给公共变量\r\n\t\t\t\t_this.getTableData(); // 调显示图表的函数,显示新一页的表格数据\r\n\t\t\t})\r\n\r\n\t\t},\r\n\r\n\t\t// 入口函数\r\n\t\tinit: function(){\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t_this.getTableData(); // 调显示图表函数\r\n\t\t\tpublic_func.leftNav(); // 调左侧导航函数\r\n\r\n\t\t\tpublic_func.subMenuClick(); // 调公共函数中的子菜单点击事件\r\n\r\n\r\n\t\t}\r\n\t};\r\n\r\n\thosts.init(); // 调入口函数\r\n})\r\n" }, { "alpha_fraction": 0.5184300541877747, "alphanum_fraction": 0.5255972743034363, "avg_line_length": 25.40186882019043, "blob_id": "e18913f6f5ab91adfce2ca64cd1f58b132dc5fa8", "content_id": "5a1bf12dfa110a8c0dd3db2ad31a4dda0f9ea7e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3102, "license_type": "no_license", "max_line_length": 251, "num_lines": 107, "path": "/uplooking_Python/code/前端/lesson10-前端day04/web-day-04/完整项目/monitor-web1/js/index.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 
2017/12/27.\r\n */\r\n\r\n$(function () {\r\n\tvar monitor = {\r\n\r\n\t\t// 表格列表\r\n\t\ttableList: function () {\r\n\t\t\tvar _this = this,\r\n\t\t\t\turl = '',\r\n\t\t\t\tthisMainMenu = _this.defaultSet.thisMainMenu,\r\n\t\t\t\tthisSubMenu = _this.defaultSet.thisSubMenu,\r\n\t\t\t\tparams = {}; // 列表接口的参数\r\n\r\n\t\t\tparams.thisPage = _this.defaultSet.thisPage; // 当前页数名称\r\n\t\t\tparams.thisNode = _this.defaultSet.selTreeNodeId; //当前node节点\r\n\t\t\tparams.mainMenu = thisMainMenu ; // 当前主菜单id\r\n\t\t\tparams.subMenu = thisSubMenu; // 当前子菜单id\r\n\r\n\t\t\turl = 'js/tablelist.json'; // 改成你自己的列表的接口地址\r\n\r\n\t\t\t// 访问传的参数结构即params变量,例如{thisPage:1, thisNode:13, mainMenu:2, subMenu:1}\r\n\t\t\t$.get(url, params, function (res) {\r\n\t\t\t\tvar listData = res.data.detail,\r\n\t\t\t\t\thtmlTpl = '';\r\n\r\n\t\t\t\tif (thisMainMenu == 1) {\r\n\t\t\t\t\thtmlTpl += '<thead><th><td>name</td><td>mDomain</td><td>enable</td><td>type</td><td>line</td><td>ttl</td></th></thead><tbody>';\r\n\r\n\t\t\t\t} else {\r\n\t\t\t\t\thtmlTpl += '<thead><th><td>主机名</td><td>IP</td><td>宿主机</td><td>机房</td><td>状态</td><td>套餐</td><td>tags</td></th></thead><tbody>';\r\n\t\t\t\t}\r\n\r\n\t\t\t\t$.each(listData, function (index, value) {\r\n\t\t\t\t\tif (thisMainMenu == 1) {\r\n\t\t\t\t\t\thtmlTpl += '<tr><td>' + (index + 1) + '</td><td>' + value.name + '</td><td>' + value.domain + '</td><td>' + value.enable + '</td><td>' + value.type + '</td><td>' + value.line + '</td><td>' + value.ttl + '</td></tr>'\r\n\t\t\t\t\t} else {\r\n\t\t\t\t\t\thtmlTpl += '<tr><td>' + (index + 1) + '</td><td>' + value.zhuji + '</td><td>' + value.ip + '</td><td>' + value.suzhuji + '</td><td>' + value.jifang + '</td><td>' + value.status + '</td><td>' + value.menu + '</td><td>' + value.tags + '</td></tr>'\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t});\r\n\r\n\t\t\t\thtmlTpl += '</tbody>';\r\n\r\n\t\t\t\t$('#bodyList').html(htmlTpl);\r\n\r\n\t\t\t}, \"json\");\r\n\r\n\t\t\t_this.pageSel();\r\n\r\n\t\t},\r\n\r\n\t\ttabMenuSwich: function () 
{\r\n\t\t\tvar _this = this;\r\n\t\t\t_this.tableList('domain', '');\r\n\r\n\t\t\t// 主菜单切换,默认选中第一个子菜单\r\n\t\t\t$('.js-main-tabs').off('click.choice').on('click.choice', 'li', function (e) {\r\n\t\t\t\tvar thisTab = $(this),\r\n\t\t\t\t\tsubMenuArr = [],\r\n\t\t\t\t\tsubMenuTpl = '',\r\n\t\t\t\t\tsubList = $('.js-sub-tabs');\r\n\t\t\t\tthisTab.parent().find('li').removeClass('active');\r\n\t\t\t\tthisTab.addClass('active');\r\n\r\n\t\t\t\t_this.defaultSet.thisPage = 1;\r\n\t\t\t\t_this.defaultSet.thisMainMenu = $(this).data('menu');\r\n\t\t\t\tswitch (thisTab.attr('id')) {\r\n\t\t\t\t\tcase 'domain':\r\n\t\t\t\t\t\tsubList.hide();\r\n\t\t\t\t\t\t$('.domainNav').show();\r\n\t\t\t\t\t\t_this.defaultSet.thisSubMenu = 1;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase 'machine':\r\n\t\t\t\t\t\tsubList.hide();\r\n\t\t\t\t\t\t$('.machineNav').show();\r\n\t\t\t\t\t\t_this.defaultSet.thisSubMenu = 4;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase 'nodemsg':\r\n\t\t\t\t\t\tsubList.hide();\r\n\t\t\t\t\t\t$('.nodemsgNav').show();\r\n\t\t\t\t\t\t_this.defaultSet.thisSubMenu = 7;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tdefault:\r\n\t\t\t\t\t\treturn;\r\n\t\t\t\t}\r\n\r\n\t\t\t\t_this.tableList();\r\n\r\n\t\t\t});\r\n\r\n\t\t},\r\n\r\n\r\n\t\tinit: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t_this.treeList();\r\n\t\t\t_this.tabMenuSwich();\r\n\r\n\t\t\t_this.exitBtn();\r\n\t\t}\r\n\t};\r\n\r\n\tmonitor.init();\r\n});" }, { "alpha_fraction": 0.6581818461418152, "alphanum_fraction": 0.671818196773529, "avg_line_length": 34.45161437988281, "blob_id": "9361ecf99cc0455ff2d271807d94c99d9acbab5d", "content_id": "ec7ec678054ffc1ac4de53a7c6854c3ed42b2b27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1100, "license_type": "no_license", "max_line_length": 121, "num_lines": 31, "path": "/uplooking_Python/code/jenkins.bak/funtion/main.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": 
"#!/usr/bin/python\n#coding:utf-8\nimport os\nimport paramiko\nimport MySQLdb.cursors\nimport sys,string\nimport MySQLdb\ndef login():\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=\"10.1.11.92\",username=\"root\",password=\"123456\")\n return client\ndef accept():\n conn = MySQLdb.connect(host='localhost',user='root',passwd='',db='jenkins',cursorclass = MySQLdb.cursors.DictCursor,)\n return conn\ndef tijiao(name):\n a = 'sh /root/jenkins/shell/%s.sh'%name\n print os.system(a)\ndef server_bulid(name):\n \n a = 'sh /root/jenkins/shell/%s.sh'%name\n os.system(a)\ndef build_server(name):\n if name == 'test-php':\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=\"10.1.11.95\",username=\"root\",password=\"123456\")\n stdin, stdout, stderr = client.exec_command(\"sh /root/test-php.sh\")\n if name == 'test-java':\n client = login()\n stdin, stdout, stderr = client.exec_command(\"sh /root/test-java.sh\")\n\n" }, { "alpha_fraction": 0.5869565010070801, "alphanum_fraction": 0.760869562625885, "avg_line_length": 12.666666984558105, "blob_id": "6e0c67bed8068260e0302a1e607c93332469f39c", "content_id": "0c57c247cd1d00c2a4adad7c7e10d66da5cd5d89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/uplooking_Python/code/flask_myself/readme.md", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "\n#目前实现了初步的增删改查 20180520\n\n#先实现登陆的功能再实现RBAC\n\n\n\n\n" }, { "alpha_fraction": 0.6794055104255676, "alphanum_fraction": 0.6804670691490173, "avg_line_length": 26.647058486938477, "blob_id": "c48ed5b6f13abfc09a717ed29fce1c6e56178ebf", "content_id": "fe0a8e7a4ae3b978631064a0fc2cd8414b7f244f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
942, "license_type": "no_license", "max_line_length": 58, "num_lines": 34, "path": "/uplooking_Python/code/lesson09-flask/ops/servicetree/views/tree.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from servicetree import blue_print\nfrom flask import render_template\nfrom flask import request\nfrom servicetree.controller import *\nimport json\n\n@blue_print.route(\"/index\", methods=[\"GET\"])\ndef index():\n return render_template(\"serviceTree.html\")\n\n@blue_print.route(\"/all\", methods=[\"GET\"])\ndef getTrees():\n trees = Tree.get_all()\n return trees\n\n@blue_print.route(\"/all_map\", methods=[\"GET\"])\ndef getTagHosts():\n\n return\n\n@blue_print.route(\"/node/add\", methods=[\"POST\"])\ndef addNode():\n pid = request.form.get(\"pid\")\n nodeName = request.form.get(\"nodeName\")\n cname = request.form.get(\"cname\")\n # node_type = request.form.get(\"nodeType\")\n # print node_type\n Tree.add_node(int(pid), nodeName, cname)\n return json.dumps([{\"code\":0,\"message\":\"sucess\"}])\n\n@blue_print.route(\"/query/<tagId>/hosts\", methods=[\"GET\"])\ndef getHostsByTagId(tagId):\n hosts = Tree.getHostsByTagId(tagId)\n return json.dumps(hosts)\n\n\n" }, { "alpha_fraction": 0.7948718070983887, "alphanum_fraction": 0.807692289352417, "avg_line_length": 25.33333396911621, "blob_id": "46881c34ac19a9361ea16ae7a0a8286dd181e4eb", "content_id": "988722e58caa00c30a14ca6aadb6a3fe56b94957", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 33, "num_lines": 3, "path": "/uplooking_Python/code/lesson07-flask/ops/hosts/views/api.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom flask import render_template\nfrom hosts import blue_print" }, { "alpha_fraction": 0.5428571701049805, "alphanum_fraction": 0.5484400391578674, "avg_line_length": 28.158416748046875, "blob_id": 
"40730c5708184482c528af32674e3340bca8de6b", "content_id": "b6429dc933b21a44125552771ced25d78dd6ffd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6430, "license_type": "no_license", "max_line_length": 198, "num_lines": 202, "path": "/uplooking_Python/code/前端/lesson10-前端day04/web-day-04/完整项目/monitor-web1/mDeploy/js/deploy.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 2018/3/22.\r\n */\r\n$(function () {\r\n\tvar deploy = {\r\n\t\tsaveData: {\r\n\t\t\tpageNum:1,\r\n\t\t\tsubmenuId: 0\r\n\t\t},\r\n\r\n\t\t// 分页\r\n\t\tpageSel: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t$('.pagination').off('click').on('click', 'a', function (event) {\r\n\t\t\t\t// 一般直接调接口\r\n\t\t\t\tvar thisPageNum = $(this).text();\r\n\r\n\t\t\t\t_this.saveData.pageNum = thisPageNum;\r\n\t\t\t\t_this.getTableData();\r\n\t\t\t})\r\n\r\n\t\t},\r\n\r\n\t\t// 任务列表\r\n\t\ttaskDataGet: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t// $.get('url', {}, function (response) {\r\n\t\t\t$.getJSON('../../data/task.json', function(response){\r\n\t\t\t\tif(response.code == 1){\r\n\t\t\t\t\tvar taskData = response.data.tasks;\r\n\r\n\t\t\t\t\t_this.taskTableTpl(taskData)\r\n\t\t\t\t}\r\n\r\n\t\t\t\t_this.taskActionPage();\r\n\r\n\t\t\t\t_this.showLanuchPage();\r\n\t\t\t})\r\n\t\t},\r\n\r\n\t\ttaskTableTpl: function(data){\r\n\t\t\tvar _this = this,\r\n\t\t\t\tstr = '';\r\n\t\t\tstr += '<thead><tr><th><input type=\"checkbox\"></th><th><button class=\"btn btn-sm btn-warning\">批量发起</button></th><th>任务列表</th><th>服务组</th><th>工作组</th><th>环境</th><th>任务状态</th></tr></thead><tbody>';\r\n\r\n\t\t\t\t$.each(data, function(index, value){\r\n\t\t\t\t\tstr += '<tr><td><input type=\"checkbox\"></td>\\\r\n\t\t\t\t\t\t\t\t<td><button class=\"btn btn-sm btn-warning js-lanuch-task mt-curosr\" data-name=\"' + value.name + '\">发起</button></td>\\\r\n\t\t\t\t\t\t\t\t<td class=\"js-edit-btn mt-curosr\" 
data-for=\"edit\" data-toggle=\"modal\" data-target=\"#exampleModal\">' + value.name + '</td>\\\r\n\t\t\t\t\t\t\t\t<td>' + value.serverGroup + '</td>\\\r\n\t\t\t\t\t\t\t\t<td>' + value.jobGroup + '</td>\\\r\n\t\t\t\t\t\t <td>' + value.enviroment + '</td>\\\r\n\t\t\t\t\t\t\t\t<td>' + value.restart + '</td></tr>';\r\n\r\n\t\t\t\t});\r\n\r\n\t\t\t\tstr += '</tbody>';\r\n\r\n\t\t\t\t$('#bodyList').html(str);\r\n\t\t},\r\n\r\n\t\t// 点击添加按钮,添加任务\r\n\t\ttaskActionPage: function(){\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t$('#exampleModal').on('show.bs.modal', function (event) {\r\n\t\t\t\tvar button = $(event.relatedTarget),\r\n\t\t\t\t\tactionType = button.data('for'),\r\n\t\t\t\t\tmodal = $(_this);\r\n\r\n\t\t\t\tif(actionType == 'add'){\r\n\t\t\t\t\t$('#exampleModalLabel').text('添加任务');\r\n\t\t\t\t\t_this.addTaskItem();\r\n\r\n\t\t\t\t}else if(actionType == 'edit'){\r\n\t\t\t\t\t$('#exampleModalLabel').text('编辑任务');\r\n\t\t\t\t\t_this.editTaskItem();\r\n\t\t\t\t}\r\n\r\n\t\t\t})\r\n\t\t},\r\n\r\n\t\taddTaskItem: function(){\r\n\t\t\tvar _this = this,\r\n\t\t\t\tformTpl = '';\r\n\t\t\tformTpl += '<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">部署环境:</label>\\\r\n\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" id=\"add-service\">\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">代码地址:</label>\\\r\n\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" id=\"add-job-name\">\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">部署账号:</label>\\\r\n\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" id=\"add-serviceGroup\">\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">部署路径:</label>\\\r\n\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" 
id=\"add-jobGroup\">\\\r\n\t\t\t\t\t \t</div>';\r\n\r\n\t\t\t$('#exampleModal').find('form').html(formTpl);\r\n\r\n\r\n\t\t},\r\n\r\n\t\t// 点击任务名称到编辑任务界面\r\n\t\teditTaskItem: function(){\r\n\r\n\t\t},\r\n\r\n\t\t//点击发起名称到发起任务界面\r\n\t\tshowLanuchPage: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t$('.js-lanuch-task').off('click').on('click', function(e){\r\n\t\t\t\t$('.task-list-page').hide();\r\n\t\t\t\t$('.lanuch-task-page').show().find('.lanuch-job-name').text($(this).data('name'));\r\n\r\n\t\t\t\t_this.lanuchTaskDetail();\r\n\t\t\t})\r\n\r\n\t\t\t$('.content-main').off('click').on('click', '.back-to-list', function(e){\r\n\t\t\t\t$('.task-list-page').show();\r\n\t\t\t\t$('.lanuch-task-page').hide();\r\n\r\n\t\t\t\t_this.taskActionPage();\r\n\t\t\t})\r\n\t\t},\r\n\t\tlanuchTaskDetail: function () {\r\n\t\t\tvar _this = this,\r\n\t\t\t\tformTpl = '';\r\n\t\t\tformTpl += '<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label col-md-3\">版本号:</label>\\\r\n\t\t\t\t\t\t <div class=\"col-md-8\"><input type=\"text\" class=\"form-control\" id=\"add-service\"></div>\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label col-md-3\">部署账号:</label>\\\r\n\t\t\t\t\t\t <div class=\"col-md-8\"><input type=\"text\" class=\"form-control\" id=\"add-serviceGroup\"></div>\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label col-md-3\">部署路径:</label>\\\r\n\t\t\t\t\t\t <div class=\"col-md-8\"><input type=\"text\" class=\"form-control\" id=\"add-jobGroup\"></div>\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label col-md-3\">启停动作:</label>\\\r\n\t\t\t\t\t\t <div class=\"col-md-8\"><input type=\"text\" class=\"form-control\" id=\"add-service\"></div>\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t 
\t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label col-md-3\">机器列表:</label>\\\r\n\t\t\t\t\t\t <div class=\"col-md-8\"><img class=\"js-machine-sel mt-curosr\" data-target=\"#exampleModal\" data-toggle=\"modal\" src=\"../../img/deploy-edit.svg\"></div>\\\r\n\t\t\t\t\t \t</div>';\r\n\r\n\t\t\t$('.lanuch-task-page').find('.panel-body').html(formTpl);\r\n\r\n\t\t\t_this.machineSelect();\r\n\r\n\t\t},\r\n\r\n\t\t// 点击机器列表,显示机器列表选择项\r\n\t\tmachineSelect: function(){\r\n\t\t\tvar _this = this,\r\n\t\t\t\tstr = '';\r\n\r\n\t\t\tstr += ' <select multiple=\"multiple\" size=\"10\" name=\"doublebox\" class=\"machineBox\"></select>';\r\n\r\n\t\t\t$('#exampleModal').find('form').html(str);\r\n\r\n\t\t\t$('.machineBox').doublebox({\r\n\t\t\t\tnonSelectedListLabel: '产品线下的机器列表',\r\n\t\t\t\tselectedListLabel: '已选择的机器列表',\r\n\t\t\t\tpreserveSelectionOnMove: 'moved',\r\n\t\t\t\tmoveOnSelect: false,\r\n\t\t\t\tnonSelectedList:[{\"roleId\":\"1\",\"roleName\":\"设备1\"},{\"roleId\":\"2\",\"roleName\":\"设备2\"},{\"roleId\":\"3\",\"roleName\":\"设备3\"},{\"roleId\":\"4\",\"roleName\":\"设备4\"}],\r\n\t\t\t\tselectedList:[{\"roleId\":\"4\",\"roleName\":\"设备4\"},{\"roleId\":\"5\",\"roleName\":\"设备2\"}],\r\n\t\t\t\toptionValue:\"roleId\",\r\n\t\t\t\toptionText:\"roleName\",\r\n\t\t\t\tdoubleMove:true,\r\n\t\t\t});\r\n\t\t},\r\n\r\n\r\n\t\tinit: function () {\r\n\t\t\tvar _this = this;\r\n\t\t\t_this.taskDataGet();\r\n\t\t\tpublic_func.treeList();\r\n\r\n\t\t\tpublic_func.subMenuClick();\r\n\r\n\t\t\t// 在本页面获取点击的node的id\r\n\t\t\t$('.btn-success').on('click.gtye',function(e) {\r\n\t\t\t\tconsole.log(public_func.defaultSet.selTreeNodeId);\r\n\r\n\t\t\t})\r\n\t\t}\r\n\t};\r\n\r\n\tdeploy.init();\r\n})" }, { "alpha_fraction": 0.5068492889404297, "alphanum_fraction": 0.5140247941017151, "avg_line_length": 19.26388931274414, "blob_id": "58bba6f2ad4c92b1b486bedb02d2a9f289eee3e1", "content_id": "39fb1b7d8ede6f077cabfb6701d5734a3cad867f", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1563, "license_type": "no_license", "max_line_length": 216, "num_lines": 72, "path": "/uplooking_Python/code/前端/lesson10-前端day04/web-day-04/完整项目/monitor-web1/mDomain/js/domain.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 2018/3/22.\r\n */\r\n$(function () {\r\n\tvar domain = {\r\n\r\n\t\tsaveData: {\r\n\t\t\tpageNum:1,\r\n\t\t\tsubmenuId: 0\r\n\t\t},\r\n\r\n\t\tgetTableData: function(){\r\n\t\t\tvar _this = this,\r\n\t\t\t\tparam = {};\r\n\r\n\t\t\tparam.page = _this.saveData.pageNum;\r\n\t\t\tparam.submenu = _this.saveData.submenuId;\r\n\t\t\tconsole.log(param)\r\n\r\n\t\t\t// $.get('接口', param, function(response){ // domain列表接口\r\n\t\t\t$.getJSON('../../data/tablelist.json', function(response){\r\n\t\t\t\tif(response.code == 1){\r\n\t\t\t\t\tvar data = response.data.detail;\r\n\t\t\t\t\t_this.tableTpl(data);\r\n\r\n\t\t\t\t\t_this.pageSel();\r\n\r\n\t\t\t\t}\r\n\t\t\t})\r\n\t\t},\r\n\r\n\t\ttableTpl: function (data) {\r\n\t\t\tvar _this = this,\r\n\t\t\t\tstr = '';\r\n\r\n\t\t\tstr += '<thead><th><td>name</td><td>mDomain</td><td>enable</td><td>type</td><td>line</td><td>ttl</td></th></thead><tbody>';\r\n\r\n\t\t\t$.each(data, function(index, value){\r\n\t\t\t\tstr += '<tr><td>' + (index + 1) + '</td><td>' + value.name + '</td><td>' + value.domain + '</td><td>' + value.enable + '</td><td>' + value.type + '</td><td>' + value.line + '</td><td>' + value.ttl + '</td></tr>';\r\n\r\n\t\t\t});\r\n\r\n\t\t\tstr += '</tbody>';\r\n\r\n\t\t\t$('#bodyList').html(str);\r\n\t\t},\r\n\r\n\t\t// 分页\r\n\t\tpageSel: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t$('.pagination').off('click').on('click', 'a', function (event) {\r\n\t\t\t\t// 一般直接调接口\r\n\t\t\t\tvar thisPageNum = $(this).text();\r\n\r\n\t\t\t\t_this.saveData.pageNum = thisPageNum;\r\n\t\t\t\t_this.getTableData();\r\n\t\t\t})\r\n\r\n\t\t},\r\n\r\n\t\tinit: 
function(){\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t_this.getTableData();\r\n\r\n\t\t\tpublic_func.subMenuClick()\r\n\t\t}\r\n\t};\r\n\r\n\tdomain.init();\r\n});\r\n\r\n" }, { "alpha_fraction": 0.6210070252418518, "alphanum_fraction": 0.6312940120697021, "avg_line_length": 28.79032325744629, "blob_id": "0907b823ada87589ae7ff2485e65d410373f987c", "content_id": "a2bceb933edb9154db5c5cfc2d1abee488b424d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1935, "license_type": "no_license", "max_line_length": 195, "num_lines": 62, "path": "/uplooking_Python/code/lesson09-flask/ops/hosts/views/host.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport json\nfrom flask import render_template\nfrom flask import request\nfrom hosts import blue_print\nfrom flask import make_response\nfrom hosts.controller import hostHandleController as hc\n\n@blue_print.route(\"/index\", methods=[\"GET\"])\ndef index():\n return render_template(\"hosts.html\")\n\n@blue_print.route(\"/all\", methods=[\"GET\"])\ndef get_hosts():\n page = request.args.get(\"page\")\n host_type = request.args.get(\"host_type\")\n # 获取主机\n hosts = hc.HostHandle.getHostsByPage(page, host_type)\n #hosts = [{\"hostname\":\"tj1-nginx01.kscn\", \"host_type\": \"vm\", \"ip\": \"1.1.1.1\", \"location\":\"beijing\"}, {\"hostname\":\"tj1-nginx02.kscn\", \"host_type\": \"vm\", \"ip\": \"1.1.1.1\", \"location\":\"beijing\"}]\n result = {\n \"code\":200,\n \"data\": hosts\n }\n return make_response(json.dumps(result))\n\n\n@blue_print.route(\"/add\", methods=[\"POST\"])\ndef host_add():\n # 获取请求参数 request.args从url中获取参数\n # hostnames = request.args.get(\"hostnames\")\n # print \"Hostnames is [{}]\".format(hostnames)\n\n # 获取表单\n # ip = request.form[\"ip\"]\n # system = request.form[\"system\"]\n # print \"Ip is {ip}. 
system is {sys}\".format(ip=ip, sys=system)\n\n #获取json数据\n # json_data = request.get_json()\n # print \"json_data:\", json_data\n\n # 获取添加的主机\n hostname = request.form[\"hostname\"]\n host_type = request.form[\"type\"]\n ip = request.form[\"ip\"]\n location = request.form[\"location\"]\n addResult = hc.HostHandle.host_add(hostname, host_type, ip, location)\n if addResult:\n return \"资产添加成功!\"\n return \"资产添加失败!\"\n\n@blue_print.route(\"/bind\", methods=[\"POST\"])\ndef host_bind():\n\n hostnames = request.form[\"hostname\"]\n node = request.form[\"node\"]\n bindInfo = hc.HostHandle.host_bind(hostnames.split(\",\"), node)\n result = {\n \"code\":0,\n \"data\":bindInfo\n }\n return json.dumps(result)\n" }, { "alpha_fraction": 0.5043500661849976, "alphanum_fraction": 0.5107471942901611, "avg_line_length": 28.310077667236328, "blob_id": "f46f72630a7e0a7ef0276c3063f9ef525521b044", "content_id": "240f0cf94c23805d722932ac71b2a560c1e8238d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3982, "license_type": "no_license", "max_line_length": 127, "num_lines": 129, "path": "/uplooking_Python/code/前端/lesson08-web/kuozhanPackage/为知笔记/scripts/popup/LoginControl.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/* global PopupView: false ztreeControl:false*/\r\n'use strict';\r\n\r\nvar ztreeControl = new ZtreeController();\r\nfunction LoginControl() {\r\n\r\n var isAutoLogin = false;\r\n var errValidator = $('#div_error_validator');\r\n var user_id = $('#user_id');\r\n var password = $('#password');\r\n var keep_password = $('#keep_password');\r\n //add click listener and enter to login button\r\n $('#login_button').on('click', loginSubmit);\r\n $('.wiz_login').on('keydown', 'input, button', function(e) {\r\n if (e.which === 13) {\r\n loginSubmit();\r\n }\r\n });\r\n\r\n /**\r\n * 使用cookie自动登录\r\n * @param {object} cookie 登录的cookie对象\r\n */\r\n function autoLogin(cookie) {\r\n isAutoLogin = 
true;\r\n $('#waiting').show();\r\n\r\n var info = cookie.value;\r\n var loginParam = {\r\n client_type: Wiz.Constant.LOGIN_PARAMS.CLIENT_TYPE,\r\n api_version: Wiz.Constant.LOGIN_PARAMS.API_VERSION,\r\n cookieStr: info\r\n };\r\n login(loginParam);\r\n }\r\n\r\n function login(loginParam) {\r\n var port = chrome.extension.connect({\r\n name : 'login'\r\n });\r\n port.postMessage(loginParam);\r\n port.onMessage.addListener(function(res) {\r\n var code = res.code;\r\n var msg, cert;\r\n if (code === '200') {\r\n localStorage.clear();\r\n\r\n cert = res.cookieStr;\r\n //cookie保存时间 (秒)\r\n var expiredays;\r\n if (keep_password.prop('checked')) {\r\n expiredays = Wiz.Constant.Default.COOKIE_EXPIRE_SEC;\r\n }\r\n PopupView.hideLogoffDiv();\r\n localStorage[Wiz.Constant.Default.COOKIE_USER] = loginParam.user_id;\r\n\r\n if (!isAutoLogin) {\r\n //自动登陆不需要再次设置token\r\n Wiz.Cookie.setCookies(Wiz.Constant.Default.COOKIE_URL, Wiz.Constant.Default.COOKIE_CERT, cert, expiredays);\r\n }\r\n }\r\n else {\r\n if (!!code) {\r\n msg = chrome.i18n.getMessage('err_' + code);\r\n } else {\r\n msg = res;\r\n }\r\n PopupView.showLoginError(msg);\r\n }\r\n });\r\n }\r\n\r\n function doLogin() {\r\n var loginingMsg = chrome.i18n.getMessage('logining');\r\n PopupView.showWaiting(loginingMsg);\r\n\r\n var loginParam = {\r\n client_type: Wiz.Constant.LOGIN_PARAMS.CLIENT_TYPE,\r\n api_version: Wiz.Constant.LOGIN_PARAMS.API_VERSION,\r\n user_id: user_id.val(),\r\n password: 'md5.' 
+ hex_md5(password.val())\r\n };\r\n login(loginParam);\r\n }\r\n\r\n /**\r\n * 点击登陆按钮触发事件\r\n */\r\n function loginSubmit() {\r\n if (checkEmail() && checkPassword()) {\r\n doLogin();\r\n }\r\n }\r\n\r\n function checkEmail() {\r\n errValidator.html('');\r\n var email = user_id.val();\r\n var valid = verifyEmail(email);\r\n if (!valid) {\r\n errValidator.html(chrome.i18n.getMessage('userid_error')).show(100);\r\n }\r\n return valid;\r\n\r\n }\r\n\r\n function verifyEmail(str_email) {\r\n return !!(str_email && str_email.trim().length > 1);\r\n\r\n }\r\n\r\n function checkPassword() {\r\n errValidator.html('');\r\n var passwordVal = password.val();\r\n if (passwordVal.trim().length < 1) {\r\n errValidator.html(chrome.i18n.getMessage('password_error')).show(100);\r\n return false;\r\n }\r\n return true;\r\n\r\n }\r\n\r\n function initCreateAccountLink() {\r\n $('#create_acount').html(chrome.i18n.getMessage('create_account_link')).bind('click', function(evt) {\r\n window.open(Wiz.Constant.Default.REGISTER_URL);\r\n });\r\n }\r\n this.initCreateAccountLink = initCreateAccountLink;\r\n this.autoLogin = autoLogin;\r\n}" }, { "alpha_fraction": 0.6727941036224365, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 26.100000381469727, "blob_id": "6ec4567e9252e921567eaa5a7081c06caa3f4369", "content_id": "a4ceaef9a8fb4f1ae98a2a1abd7804369eab65d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 68, "num_lines": 10, "path": "/uplooking_Python/code/20180107/get_file_size.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n\nimport os\n\nfrom os.path import join,getsize\n\nfor root,dirs,files in os.walk('/home/f/20170508/uplooking_Python'):\n print root,\"consumes\",\n print sum([getsize(join(root,name)) for name in files]),\n print \"bytes in \",len(files),\"non-directory files\"\n\n" }, { "alpha_fraction": 0.512646496295929, 
"alphanum_fraction": 0.5194324254989624, "avg_line_length": 19.613332748413086, "blob_id": "a942088b3f3ef97a098c000f5ab3c1a5e0b75ecc", "content_id": "ff055e709a3a5fd413986bd94c40ec4bf8b9e1eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1675, "license_type": "no_license", "max_line_length": 246, "num_lines": 75, "path": "/uplooking_Python/code/lesson07-flask/ops/servicetree/static/js/serviceTree.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 2018/3/22.\r\n */\r\n$(function(){\r\n\tvar tree = {\r\n\r\n\t\tsaveData: {\r\n\t\t\tpageNum:1,\r\n\t\t\tsubmenuId: 0\r\n\t\t},\r\n\r\n\t\tgetTableData: function(){\r\n\t\t\tvar _this = this,\r\n\t\t\t\tparam = {};\r\n\r\n\t\t\tparam.nodeId = public_func.defaultSet.selTreeNodeId\r\n\t\t\tparam.page = _this.saveData.pageNum;\r\n\t\t\tparam.submenu = _this.saveData.submenuId;\r\n\t\t\tconsole.log(param)\r\n\r\n\t\t\t// $.get('接口', param, function(response){ // domain列表接口\r\n\t\t\t$.get('/tree/all', function(response){\r\n\t\t\t\tif(response.code == 1){\r\n\t\t\t\t\tvar data = response.data.detail;\r\n\r\n\t\t\t\t\t_this.tableTpl(data);\r\n\r\n\t\t\t\t\t_this.pageSel();\r\n\r\n\t\t\t\t}\r\n\t\t\t})\r\n\t\t},\r\n\r\n\t\ttableTpl: function (data) {\r\n\t\t\tvar _this = this,\r\n\t\t\t\tstr = '';\r\n\r\n\t\t\tstr += '<thead><th><td>主机名</td><td>IP</td><td>宿主机</td><td>机房</td><td>状态</td><td>套餐</td><td>tags</td></th></thead><tbody>';\r\n\r\n\t\t\t$.each(data, function(index, value){\r\n\t\t\t\tstr += '<tr><td>' + (index + 1) + '</td><td>' + value.zhuji + '</td><td>' + value.ip + '</td><td>' + value.suzhuji + '</td><td>' + value.jifang + '</td><td>' + value.status + '</td><td>' + value.menu + '</td><td>' + value.tags + '</td></tr>';\r\n\r\n\t\t\t});\r\n\r\n\t\t\tstr += '</tbody>';\r\n\r\n\t\t\t$('#bodyList').html(str);\r\n\t\t},\r\n\r\n\t\t// 分页\r\n\t\tpageSel: function () {\r\n\t\t\tvar _this = 
this;\r\n\r\n\t\t\t$('.pagination').off('click').on('click', 'a', function (event) {\r\n\t\t\t\t// 一般直接调接口\r\n\t\t\t\tvar thisPageNum = $(this).text();\r\n\r\n\t\t\t\t_this.saveData.pageNum = thisPageNum;\r\n\t\t\t\t_this.getTableData();\r\n\t\t\t})\r\n\r\n\t\t},\r\n\r\n\t\tinit: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\tpublic_func.treeList();\r\n\t\t\t_this.getTableData();\r\n\r\n\t\t\tpublic_func.subMenuClick()\r\n\t\t}\r\n\t};\r\n\r\n\ttree.init();\r\n})\r\n" }, { "alpha_fraction": 0.5183982849121094, "alphanum_fraction": 0.6417748928070068, "avg_line_length": 24.457143783569336, "blob_id": "77f5ba641ea527582417bce7220e2954591d5bfc", "content_id": "41f411a8267675337c1f4d1852b7fd4e1d5deae9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 964, "license_type": "no_license", "max_line_length": 94, "num_lines": 35, "path": "/0629/func_para.py", "repo_name": "wolfrg/20170508", "src_encoding": "GB18030", "text": "#coding:gbk\r\n'''\r\nCreated on 2017年3月7日\r\n\r\n@author: Ops\r\n'''\r\nimport paramiko\r\nimport os\r\n\r\n\r\n\r\ndef func_paramiko(server,hostname,commands):\r\n \r\n sshkey= os.path.expanduser('E:\\\\内网\\\\内网服务\\\\内网服务器key文件\\\\fengruigang_tomcat_20150427')\r\n username = \"tomcat\"\r\n\r\n ssh = paramiko.SSHClient()\r\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n ssh.connect(hostname=hostname,username=username,key_filename=sshkey,password=\"tomcat@fengruigang\")\r\n \r\n stdin,stdout,stderr = ssh.exec_command(commands)\r\n \r\n for std in stdout.readlines():\r\n print server,std\r\n \r\n \r\n ssh.close()\r\n \r\n\r\n#调用函数\r\nfunc_paramiko(\"192.168.0.221:\",\"192.168.0.221\",\"uptime\")\r\nfunc_paramiko(\"192.168.0.222:\",\"192.168.0.222\",\"uptime\") \r\nfunc_paramiko(\"192.168.0.223:\",\"192.168.0.223\",\"uptime\")\r\nfunc_paramiko(\"192.168.0.224:\",\"192.168.0.224\",\"uptime\")\r\nfunc_paramiko(\"192.168.0.226:\",\"192.168.0.226\",\"uptime\")" }, { 
"alpha_fraction": 0.5352751016616821, "alphanum_fraction": 0.5449838042259216, "avg_line_length": 27.03636360168457, "blob_id": "2ee07fbcbafa2af3bdaabf6d85f93822c6de0da8", "content_id": "9d5c3ec389a268bb38675a9b338b190c55b6b1a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1641, "license_type": "no_license", "max_line_length": 57, "num_lines": 55, "path": "/uplooking_Python/code/lesson09-flask/ops/servicetree/controller/tree.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n############################\n#Author: BigV Yang #\n#Data: 2018/03/30 #\n############################\nimport json\nfrom servicetree.models import *\nfrom config.development import NODE_TYPE\nfrom libs.error import ParamError, ServerError\n\nclass Tree(object):\n\n @classmethod\n def get_all(self):\n # 获取树\n trees = MapTree.get_all()\n TreeStruct = {\n \"message\": \"sucess\",\n \"code\": 1,\n \"data\":{\n \"agreement\": trees\n }\n }\n\n return json.dumps(TreeStruct)\n\n @classmethod\n def add_node(cls, pid, nodeName, cname):\n # 添加节点\n pNode = MapTree.get_item_byPid(pid)\n if len(pNode) != 1:\n raise(ServerError(\"服务器内部错误, 未获取到对应父节点\"))\n\n pnode_type = pNode[0][\"node_type\"]\n node_type = get_nodeType(pnode_type)\n MapTree.add_node(pid, nodeName, cname, node_type)\n\n @classmethod\n def getHostsByTagId(cls, tagId):\n hosts = []\n hostIds = TagsHostRelation.getHostsIds(tagId)\n if len(hostIds) > 0:\n hosts = Hosts.getHostsByIds(hostIds)\n return hosts\n\ndef get_nodeType(pNodeType):\n len_node_type = len(NODE_TYPE)\n for index, value in enumerate(NODE_TYPE):\n if value == pNodeType:\n new_node_index = index + 1\n if new_node_index >= len_node_type:\n raise (ParamError(\"传入的tag有误请检查后重试!\"))\n return NODE_TYPE[index+1]\n raise (ParamError(\"传入的tag有误请检查后重试!\"))\n\n\n\n" }, { "alpha_fraction": 0.7111111283302307, "alphanum_fraction": 0.7111111283302307, 
"avg_line_length": 10.25, "blob_id": "a6fac9f2a4c47dcf26512fa6fbcb20e9b1c13f44", "content_id": "de9c7041f3103978d42e94359666dffc7f86ea94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 57, "license_type": "no_license", "max_line_length": 23, "num_lines": 4, "path": "/uplooking_Python/code/lesson05/myapps/monitor/bin/env.sh", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#添加项目路径\nexport PYTHONPATH=`pwd`\n" }, { "alpha_fraction": 0.7597765326499939, "alphanum_fraction": 0.7625698447227478, "avg_line_length": 31.363636016845703, "blob_id": "8872706d59376530d8932c2a2b89198279d33d99", "content_id": "68f85ec58cfa0fe28069fa2d86d98e43e4f4e74d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 358, "license_type": "no_license", "max_line_length": 45, "num_lines": 11, "path": "/uplooking_Python/code/lesson07-flask/ops/libs/log/logger.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport logging.config\nfrom config.development import config_log\nlogging.config.dictConfig(config_log)\n\nhosts_logger = logging.getLogger(\"hosts\")\ndb_logger = logging.getLogger(\"mysql-db\")\ndeploy_logger = logging.getLogger(\"deploy\")\nsrvtree_logger = logging.getLogger(\"srvtree\")\npriv_logger = logging.getLogger(\"priv\")\n\n\n" }, { "alpha_fraction": 0.502686083316803, "alphanum_fraction": 0.5049884915351868, "avg_line_length": 33.28947448730469, "blob_id": "6cce75613a3be31eb2ff7b412ad5f507310da1c0", "content_id": "61978bfc9957c81baeeff7ab0e02a0c9fc8b5d4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1355, "license_type": "no_license", "max_line_length": 94, "num_lines": 38, "path": "/uplooking_Python/code/lesson05/myapps/monitor/utils/expire_time.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", 
"text": "#coding:utf8\n\n\nimport re\nimport commands\nfrom libs.parser import expire_reg\nfrom logger.logger import get_logger\nfrom convert_time import convert_time\ndef expire_time(info,domain_name):\n #for reg in expire_reg:\n # try:\n # result = re.search(reg,info)\n # expire_time = result.group(1)\n # expire_day = expire_time\n # #print \"域名:%s,到期时间:%s\" % (domain_name,expire_day)\n # if expire_day is not None:\n # if expire_day == \"No matching record\":\n # #return \"No matching record.\"\n # logger.info(\"域名:%s没查到有效信息\" % (domain_name))\n # elif expire_day == \"The queried object does not exist: DOMAIN NOT FOUND\":\n # #return \"The queried object does not exist: DOMAIN NOT FOUND\"\n # logger.info(\"域名:%s没查到有效信息\" % (domain_name))\n # else:\n # return expire_day\n # except Exception,e:\n # continue\n\n\n\n for reg in expire_reg:\n try:\n\n expire_day = re.search(reg,info).group(1)\n expire_day = convert_time(expire_day)\n return expire_day\n\n except Exception,e:\n continue\n" }, { "alpha_fraction": 0.3692077696323395, "alphanum_fraction": 0.3863976001739502, "avg_line_length": 24.75, "blob_id": "38fb113406db273a199df3ade35445a3b1706e52", "content_id": "951b11db11b3b4ec74e660da4e6e021c3081b964", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1342, "license_type": "no_license", "max_line_length": 81, "num_lines": 52, "path": "/uplooking_Python/code/lesson09-flask/ops/config/development.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nDEBUG = True\nAPP_DEV_PORT = 8888\n\n# db\nDB_HOST = \"127.0.0.1\"\nDB_PORT = 3306\nDB_NAME = \"python01\"\nDB_USER = \"root\"\nDB_PASSWD = \"123321\"\nDB_CONNECT_TIMEOUT = 10\nDB_CHARSET = \"utf8\"\nTIMEOUT_TIMES = 3\n\nNODE_TYPE = [\"cop\", \"dep\", \"pdl\", \"service\", \"servicegroup\"]\n# 日志\nconfig_log = {\n 'version': 1,\n 'formatters':{\n 'simple':{\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n },\n 
'simple2':{\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n },\n },\n 'handlers':{\n 'hostfile': {\n 'class': 'logging.FileHandler',\n 'filename': 'log/hosts.log',\n 'level': 'DEBUG',\n 'formatter': 'simple'\n },\n\n 'httpfile': {\n 'class': 'logging.FileHandler',\n 'filename': 'log/http.log',\n 'level': 'DEBUG',\n 'formatter': 'simple'\n },\n },\n 'loggers':{\n 'hosts':{\n 'handlers': ['hostfile'],\n 'level': 'DEBUG',\n },\n 'http':{\n 'handlers': ['httpfile'],\n 'level': 'INFO',\n }\n }\n }" }, { "alpha_fraction": 0.7671957612037659, "alphanum_fraction": 0.7698412537574768, "avg_line_length": 30.5, "blob_id": "d856b06c80268e29d6433fbbc70c024afde003ef", "content_id": "51d5e8b1388760e145cbec85633d9c5d1e77f3a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 52, "num_lines": 12, "path": "/uplooking_Python/code/lesson09-flask/ops/servicetree/views/tree_api.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom servicetree import blue_print\nfrom flask import request\nfrom servicetree.controller.tree_api import HostApi\nfrom servicetree.controller import *\nimport json\n\n@blue_print.route(\"/hostname/tags\", methods=[\"GET\"])\ndef getNodesByHostname():\n hostname = request.args.get(\"hostname\")\n tags = HostApi.getNodesByHostname(hostname)\n return json.dumps(tags)\n" }, { "alpha_fraction": 0.606940507888794, "alphanum_fraction": 0.6154391169548035, "avg_line_length": 26.69607925415039, "blob_id": "2c0ad70c8e18a241b205ad521926bfad89440499", "content_id": "10033af922e0e80f36be5371c65a46a31896f3b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2824, "license_type": "no_license", "max_line_length": 86, "num_lines": 102, "path": "/uplooking_Python/code/SimpleFlask+jquery+mysql/app.1.py", "repo_name": "wolfrg/20170508", "src_encoding": 
"UTF-8", "text": "#!/usr/bin/python\nfrom flask import Flask,render_template,request,redirect,session\nimport MySQLdb as mysql\n\ncon = mysql.connect(host='localhost',user='root',passwd='123321',db='python01')\ncon.autocommit(True)\ncur =con.cursor()\n\napp = Flask(__name__)\nimport util\nfrom util import app_index,app_login,app_delete,app_adduser,app_updatepw,app_getpw\n# use random\napp.secret_key = 'iouasoiduio89127398981273'\n\[email protected]('/usertemp')\ndef usertemp():\n if 'user' in session:\n return render_template('usertemp.html',user=session['user'],users=app_index())\n\[email protected]('/')\ndef index():\n if 'user' in session:\n return render_template('index.html',user=session['user'],users=app_index())\n else:\n return redirect('/login')\n\[email protected]('/login',methods=['GET','POST'])\ndef login():\n if request.method=='GET':\n return render_template('login.html')\n elif request.method=='POST':\n user = request.form.get('user')\n pwd = request.form.get('pwd')\n app_user = app_login(user,pwd)\n if app_user:\n session['user'] = user\n return redirect('/')\n else:\n return 'wrong user. 
or passwd'\n\[email protected]('/delete')\ndef deleteuser():\n user = request.args.get('user')\n print 'user',user\n app_delete(user)\n return 'ok' \n\[email protected]('/changepw',methods=['GET','POST'])\ndef changepw():\n # if request.method == 'GET':\n # user = request.args.get('user')\n # return render_template('changepw.html',user=user)\n #elif request.method == 'POST':\n user = request.form.get('user')\n oldpwd = request.form.get('oldpwd')\n newpwd = request.form.get('newpwd')\n confirmpwd = request.form.get('confirmpwd')\n pwd = list(app_getpw(user)) \n pwd = ''.join(pwd)\n pwd = pwd.strip()\n if pwd!=oldpwd:\n return 'wrong old password'\n if newpwd!=confirmpwd:\n return 'new pwd not equal to confirmpwd'\n app_updatepw(newpwd,user)\n return 'ok'\n\n##@app.route('/adduser')\n##def adduser():\n## user = request.args.get('user')\n## pwd = request.args.get('pwd')\n## if (not user) or (not pwd):\n## return 'need username and password'\n##\n## sql = 'insert into user values (\"%s\",\"%s\")'%(user,pwd)\n## cur.execute(sql)\n## return 'ok'\n\[email protected]('/adduser',methods=['GET','POST'])\ndef adduser():\n #if request.method == 'GET':\n # return render_template('adduser.html')\n #elif request.method =='POST':\n #user = request.form.get('user')\n # pwd = request.form.get('pwd')\n user = request.args.get('user')\n pwd = request.args.get('pwd')\n app_adduser(user,pwd)\n #return redirect('/')\n return 'ok' \n\[email protected]('/logout')\ndef logout():\n del session['user']\n return redirect('/login')\n\n\n\n\n\nif __name__==\"__main__\":\n app.run(host='0.0.0.0',port=3333,debug=True)" }, { "alpha_fraction": 0.38051843643188477, "alphanum_fraction": 0.38072094321250916, "avg_line_length": 31.926666259765625, "blob_id": "35cbcbebd590886b8272f747959d3f53863c7a32", "content_id": "87876426c2832f1c9b27a3a0bc18ef7a940b6856", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5064, "license_type": "no_license", 
"max_line_length": 131, "num_lines": 150, "path": "/uplooking_Python/code/flask/static/js/index.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "$(function(){\n var monitor = {\n\n menuClick:function() {\n // var _this = this;\n\n $('.main-menu').off('click').on('click','li',function(e){\n var thisBtn = $(this);\n thisBtn.parent().find('li').removeClass('active');\n thisBtn.addClass('active');\n })\n\n },\n\n\n // 点击添加按钮,添加任务\n taskActionPage: function(){\n debugger;\n var _this = this;\n\n $('#exampleModal').on('show.bs.modal', function (event) {\n var button = $(event.relatedTarget),\n actionType = button.data('for'),\n modal = $(_this);\n\n var id=button.attr(\"v\");\n\n if(actionType == 'add'){\n $('#exampleModalLabel').text('添加员工信息');\n _this.addTaskItem();\n\n }else if(actionType == 'edit'){\n $('#exampleModalLabel').text('编辑任务');\n _this.editTaskItem(id);\n }\n\n })\n },\n\n // 添加: 编号 用户名 职位 IP地址 备注 function\n addTaskItem: function(){\n var _this = this,\n formTpl = '';\n formTpl += '<div class=\"form-group\">\\\n <label class=\"control-label\">编号:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"num\" id=\"num\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">用户名:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"username\" id=\"username\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">职位:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"position\" id=\"position\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">IP地址:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"ipaddr\" id=\"ipaddr\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">备注:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"remark\" id=\"remark\">\\\n </div>';\n\n $('#exampleModal').find('form').html(formTpl);\n\n\n },\n\n\n\n // click edit button show edit view\n editTaskItem:function(){\n\n // var data = {\n // 'id':'1',\n // 'username':'fengruigang'\n // };\n\n var 
_this = this;\n formTpl = '';\n formTpl += '<div class=\"form-group\">\\\n <label class=\"control-label\">编号:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"num\" id=\"num\" value=\"'+data.id+'\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">用户名:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"username\" id=\"username\" value=\"'+data.username+'\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">职位:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"position\" id=\"position\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">IP地址:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"ipaddr\" id=\"ipaddr\">\\\n </div>\\\n <div>\\\n <label class=\"control-label\">备注:</label>\\\n <input type=\"text\" class=\"form-control\" name=\"remark\" id=\"remark\">\\\n </div>';\n\n $('#exampleModal').find('form').html(formTpl);\n \n \n\n\n\n },\n\n commitAdd:function() {\n var _this = this;\n $('#submitbtn').on('click.add',function(e){\n alert('commitAdd function')\n public_func.addInfo();\n \n });\n\n },\n\n commitEdit:function() {\n \n var _this = this;\n $('submitbtn').on('click.edit',function(e){\n alert('commitEdit function')\n public_func.editInfo();\n });\n\n },\n\n\n\n \n init:function(){\n var _this = this;\n _this.menuClick();\n _this.taskActionPage();\n \n // 调用公共的方法\n // public_func.treeList();\n public_func.showInfo();\n _this.commitAdd();\n _this.commitEdit();\n \n \n }\n };\n\n monitor.init()\n})" }, { "alpha_fraction": 0.32592591643333435, "alphanum_fraction": 0.32592591643333435, "avg_line_length": 12.600000381469727, "blob_id": "5eddf011b0b91fd538f8be452a10d4722c85a02c", "content_id": "3e5f33de2659f56a078e882c8c8ebaed63fcf210", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 135, "license_type": "no_license", "max_line_length": 30, "num_lines": 10, "path": "/uplooking_Python/code/前端/lesson10-前端day04/test/js/index.js", 
"repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "// $(function(){\n// var monitor = {\n\n// pageSel:function(){\n// var _this = this;\n \n\n// }\n// };\n// })" }, { "alpha_fraction": 0.47058823704719543, "alphanum_fraction": 0.686274528503418, "avg_line_length": 16, "blob_id": "0c1a26f692b75a31e31a7a4c3183c24b0d0a7fe0", "content_id": "b891924f6ad808451265076521f849388c730c9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 51, "license_type": "no_license", "max_line_length": 19, "num_lines": 3, "path": "/uplooking_Python/code/lesson08-flask/ops/requirement.txt", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "flask==0.12.2\ngunicorn==19.7.1\nMySQL-python==1.2.5\n" }, { "alpha_fraction": 0.7894737124443054, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 46.5, "blob_id": "18621fd1ae67638665098a87851b283a2eaddf45", "content_id": "b1c7ada77d44f52c2acb4d430cec294fcd534bc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 93, "num_lines": 4, "path": "/uplooking_Python/code/lesson09-flask/ops/servicetree/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nfrom models.tree_host_relation import TagsHostRelation\nblue_print = Blueprint(\"tree\", __name__, template_folder=\"templates\", static_folder=\"static\")\nimport views\n" }, { "alpha_fraction": 0.498516321182251, "alphanum_fraction": 0.5103857517242432, "avg_line_length": 15.850000381469727, "blob_id": "c37b21b24ea28cfc1278cae24a10bde28f293ebb", "content_id": "a32cee291982c646f68591da738bf67da6ed3a35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "no_license", "max_line_length": 62, "num_lines": 20, "path": "/uplooking_Python/code/lesson06/less05/err/test.py", 
"repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom error import DBError\n\n\ndef test():\n try:\n int(\"aaaaa\")\n except Exception, e: \n raise DBError(\"500\", \"%s to %s 类型转换失败\"%(\"str\", \"int\"))\n\ndef main():\n try:\n test()\n except DBError, e:\n print e.code, e.message\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.4931034445762634, "alphanum_fraction": 0.5051724314689636, "avg_line_length": 20.384614944458008, "blob_id": "812ec92cea156d0fa427f2d61ce60260800c4d7f", "content_id": "9265ac01ac3b98e2448abc8d1bf582ba8f057d5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 54, "num_lines": 26, "path": "/0901/findFile.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 2017年9月6日\r\n\r\n@author: Ops\r\n'''\r\nimport os\r\n\r\n\r\ndef find_files(path,wanted):\r\n try:\r\n dir_list = os.listdir(path)\r\n for filename in dir_list:\r\n new_path = os.path.join(path,filename)\r\n\r\n if os.path.isdir(new_path):\r\n find_files(new_path,wanted)\r\n\r\n elif os.path.isfile(new_path):\r\n if wanted.lower() in filename.lower():\r\n #print(filename)\r\n print(new_path) \r\n except Exception as e:\r\n raise e\r\n\r\n\r\nfind_files('F:\\\\Python\\\\spa','mp4')" }, { "alpha_fraction": 0.42127659916877747, "alphanum_fraction": 0.42620381712913513, "avg_line_length": 34.90082550048828, "blob_id": "ea68e66a00895e8fa0271001e8a8ba64a7352860", "content_id": "3bd56386c2a2f64dc05d03c1ed712487ec86eaae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4629, "license_type": "no_license", "max_line_length": 207, "num_lines": 121, "path": "/uplooking_Python/code/前端/lesson10-前端day04/web-day-04/今天做的项目/monitor/mDeploy/js/deploy.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", 
"text": "/**\r\n * Created by Administrator on 2018/3/25.\r\n */\r\n$(function(){\r\n var deploy = {\r\n\r\n // 任务列表\r\n taskDataGet: function () {\r\n var _this = this;\r\n\r\n // $.get('url', {}, function (response) {\r\n $.getJSON('../../data/task.json', function(response){\r\n if(response.code == 1){\r\n var taskData = response.data.tasks;\r\n\r\n _this.taskTableTpl(taskData)\r\n }\r\n\r\n\r\n })\r\n },\r\n\r\n taskTableTpl: function(data){\r\n var _this = this,\r\n str = '';\r\n str += '<thead><tr><th><input type=\"checkbox\"></th><th><button class=\"btn btn-sm btn-warning\">批量发起</button></th><th>任务列表</th><th>服务组</th><th>工作组</th><th>环境</th><th>任务状态</th></tr></thead><tbody>';\r\n\r\n $.each(data, function(index, value){\r\n str += '<tr><td><input type=\"checkbox\"></td>\\\r\n\t\t\t\t\t\t\t\t<td><button class=\"btn btn-sm btn-warning js-lanuch-task mt-curosr\" data-name=\"' + value.name + '\">发起</button></td>\\\r\n\t\t\t\t\t\t\t\t<td class=\"js-edit-btn mt-curosr\" data-for=\"edit\" data-toggle=\"modal\" data-target=\"#exampleModal\">' + value.name + '</td>\\\r\n\t\t\t\t\t\t\t\t<td>' + value.serverGroup + '</td>\\\r\n\t\t\t\t\t\t\t\t<td>' + value.jobGroup + '</td>\\\r\n\t\t\t\t\t\t <td>' + value.enviroment + '</td>\\\r\n\t\t\t\t\t\t\t\t<td>' + value.restart + '</td></tr>';\r\n\r\n });\r\n\r\n str += '</tbody>';\r\n\r\n $('#bodyList').html(str);\r\n\r\n _this.lanuchTask();\r\n },\r\n\r\n\r\n addTaskItem:function(){\r\n var _this = this,\r\n formHtml = '';\r\n\r\n $('.js-add-task').off('click.task').on('click.task', function(e){\r\n $('#exampleModalLabel').text('添加任务')\r\n });\r\n\r\n formHtml += '<div class=\"form-group\">\\\r\n <label for=\"add-enviroment\">部署环境:</label>\\\r\n <input class=\"form-control\" type=\"text\" name=\"add-enviroment\" id=\"add-enviroment\"/>\\\r\n </div>\\\r\n <div class=\"form-group\">\\\r\n <label for=\"add-address\">代码地址:</label>\\\r\n <input class=\"form-control\" type=\"text\" name=\"add-address\" id=\"add-address\"/>\\\r\n 
</div>\\\r\n <div class=\"form-group\">\\\r\n <label for=\"add-account\">部署账号:</label>\\\r\n <input class=\"form-control\" type=\"text\" name=\"add-account\" id=\"add-account\"/>\\\r\n </div>\\\r\n <div class=\"form-group\">\\\r\n <label for=\"add-path\">部署路径:</label>\\\r\n <input class=\"form-control\" type=\"text\" name=\"add-path\" id=\"add-path\"/>\\\r\n </div>';\r\n\r\n $('#exampleModal').find('form').html(formHtml);\r\n\r\n\r\n // 这种方式也可以\r\n //$('#exampleModal').on('show.bs.modal', function (event) {\r\n //\r\n //})\r\n\r\n\r\n },\r\n lanuchTask: function(){\r\n var _this = this;\r\n $('.js-lanuch-task').off('click').on('click', function(e){\r\n $('.deploy-task-page').removeClass('hide');\r\n $('.task-list-page').addClass('hide');\r\n })\r\n\r\n $('#showMachine').off('click').on('click', function(e){\r\n\r\n var str = '';\r\n\r\n str += '<select multiple=\"multiple\" size=\"10\" name=\"doublebox\" class=\"machineBox\"></select>';\r\n\r\n $('#exampleModal').find('form').html(str);\r\n\r\n $('.machineBox').doublebox({\r\n nonSelectedListLabel: '产品线下的机器列表',\r\n selectedListLabel: '已选择的机器列表',\r\n preserveSelectionOnMove: 'moved',\r\n moveOnSelect: false,\r\n nonSelectedList:[{\"roleId\":\"1\",\"roleName\":\"设备1\"},{\"roleId\":\"2\",\"roleName\":\"设备2\"},{\"roleId\":\"3\",\"roleName\":\"设备3\"},{\"roleId\":\"4\",\"roleName\":\"设备4\"}],\r\n selectedList:[{\"roleId\":\"4\",\"roleName\":\"设备4\"},{\"roleId\":\"5\",\"roleName\":\"设备2\"}],\r\n optionValue:\"roleId\",\r\n optionText:\"roleName\",\r\n doubleMove:true,\r\n \r\n });\r\n })\r\n },\r\n\r\n\r\n init: function(){\r\n var _this = this;\r\n\r\n _this.taskDataGet();\r\n _this.addTaskItem();\r\n }\r\n }\r\n deploy.init();\r\n})\r\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 26.5, "blob_id": "6ebbcf0fdab745815421fdb12025306d9fe34a0f", "content_id": "c431b689339aea5d33cf41976db2be2ba94a1f28", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 44, "num_lines": 6, "path": "/uplooking_Python/code/lesson08-flask/ops/deploy/views/deploys.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import render_template\nfrom deploy import blue_print\n\n@blue_print.route(\"/index\", methods=[\"GET\"])\ndef index():\n return render_template(\"deploy.html\")\n" }, { "alpha_fraction": 0.8214285969734192, "alphanum_fraction": 0.8214285969734192, "avg_line_length": 28, "blob_id": "a7089e5dcfe93d80d264cfa8b293999ef707781b", "content_id": "27a59e4dec65e92cb0160886838a3fe9c477dca3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28, "license_type": "no_license", "max_line_length": 28, "num_lines": 1, "path": "/uplooking_Python/code/lesson08-flask/ops/servicetree/models/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from db_tree import MapTree" }, { "alpha_fraction": 0.7761194109916687, "alphanum_fraction": 0.7761194109916687, "avg_line_length": 32.75, "blob_id": "8c43cc65f00c74f0effb395096a8ecf71e33c197", "content_id": "9596c94b167f9a97376f3ec4e91266eedfe85616", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 92, "num_lines": 4, "path": "/uplooking_Python/code/lesson07-flask/my_ops/domains/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nblue_print = Blueprint(\"domain\",__name__,template_folder='templates',static_folder='static')\n\nimport views" }, { "alpha_fraction": 0.5230178833007812, "alphanum_fraction": 0.5268542170524597, "avg_line_length": 22, "blob_id": "997feb74bfb525fb1c2893abe324eed38bac0579", "content_id": "c1d4fdf555cf575b4468ed3ba3c94cfc09b7f535", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1974, "license_type": "no_license", "max_line_length": 53, "num_lines": 68, "path": "/uplooking_Python/code/lesson03/user_login.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n\nusers = {} #存储用户信息\nstatus = {} #{username:count} #记录用户的登录状态\ncount = 0\n\ndef user_login():\n print \"########请登录###########\"\n username = raw_input(\"请输入用户名:\")\n if users.has_key(username):\n password = raw_input(\"请输入密码:\")\n if users[username] == password:\n print \"登录成功\"\n change_password(username) #登录成功后调用修改密码的函数\n else:\n print \"用户名or密码错误\"\n user_login()\n\n else:\n print \"用户不存在,请注册\"\n user_register() #调用注册的函数\n\n\ndef user_register():\n\n #注册的时候增加一个判断:判断用户是否已经被注册\n username = raw_input(\"请输入用户名:\")\n if username not in users:\n password = raw_input(\"请输入密码:\")\n users[username] = password\n print users\n print \"恭喜您,注册成功!\"\n else :\n username = raw_input(\"用户名已经被注册,请注册一个别的:\")\n user_register()\n user_login()\n\n\n\ndef change_password(username):\n choice = raw_input(\"是否修改密码(y/N):\")\n if choice.lower() == 'y':\n while True:\n password = raw_input(\"请输入密码:\")\n password_again = raw_input(\"请再输入一次密码:\")\n if password == password_again:\n users[username] = password\n print \"Hello %s 您的密码修已改成功\" % username\n #break\n user_login()\n return True\n\n else:\n print \"您没有选择修改密码,请继续浏览\"\n #exit(-1)\n choice = raw_input(\"退出本站,请输入q\\n\")\n if choice.lower() == 'q':\n exit(-1)\n\ndef main():\n user_login()\n #user_register() #17行调用了该函数,此处就不能再调用了\n #user_login()\n\n\n\nif __name__==\"__main__\":\n main()\n" }, { "alpha_fraction": 0.5778012275695801, "alphanum_fraction": 0.5829815864562988, "avg_line_length": 24.940568923950195, "blob_id": "f9b68c67adf01a2dd3aee107dedc0cca66f5b655", "content_id": "5bfbc63ea7ca0962c822222f6ab419fde82b22d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 12188, "license_type": "no_license", "max_line_length": 120, 
"num_lines": 387, "path": "/uplooking_Python/code/lesson08-flask/ops/static/js/lib/utils.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 2018/3/22.\r\n */\r\nvar public_func = {\r\n\tdefaultSet: {\r\n\t\tclassName: \"dark\", // 树自带属性,\r\n\t\tselTreeNodeId: -1, // 保存节点id\r\n\t\tzNodes:[], //树所有节点\r\n\t},\r\n\r\n\t// 树---拖拽前方法(可忽略)\r\n\tbeforeDrag: function (treeId, treeNodes) {\r\n\t\treturn false;\r\n\t},\r\n\r\n\t//// 树---点击节点方法(重要)\r\n\t//onTreeClick: function (event, treeId, treeNode) {\r\n\t//\tvar _this = this;\r\n\t//\tpublic_func.defaultSet.selTreeNodeId = treeNode.id; // 将点击选中的节点id赋值给公共变量的节点id\r\n //\r\n\t//\treturn treeNode;\r\n //\r\n\t//},\r\n\r\n\t// 树---点击节点方法(重要)\r\n\tonTreeClick: function (event, treeId, treeNode) {\r\n\t\tvar _this = this;\r\n\t\tpublic_func.defaultSet.selTreeNodeId = treeNode.id; // 将点击选中的节点id赋值给公共变量的节点id\r\n\t\tvar zNodes = public_func.defaultSet.zNodes\r\n\t\tvar pNodeInfo = {\r\n\t\t\tname : '',\r\n\t\t\tpId : treeNode.pid\r\n\t\t}\r\n\t\tvar tags = ''\r\n\t\tfor(var i=0;i<treeNode.level;i++) {\r\n\t\t\tpNodeInfo = public_func.getPNodeInfo(zNodes,pNodeInfo.pId)\r\n\t\t\ttags = pNodeInfo.name += '_'+tags\r\n\t\t}\r\n\r\n\t\tif(treeNode.node_type){\r\n\t\t\ttags += treeNode.node_type+'.'\r\n\t\t}\r\n\t\t$('#tags').html(tags += treeNode.name)\r\n\t\treturn treeNode;\r\n\r\n\t},\r\n\r\n\r\n\t//树--点击获取tag串\r\n\tgetPNodeInfo : function (zNodes,pId) {\r\n\t\tvar pName = ''\r\n\t\t\tfor (var j = 0; j < zNodes.length; j++) {\r\n\t\t\t\tif (zNodes[j].id == pId) {\r\n\t\t\t\t\tif(zNodes[j].node_type){\r\n\t\t\t\t\t\tpName = zNodes[j].node_type+'.'\r\n\t\t\t\t\t}\r\n\t\t\t\t\tpName += zNodes[j].name\r\n\t\t\t\t\tpId = zNodes[j].pid\r\n\t\t\t\t\tbreak\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\treturn {\r\n\t\t\tname : pName,\r\n\t\t\tpId : pId\r\n\t\t}\r\n\t},\r\n\r\n\r\n\r\n\t// 树---编辑形成前方法(可忽略)\r\n\tbeforeEditName: function (treeId, treeNode) {\r\n\t\tvar _this = this;\r\n\t\t_this.className = 
(_this.className === \"dark\" ? \"\" : \"dark\");\r\n\r\n\t\tvar zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n\t\tzTree.selectNode(treeNode);\r\n\t\tzTree.editName(treeNode);\r\n\r\n\t\treturn false;\r\n\t},\r\n\r\n\t// 树---删除节点前方法(可忽略)\r\n\tbeforeRemove: function (treeId, treeNode) {\r\n\t\tvar _this = this;\r\n\t\t_this.className = (_this.className === \"dark\" ? \"\" : \"dark\");\r\n\r\n\t\tvar zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n\t\tzTree.selectNode(treeNode);\r\n\t\treturn public_func.delCheck();\r\n\t},\r\n\r\n\tdelCheck: function () {\r\n\t\tvar _this = this;\r\n\r\n\t},\r\n\r\n\t// 树---重命名节点方法(可忽略)\r\n\tbeforeRename: function (treeId, treeNode, newName, isCancel) {\r\n\t\tvar _this = this;\r\n\t\t_this.className = (_this.className === \"dark\" ? \"\" : \"dark\");\r\n\r\n\t\tif (newName.length == 0) {\r\n\t\t\tsetTimeout(function () {\r\n\t\t\t\tvar zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n\t\t\t\tzTree.cancelEditName();\r\n\t\t\t\talert(\"节点名称不能为空.\");\r\n\t\t\t}, 0);\r\n\t\t\treturn false;\r\n\t\t}\r\n\t\treturn true;\r\n\t},\r\n\r\n\t// 树---删除节点方法(重要)\r\n\tonRemove: function (e, treeId, treeNode) {\r\n\t\tvar _this = this;\r\n\r\n\t\tvar testDelNodeApi = '/tree/del/node', // 删除节点的接口,改成你自己的\r\n\t\t\tparam = {};\r\n\r\n\t\tparam.pid = treeNode.id; // 传递删除节点的id\r\n\r\n\t\t//删除节点传的数据即param,结构是 {thisId: 13}\r\n\t\t$.post(testDelNodeApi, param, function(response){\r\n\r\n\t\t})\r\n\r\n\t},\r\n\r\n\t// 树---重命名节点方法(重要)\r\n\tonRename: function (e, treeId, treeNode, isCancel) {\r\n\t\tvar _this = this;\r\n\t\tvar testEditNodeApi = '/tree/edit/node', // 编辑节点的接口,改成你自己的\r\n\t\t\tparam = {};\r\n\r\n\t\tparam.pid = treeNode.id; // 该编辑节点的id\r\n\t\tparam.name = treeNode.name; //修改后的新名称\r\n\r\n\t\t//删除节点传的数据即param,结构是 {thisId: 13, newName:'修改后的名称'}\r\n\t\t$.post(testEditNodeApi, param, function(response){\r\n\r\n\t\t\tlocation.reload();\r\n\t\t})\r\n\r\n\t},\r\n\r\n\t// 树---显示删除节点的button方法(可忽略)\r\n\tshowRemoveBtn: function (treeId, treeNode) 
{\r\n\r\n\t\treturn !(treeNode.id == -1);\r\n\t},\r\n\r\n\t// 树---显示重命名节点的button方法(可忽略)\r\n\tshowRenameBtn: function (treeId, treeNode) {\r\n\r\n\t\treturn true;\r\n\t},\r\n\r\n\t// 树---添加节点的方法(重要)\r\n\taddHoverDom: function (treeId, treeNode) {\r\n\t\tvar _this = this;\r\n\t\tvar newCount = 1;\r\n\t\tvar sObj = $(\"#\" + treeNode.tId + \"_span\"); // 选中的该节点\r\n\t\tif (treeNode.editNameFlag || $(\"#addBtn_\" + treeNode.tId).length > 0) return; // 如果是编辑状态或该节点没有配置添加按钮,则返回,不进行下面的代码\r\n\r\n\t\t// 拼接一个添加的button,将模态框与之关联\r\n\t\tvar addStr = \"<span class='button add' id='addBtn_\" + treeNode.tId\r\n\t\t\t+ \"' title='add node' data-toggle='modal' data-target='#exampleModal' data-for='添加' onfocus='this.blur();'></span>\";\r\n\r\n\t\tsObj.after(addStr);\t// 将添加按钮放在该节点后面\r\n\t\tvar addBtn = $(\"#addBtn_\" + treeNode.tId); // 获取该节点的添加按钮\r\n\t\tif (addBtn) {\r\n\t\t\t// addBtn.bind(\"click\", function(){\r\n\t\t\t// \tvar zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n\t\t\t// \tzTree.addNodes(treeNode, {id:(100 + newCount), pId:treeNode.id, name:\"new node\" + (newCount++)});\r\n\t\t\t// \treturn false;\r\n\t\t\t// });\r\n\r\n\t\t\t\t// 打开添加节点的模态框\r\n\t\t\t$('#exampleModal').on('show.bs.modal', function (event) {\r\n\t\t\t\tvar button = $(event.relatedTarget);\r\n\t\t\t\tvar recipient = button.data('for');\r\n\t\t\t\tvar modal = $(this);\r\n\r\n\t\t\t\tif (recipient == '添加') {\r\n\t\t\t\t\tmodal.find('.modal-title').text('添加节点');\r\n\t\t\t\t\tvar strTpl = '';\r\n\r\n\t\t\t\t\t// 拼接模态框中的内容\r\n\t\t\t\t\tstrTpl += '<div class=\"form-group\">\\\r\n\t\t\t\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">节点名称:</label>\\\r\n\t\t\t\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" id=\"add-node-box\">\\\r\n\t\t\t\t\t\t\t\t </div>\\\r\n\t\t\t\t\t\t\t\t <div class=\"form-group\">\\\r\n\t\t\t\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">code_type:</label>\\\r\n\t\t\t\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" 
id=\"add-node_type-box\">\\\r\n\t\t\t\t\t\t\t\t </div>';\r\n\r\n\t\t\t\t\t//将拼接好的添加模板放入模态框中\r\n\t\t\t\t\tmodal.find('form').html(strTpl);\r\n\r\n\t\t\t\t\tvar submitBtn = modal.find('#confirmBtn');\r\n\r\n\t\t\t\t\t// 点击提交按钮后,调添加模态框的接口,将新添加的节点传给后台插库\r\n\t\t\t\t\tsubmitBtn.off('click').on('click', function () {\r\n\t\t\t\t\t\tvar zTree = $.fn.zTree.getZTreeObj(\"treelist\"),\r\n\t\t\t\t\t\t\taddName = $('#add-node-box').val(),\r\n\t\t\t\t\t\t\taddNodeType = $('#add-node_type-box').val();\r\n\t\t\t\t\t\t//zTree.addNodes(treeNode, {id: (100 + newCount), pId: treeNode.id, name: addName});\r\n\t\t\t\t\t\tzTree.addNodes(treeNode, {id: (100 + newCount), pid: treeNode.id, name: addName});\r\n\r\n//\t\t\t\t\t\tvar testAddNodeApi = 'http://www.nodetest.com/node/additem', // 添加节点的接口url,改成你自己的\r\n\t\t\t\t\t\tvar testAddNodeApi = '/tree/add/node', // 添加节点的接口url,改成你自己的\r\n\r\n\t\t\t\t\t\t\tparam = {};\r\n\t\t\t\t\t\t//param.parentId = treeNode.id; // 父节点id\r\n\t\t\t\t\t\t//param.thisName = addName; // 该新增节点名称\r\n\t\t\t\t\t\tparam.pid = treeNode.id; // 父节点id\r\n\t\t\t\t\t\tparam.name = addName; // 该新增节点名称\r\n\t\t\t\t\t\tparam.nodetype = addNodeType; //该新增节点的node_type值\r\n\r\n\r\n\t\t\t\t\t\t// 访问传的参数结构即param变量,例如{parentId:13, thisName:'新增节点1'}\r\n\t\t\t\t\t\t$.post(testAddNodeApi, param, function (response) {\r\n\t\t\t\t\t\t\tif(response.code == 0){\r\n\t\t\t\t\t\t\t\t// response中需要你返回给我新的节点的id\r\n\t\t\t\t\t\t\t\tlocation.reload();\r\n\t\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\t\tmodal.modal('hide');// 隐藏模态框\r\n\t\t\t\t\t\t});\r\n\t\t\t\t\t});\r\n\t\t\t\t}\r\n\t\t\t})\r\n\t\t}\r\n\t},\r\n\r\n\t// 树---删除节点hover的方法(可忽略)\r\n\tremoveHoverDom: function (treeId, treeNode) {\r\n\t\t$(\"#addBtn_\" + treeNode.tId).unbind().remove();\r\n\t},\r\n\r\n\t// 树---树的入口函数,非常重要,在这里面配置树,访问接口得到树的数据\r\n\ttreeList: function () {\r\n\t\tvar _this = this,\r\n\t\t\tr;\r\n\t\tvar setting = {\r\n\t\t\tview: {\r\n\t\t\t\taddHoverDom: _this.addHoverDom,\r\n\t\t\t\tremoveHoverDom: 
_this.removeHoverDom,\r\n\t\t\t\tselectedMulti: false\r\n\t\t\t},\r\n\t\t\tedit: {\r\n\t\t\t\tenable: true,\r\n\t\t\t\teditNameSelectAll: true,\r\n\t\t\t\tshowRemoveBtn: _this.showRemoveBtn,\r\n\t\t\t\tshowRenameBtn: _this.showRenameBtn\r\n\t\t\t},\r\n\t\t\tdata: {\r\n\t\t\t\tsimpleData: {\r\n\t\t\t\t\tenable: true\r\n\t\t\t\t}\r\n\t\t\t},\r\n\t\t\tcallback: {\r\n\t\t\t\tbeforeDrag: _this.beforeDrag,\r\n\t\t\t\tonClick: _this.onTreeClick,\r\n\t\t\t\tbeforeEditName: _this.beforeEditName,\r\n\t\t\t\tbeforeRemove: _this.beforeRemove,\r\n\t\t\t\tbeforeRename: _this.beforeRename,\r\n\t\t\t\tonRemove: _this.onRemove,\r\n\t\t\t\tonRename: _this.onRename\r\n\t\t\t}\r\n\t\t};\r\n\r\n\t//\t$.getJSON('/data/protocolTree.json', function (response) {\r\n\t//\t\tconsole.log(response,'6')\r\n\t//\t\tvar zNodes = response.data.agreement;\r\n\t//\t\t$.fn.zTree.init($(\"#treelist\"), setting, zNodes);\r\n\t//\t}, \"json\");\r\n //\r\n\t//},\r\n\r\n\t\t$.get('/tree/all', function (response) {\r\n\t\t\tconsole.log(response,'6')\r\n\t\t\tvar zNodes = response.data.agreement;\r\n\t\t\t$.fn.zTree.init($(\"#treelist\"), setting, zNodes);\r\n\t\t\tpublic_func.defaultSet.zNodes = zNodes\r\n\t\t}, \"json\");\r\n\r\n\t},\r\n\r\n\r\n\t// 子菜单点击\r\n\tsubMenuClick: function () {\r\n\r\n\t\t// 点击子菜单\r\n\t\t$('.js-sub-tabs').off('click.open').on('click.open', 'li', function (e) {\r\n\r\n\t\t\t$(this).parent().find('li').removeClass('active'); //所有子菜单去掉active类\r\n\t\t\t$(this).addClass('active'); //就给该子菜单添加active类\r\n\r\n\t\t});\r\n\r\n\t},\r\n\r\n\t// 全选全不选checkbox\r\n\tcheckboxFun: function(){\r\n\t\tvar _this = this,\r\n\t\t\tfather = $('#bodyList');\r\n\t\t// 点击全选\r\n\t\tfather.off('click.all').on('click.all','.total-check',function(){\r\n\t\t\tfather.find(\".sub-check\").prop(\"checked\",$(this).prop(\"checked\"));\r\n\t\t\t//同步所有的全选按钮\r\n\t\t\tfather.find('.total-check').prop(\"checked\",$(this).prop(\"checked\"));\r\n\r\n\t\t});\r\n\r\n\t\t// 
处理单个\r\n\t\tfather.off('click.sin').on('click.sin','.sub-check',function(){\r\n\t\t\tif(!$(this).prop('checked')){\r\n\t\t\t\tfather.find('.total-check').prop(\"checked\",$(this).prop(\"checked\"));\r\n\t\t\t}\r\n\r\n\t\t\t//若在非全选状态下,单个商品依次选中要更新全选按钮状态\r\n\t\t\tif($('.sub-check').length == $('input.sub-check:checked').length){\r\n\t\t\t\t$('.total-check').prop(\"checked\",true);\r\n\t\t\t}\r\n\t\t});\r\n\t},\r\n\r\n\t// 左侧树展开缩小动画\r\n\tleftTreeAni: function(){\r\n\t\tvar _this = this;\r\n\r\n\t\t// 点击数的控制按钮\r\n\t\t$('.js-slide-tree').off('click').on('click', function(){\r\n\t\t\tif($(this).hasClass('icon-chevron-left')){ // 如果有向左的按钮的类,说明是展开状态,我们让它合上\r\n\t\t\t\t$('.float-tree').animate({left:'-250px'}, 500);//整个树向左移动树的宽度,将其隐藏\r\n\t\t\t\t$(this).removeClass('icon-chevron-left').addClass('icon-chevron-right')//并将按钮改成向右的按钮\r\n\t\t\t}else{\r\n\t\t\t\t$('.float-tree').animate({left:'0'}, 500);\r\n\t\t\t\t$(this).removeClass('icon-chevron-right').addClass('icon-chevron-left')\r\n\t\t\t}\r\n\t\t})\r\n\t},\r\n\r\n\t// 侧栏导航\r\n\tleftNav: function(){\r\n\t\t$('.navMenu li a').on('click',function(){\r\n\t\t\tvar parent = $(this).parent().parent();\r\n\t\t\tvar labeul =$(this).parent(\"li\").find(\">ul\");\r\n\t\t\tif ($(this).parent().hasClass('open') == false) { // 判断你如果父元素没有open类,即没有展开\r\n\r\n\t\t\t\t//展开\r\n\t\t\t\tparent.find('ul').slideUp(300); // 先将所有的列表都收起来\r\n\t\t\t\tparent.find(\"li\").removeClass(\"open\"); // 去掉open类\r\n\t\t\t\tparent.find('li a').removeClass(\"active\").find(\".arrow\").removeClass(\"open\") // 去掉所有箭头的open类,相当于reset\r\n\t\t\t\t$(this).parent(\"li\").addClass(\"open\").find(labeul).slideDown(300); // 单独向下展示该点击的列表\r\n\t\t\t\t$(this).addClass(\"active\").find(\".arrow\").addClass(\"open\") // 单独给该箭头添加open类,展示展开效果\r\n\t\t\t}else{ // 父元素已经有open类了,点击收回\r\n\r\n\t\t\t\t$(this).parent(\"li\").removeClass(\"open\").find(labeul).slideUp(300); // 向上收起列表\r\n\r\n\t\t\t\tif($(this).parent().find(\"ul\").length>0){ // 
判断是否有二级菜单\r\n\r\n\t\t\t\t\t$(this).removeClass(\"active\").find(\".arrow\").removeClass(\"open\");// 有二级菜单去掉箭头的open类\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t});\r\n\t},\r\n\r\n\t// 退出按钮\r\n\texitBtn: function () {\r\n\t\t// 点击退出按钮,退出到登录界面\r\n\t\t$('.exit-btn').off('click').on('click', function (e) {\r\n\t\t\twindow.location.pathname = \"/monitor-web/login.html\";\r\n\t\t})\r\n\r\n\t},\r\n\r\n\tinit: function(){\r\n\t\tvar _this = this;\r\n\r\n\t\t // 调退出按钮,因为每个页面都会有退出功能,所以就在公共函数中调用就好\r\n\t\t_this.exitBtn();\r\n\t}\r\n};" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 23, "blob_id": "6bc780c3e051389d5639cea0c7c8d26b0bbea815", "content_id": "2bf3dcde632a7db9821933343ab1c48d25876db1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24, "license_type": "no_license", "max_line_length": 23, "num_lines": 1, "path": "/uplooking_Python/code/lesson07-flask/ops/libs/db/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from db import MasterDB\n" }, { "alpha_fraction": 0.6144366264343262, "alphanum_fraction": 0.6179577708244324, "avg_line_length": 28.842105865478516, "blob_id": "e23c0296d4f6a60f3770f99f32a0cc35a357303a", "content_id": "6f6a11115a7174c3744c8700068682218d1e0cbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "no_license", "max_line_length": 67, "num_lines": 19, "path": "/uplooking_Python/code/lesson09-flask/ops/servicetree/models/hosts.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom libs.db import db\n\nclass Hosts(object):\n TABLE = \"tb_device\"\n COLUMN = \"id,hostname,host_type,ip,location\"\n\n @classmethod\n def getHostsByIds(cls, hostids):\n print hostids\n sql = \"select hostname from %s where id in %%s\"%(cls.TABLE)\n hosts = db.query_all(sql, hostids)\n return [host[0] for host in 
hosts]\n\n @classmethod\n def getHostIdByHostname(cls, hostname):\n sql = \"select id from %s where hostname=%%s\"%(cls.TABLE)\n hostId = db.query_all(sql, hostname)\n return hostId\n\n" }, { "alpha_fraction": 0.5140562057495117, "alphanum_fraction": 0.5662650465965271, "avg_line_length": 13.125, "blob_id": "c5c6e41da3ead62cdf05922d861efd7b02a8fcc8", "content_id": "8dc7e144bc2617b1b1ed7c6127c81f8d79e6e5ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 46, "num_lines": 16, "path": "/uplooking_Python/code/lesson04/guess_num_v1.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nauthor:wolfrg\r\ndate:2017.12.25\r\n猜数字游戏,V1版,被猜的数字不是随机的\r\n'''\r\nflag =30\r\nwhile True:\r\n\t#flag = 30\r\n\tnum = int(input('please input your number:'))\r\n\r\n\tif num > flag:\r\n\t\tprint('大了')\r\n\telif num < flag:\t\r\n\t\tprint('小了')\r\n\telse:\r\n\t\tprint('猜对了')\r\n\t\t\t\r\n\r\n" }, { "alpha_fraction": 0.7150837779045105, "alphanum_fraction": 0.7206704020500183, "avg_line_length": 24.571428298950195, "blob_id": "da00472a14edcfdc89318856456d134e362e8ebc", "content_id": "d0703915e53b61e76599548565117a76ec06400a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 44, "num_lines": 7, "path": "/uplooking_Python/code/lesson07-flask/ops/hosts/views/host.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom flask import render_template\nfrom hosts import blue_print\n\n@blue_print.route(\"/index\", methods=[\"GET\"])\ndef index():\n return render_template(\"hosts.html\")\n" }, { "alpha_fraction": 0.7555555701255798, "alphanum_fraction": 0.7555555701255798, "avg_line_length": 44, "blob_id": "15a54bc832dd328671df38a67894036785d627ca", "content_id": "5819fc2e2eb3be7d63f653763beabf53f8bdf8e4", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 93, "num_lines": 3, "path": "/uplooking_Python/code/lesson07-flask/ops/servicetree/__init__.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nblue_print = Blueprint(\"tree\", __name__, template_folder=\"templates\", static_folder=\"static\")\nimport views\n" }, { "alpha_fraction": 0.6906636953353882, "alphanum_fraction": 0.7007874250411987, "avg_line_length": 30.678571701049805, "blob_id": "c5e5693585033e4e8093068aeec6f0042efcb340", "content_id": "25d8f8fc5341ae4f24015eae917ee0ab21a6e05e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "no_license", "max_line_length": 73, "num_lines": 28, "path": "/uplooking_Python/code/lesson07-flask/ops/app.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom flask import Flask, render_template\nfrom hosts import blue_print as hosts_bp\nfrom domains import blue_print as domains_bp\nfrom deploy import blue_print as deploy_bp\nfrom servicetree import blue_print as tree_bp\napp = Flask(__name__)\n# 从环境变量指定的配置文件获取配置\n#app.config.from_envvar('APP_CONFIG_FILE')\napp.config.from_object('config.development')\n# 注册蓝图\napp.register_blueprint(hosts_bp, url_prefix=\"/hosts\")\napp.register_blueprint(domains_bp, url_prefix=\"/domains\")\napp.register_blueprint(deploy_bp, url_prefix=\"/deploy\")\napp.register_blueprint(tree_bp, url_prefix=\"/tree\")\n\[email protected](\"/\", methods=[\"GET\"])\ndef index():\n return render_template(\"index.html\")\n\n\nif __name__ == '__main__':\n APP_DEV_PORT = 8888\n try:\n APP_DEV_PORT = app.config['APP_DEV_PORT']\n except:\n pass\n app.run(host='0.0.0.0', port=APP_DEV_PORT, debug=app.config[\"DEBUG\"])\n\n\n" }, { "alpha_fraction": 0.5452302694320679, "alphanum_fraction": 
0.5542762875556946, "avg_line_length": 30.128204345703125, "blob_id": "ba5dabb4d4c8a680b641f1256c439ec548c55ee4", "content_id": "39bea5c5b01e1d7d52e15106bb40da88f174cba2", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1216, "license_type": "no_license", "max_line_length": 60, "num_lines": 39, "path": "/uplooking_Python/code/lesson09-flask/ops/libs/cache/cache.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf-8\nfrom servicetree.models.db_tree import MapTree\n\nclass Cache(object):\n node_tags = []\n @classmethod\n def _build_cache(cls):\n tree = MapTree.get_all()\n node_hash = {int(item[\"id\"]): item for item in tree}\n return node_hash\n\n @classmethod\n def getTagstringsByTreeIds(cls, treeIds):\n node_hash = cls._build_cache()\n tagstrings = []\n for treeId in treeIds:\n cls.node_tags = []\n tagstring = cls.getTreePath(treeId, node_hash)\n print \">>>>\", tagstring\n tagstrings.append(tagstring)\n return tagstrings\n\n @classmethod\n def getTreePath(cls, treeId, node_hash):\n\n #cop.uplooking_dep.sre_pdl.sys\n #sys 10 pid 2\n #sre: 2 pid 1\n #uplooking: 1 pid 0\n node = node_hash.get(int(treeId))\n pid = node.get(\"pid\")\n node_type = node.get(\"node_type\")\n name = node.get(\"name\")\n cls.node_tags.insert(0,\".\".join([node_type, name]))\n #1 -> [cop.uplooking, dep.sre, pdl.sys]\n if int(pid) == 0:\n tagstring = \"_\".join(cls.node_tags)\n return tagstring\n return cls.getTreePath(pid, node_hash)\n\n\n" }, { "alpha_fraction": 0.5404298901557922, "alphanum_fraction": 0.545547604560852, "avg_line_length": 23.0256404876709, "blob_id": "6befaac4f7f4495dbc5dd624982b26382f2df743", "content_id": "b7531cc99eac8843ffc847021c591a53d1c5c03f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2378, "license_type": "no_license", "max_line_length": 243, "num_lines": 78, "path": "/uplooking_Python/code/前端修改/serviceTree.js", 
"repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 2018/3/22.\r\n */\r\n\r\n\t// 定义一个大对象,包含该页面要用到的所有的方法\r\n\tvar tree = {\r\n\t\tsaveData: { // 保存公共变量的一个对象\r\n\t\t\tpageNum:1, // 保存页码数\r\n\t\t\tsubmenuId: 0 // 保存菜单id\r\n\t\t},\r\n\r\n\t\t// 调接口,获取表格数据\r\n\t\tgetTableData: function(){\r\n\t\t\tvar _this = this,\r\n\t\t\t\tparam = {};\r\n\r\n\t\t\t// 给后台传参数\r\n\t\t\tparam.nodeId = public_func.defaultSet.selTreeNodeId // 传节点id\r\n\t\t\tparam.page = _this.saveData.pageNum; // 传页码数\r\n\t\t\tparam.submenu = _this.saveData.submenuId; // 传子菜单id\r\n\r\n\t\t\t$.get('/tree/all_map', param, function(response){ //\r\n\t\t\t\tif(response.code == 1){\r\n\t\t\t var data = response.data.agreement; // 获取接口中返回的列表的数据\r\n\t\t\t\t\t_this.tableTpl(data); // 调用拼接图表模板的函数\r\n\t\t\t\t\t_this.pageSel(); // 页码选择\r\n\r\n\t\t\t\t}\r\n\t\t\t},'json');\r\n\r\n\t\t},\r\n\r\n\t\t//拼接列表\r\n\t\ttableTpl: function (data) {\r\n\t\t\tvar _this = this,\r\n\t\t\t\tstr = '';\r\n\t\t\t// 拼接表格头部\r\n\t\t\tstr += '<thead><th><input class=\"total-check\" type=\"checkbox\"></th><th>主机名</th></thead><tbody>';\r\n\r\n\t\t\t// 拼接表格body\r\n\t\t\t$.each(data, function(index, value){\r\n//\t\t\t\tstr += '<tr><td><input class=\"sub-check\" type=\"checkbox\"></td><td>' + value.zhuji + '</td><td>' + value.ip + '</td><td>' + value.suzhuji + '</td><td>' + value.jifang + '</td><td>' + value.status + '</td><td>' + value.menu + '</td></tr>';\r\n\t\t\t\tstr += '<tr><td><input class=\"sub-check\" type=\"checkbox\"></td><td>' + value.hostname + '</td></tr>';\r\n\r\n\t\t\t});\r\n\r\n\t\t\tstr += '</tbody>';\r\n\r\n\t\t\t// 将拼好的列表放入父元素中,生成列表\r\n\t\t\t$('#bodyList').html(str);\r\n\t\t},\r\n\r\n\t\t// 分页\r\n\t\tpageSel: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t$('.pagination').off('click').on('click', 'a', function (event) {\r\n\r\n\t\t\t\t_this.saveData.pageNum = $(this).text(); // 将点击后选择的页码数赋值给公共变量\r\n\r\n\t\t\t\t_this.getTableData(); // 
调列表数据获取函数,将新一页的数据展示出来\r\n\t\t\t})\r\n\r\n\t\t},\r\n\r\n\t\t// 入口函数\r\n\t\tinit: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\tpublic_func.treeList('tree'); // 调公共方法中的树\r\n\t\t\t_this.getTableData(); // 调图表显示方法\r\n\r\n\t\t\tpublic_func.subMenuClick(); // 调公共中子菜单点击函数\r\n\t\t\tpublic_func.leftTreeAni(); // 调左侧树动画\r\n\t\t}\r\n\t};\r\n\r\n\ttree.init(); // 调入口函数\r\n\r\n" }, { "alpha_fraction": 0.5889159440994263, "alphanum_fraction": 0.5895249843597412, "avg_line_length": 28.321428298950195, "blob_id": "141f3bc03699a84a83cc0277ff4c24abbc530480", "content_id": "4f76e35d23a416e06ae31c05cef660fd62a7180a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1696, "license_type": "no_license", "max_line_length": 104, "num_lines": 56, "path": "/uplooking_Python/code/lesson05/myapps/monitor/core/monitor_domains.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\nimport datetime\nfrom conf.exprie_domains import exprie_domains\nfrom utils.whois_commands import sys_call\nfrom utils.expire_time import expire_time\nfrom utils.register_time import register_time\nfrom utils.expire_days import expire_days\nfrom logger.logger import get_logger\n\nclass Monitor_Domain(object):\n\n def __init__(self):\n self.info = None\n self.domain_name = None\n\n def whois(self,domain_name):\n cmd = \"whois %s\" % domain_name\n info = sys_call(cmd)\n self.info = info\n self.domain_name = domain_name\n\n\n def get_register_time(self):\n register_date = register_time(self.info,self.domain_name)\n self.register_date = register_date\n return self.register_date\n\n\n\n def get_expire_time(self):\n expire_date = expire_time(self.info,self.domain_name)\n self.expire_date = expire_date\n return self.expire_date\n\n\n\n def get_expire_days(self):\n my_expire_days = expire_days(self.expire_date)\n self.my_expire_days = my_expire_days\n return self.my_expire_days\n\n\n\n#if __name__ == '__main__':\n# md = 
Monitor_Domain()\n# logger = get_logger(\"whois domains\")\n# for domain in exprie_domains:\n# md.whois(domain)\n# r_time = md.get_register_time()\n# e_time = md.get_expire_time()\n# my_days = md.get_expire_days()\n# if e_time == None:\n# logger.info(\"没查到 %s的有效信息.\"%(domain))\n# else:\n# print \"===================================================================================\"\n# print \"域名:%s,注册时间:%s,到期时间:%s,到期天数:%s天\" % (domain,r_time,e_time,my_days)\n" }, { "alpha_fraction": 0.5283389687538147, "alphanum_fraction": 0.5395622849464417, "avg_line_length": 32.960784912109375, "blob_id": "25006c113f2e49cedf9de380d4f7a3d87f0a791d", "content_id": "ec3d5aa52dc3c8507499fddb91a1c2e6065fe225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3678, "license_type": "no_license", "max_line_length": 100, "num_lines": 102, "path": "/uplooking_Python/code/前端/lesson08-web/kuozhanPackage/为知笔记/scripts/options/options.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "//// 将选项保存在 chrome.storage 中。\r\n//function save_options() {\r\n// var color = document.getElementById('color').value;\r\n// var likesColor = document.getElementById('like').checked;\r\n// chrome.storage.sync.set({\r\n// favoriteColor: color,\r\n// likesColor: likesColor\r\n// }, function() {\r\n// // 更新状态,告诉用户选项已保存。\r\n// var status = document.getElementById('status');\r\n// status.textContent = '选项已保存。';\r\n// setTimeout(function() {\r\n// status.textContent = '';\r\n// }, 750);\r\n// });\r\n//}\r\n//\r\n//// 从保存在 chrome.storage 中的首选项恢复选择框和复选框状态。\r\n//function restore_options() {\r\n// // 使用默认值 color = 'red' 和 likesColor = true 。\r\n// chrome.storage.sync.get({\r\n// favoriteColor: 'red',\r\n// likesColor: true\r\n// }, function(items) {\r\n// document.getElementById('color').value = items.favoriteColor;\r\n// document.getElementById('like').checked = items.likesColor;\r\n// 
});\r\n//}\r\n//document.addEventListener('DOMContentLoaded', restore_options);\r\n//document.getElementById('save').addEventListener('click',\r\n// save_options);\r\n\r\n\r\n$(function () {\r\n\r\n var saveImage2Server = $('#options-saveImage2Server');\r\n var defaultOptions = {\r\n saveImage2Server: true\r\n };\r\n\r\n function localize() {\r\n $('head title').html(chrome.i18n.getMessage('options_title'));\r\n $('#options-title').html(chrome.i18n.getMessage('WizNote'));\r\n $('.options-name').each(function() {\r\n $(this).html(chrome.i18n.getMessage('options'));\r\n });\r\n $('.options-about').each(function() {\r\n $(this).html(chrome.i18n.getMessage('about'));\r\n });\r\n $('#options-save').html(chrome.i18n.getMessage('save'));\r\n $('#lable-options-saveImage2Server').html(chrome.i18n.getMessage('save_image_to_server'));\r\n $('#options-saveImage2Server-tip').html(chrome.i18n.getMessage('save_image_to_server_tip'));\r\n }\r\n\r\n function restoreOptions(options) {\r\n chrome.storage.sync.get(options, function(itmes) {\r\n saveImage2Server[0].checked = itmes.saveImage2Server;\r\n });\r\n }\r\n\r\n function initEvent() {\r\n $('.menu a').click(function (ev) {\r\n ev.preventDefault();\r\n var selected = 'selected';\r\n\r\n $('.mainview > *').removeClass(selected);\r\n $('.menu li').removeClass(selected);\r\n setTimeout(function () {\r\n $('.mainview > *:not(.selected)').css('display', 'none');\r\n }, 100);\r\n\r\n $(ev.currentTarget).parent().addClass(selected);\r\n var currentView = $($(ev.currentTarget).attr('href'));\r\n currentView.css('display', 'block');\r\n setTimeout(function () {\r\n currentView.addClass(selected);\r\n }, 0);\r\n\r\n setTimeout(function () {\r\n $('body')[0].scrollTop = 0;\r\n }, 200);\r\n });\r\n $('.mainview > *:not(.selected)').css('display', 'none');\r\n\r\n //listen the checkbox event of save img to server\r\n saveImage2Server.on('click', function() {\r\n var isSaveImage2Server = saveImage2Server[0].checked;\r\n 
chrome.storage.sync.set({\r\n saveImage2Server: isSaveImage2Server\r\n }, function() {\r\n// alert('save success');\r\n// console.log(\"save success\");\r\n });\r\n });\r\n }\r\n function init() {\r\n localize();\r\n initEvent();\r\n restoreOptions(defaultOptions);\r\n }\r\n init();\r\n});" }, { "alpha_fraction": 0.4600200653076172, "alphanum_fraction": 0.47507527470588684, "avg_line_length": 27.435644149780273, "blob_id": "97f74944110496f722f70aa1ea783bed6dd4c623", "content_id": "13dfc2be071415a11f4c5a66a283a0f03507901b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3143, "license_type": "no_license", "max_line_length": 82, "num_lines": 101, "path": "/0901/socket3.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 2017年9月8日\r\n\r\n@author: Ops\r\n'''\r\n\r\nimport os,socket\r\nimport json\r\n\r\n\r\np='F:\\\\Python\\\\zm-mobile-h5-spa'\r\npdir=os.listdir(p) \r\n\r\nsk = socket.socket()\r\nsk.bind((\"127.0.0.1\", 8080))\r\nsk.listen(50)\r\np=os.getcwd()\r\nprint('http://localhost:8080/')\r\n\r\n\r\ndef my_rsync(p,filename):\r\n \r\n #根目录下需要复制的目录\r\n cpdir='src'\r\n srcname = os.path.join(p,filename)\r\n #print (srcname)\r\n \r\n if os.path.isdir(srcname):\r\n os.chdir(srcname)\r\n #print('进入:' + os.getcwd())\r\n jsonDir=os.path.join(srcname,'package.json') #遍历包含package.json的目录\r\n if os.path.isfile(jsonDir):\r\n #print(srcname + \"目录:存在package.json文件\")\r\n print(jsonDir)\r\n with open('package.json','r') as f:\r\n lines = f.read(-1) \r\n try:\r\n text=json.loads(lines)\r\n #print(text)\r\n except ValueError:\r\n #print(' json解析失败')\r\n pass\r\n \r\n else:\r\n #print('json解析成功')\r\n pass\r\n \r\n if text.get(\"scripts\",()).get(\"build\"):\r\n #print('package.json文件中包含build字段')\r\n cpdir='dist'\r\n #print(cpdir)\r\n #os.system('npm install')\r\n #print ('执行命令npm install')\r\n \r\n if(os.path.isdir(os.path.join(srcname,cpdir))):\r\n 
dstPath='F:\\\\Python\\\\spa\\\\'\r\n dstPath=os.path.join(dstPath,filename)\r\n \r\n if(not os.path.isdir(dstPath)):\r\n os.makedirs(dstPath)\r\n \r\n srcPath=os.path.join(srcname,cpdir)\r\n \r\n #print(srcPath)\r\n #windows命令格式 \r\n comm='xcopy /y /E ' + srcPath + ' ' + dstPath \r\n #Linux命令格式\r\n #comm='cp -a ' + srcPath + ' ' + dstPath\r\n #print (comm)\r\n os.system(comm)\r\n\r\nwhile True:\r\n conn, addr = sk.accept()\r\n accept_data = str(conn.recv(1024),encoding=\"utf8\")\r\n lines = accept_data.split('\\n')[0]\r\n print(lines)\r\n if len(accept_data)<3:\r\n continue;\r\n filename = accept_data.split()[1]\r\n print(filename);\r\n #print(\"\".join([\"接收内容:\", accept_data, \" 客户端口:\", str(addr[1])]))\r\n send_data='HTTP/1.1 200 OK\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n';\r\n conn.send(bytes(send_data, encoding=\"utf8\"))\r\n data=filename\r\n filenameOld=filename\r\n \r\n\r\n\r\n eddir=os.listdir(p)\r\n for filename in eddir:\r\n if(filenameOld[1:]==filename or filenameOld[1:]=='_all'):\r\n print(filenameOld[1:])\r\n my_rsync( p,filename )\r\n\r\n data=''\r\n for filename in eddir:\r\n if filename[0]!='.' 
and os.path.isdir(p+'/'+filename):\r\n data+='<a href=\"'+filename+'\">'+filename+'</a> <br>'\r\n\r\n conn.send(bytes(data, encoding=\"utf8\"))\r\n conn.close() # 跳出循环时结束通讯\r\n \r\n \r\n " }, { "alpha_fraction": 0.5714483857154846, "alphanum_fraction": 0.5761653780937195, "avg_line_length": 30.772727966308594, "blob_id": "60aeb8b9f99e1e94fda72fab44f182966897e618", "content_id": "a1eaafc2b17e3710540bccacfae530252b2d2636", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8968, "license_type": "no_license", "max_line_length": 218, "num_lines": 220, "path": "/uplooking_Python/code/前端/lesson10-前端day04/monitor-web1/mDeploy/js/deploy.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 2018/3/22.\r\n */\r\n$(function () {\r\n\tvar deploy = {\r\n\t\tsaveData: {\r\n\t\t\tpageNum:1, // 存储页码,每次点击页码都改变该值,调接口时传此的页码值\r\n\t\t\tsubmenuId: 0 // 存储子菜单id,每次点击子菜单都改变该值,调接口时传此的子菜单值\r\n\t\t},\r\n\r\n\t\t// 分页\r\n\t\tpageSel: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t$('.pagination').off('click').on('click', 'a', function (event) {\r\n\t\t\t\t// 一般直接调接口\r\n\t\t\t\tvar thisPageNum = $(this).text();\r\n\r\n\t\t\t\t_this.saveData.pageNum = thisPageNum; // 将选择的页码保存在公共变量中,方便使用\r\n\t\t\t\t_this.getTableData(); // 点击完页码后,调显示图表的方法\r\n\t\t\t})\r\n\r\n\t\t},\r\n\r\n\t\t// 任务列表\r\n\t\ttaskDataGet: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t// 调任务列表接口,获取任务列表\r\n\t\t\t// $.get('url', {}, function (response) {\r\n\t\t\t$.getJSON('../../data/task.json', function(response){\r\n\t\t\t\tif(response.code == 1){ // 判断是否成功\r\n\t\t\t\t\tvar taskData = response.data.tasks; // 接口返回的数据\r\n\r\n\t\t\t\t\t_this.taskTableTpl(taskData) // 将接口返回的数据传给表格拼接函数,生成表格\r\n\t\t\t\t}\r\n\r\n\t\t\t\t_this.taskActionPage(); // 调添加任务或编辑任务的函数\r\n\r\n\t\t\t\t_this.showLanuchPage(); // 调进入发起任务界面的函数\r\n\t\t\t})\r\n\t\t},\r\n\r\n\t\t// 生成任务表格\r\n\t\ttaskTableTpl: function(data){\r\n\t\t\tvar _this = 
this,\r\n\t\t\t\tstr = '';\r\n\t\t\t// 拼接任务表头\r\n\t\t\tstr += '<thead><tr><th><input class=\"total-check\" type=\"checkbox\"></th><th><button class=\"btn btn-sm btn-warning\">批量发起</button></th><th>任务列表</th><th>服务组</th><th>工作组</th><th>环境</th><th>任务状态</th></tr></thead><tbody>';\r\n\r\n\t\t\t\t// 拼接任务表格主体\r\n\t\t\t\t$.each(data, function(index, value){\r\n\t\t\t\t\tstr += '<tr><td><input class=\"sub-check\" type=\"checkbox\"></td>\\\r\n\t\t\t\t\t\t\t\t<td><button class=\"btn btn-sm btn-warning js-lanuch-task mt-curosr\" data-name=\"' + value.name + '\">发起</button></td>\\\r\n\t\t\t\t\t\t\t\t<td class=\"js-edit-btn mt-curosr\" data-for=\"edit\" data-toggle=\"modal\" data-target=\"#exampleModal\">' + value.name + '</td>\\\r\n\t\t\t\t\t\t\t\t<td>' + value.serverGroup + '</td>\\\r\n\t\t\t\t\t\t\t\t<td>' + value.jobGroup + '</td>\\\r\n\t\t\t\t\t\t <td>' + value.enviroment + '</td>\\\r\n\t\t\t\t\t\t\t\t<td>' + value.restart + '</td></tr>';\r\n\r\n\t\t\t\t});\r\n\r\n\t\t\t\tstr += '</tbody>';\r\n\r\n\t\t\t\t$('#bodyList').html(str); // 放入父元素中,生成表格\r\n\t\t},\r\n\r\n\t\t// 点击添加按钮,添加任务\r\n\t\ttaskActionPage: function(){\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t// 调用模态框,显示模态框\r\n\t\t\t$('#exampleModal').on('show.bs.modal', function (event) {\r\n\t\t\t\tvar button = $(event.relatedTarget),\r\n\t\t\t\t\tactionType = button.data('for'),\r\n\t\t\t\t\tmodal = $(_this);\r\n\r\n\t\t\t\t// 判断按钮是要添加任务还是编辑任务\r\n\t\t\t\tif(actionType == 'add'){\r\n\t\t\t\t\t$('#exampleModalLabel').text('添加任务'); // 将模态框的title改成添加任务\r\n\t\t\t\t\t_this.addTaskItem(); // 调添加任务的函数\r\n\r\n\t\t\t\t}else if(actionType == 'edit'){\r\n\t\t\t\t\t$('#exampleModalLabel').text('编辑任务'); // 将模态框的title改成编辑任务\r\n\t\t\t\t\t_this.editTaskItem(); // 调编辑任务的函数\r\n\t\t\t\t}\r\n\r\n\t\t\t})\r\n\t\t},\r\n\r\n\t\t// 添加任务的模态框\r\n\t\taddTaskItem: function(){\r\n\t\t\tvar _this = this,\r\n\t\t\t\tformTpl = '';\r\n\r\n\t\t\t// 拼接模态框的内容\r\n\t\t\tformTpl += '<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" 
class=\"control-label\">部署环境:</label>\\\r\n\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" id=\"add-service\">\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">代码地址:</label>\\\r\n\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" id=\"add-job-name\">\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">部署账号:</label>\\\r\n\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" id=\"add-serviceGroup\">\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">部署路径:</label>\\\r\n\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" id=\"add-jobGroup\">\\\r\n\t\t\t\t\t \t</div>';\r\n\r\n\t\t\t$('#exampleModal').find('form').html(formTpl);// 将拼接好的字符模板放入父元素中,生成添加模态框的表单\r\n\r\n\r\n\t\t},\r\n\r\n\t\t// 点击任务名称到编辑任务界面\r\n\t\teditTaskItem: function(){\r\n\r\n\t\t},\r\n\r\n\t\t//点击发起名称到发起任务界面\r\n\t\tshowLanuchPage: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t// 点击发起任务按钮\r\n\t\t\t$('.js-lanuch-task').off('click').on('click', function(e){\r\n\t\t\t\t$('.task-list-page').hide(); // 隐藏显示任务界面\r\n\t\t\t\t$('.lanuch-task-page').show().find('.lanuch-job-name').text($(this).data('name')); // 显示发起任务界面并在表头显示该任务的名称\r\n\r\n\t\t\t\t_this.lanuchTaskDetail(); // 调用生成发起任务表格的方法\r\n\t\t\t})\r\n\r\n\t\t\t// 点击发起界面的返回按钮,返回到任务列表界面\r\n\t\t\t$('.content-main').off('click').on('click', '.back-to-list', function(e){\r\n\t\t\t\t$('.task-list-page').show(); // 显示任务列表的页面\r\n\t\t\t\t$('.lanuch-task-page').hide(); // 隐藏发起页面\r\n\r\n\r\n\t\t\t\t//调添加或编辑任务的函数\r\n\t\t\t\t_this.taskActionPage();\r\n\t\t\t})\r\n\t\t},\r\n\t\tlanuchTaskDetail: function () {\r\n\t\t\tvar _this = this,\r\n\t\t\t\tformTpl = '';\r\n\r\n\t\t\t// 拼接发起任务界面的表格\r\n\t\t\tformTpl += '<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" 
class=\"control-label col-md-3\">版本号:</label>\\\r\n\t\t\t\t\t\t <div class=\"col-md-8\"><input type=\"text\" class=\"form-control\" id=\"add-service\"></div>\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label col-md-3\">部署账号:</label>\\\r\n\t\t\t\t\t\t <div class=\"col-md-8\"><input type=\"text\" class=\"form-control\" id=\"add-serviceGroup\"></div>\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label col-md-3\">部署路径:</label>\\\r\n\t\t\t\t\t\t <div class=\"col-md-8\"><input type=\"text\" class=\"form-control\" id=\"add-jobGroup\"></div>\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label col-md-3\">启停动作:</label>\\\r\n\t\t\t\t\t\t <div class=\"col-md-8\"><input type=\"text\" class=\"form-control\" id=\"add-service\"></div>\\\r\n\t\t\t\t\t \t</div>\\\r\n\t\t\t\t\t \t<div class=\"form-group\">\\\r\n\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label col-md-3\">机器列表:</label>\\\r\n\t\t\t\t\t\t <div class=\"col-md-8\"><img class=\"js-machine-sel mt-curosr\" data-target=\"#exampleModal\" data-toggle=\"modal\" src=\"../../img/deploy-edit.svg\"></div>\\\r\n\t\t\t\t\t \t</div>';\r\n\r\n\t\t\t$('.lanuch-task-page').find('.panel-body').html(formTpl); // 将拼接好的字符模板放入父元素中,生成表格\r\n\r\n\t\t\t_this.machineSelect(); // 等表格生成后,调用机器选择功能,即左右选择插件\r\n\r\n\t\t},\r\n\r\n\t\t// 点击机器列表,调用左右选择插件,显示机器列表选择项\r\n\t\tmachineSelect: function(){\r\n\t\t\tvar _this = this,\r\n\t\t\t\tstr = '';\r\n\r\n\t\t\t// 必须给里面加一个该标签,是该插件的要求,便于插件的生成\r\n\t\t\tstr += ' <select multiple=\"multiple\" size=\"10\" name=\"doublebox\" class=\"machineBox\"></select>';\r\n\t\t\t$('#exampleModal').find('form').html(str);\r\n\r\n\t\t\t// 调用doublebox插件,生成左右选择插件,大括号中的是左右选择插件的配置,有注释的是我们需要根据自己需求改的\r\n\t\t\t$('.machineBox').doublebox({\r\n\t\t\t\tnonSelectedListLabel: 
'产品线下的机器列表', // 插件左侧显示的标题\r\n\t\t\t\tselectedListLabel: '已选择的机器列表', // 插件右侧显示的标题\r\n\t\t\t\tpreserveSelectionOnMove: 'moved',\r\n\t\t\t\tmoveOnSelect: false,\r\n\t\t\t\tnonSelectedList:[{\"roleId\":\"1\",\"roleName\":\"设备1\"},{\"roleId\":\"2\",\"roleName\":\"设备2\"},{\"roleId\":\"3\",\"roleName\":\"设备3\"},{\"roleId\":\"4\",\"roleName\":\"设备4\"}], // 插件左侧显示的数据,从接口中获取\r\n\t\t\t\tselectedList:[{\"roleId\":\"4\",\"roleName\":\"设备4\"},{\"roleId\":\"5\",\"roleName\":\"设备2\"}], // 插件右侧显示的数据,从接口中获取\r\n\t\t\t\toptionValue:\"roleId\",\r\n\t\t\t\toptionText:\"roleName\",\r\n\t\t\t\tdoubleMove:true,\r\n\t\t\t});\r\n\t\t},\r\n\r\n\r\n\t\tinit: function () {\r\n\t\t\tvar _this = this;\r\n\t\t\t_this.taskDataGet(); // 调用该方法,从接口中获取表格的数据\r\n\r\n\t\t\tpublic_func.treeList(); // 调用公共方法,生成左侧的树\r\n\t\t\tpublic_func.checkboxFun(); // 调用公共方法中的checkbox选择事件\r\n\t\t\tpublic_func.subMenuClick(); // 调用公共方法中子菜单点击事件\r\n\t\t\tpublic_func.leftTreeAni(); // 调用公共方法中左侧树动画方法\r\n\r\n\t\t\t// 下面的代买是做了一个测试(可删除):如何获取公共方法中的我们点击的树的某一个节点的id,即当我们要在当前部署js中获取公共的树的节点id时,使用public_func.defaultSet.selTreeNodeId\r\n\t\t\t$('.btn-success').on('click.gtye',function(e) {\r\n\t\t\t\tconsole.log(public_func.defaultSet.selTreeNodeId);\r\n\r\n\t\t\t})\r\n\t\t}\r\n\t};\r\n\r\n\tdeploy.init();\r\n})" }, { "alpha_fraction": 0.560490071773529, "alphanum_fraction": 0.5658499002456665, "avg_line_length": 27.2608699798584, "blob_id": "e3eaf78e27c1d4a0ceb59167de8064e72503a89d", "content_id": "22110efa2ac7f1a6cb3324ca54d576863b064efb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1322, "license_type": "no_license", "max_line_length": 108, "num_lines": 46, "path": "/uplooking_Python/code/lesson09-flask/ops/hosts/models/hostOperateModle.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport json\nfrom libs.db import db\n\nclass HostHandleModle(object):\n\n TABLE = \"tb_device\"\n COLUMN = \"id,hostname,host_type,ip,location\"\n def 
__init__(self):\n pass\n\n @classmethod\n def host_add(cls, *args):\n '''\n :param args: 主机信息\n :return: bool 成功|失败\n\n '''\n\n add_sql = \"insert into %s (hostname,host_type,ip,location) values(%%s, %%s, %%s, %%s)\"%(cls.TABLE)\n lastId = db.insert(add_sql, *args)\n if lastId > 0:\n return True\n return False\n\n\n\n @classmethod\n def queryIdByHostname(cls, hostname):\n sql = \"select id from %s where hostname=%%s\"%(cls.TABLE)\n hostIdResult = db.query_id(sql, hostname)\n if len(hostIdResult) > 0:\n return hostIdResult[0]\n else:\n return 0\n\n @classmethod\n def getHostsByPage(cls, page, host_type):\n pageNum = page\n if isinstance(page, unicode):\n pageNum = int(page)\n pageSize = 3\n start = (pageNum -1)*pageSize\n sql = \"select %s from %s where host_type=%%s limit %%s,%%s\"%(cls.COLUMN, cls.TABLE)\n hosts = db.query_all(sql, host_type, start, pageSize)\n return [dict((value, i[index]) for index, value in enumerate(cls.COLUMN.split(\",\"))) for i in hosts]\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5517998337745667, "alphanum_fraction": 0.5568481087684631, "avg_line_length": 25.205970764160156, "blob_id": "e190a67bf6a283e621d18270c2b2184c41b11373", "content_id": "46189001de3b6aac4602790fb02856fae9d30a89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9612, "license_type": "no_license", "max_line_length": 251, "num_lines": 335, "path": "/uplooking_Python/code/前端/lesson09-bootstrap/web-day-03/monitor-web/js/index.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "/**\r\n * Created by dell on 2017/12/27.\r\n */\r\n\r\n$(function () {\r\n\tvar monitor = {\r\n\r\n\t\tdefaultSet: {\r\n\t\t\tselTreeNodeId: -1,\r\n\t\t\tthisPage: 1,\r\n\t\t\tthisMainMenu: 1,\r\n\t\t\tthisSubMenu: 1\r\n\t\t},\r\n\r\n\t\t// 左侧的树\r\n\t\tonTreeClick: function (event, treeId, treeNode) {\r\n\t\t\tvar _this = this;\r\n\t\t\tmonitor.defaultSet.selTreeNodeId = 
treeNode.id;\r\n\r\n\t\t\tmonitor.tableList('domain', 1);\r\n\t\t},\r\n\t\tbeforeEditName: function (treeId, treeNode) {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\tvar zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n\t\t\tzTree.selectNode(treeNode);\r\n\t\t\tzTree.editName(treeNode);\r\n\r\n\t\t\treturn false;\r\n\t\t},\r\n\t\tbeforeRemove: function (treeId, treeNode) {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\tvar zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n\t\t\tzTree.selectNode(treeNode);\r\n\t\t\treturn monitor.delCheck();\r\n\t\t},\r\n\t\tdelCheck: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t},\r\n\t\tbeforeRename: function (treeId, treeNode, newName, isCancel) {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\tif (newName.length == 0) {\r\n\r\n\t\t\t\t\tvar zTree = $.fn.zTree.getZTreeObj(\"treelist\");\r\n\t\t\t\t\tzTree.cancelEditName();\r\n\t\t\t\t\talert(\"节点名称不能为空.\");\r\n\r\n\t\t\t\treturn false;\r\n\t\t\t}\r\n\t\t\treturn true;\r\n\t\t},\r\n\t\tonRemove: function (e, treeId, treeNode) {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\tvar testDelNodeApi = 'http://www.nodetest.com/node/removeitem', // 删除节点的接口,改成你自己的\r\n\t\t\t\tparam = {};\r\n\r\n\t\t\tparam.thisId = treeNode.id; // 删除节点的id\r\n\r\n\t\t\t//删除节点传的数据即param,结构是 {thisId: 13}\r\n\t\t\t$.post(testDelNodeApi, param, function(response){\r\n\r\n\t\t\t})\r\n\r\n\t\t},\r\n\t\tonRename: function (e, treeId, treeNode, isCancel) {\r\n\t\t\tvar _this = this;\r\n\t\t\tvar testEditNodeApi = 'http://www.nodetest.com/node/edititem',// 编辑节点的接口,改成你自己的\r\n\t\t\t\tparam = {};\r\n\r\n\t\t\tparam.thisId = treeNode.id; // 该编辑节点的id\r\n\t\t\tparam.newName = treeNode.name; //修改后的新名称\r\n\r\n\t\t\t//删除节点传的数据即param,结构是 {thisId: 13, newName:'修改后的名称'}\r\n\t\t\t$.post(testEditNodeApi, param, function(response){\r\n\r\n\t\t\t\tlocation.reload();\r\n\t\t\t})\r\n\r\n\t\t},\r\n\t\tshowRemoveBtn: function (treeId, treeNode) {\r\n\r\n\t\t\treturn !(treeNode.id == -1);\r\n\t\t},\r\n\t\tshowRenameBtn: function (treeId, treeNode) 
{\r\n\r\n\t\t\treturn true;\r\n\t\t},\r\n\t\taddHoverDom: function (treeId, treeNode) {\r\n\t\t\tvar _this = this;\r\n\t\t\tvar newCount = 1;\r\n\t\t\tvar sObj = $(\"#\" + treeNode.tId + \"_span\");\r\n\t\t\tif (treeNode.editNameFlag || $(\"#addBtn_\" + treeNode.tId).length > 0) return;\r\n\t\t\tvar addStr = \"<span class='button add' id='addBtn_\" + treeNode.tId\r\n\t\t\t\t+ \"' title='add node' data-toggle='modal' data-target='#exampleModal' data-for='添加' onfocus='this.blur();'></span>\";\r\n\r\n\t\t\tsObj.after(addStr);\r\n\t\t\tvar addBtn = $(\"#addBtn_\" + treeNode.tId);\r\n\t\t\tif (addBtn) {\r\n\r\n\t\t\t\t$('#exampleModal').on('show.bs.modal', function (event) {\r\n\t\t\t\t\tvar button = $(event.relatedTarget);\r\n\t\t\t\t\tvar recipient = button.data('for');\r\n\t\t\t\t\tvar modal = $(this);\r\n\r\n\t\t\t\t\tif (recipient == '添加') {\r\n\t\t\t\t\t\tvar strTpl = '';\r\n\t\t\t\t\t\tstrTpl += '<div class=\"form-group\">\\\r\n\t\t\t\t\t\t\t\t\t <label for=\"add-node-box\" class=\"control-label\">节点名称:</label>\\\r\n\t\t\t\t\t\t\t\t\t <input type=\"text\" class=\"form-control\" id=\"add-node-box\">\\\r\n\t\t\t\t\t\t\t\t </div>';\r\n\r\n\t\t\t\t\t\tmodal.find('form').html(strTpl);\r\n\r\n\t\t\t\t\t\tvar submitBtn = modal.find('#confirmBtn');\r\n\r\n\t\t\t\t\t\tsubmitBtn.off('click').on('click', function () {\r\n\t\t\t\t\t\t\tvar zTree = $.fn.zTree.getZTreeObj(\"treelist\"),\r\n\t\t\t\t\t\t\t\taddName = $('#add-node-box').val();\r\n\t\t\t\t\t\t\tzTree.addNodes(treeNode, {id: (100 + newCount), pId: treeNode.id, name: addName});\r\n\r\n\t\t\t\t\t\t\tvar testAddNodeApi = 'http://www.nodetest.com/node/additem', // 添加节点的接口url,改成你自己的\r\n\t\t\t\t\t\t\t\tparam = {};\r\n\t\t\t\t\t\t\tparam.parentId = treeNode.id; // 父节点id\r\n\t\t\t\t\t\t\tparam.thisName = addName; // 该新增节点名称\r\n\r\n\t\t\t\t\t\t\t// 访问传的参数结构即param变量,例如{parentId:13, thisName:'新增节点1'}\r\n\t\t\t\t\t\t\t//$.post(testAddNodeApi, param, function (response) {\r\n\t\t\t\t\t\t\t//\tif(response.code == 
0){\r\n\t\t\t\t\t\t\t//\t\t// response中需要你返回给我新的节点的id\r\n\t\t\t\t\t\t\t//\t\tlocation.reload();\r\n\t\t\t\t\t\t\t//\t}\r\n //\r\n\t\t\t\t\t\t\t//\tmodal.modal('hide');\r\n\t\t\t\t\t\t\t//});\r\n\t\t\t\t\t\t\tmodal.modal('hide');\r\n\t\t\t\t\t\t});\r\n\t\t\t\t\t}\r\n\t\t\t\t})\r\n\r\n\t\t\t}\r\n\t\t},\r\n\r\n\t\tremoveHoverDom: function (treeId, treeNode) {\r\n\t\t\t$(\"#addBtn_\" + treeNode.tId).unbind().remove();\r\n\t\t},\r\n\r\n\r\n\r\n\t\t// 表格列表\r\n\t\ttableList: function () {\r\n\t\t\tvar _this = this,\r\n\t\t\t\turl = '',\r\n\t\t\t\tthisMainMenu = _this.defaultSet.thisMainMenu,\r\n\t\t\t\tthisSubMenu = _this.defaultSet.thisSubMenu,\r\n\t\t\t\tparams = {}; // 列表接口的参数\r\n\r\n\t\t\tparams.thisPage = _this.defaultSet.thisPage; // 当前页数名称\r\n\t\t\tparams.thisNode = _this.defaultSet.selTreeNodeId; //当前node节点\r\n\t\t\tparams.mainMenu = thisMainMenu ; // 当前主菜单id\r\n\t\t\tparams.subMenu = thisSubMenu; // 当前子菜单id\r\n\r\n\t\t\turl = 'js/tablelist.json'; // 改成你自己的列表的接口地址\r\n\r\n\t\t\t// 访问传的参数结构即params变量,例如{thisPage:1, thisNode:13, mainMenu:2, subMenu:1}\r\n\t\t\t$.get(url, params, function (res) {\r\n\t\t\t\tvar listData = res.data.detail,\r\n\t\t\t\t\thtmlTpl = '';\r\n\r\n\t\t\t\tif (thisMainMenu == 1) {\r\n\t\t\t\t\thtmlTpl += '<thead><th><td>name</td><td>domain</td><td>enable</td><td>type</td><td>line</td><td>ttl</td></th></thead><tbody>';\r\n\r\n\t\t\t\t} else {\r\n\t\t\t\t\thtmlTpl += '<thead><th><td>主机名</td><td>IP</td><td>宿主机</td><td>机房</td><td>状态</td><td>套餐</td><td>tags</td></th></thead><tbody>';\r\n\t\t\t\t}\r\n\r\n\t\t\t\t$.each(listData, function (index, value) {\r\n\t\t\t\t\tif (thisMainMenu == 1) {\r\n\t\t\t\t\t\thtmlTpl += '<tr><td>' + (index + 1) + '</td><td>' + value.name + '</td><td>' + value.domain + '</td><td>' + value.enable + '</td><td>' + value.type + '</td><td>' + value.line + '</td><td>' + value.ttl + '</td></tr>'\r\n\t\t\t\t\t} else {\r\n\t\t\t\t\t\thtmlTpl += '<tr><td>' + (index + 1) + '</td><td>' + value.zhuji + '</td><td>' + value.ip + 
'</td><td>' + value.suzhuji + '</td><td>' + value.jifang + '</td><td>' + value.status + '</td><td>' + value.menu + '</td><td>' + value.tags + '</td></tr>'\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t});\r\n\r\n\t\t\t\thtmlTpl += '</tbody>';\r\n\r\n\t\t\t\t$('#bodyList').html(htmlTpl);\r\n\r\n\t\t\t}, \"json\");\r\n\r\n\t\t\t_this.pageSel();\r\n\r\n\t\t},\r\n\r\n\t\ttabMenuSwich: function () {\r\n\t\t\tvar _this = this;\r\n\t\t\t_this.tableList('domain', '');\r\n\r\n\t\t\t// 主菜单切换,默认选中第一个子菜单\r\n\t\t\t$('.js-main-tabs').off('click.choice').on('click.choice', 'li', function (e) {\r\n\t\t\t\tvar thisTab = $(this),\r\n\t\t\t\t\tsubMenuArr = [],\r\n\t\t\t\t\tsubMenuTpl = '',\r\n\t\t\t\t\tsubList = $('.js-sub-tabs');\r\n\t\t\t\tthisTab.parent().find('li').removeClass('active');\r\n\t\t\t\tthisTab.addClass('active');\r\n\r\n\t\t\t\t_this.defaultSet.thisPage = 1;\r\n\t\t\t\t_this.defaultSet.thisMainMenu = $(this).data('menu');\r\n\t\t\t\tswitch (thisTab.attr('id')) {\r\n\t\t\t\t\tcase 'domain':\r\n\t\t\t\t\t\tsubList.hide();\r\n\t\t\t\t\t\t$('.domainNav').show();\r\n\t\t\t\t\t\t_this.defaultSet.thisSubMenu = 1;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase 'machine':\r\n\t\t\t\t\t\tsubList.hide();\r\n\t\t\t\t\t\t$('.machineNav').show();\r\n\t\t\t\t\t\t_this.defaultSet.thisSubMenu = 4;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase 'nodemsg':\r\n\t\t\t\t\t\tsubList.hide();\r\n\t\t\t\t\t\t$('.nodemsgNav').show();\r\n\t\t\t\t\t\t_this.defaultSet.thisSubMenu = 7;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tdefault:\r\n\t\t\t\t\t\treturn;\r\n\t\t\t\t}\r\n\r\n\r\n\t\t\t\t_this.tableList();\r\n\r\n\t\t\t});\r\n\r\n\t\t\t$('.js-sub-tabs').off('click.open').on('click.open', 'li', function (e) {\r\n\t\t\t\t_this.defaultSet.thisPage = 1;\r\n\t\t\t\t$(this).parent().find('li').removeClass('active');\r\n\t\t\t\t$(this).addClass('active');\r\n\r\n\t\t\t\t_this.defaultSet.thisSubMenu = $(this).data('sub');\r\n\t\t\t\t_this.tableList();\r\n\t\t\t})\r\n\r\n\r\n\t\t},\r\n\r\n\t\t// 分页数据\r\n\t\tpageSel: function () 
{\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t$('.pagination').off('click').on('click', 'a', function (event) {\r\n\t\t\t\t// 一般直接调接口\r\n\t\t\t\tvar thisPageNum = $(this).text();\r\n\t\t\t\t_this.defaultSet.thisPage = parseInt($(this).text());\r\n\t\t\t\t_this.tableList('domain', thisPageNum);\r\n\r\n\t\t\t})\r\n\r\n\t\t},\r\n\r\n\t\ttreeList: function () {\r\n\t\t\tvar _this = this,\r\n\t\t\t\tr;\r\n\t\t\tvar setting = {\r\n\t\t\t\tview: {\r\n\t\t\t\t\taddHoverDom: _this.addHoverDom,\r\n\t\t\t\t\tremoveHoverDom: _this.removeHoverDom,\r\n\t\t\t\t\tselectedMulti: false\r\n\t\t\t\t},\r\n\t\t\t\tedit: {\r\n\t\t\t\t\tenable: true,\r\n\t\t\t\t\teditNameSelectAll: true,\r\n\t\t\t\t\tshowRemoveBtn: _this.showRemoveBtn,\r\n\t\t\t\t\tshowRenameBtn: _this.showRenameBtn\r\n\t\t\t\t},\r\n\t\t\t\tdata: {\r\n\t\t\t\t\tsimpleData: {\r\n\t\t\t\t\t\tenable: true\r\n\t\t\t\t\t}\r\n\t\t\t\t},\r\n\t\t\t\tcallback: {\r\n\t\t\t\t\tonClick: _this.onTreeClick,\r\n\t\t\t\t\tbeforeEditName: _this.beforeEditName,\r\n\t\t\t\t\tbeforeRemove: _this.beforeRemove,\r\n\t\t\t\t\tbeforeRename: _this.beforeRename,\r\n\t\t\t\t\tonRemove: _this.onRemove,\r\n\t\t\t\t\tonRename: _this.onRename\r\n\t\t\t\t}\r\n\t\t\t};\r\n\r\n\t\t\t$.getJSON('js/protocolTree.json', function (response) {\r\n\t\t\t\tvar zNodes = response.data.agreement;\r\n\t\t\t\t$.fn.zTree.init($(\"#treelist\"), setting, zNodes);\r\n\t\t\t}, \"json\");\r\n\r\n\t\t},\r\n\r\n\t\tleftTreeSlide: function () {\r\n\t\t\tvar _this = this;\r\n\t\t\t$('.js-slide-tree').off('click.sel').on('click.sel', function (e) {\r\n\t\t\t\tvar thisEle = $(this);\r\n\t\t\t\tif (thisEle.hasClass('icon-chevron-left')) {\r\n\t\t\t\t\t$('.float-tree').animate({left: '-250px'}, 'slow');\r\n\t\t\t\t\tthisEle.removeClass('icon-chevron-left').addClass('icon-chevron-right');\r\n\r\n\t\t\t\t} else if (thisEle.hasClass('icon-chevron-right')) {\r\n\t\t\t\t\t$('.float-tree').animate({left: '0px'}, 
'slow');\r\n\t\t\t\t\tthisEle.removeClass('icon-chevron-right').addClass('icon-chevron-left');\r\n\t\t\t\t}\r\n\t\t\t})\r\n\r\n\t\t},\r\n\t\texitBtn: function () {\r\n\t\t\t$('.exit-btn').off('click').on('click', function (e) {\r\n\t\t\t\twindow.location.pathname = \"/monitor-web/login.html\";\r\n\t\t\t})\r\n\t\t},\r\n\r\n\t\tinit: function () {\r\n\t\t\tvar _this = this;\r\n\r\n\t\t\t_this.treeList();\r\n\t\t\t_this.tabMenuSwich();\r\n\t\t\t_this.leftTreeSlide();\r\n\r\n\t\t\t_this.exitBtn();\r\n\t\t}\r\n\t};\r\n\r\n\tmonitor.init();\r\n});" }, { "alpha_fraction": 0.32044199109077454, "alphanum_fraction": 0.32688766717910767, "avg_line_length": 26.174999237060547, "blob_id": "39a2dd7a195066c0608c892dc555cd8c6e2d7c49", "content_id": "81b48caa6a98d71f28332d9c840b74ed2a7aeab7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1086, "license_type": "no_license", "max_line_length": 81, "num_lines": 40, "path": "/uplooking_Python/code/lesson09-flask/ops/config/staging.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "# coding: utf-8\nDEBUG = False\nAPP_DEV_PORT = 8888\n\nconfig_log = {\n 'version': 1,\n 'formatters':{\n 'simple':{\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n },\n 'simple2':{\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n },\n },\n 'handlers':{\n 'dbfile': {\n 'class': 'logging.FileHandler',\n 'filename': 'db.log',\n 'level': 'DEBUG',\n 'formatter': 'simple'\n },\n\n 'httpfile': {\n 'class': 'logging.FileHandler',\n 'filename': 'http.log',\n 'level': 'DEBUG',\n 'formatter': 'simple'\n },\n },\n 'loggers':{\n 'db':{\n 'handlers': ['dbfile'],\n 'level': 'INFO',\n },\n 'http':{\n 'handlers': ['httpfile'],\n 'level': 'INFO',\n }\n }\n }" }, { "alpha_fraction": 0.5193905830383301, "alphanum_fraction": 0.5207756161689758, "avg_line_length": 29.08333396911621, "blob_id": "57cdb518a2efae79567df1612a163de6257f1c98", "content_id": 
"395f3eb5f29f20f966f1a1a120a060fbf168b5a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 776, "license_type": "no_license", "max_line_length": 103, "num_lines": 24, "path": "/uplooking_Python/code/lesson05/myapps/monitor/main.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\nfrom core.monitor_domains import Monitor_Domain\nfrom logger.logger import get_logger\nfrom conf.exprie_domains import exprie_domains\n\ndef main():\n md = Monitor_Domain()\n logger = get_logger(\"whois domains\")\n for domain in exprie_domains:\n md.whois(domain)\n r_time = md.get_register_time()\n e_time = md.get_expire_time()\n my_days = md.get_expire_days()\n if e_time == None:\n logger.info(\"没查到 %s的有效信息.\"%(domain))\n else:\n print \"===================================================================================\"\n print \"域名:%s,注册时间:%s,到期时间:%s,到期天数:%s天\" % (domain,r_time,e_time,my_days)\n\n\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5725806355476379, "alphanum_fraction": 0.5766128897666931, "avg_line_length": 21.545454025268555, "blob_id": "e64a681639ad099d4560bbd4989d2d6670915926", "content_id": "4405d0145b48ee44175ba55e682e7164803ac54c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 756, "license_type": "no_license", "max_line_length": 107, "num_lines": 33, "path": "/uplooking_Python/code/lesson06/less06/db/test_db.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom db import MasterDB\ndb = MasterDB()\n\ndef query_all_test():\n # 测试select\n sql = \"select * from test\"\n result = db.query_all(sql)\n column = [\"id\", \"name\"]\n parsed_result = [dict( (value, line[index]) for index, value in enumerate(column) ) for line in result]\n return parsed_result\n\ndef update_test():\n # 测试update\n table = \"test\"\n name = \"xxxx\"\n id_ 
= 5\n sql = \"update %s set name=%%s where id=%%s\"%table\n print db.update(sql, name, id_)\n\n\ndef insert_test():\n # 测试insert\n column = \"id, name\"\n table = \"test\"\n name = \"xxxx\"\n sql = \"insert into test values(%s, %s)\"\n db.insert(sql, 5, name)\n\nif __name__ == \"__main__\":\n print insert_test()\n" }, { "alpha_fraction": 0.6290507316589355, "alphanum_fraction": 0.6347693204879761, "avg_line_length": 22.008771896362305, "blob_id": "39e3b1983fe3a4b9c89bc56363c7fd3f18d17521", "content_id": "37b7455fc74155ea9528b95eb9d83b1c27fad0f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2639, "license_type": "no_license", "max_line_length": 97, "num_lines": 114, "path": "/uplooking_Python/code/flask/app.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\nfrom flask import Flask, request, render_template\nimport MySQLdb\nfrom flask import jsonify\nimport json\n\ndb = MySQLdb.connect(\"localhost\", \"root\", \"123321\", \"python01\",charset='utf8')\n\napp = Flask(__name__)\n\n# index views\[email protected]('/')\ndef show_index():\n\n return render_template('index.t.html')\n\[email protected]('/mindex')\ndef show_modal():\n return render_template('m.html')\n\n\n#get right user info table api\[email protected]('/getUserInfo',methods=['GET','POST']) \ndef show_table():\n cursor = db.cursor()\n sql = \"SELECT * FROM user_ip_info\"\n cursor.execute(sql)\n row_headers=[x[0] for x in cursor.description]\n results = cursor.fetchall()\n \n data=[]\n for result in results:\n data.append(dict(zip(row_headers,result)))\n return json.dumps(data) \n\n\n#get left tree table api\[email protected]('/tree/all',methods=['GET','POST'])\ndef get_tree_all():\n cursor = db.cursor()\n sql = \"SELECT * FROM map_tree\"\n cursor.execute(sql)\n row_headers = [x[0] for x in cursor.description]\n results = cursor.fetchall()\n\n data = []\n for result in results:\n 
data.append(dict(zip(row_headers,result)))\n return json.dumps(data)\n\n\n\n\n\n#add data api\[email protected]('/addUserInfo',methods=['POST'])\ndef insert_sql():\n \n cursor = db.cursor()\n\n id = request.form.get('id')\n username = request.form.get('username')\n position = request.form.get('position')\n ipaddr = request.form.get('ipaddr')\n remark = request.form.get('remark')\n\n sql = \"insert into user_ip_info (id,username,position,ipaddr,remark) values (%s,%s,%s,%s,%s)\"\n params = (id,username,position,ipaddr,remark)\n result = cursor.execute(sql,params)\n db.commit()\n return jsonify(result)\n\n\n#edit user info api\[email protected]('/edit',methods=['POST'])\ndef edit_update():\n print request.args.get('data')\n\n \n\n# rename tree node api\[email protected]('/node/rename',methods=[\"POST\"])\ndef rename_node(name,id):\n id = request.form.get('id')\n name = request.form.get('name')\n\n sql = 'update map_server set name=%s where id=%s'\n params = (id,name)\n\n cursor = db.cursor()\n results = cursor.execute(sql,params)\n cursor.commit()\n cursor.close()\n db.close()\n\n return results\n\[email protected]('/delete',methods=['GET','POST'])\ndef update_sql():\n cursor = db.cursor()\n sql = \"delete from user_ip_info where id=2\"\n cursor.execute(sql)\n db.commit()\n return '将要删除这行数据'\n\n\n# @app.route('/ajax.html',methods=['GET','POST'])\n# def myajax():\n# return render_template('ajax.html')\n# #pass\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=5001,debug=True)\n" }, { "alpha_fraction": 0.6440678238868713, "alphanum_fraction": 0.6474575996398926, "avg_line_length": 28.5, "blob_id": "7280cb89d795951c1dd09f1b18eb4d50e06b6885", "content_id": "668a67a9613d5f113645d6c79c318b862804de16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 73, "num_lines": 10, "path": "/uplooking_Python/code/lesson05/montorDomains/libs/deco.py", "repo_name": 
"wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom logger.logger import app_logger\nfrom functools import wraps\n\ndef recordLog(func):\n def wapper(*args, **kwargs):\n app_logger.info(\"[%s started][param:%s]\"%(wraps.func_name, args))\n return func(*args, **kwargs)\n return wapper\n" }, { "alpha_fraction": 0.5442478060722351, "alphanum_fraction": 0.5442478060722351, "avg_line_length": 19.545454025268555, "blob_id": "b6971fcc67f848cb005352cede655b6a59ede4bd", "content_id": "e9c6d833e3c8b5652966695ad744d68a208bdf31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 226, "license_type": "no_license", "max_line_length": 61, "num_lines": 11, "path": "/uplooking_Python/code/前端/lesson09-bootstrap/test/monitor/js/index.js", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "function click(){\n $('.main-menu').off('click').on('click','li',function(e){\n var thisBtn = $(this);\n thisBtn.parent().find('li').removeClass('active');\n thisBtn.addClass('active');\n\n\n })\n}\n\nclick()\n" }, { "alpha_fraction": 0.6234309673309326, "alphanum_fraction": 0.6276150345802307, "avg_line_length": 21.571428298950195, "blob_id": "6cf0e8c6365fbfc0e024d0d321d1cde30dddf58e", "content_id": "6cf839142790bc9b8a0e6170578e068fa0ff7451", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 500, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/uplooking_Python/code/lesson05/myapps/monitor/utils/register_time.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\n\nimport re\nimport commands\nfrom libs.parser import register_reg\nfrom convert_time import convert_time\n\n#获取域名注册时间的函数\n\n\ndef register_time(info,domain_name):\n\n for reg in register_reg:\n try:\n result = re.search(reg,info)\n register_time = result.group(1)\n #register_day = register_time\n 
register_day = convert_time(register_time)\n return register_day\n except Exception,e:\n continue\n\n\n\n\n" }, { "alpha_fraction": 0.5580088496208191, "alphanum_fraction": 0.5644319653511047, "avg_line_length": 26.076086044311523, "blob_id": "a5ad50c24674120b9eba845ad6774a48cddd2e3e", "content_id": "471b987e3ef6cdea51b0eeeb31e3531167019f74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2491, "license_type": "no_license", "max_line_length": 87, "num_lines": 92, "path": "/uplooking_Python/code/lesson05/montorDomains_bak/libs/parser.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\nfrom conf.config import register_reg, expire_reg\nimport datetime\n\nclass ParseTime(object):\n\n def __init__(self, timeStr):\n self.timeStr = timeStr\n\n def parse_time(self):\n parsed_funcs = [self.parsed_ymd, self.parsed_ymd2, self.parsed_dmyhms,\n self.parsed_dmy, self.parsed_dmy2, self.parsed_ymdhms]\n for func in parsed_funcs:\n t = func()\n if not t:\n continue\n return t\n raise Exception(\"Not Fount available parser for [%s]\"%self.timeStr)\n\n def parsed_ymd(self):\n try:\n parsed_time = datetime.datetime.strptime(self.timeStr, \"%Y-%m-%d\")\n except Exception, e:\n return False\n return parsed_time\n\n def parsed_ymd2(self):\n try:\n parsed_time = datetime.datetime.strptime(self.timeStr, \"%Y/%m/%d\")\n except Exception, e:\n return False\n return parsed_time\n\n def parsed_dmy(self):\n try:\n parsed_time = datetime.datetime.strptime(self.timeStr, \"%d/%m/%Y\")\n except Exception, e:\n return False\n return parsed_time\n\n def parsed_dmy2(self):\n try:\n parsed_time = datetime.datetime.strptime(self.timeStr, \"%d-%m-%Y\")\n except Exception, e:\n return False\n return parsed_time\n\n def parsed_ymdhms(self):\n try:\n parsed_time = datetime.datetime.strptime(self.timeStr, \"%Y-%m-%d %H:%M:%S\")\n except Exception, e:\n return False\n return 
parsed_time\n\n def parsed_dmyhms(self):\n try:\n parsed_time = datetime.datetime.strptime(self.timeStr, \"%d-%b-%Y %H:%M:%S\")\n except Exception, e:\n return False\n return parsed_time\n\n\ndef getExprDays(t):\n expTime = (t - getLoaclTime()).days\n return expTime\n\ndef getLoaclTime():\n return datetime.datetime.now()\n\ndef register_parser(info):\n for reg in register_reg:\n result = re.search(reg, info)\n if result:\n rtime = re.sub(r'T', \" \", result.group(1))\n return rtime\n continue\n return False\n\ndef expire_parser(info):\n for reg in expire_reg:\n result = re.search(reg, info)\n if result:\n rtime = re.sub(r'T', \" \", result.group(1))\n return rtime\n continue\n return False\n\nif __name__ == '__main__':\n p = ParseTime(\"2018/01/23\")\n print p.parsed_ymd3()\n" }, { "alpha_fraction": 0.6560913920402527, "alphanum_fraction": 0.6560913920402527, "avg_line_length": 19.128204345703125, "blob_id": "ec8d6a6770266b85649c676ea18b71f3dd8715bc", "content_id": "6bb91cb5367184c94b7882dd97964d3cf5d2334c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 788, "license_type": "no_license", "max_line_length": 63, "num_lines": 39, "path": "/uplooking_Python/code/lesson07-flask/app.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask import make_response\nfrom flask import redirect\nfrom flask import g \nfrom flask import request\nfrom b import print_username\n\n\napp = Flask(__name__)\n\n\[email protected](\"/index/<name>\")\ndef login(name):\n username = name\n resp = \"hello %s\" %(username)\n return make_response(resp)\n\n\[email protected]('/hello')\ndef hello():\n return redirect('http://baidu.com')\n\[email protected]_request\ndef login():\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n\n g.user = username\n g.password = password\n\n\[email protected](\"/index\",methods=[\"POST\"])\ndef index():\n 
print_username()\n resp = \"hello %s,your password is :%s\" %(g.user,g.password)\n return make_response(resp)\n\nif __name__ == \"__main__\":\n app.run(debug=True) " }, { "alpha_fraction": 0.5544437766075134, "alphanum_fraction": 0.5600352883338928, "avg_line_length": 29.77570152282715, "blob_id": "9c540d93199f8b7a34873090e9fc4aeb3677c628", "content_id": "a34ca8f813c7377b4f3109cd3669b34ccc03ef07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3910, "license_type": "no_license", "max_line_length": 245, "num_lines": 107, "path": "/0508/zhangmen-htmler.py", "repo_name": "wolfrg/20170508", "src_encoding": "UTF-8", "text": "#coding:utf8\r\n'''\r\nCreated on 2017年4月27日\r\n掌门前端文件自动化部署脚本,该脚本分为两部分,一部分是复制文件的操作,一部分是svn提交文件的操作\r\n@author: Ops\r\n'''\r\n\r\nimport os\r\nimport re\r\nfrom shutil import ignore_patterns, copystat, copy2, Error\r\n\r\n#切换到前端源码目录下\r\nos.chdir(\"E:\\\\update-maven\\\\workspace\\\\zhangmen-htmler\\\\\")\r\n#执行build命令\r\nos.system(\"npm run build\")\r\n\r\n#复制到掌门读项目对应的目录下面\r\n\r\n#复制的函数\r\ndef copytree(src, dst, ignore=ignore_patterns('*.properties')):\r\n \r\n \r\n names = os.listdir(src)\r\n \r\n #得到源目录的文件\r\n #print src\r\n #print names\r\n if ignore is not None:\r\n ignored_names = ignore(src,names)\r\n #print ignored_names\r\n else:\r\n ignored_names = set()\r\n #print ignore_names\r\n errors = [] \r\n for name in names:\r\n if name in ignored_names:\r\n continue\r\n srcname = os.path.join(src,name)\r\n dstname = os.path.join(dst,name)\r\n try:\r\n if os.path.isdir(srcname):\r\n if not os.path.isdir(dstname):\r\n os.makedirs(dstname)\r\n copytree(srcname, dstname, ignore)\r\n #print(\"复制目录成功\")\r\n else:\r\n if os.path.isfile(srcname):\r\n if not os.path.isfile(dstname): \r\n copy2(srcname, dstname) \r\n #print(\"复制文件成功\")\r\n elif os.stat(srcname).st_mtime - os.stat(dstname).st_mtime >1: \r\n copy2(srcname, dstname) \r\n #print(\"复制文件成功\")\r\n #if \"web.xml\" in ignored_names: 
#web.xml文件要复制\r\n #copy2(\"E:\\\\update-maven\\\\workspace\\\\zhangmen-keya\\\\zm-magic-service\\\\target\\\\zm-magic-service-0.0.1\\\\WEB-INF\\\\web.xml\", \"E:\\\\update-work\\\\zhangmen\\\\zhangmen-keya\\\\Alpha\\\\trunk\\\\WEB-INF\\\\web.xml\") \r\n except (IOError, os.error) as why:\r\n errors.append((srcname, dstname, str(why)))\r\n # catch the Error from the recursive copytree so that we can\r\n # continue with other files\r\n except Error as err:\r\n errors.extend(err.args[0])\r\n try:\r\n copystat(src, dst)\r\n except WindowsError:\r\n # can't copy file access times on Windows\r\n pass\r\n except OSError as why:\r\n errors.extend((src, dst, str(why)))\r\n if errors:\r\n raise Error(errors)\r\n\r\n#调用复制的函数\r\ncopytree(\"E:\\\\update-maven\\\\workspace\\\\zhangmen-htmler\\\\statics\\\\\",\"E:\\\\update-work\\\\zhangmen\\\\zhangmen-reader\\\\Alpha\\\\trunk\\\\statics\\\\\", ignore_patterns(\"*.properties\")) \r\n\r\n\r\n#提交文件的操作\r\n\r\n#==========================================提交文件的操作===========================#\r\n\r\n#切换到svn目录\r\nos.chdir('E:\\\\update-work\\\\zhangmen\\\\zhangmen-reader\\\\Alpha\\\\trunk\\\\statics\\\\') \r\n\r\n#强制add没有受版本控制的文件\r\n#把状态为delete的文件重定向到一个txt文件,每次提交完成后清空文件\r\nos.system('svn add --force *') \r\na = os.system('svn st -u . | findstr ! > E:\\\\svn_delete\\\\notdelete.txt') \r\n\r\n#接下来就是对notdelete.txt文件的操作\r\n\r\n#首先读取文件的每一行\r\nlines = open(\"E:\\\\svn_delete\\\\notdelete.txt\",\"r\").readlines()\r\n\r\n#使用for循环把替换后的内容写入一个新文件\r\nfp = open(\"E:\\\\svn_delete\\\\delete.txt\",\"w\")\r\nfor s in lines:\r\n fp.write(re.sub('!|\\d| ','',s,20)) #把!符号、数字和空格替换掉 \r\nfp.close() #关闭文件\r\n\r\n#第二步操作替换好的文件\r\ndeletefiles = open(\"E:\\\\svn_delete\\\\delete.txt\",\"r\").readlines()\r\n\r\nfor d in deletefiles:\r\n os.system('svn delete %s ' % d)\r\n\r\n#提交全部文件 \r\nos.system('svn commit -m \"zhangmen-htmler Server commit\"') \r\nprint '文件提交成功'" } ]
164
hussein-hub/covid19-tracker
https://github.com/hussein-hub/covid19-tracker
c62289a84d9df6ab0d66306be76007d80bd965f0
d30c506a1e39c3cd6ba1dd65f5e40005747d32dc
3386dcb5ebc05a79f1288a0f481c780530b3739e
refs/heads/main
2023-06-17T13:02:03.226787
2021-07-13T15:08:18
2021-07-13T15:08:18
385,629,438
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7564979195594788, "alphanum_fraction": 0.7633379101753235, "avg_line_length": 39.61111068725586, "blob_id": "0c4b25435394e8c6995892500dc5234ac80aa807", "content_id": "b3569a6f18699082cd6576e14fb192acfa4fcffe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 731, "license_type": "no_license", "max_line_length": 172, "num_lines": 18, "path": "/Readme.md", "repo_name": "hussein-hub/covid19-tracker", "src_encoding": "UTF-8", "text": "# Covid19 Tracker using Tkinter\n\n\n### What does this app do ?\nIt uses python as a primary language to get data from different websites such as [worldometer](https://www.worldometers.info/coronavirus/) to scrape data for all countries.\n\nIt also use covid19 India API to get India specific data like country info, state info and city\n### Features:\n- Check Total cases in india\n- Check Total cases in a particular city\n- Check graphs for cases and deaths in a particular city\n- Check cases in a particular state in India \n- Check cases in a particular city in India \n\n### Usage:\n- Install python from [python.org](https://www.python.org/downloads/).\n- Install tkinter a python library for GUI creation.\n- Install bs4 for web scraping\n" }, { "alpha_fraction": 0.49835890531539917, "alphanum_fraction": 0.540912926197052, "avg_line_length": 42.095394134521484, "blob_id": "1b2cd7b2a92804266a6ae3d2d8185c78672c011a", "content_id": "7577bf472a42967c5fd6e5e6adcab7634dc91dcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26202, "license_type": "no_license", "max_line_length": 159, "num_lines": 608, "path": "/main.py", "repo_name": "hussein-hub/covid19-tracker", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tkinter import INSERT, messagebox\nfrom tkinter import *\nimport json\nimport datetime\nfrom tkcalendar import Calendar\nimport 
matplotlib.pyplot as plt\n\nLARGEFONT = (\"Courier\", 35)\nMEDIUMFONT = (\"Courier\", 20)\nSMALLFONT = (\"Courier\", 15)\n\nclass tkinterApp(tk.Tk):\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n\n self.geometry('800x550')\n self.title('Covid19')\n self.configure(bg='#accdf5')\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n self.frames = {}\n for F in (StartPage, countryPage, IndiaPage, stateCityPage):\n frame = F(container, self)\n self.frames[F] = frame\n frame.grid(row=0, column=0, sticky=\"nsew\")\n\n self.show_frame(StartPage)\n\n def show_frame(self, cont):\n frame = self.frames[cont]\n frame.tkraise()\nclass StartPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent, bg='#accdf5')\n def initialize():\n label = tk.Label(self, text=\"COVID19 TRACKER\", font=LARGEFONT, bg='#accdf5')\n label.grid(row=0, column=2, padx=150, pady=10, rowspan=2)\n\n WORLD_URL = \"https://www.worldometers.info/coronavirus/\"\n total_request = requests.get(WORLD_URL)\n soup = BeautifulSoup(total_request.content, 'html.parser')\n total_deaths_recevored = soup.findAll('div', attrs={'class': 'maincounter-number'})\n data = []\n for d in total_deaths_recevored:\n data.append(d.span.text)\n\n T1 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T2 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T3 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n # total cases\n total_cases = tk.Label(self, text=\"Total Cases\", bg='#accdf5', font=LARGEFONT)\n total_cases.config(font=(\"Courier\", 14))\n total_cases.place(x=80, y=100)\n total = f' {data[0]}'\n T1.insert(INSERT, total)\n T1.place(x=40, y=125, height=35, width=200)\n\n # total deaths\n total_deaths = tk.Label(self, text=\"Total Deaths\", bg='#accdf5', font=LARGEFONT)\n 
total_deaths.config(font=(\"Courier\", 14))\n total_deaths.place(x=320, y=100)\n deaths = f' {data[1]}'\n T2.insert(INSERT, deaths)\n T2.place(x=280, y=125, height=35, width=200)\n\n # total recovered\n total_recovered = tk.Label(self, text=\"Total Recovered\", bg='#accdf5', font=LARGEFONT)\n total_recovered.config(font=(\"Courier\", 14))\n total_recovered.place(x=535, y=100)\n recovered = f' {data[2]}'\n T3.insert(INSERT, recovered)\n T3.place(x=520, y=125, height=35, width=200)\n\n # row 2 active cases data\n T4 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T5 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T6 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n\n WORLD_URL = \"https://www.worldometers.info/coronavirus/\"\n total_request = requests.get(WORLD_URL)\n soup = BeautifulSoup(total_request.content, 'html.parser')\n curr = soup.find('div', attrs={'class': 'number-table-main'})\n mild_serious_fetch = soup.findAll('span', attrs={'class': 'number-table'})\n mild_serious = []\n for i in mild_serious_fetch:\n mild_serious.append(i.text)\n\n current = tk.Label(self, text=\"Active Cases\", bg='#accdf5', font=LARGEFONT)\n current.config(font=(\"Courier\", 14))\n current.place(x=80, y=175)\n cur = f' {curr.text}'\n T4.insert(INSERT, cur)\n T4.place(x=40, y=200, height=35, width=200)\n\n mild = tk.Label(self, text=\"Mild Condition\", bg='#accdf5', font=LARGEFONT)\n mild.config(font=(\"Courier\", 14))\n mild.place(x=300, y=175)\n m = f' {mild_serious[0]}'\n T5.insert(INSERT, m)\n T5.place(x=280, y=200, height=35, width=200)\n\n critical = tk.Label(self, text=\"Critical Condition\", bg='#accdf5', font=LARGEFONT)\n critical.config(font=(\"Courier\", 14))\n critical.place(x=520, y=175)\n cri = f' {mild_serious[1]}'\n T6.insert(INSERT, cri)\n T6.place(x=520, y=200, height=35, width=200)\n\n countryinfo = tk.Button(self, text='Country Info', command=lambda: controller.show_frame(countryPage),\n bg='#4e74fc',\n 
font=MEDIUMFONT)\n Indiainfo = tk.Button(self, text='India Info', command=lambda: controller.show_frame(IndiaPage),\n bg='#4e74fc',\n font=MEDIUMFONT)\n\n countryinfo.place(x=50, y=300)\n countryinfo.config(font=(\"Courier\", 13))\n Indiainfo.place(x=250, y=300)\n Indiainfo.config(font=(\"Courier\", 13))\n\n refresh = tk.Button(self, text='Refresh', command=initialize, bg='#4e74fc', font=(\"Courier\", 10))\n refresh.place(x=30, y=30)\n initialize()\n\n # markAttendance.place(x=560, y=200)\nclass IndiaPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent, bg='#accdf5')\n\n # label of frame Layout 2\n label = tk.Label(self, text=\"COVID INDIA\", font=LARGEFONT, bg='#accdf5')\n label.place(x=210, y=10)\n\n back = tk.Button(self, text='back', command=lambda: controller.show_frame(StartPage), bg='#4e74fc',\n font=(\"Courier\", 10))\n back.place(x=30, y=30)\n\n cal = Calendar(self, selectmode='day',\n year=2020, month=5,\n day=22)\n\n cal.place(x=70, y=75)\n\n date_time_obj = datetime.datetime.strptime('01/01/21', '%m/%d/%y')\n\n def getSingleData(date, dailyConfirmed, dailyDeath):\n url = \"https://api.covid19india.org/data.json\"\n page = requests.get(url)\n data = json.loads(page.text)\n # cases.append(int(data[\"cases_time_series\"][i][\"dailydeceased\"]))\n for i in range(len(data[\"cases_time_series\"])):\n if data[\"cases_time_series\"][i][\"dateymd\"] == str(date):\n dailyConfirmed.delete(\"1.0\", \"end\")\n dailyConfirmed.insert(INSERT, data[\"cases_time_series\"][i][\"dailyconfirmed\"])\n dailyDeath.delete(\"1.0\", \"end\")\n dailyDeath.insert(INSERT, data[\"cases_time_series\"][i][\"dailydeceased\"])\n\n def grad_date(dailyConfirmed, dailyDeath):\n date_time_str = cal.get_date()\n date_time_obj = datetime.datetime.strptime(date_time_str, '%m/%d/%y')\n\n print('Date:', date_time_obj.date())\n text = \"Selected Date is: \" + str(date_time_obj.date())\n dateDATA.delete(0, END)\n dateDATA.insert(0, text)\n 
getSingleData(date_time_obj.date(), dailyConfirmed, dailyDeath)\n\n # Add Button and Label\n getDate = tk.Button(self, text='Get Date', command=lambda: grad_date(dailyConfirmed, dailyDeath),\n bg='#4e74fc',\n font=MEDIUMFONT)\n getDate.place(x=35, y=370)\n getDate.config(font=(\"Courier\", 13))\n\n dateDATA = tk.Entry(self, width=29, borderwidth=2)\n dateDATA.config(font=(\"Courier\", 14))\n dateDATA.place(x=185, y=375, height=35)\n dateDATA.focus()\n\n state_city = tk.Button(self, text='State and City', command=lambda: controller.show_frame(stateCityPage),\n bg='#4e74fc',\n font=MEDIUMFONT)\n state_city.place(x=575, y=370)\n state_city.config(font=(\"Courier\", 13))\n\n cases_label = tk.Label(self, text=\"Cases: \", bg='#accdf5')\n cases_label.config(font=(\"Courier\", 14))\n cases_label.place(x=450, y=100)\n dailyConfirmed = tk.Text(self, height=4, width=52, bg='#accdf5', font=(\"Courier\", 13))\n dailyConfirmed.place(x=525, y=100, height=30, width=125)\n\n death_label = tk.Label(self, text=\"Deaths: \", bg='#accdf5')\n death_label.config(font=(\"Courier\", 14))\n death_label.place(x=430, y=150)\n dailyDeath = tk.Text(self, height=4, width=52, bg='#accdf5', font=(\"Courier\", 13))\n dailyDeath.place(x=525, y=150, height=30, width=125)\n\n def giveGraph(n, type):\n if n.isdigit():\n dates = []\n cases = []\n n = abs(int(n))\n\n url = \"https://api.covid19india.org/data.json\"\n page = requests.get(url)\n data = json.loads(page.text)\n for i in range(-1, (-1) * (n + 1), -1):\n dates.append(data[\"cases_time_series\"][i][\"date\"][:-4])\n # cases.append(int(data[\"cases_time_series\"][i][\"dailydeceased\"]))\n cases.append(int(data[\"cases_time_series\"][i][type]))\n\n dates.reverse()\n cases.reverse()\n # print(dates)\n # print(cases)\n plt.figure(figsize=(8, 8))\n plt.plot(dates, cases, marker='o')\n plt.xlabel('Date')\n plt.xticks(rotation=35)\n plt.ylabel('Number of Cases')\n plt.show()\n else:\n tk.messagebox.showwarning('Wrong Input', 'Enter a number not a 
string!!!')\n\n\n days = tk.Entry(self, width=10, borderwidth=2)\n days.config(font=(\"Courier\", 14))\n days.place(x=215, y=300, height=35)\n days.focus()\n\n countryinfo = tk.Button(self, text='Cases stats', command=lambda: giveGraph((days.get()), \"dailyconfirmed\"),\n bg='#4e74fc',\n font=MEDIUMFONT)\n countryinfo.place(x=35, y=300)\n countryinfo.config(font=(\"Courier\", 13))\n\n deathStat = tk.Button(self, text='Death stats', command=lambda: giveGraph((days.get()), \"dailydeceased\"),\n bg='#4e74fc',\n font=MEDIUMFONT)\n deathStat.place(x=370, y=300)\n deathStat.config(font=(\"Courier\", 13))\n\n\n '''year = tk.Label(self, text=\"Year\", font=(\"Courier\", 16), bg='#accdf5')\n year.place(x=250, y=100)\n\n yearInput = tk.Entry(self, width=12, borderwidth=6, font=(\"Courier\", 14))\n yearInput.place(x=385, y=100)\n yearInput.focus()\n\n subject = tk.Label(self, text=\"Subject\", font=(\"Courier\", 16), bg='#accdf5')\n subject.place(x=240, y=175)\n\n subjectInput = tk.Entry(self, width=12, borderwidth=6, font=(\"Courier\", 14))\n subjectInput.place(x=385, y=170)\n\n back = tk.Button(self, text='back', font=(\"Courier\", 15), command=lambda: controller.show_frame(StartPage),\n bg='#4e74fc')\n back.place(x=210, y=280)\n\n create = tk.Button(self, text='Create', font=(\"Courier\", 15), bg='#4e74fc')\n create.place(x=520, y=280)\n\n send = tk.Button(self, text='Send Attendance', font=(\"Courier\", 15), bg='#4e74fc')\n send.grid(row=5, column=1, pady=10)\n send.place(x=305, y=280)'''\nclass stateCityPage(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent, bg='#accdf5')\n label = tk.Label(self, text=\"STATE AND CITY INFO\", font=LARGEFONT, bg='#accdf5')\n label.place(x=100, y=10)\n\n back = tk.Button(self, text='Back', command=lambda: controller.show_frame(IndiaPage), bg='#4e74fc',\n font=(\"Courier\", 10))\n back.place(x=30, y=30)\n\n T1 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T2 = tk.Text(self, 
height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T3 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n stateName = tk.Text(self, height=5, width=52, bg='#accdf5', font=MEDIUMFONT)\n districtName = tk.Text(self, height=5, width=52, bg='#accdf5', font=MEDIUMFONT)\n\n def getStateData(state, T1, T2, T3, stateName):\n flag = True\n url = \"https://api.covid19india.org/data.json\"\n page = requests.get(url)\n data = json.loads(page.text)\n stateName.delete(\"1.0\", \"end\")\n stateName.insert(INSERT, state)\n for i in range(len(data[\"statewise\"])):\n if data[\"statewise\"][i][\"state\"] == state:\n flag = False\n T1.delete(\"1.0\", \"end\")\n T1.insert(INSERT, data[\"statewise\"][i][\"active\"])\n T2.delete(\"1.0\", \"end\")\n T2.insert(INSERT, data[\"statewise\"][i][\"confirmed\"])\n T3.delete(\"1.0\", \"end\")\n T3.insert(INSERT, data[\"statewise\"][i][\"deaths\"])\n if flag:\n tk.messagebox.showwarning('Input Error', f'instead of \"{state}\" try \"{state.capitalize()}\" or trying entering state name correctly!!')\n\n def getDistrictData(district, T4, T5, T6, districtName):\n flag = True\n url = \"https://api.covid19india.org/district_wise.json\"\n page = requests.get(url)\n data = json.loads(page.text)\n districtName.delete(\"1.0\", \"end\")\n districtName.insert(INSERT, district)\n print(district)\n for i in range(len(data[\"districts\"])):\n if data[\"districts\"][i][\"district\"] == district:\n flag = False\n T4.delete(\"1.0\", \"end\")\n T4.insert(INSERT, data[\"districts\"][i][\"active\"])\n T5.delete(\"1.0\", \"end\")\n T5.insert(INSERT, data[\"districts\"][i][\"confirmed\"])\n T6.delete(\"1.0\", \"end\")\n T6.insert(INSERT, data[\"districts\"][i][\"deceased\"])\n if flag:\n tk.messagebox.showwarning('Input Error', f'instead of \"{district}\" try \"{district.capitalize()}\" or trying entering district name correctly!!')\n\n data = [0, 0, 0]\n mild_serious = [0, 0]\n\n stateName.place(x=40, y=80, height=35, width=200)\n\n activeCases = 
tk.Label(self, text=\"Active Cases\", bg='#accdf5', font=LARGEFONT)\n activeCases.config(font=(\"Courier\", 14))\n activeCases.place(x=80, y=140)\n total = f' {data[0]}'\n # T1.insert(INSERT, total)\n T1.place(x=40, y=165, height=35, width=200)\n\n\n # total cases\n Confirmedcases = tk.Label(self, text=\"Confirmed Cases\", bg='#accdf5', font=LARGEFONT)\n Confirmedcases.config(font=(\"Courier\", 14))\n Confirmedcases.place(x=290, y=140)\n total = f' {data[0]}'\n # T2.insert(INSERT, total)\n T2.place(x=280, y=165, height=35, width=200)\n\n # total deaths\n total_deaths = tk.Label(self, text=\"Total Deaths\", bg='#accdf5', font=LARGEFONT)\n total_deaths.config(font=(\"Courier\", 14))\n total_deaths.place(x=540, y=140)\n deaths = f' {data[1]}'\n # T3.insert(INSERT, deaths)\n T3.place(x=520, y=165, height=35, width=200)\n\n stateName_label = tk.Label(self, text=\"State Name: \", bg='#accdf5')\n stateName_label.config(font=(\"Courier\", 14))\n stateName_label.place(x=50, y=435)\n\n state = tk.Entry(self, width=12, borderwidth=2)\n state.config(font=(\"Courier\", 14))\n state.place(x=220, y=435)\n state.focus()\n\n update = tk.Button(self, text=\"Update state\", bg='#4e74fc',\n command=lambda: getStateData(state.get(), T1, T2, T3, stateName))\n update.config(font=(\"Courier\", 12))\n update.place(x=400, y=430)\n\n districtName.place(x=40, y=255, height=35, width=200)\n\n T4 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T5 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T6 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n\n current = tk.Label(self, text=\"Active Cases\", bg='#accdf5', font=LARGEFONT)\n current.config(font=(\"Courier\", 14))\n current.place(x=80, y=315)\n cur = f' {0}'\n # T4.insert(INSERT, cur)\n T4.place(x=40, y=340, height=35, width=200)\n\n mild = tk.Label(self, text=\"Mild Condition\", bg='#accdf5', font=LARGEFONT)\n mild.config(font=(\"Courier\", 14))\n mild.place(x=320, y=315)\n m = f' 
{mild_serious[0]}'\n # T5.insert(INSERT, m)\n T5.place(x=280, y=340, height=35, width=200)\n\n critical = tk.Label(self, text=\"Critical Condition\", bg='#accdf5', font=LARGEFONT)\n critical.config(font=(\"Courier\", 14))\n critical.place(x=535, y=315)\n cri = f' {mild_serious[1]}'\n # T6.insert(INSERT, cri)\n T6.place(x=520, y=340, height=35, width=200)\n\n districtName_label = tk.Label(self, text=\"District Name: \", bg='#accdf5')\n districtName_label.config(font=(\"Courier\", 14))\n districtName_label.place(x=50, y=500)\n\n district = tk.Entry(self, width=12, borderwidth=2)\n district.config(font=(\"Courier\", 14))\n district.place(x=220, y=500)\n district.focus()\n\n updatedistrict = tk.Button(self, text=\"Update district\", bg='#4e74fc',\n command=lambda: getDistrictData(district.get(), T4, T5, T6, districtName))\n updatedistrict.config(font=(\"Courier\", 12))\n updatedistrict.place(x=400, y=495)\nclass countryPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent, bg='#accdf5')\n label = tk.Label(self, text=\"Indivisual Country\", font=LARGEFONT, bg='#accdf5')\n label.place(x=150, y=10)\n\n countryName_label = tk.Label(self, text=\"Country Name: \", bg='#accdf5')\n countryName_label.config(font=(\"Courier\", 14))\n countryName_label.place(x=50, y=300)\n\n country = tk.Entry(self, width=12, borderwidth=2)\n country.config(font=(\"Courier\", 14))\n country.place(x=220, y=300)\n country.focus()\n\n data = [0, 0, 0]\n\n def getData(country, T1, T2, T3, T4, T5, T6):\n WORLD_URL = f\"https://www.worldometers.info/coronavirus/country/{country}\"\n total_request = requests.get(WORLD_URL)\n print(total_request.status_code)\n soup = BeautifulSoup(total_request.content, 'html.parser')\n if str(soup.title) != '<title>404 Not Found</title>':\n total_deaths_recevored = soup.findAll('div', attrs={'class': 'maincounter-number'})\n i = 0\n for d in total_deaths_recevored:\n data[i] = d.span.text\n i = i + 1\n T1.delete(\"1.0\", \"end\")\n 
T2.delete(\"1.0\", \"end\")\n T3.delete(\"1.0\", \"end\")\n total = f' {data[0]}'\n T1.insert(INSERT, total)\n deaths = f' {data[1]}'\n T2.insert(INSERT, deaths)\n recovered = f' {data[2]}'\n T3.insert(INSERT, recovered)\n getData2(country, T4, T5, T6)\n else:\n tk.messagebox.showwarning('Wrong Country Name', 'Please Enter Correct Country Name!!!')\n\n T1 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T2 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T3 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n # total cases\n total_cases = tk.Label(self, text=\"Total Cases\", bg='#accdf5', font=LARGEFONT)\n total_cases.config(font=(\"Courier\", 14))\n total_cases.place(x=80, y=100)\n total = f' {data[0]}'\n T1.insert(INSERT, total)\n T1.place(x=40, y=125, height=35, width=200)\n\n # total deaths\n total_deaths = tk.Label(self, text=\"Total Deaths\", bg='#accdf5', font=LARGEFONT)\n total_deaths.config(font=(\"Courier\", 14))\n total_deaths.place(x=320, y=100)\n deaths = f' {data[1]}'\n T2.insert(INSERT, deaths)\n T2.place(x=280, y=125, height=35, width=200)\n\n # total recovered\n total_recovered = tk.Label(self, text=\"Total Recovered\", bg='#accdf5', font=LARGEFONT)\n total_recovered.config(font=(\"Courier\", 14))\n total_recovered.place(x=535, y=100)\n recovered = f' {data[2]}'\n T3.insert(INSERT, recovered)\n T3.place(x=520, y=125, height=35, width=200)\n\n # row 2 active cases data\n\n def getData2(country, T4, T5, T6):\n WORLD_URL = f\"https://www.worldometers.info/coronavirus/country/{country}\"\n total_request = requests.get(WORLD_URL)\n soup = BeautifulSoup(total_request.content, 'html.parser')\n curr = soup.find('div', attrs={'class': 'number-table-main'})\n mild_serious_fetch = soup.findAll('span', attrs={'class': 'number-table'})\n mild_serious = [0, 0, 0, 0]\n i = -1\n c = curr.text\n for md in mild_serious_fetch:\n i = i + 1\n mild_serious[i] = md.text\n if country == 'india':\n url = 
\"https://worldometers.p.rapidapi.com/api/coronavirus/country/India\"\n\n headers = {\n 'x-rapidapi-key': \"4de5bae009msh3d690447f433304p1d9fe1jsnf5600d22cfc9\",\n 'x-rapidapi-host': \"worldometers.p.rapidapi.com\"\n }\n\n response = requests.request(\"GET\", url, headers=headers)\n data = response.json()\n print(data['data']['Active Cases'])\n c = data['data']['Active Cases']\n mild_serious[0] = 0\n mild_serious[1] = data['data']['Critical']\n elif country == 'us':\n url = \"https://worldometers.p.rapidapi.com/api/coronavirus/country/USA\"\n\n headers = {\n 'x-rapidapi-key': \"4de5bae009msh3d690447f433304p1d9fe1jsnf5600d22cfc9\",\n 'x-rapidapi-host': \"worldometers.p.rapidapi.com\"\n }\n\n response = requests.request(\"GET\", url, headers=headers)\n data = response.json()\n print(data['data']['Active Cases'])\n c = data['data']['Active Cases']\n mild_serious[0] = 0\n mild_serious[1] = data['data']['Critical']\n\n T4.delete(\"1.0\", \"end\")\n T5.delete(\"1.0\", \"end\")\n T6.delete(\"1.0\", \"end\")\n cur = f\" {c}\"\n T4.insert(INSERT, cur)\n m = f' {mild_serious[0]}'\n T5.insert(INSERT, m)\n cri = f\" {mild_serious[1]}\"\n T6.insert(INSERT, cri)\n\n\n T4 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T5 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n T6 = tk.Text(self, height=5, width=52, bg='#accdf5', font=SMALLFONT)\n mild_serious = [0, 0, 0, 0]\n c = 0\n\n current = tk.Label(self, text=\"Active Cases\", bg='#accdf5', font=LARGEFONT)\n current.config(font=(\"Courier\", 14))\n current.place(x=80, y=175)\n cur = f' {c}'\n T4.insert(INSERT, cur)\n T4.place(x=40, y=200, height=35, width=200)\n\n mild = tk.Label(self, text=\"Mild Condition\", bg='#accdf5', font=LARGEFONT)\n mild.config(font=(\"Courier\", 14))\n mild.place(x=320, y=175)\n m = f' {mild_serious[0]}'\n T5.insert(INSERT, m)\n T5.place(x=280, y=200, height=35, width=200)\n\n critical = tk.Label(self, text=\"Critical Condition\", bg='#accdf5', font=LARGEFONT)\n 
critical.config(font=(\"Courier\", 14))\n critical.place(x=535, y=175)\n cri = f' {mild_serious[1]}'\n T6.insert(INSERT, cri)\n T6.place(x=520, y=200, height=35, width=200)\n\n update = tk.Button(self, text=\"update\", bg='#4e74fc',\n command=lambda: getData(country.get(), T1, T2, T3, T4, T5, T6))\n update.config(font=(\"Courier\", 12))\n update.place(x=400, y=295)\n\n def plotGraph(country, n):\n COUNTRY_URL = f\"https://www.worldometers.info/coronavirus/country/{country}\"\n r = requests.get(COUNTRY_URL)\n\n soup = BeautifulSoup(r.content, 'html.parser')\n num = []\n days = []\n\n caseList = soup.findAll('li', attrs={'class': 'news_li'})\n date = soup.findAll('button', attrs={'class': 'btn btn-light date-btn'})\n\n for (cases, day) in zip(caseList, date):\n case = cases.text.split(\" \")\n case[n] = case[n].split(\",\")\n num.append(int(''.join(case[n])))\n days.append(str(day.text[:-1]))\n print(str(day.text[:-1]) + \" : \" + str(''.join(case[n])))\n\n num.reverse()\n days.reverse()\n plt.figure(figsize=(8, 7))\n plt.plot(days[1:], num[:-1], marker='o', label='Case Trend')\n plt.title('Last 5 Day Graph')\n plt.xlabel('Date')\n plt.ylabel('Number of Cases')\n plt.legend()\n plt.show()\n plotCases = tk.Button(self, text=\"Plot Cases\", bg='#4e74fc',\n command=lambda: plotGraph(country.get(), 0))\n plotCases.config(font=(\"Courier\", 12))\n plotCases.place(x=605, y=335)\n\n plotDeaths = tk.Button(self, text=\"Plot Deaths\", bg='#4e74fc',\n command=lambda: plotGraph(country.get(), 4))\n plotDeaths.config(font=(\"Courier\", 12))\n plotDeaths.place(x=600, y=295)\n\n back = tk.Button(self, text='Back', command=lambda: controller.show_frame(StartPage), bg='#4e74fc',\n font=(\"Courier\", 10))\n back.place(x=30, y=30)\napp = tkinterApp()\napp.mainloop()\n" }, { "alpha_fraction": 0.5676532983779907, "alphanum_fraction": 0.5877378582954407, "avg_line_length": 26.852941513061523, "blob_id": "ca21b60d7a74a13dd60e6cd8009b7288ee8ef117", "content_id": 
"4d3d3d09ed70b0f26d0bc9f2538afe636b3c981e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 946, "license_type": "no_license", "max_line_length": 77, "num_lines": 34, "path": "/statewiseData.py", "repo_name": "hussein-hub/covid19-tracker", "src_encoding": "UTF-8", "text": "# import requests\n# import json\n# import matplotlib.pyplot as plt\n# def giveGraph(n):\n# dates = []\n# cases = []\n#\n# url = \"https://api.covid19india.org/data.json\"\n# page = requests.get(url)\n# data = json.loads(page.text)\n# for i in range(-1, (-1) * (n + 1), -1):\n# dates.append(data[\"cases_time_series\"][i][\"date\"][:-4])\n# cases.append(int(data[\"cases_time_series\"][i][\"dailydeceased\"]))\n# # cases.append(int(data[\"cases_time_series\"][i][\"dailyconfirmed\"]))\n#\n# dates.reverse()\n# cases.reverse()\n# # print(dates)\n# # print(cases)\n# plt.figure(figsize=(8, 8))\n# plt.plot(dates, cases, marker='o')\n# plt.xlabel('Date')\n# plt.xticks(rotation=35)\n# plt.ylabel('Number of Cases')\n# plt.show()\n#\n# giveGraph(12)\n\nimport datetime\n\ndate_time_str = '08/11/21'\ndate_time_obj = datetime.datetime.strptime(date_time_str, '%d/%m/%y')\n\nprint('Date:', date_time_obj.date())" }, { "alpha_fraction": 0.6489546895027161, "alphanum_fraction": 0.6550522446632385, "avg_line_length": 26.33333396911621, "blob_id": "9acaac6cf62a471429938a2ba48e0a948a430db0", "content_id": "9e698e5907187de81abbbdb59e3b05cc3fc8ceea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1148, "license_type": "no_license", "max_line_length": 76, "num_lines": 42, "path": "/getWorldometerData.py", "repo_name": "hussein-hub/covid19-tracker", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\n\ncountry = input(\"Enter country name : \")\n\nCOUNTRY_URL = f\"https://www.worldometers.info/coronavirus/country/{country}\"\nr = 
requests.get(COUNTRY_URL)\n\nsoup = BeautifulSoup(r.content, 'html.parser')\nnum = []\ndays = []\nb = soup.findAll('button', attrs={'class': 'btn btn-light date-btn'})\nd = soup.findAll('div', attrs={'class': 'newsdate_div'})\nfor i, j in zip(d, b):\n print(i.div.ul.li.strong.text + \" : \" + j.text)\n case = i.div.ul.li.strong.text.split(' ')\n case[0] = case[0].split(',')\n num.append(int(''.join(case[0])))\n days.append(j.text)\nnum.reverse()\ndays.reverse()\nplt.figure(figsize=(8, 7))\nplt.plot(days, num, marker='o', label='Case Trend')\nplt.title('Last 5 Day Graph')\nplt.xlabel('Date')\nplt.ylabel('Number of Cases')\nplt.legend()\nplt.show()\n\n#\n# url = \"https://worldometers.p.rapidapi.com/api/coronavirus/country/USA\"\n#\n#\n# headers = {\n# 'x-rapidapi-key': \"4de5bae009msh3d690447f433304p1d9fe1jsnf5600d22cfc9\",\n# 'x-rapidapi-host': \"worldometers.p.rapidapi.com\"\n# }\n#\n# response = requests.request(\"GET\", url, headers=headers)\n# data = response.json()\n# print(data['data'])\n" } ]
4
RQuispeC/mo805-assignment7
https://github.com/RQuispeC/mo805-assignment7
a20bea4d9b7d8338acb34ce72f2c7119d1a1301d
781a06739e1b0aa9076e1483869f549625323f95
fe72f7590c138719e6077582b9fc5471bff397f2
refs/heads/master
2020-05-17T03:08:43.617215
2020-02-14T16:48:34
2020-02-14T16:48:34
183,471,153
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6974790096282959, "alphanum_fraction": 0.756302535533905, "avg_line_length": 25.44444465637207, "blob_id": "79ffa31d13d084d395573d8dc453f9033b77b035", "content_id": "ea52f9fa344f3d381f4a5cde47047a718517761e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 238, "license_type": "permissive", "max_line_length": 48, "num_lines": 9, "path": "/Makefile", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "all:\n\tmkdir MO445-descriptors/examples/mpeg7_pgm\n\tmkdir MO445-descriptors/examples/mpeg7_features\n\tpython3 convert_mpeg_pgm.py\n\tcd MO445-descriptors/examples && \\\n\tpython3 file_name.py && \\\n\tmake && \\\n\t./test\n\tpython3 precision_recall.py\n" }, { "alpha_fraction": 0.69065260887146, "alphanum_fraction": 0.7064155340194702, "avg_line_length": 28.78403663635254, "blob_id": "116a173bfe865c65050d0f9ed4c1129c3fb58414", "content_id": "0f59cd50db6ffd02fd5c009e70111a21c17e019b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 12688, "license_type": "permissive", "max_line_length": 111, "num_lines": 426, "path": "/MO445-descriptors/include/MO445.h", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "#ifndef _MO445_H_\n#define _MO445_H_\n\n#define LOW 0\n#define HIGH 1\n#ifndef FALSE\n#define FALSE 0\n#endif\n#ifndef TRUE\n#define TRUE 1\n#endif\n\n#ifndef __cplusplus\n#ifndef _WIN32\n#ifndef __cplusplus\ntypedef enum boolean {false,true} bool;\n#endif\n#else\ntypedef unsigned short ushort;\n#endif\n#endif\n\n#ifndef MAX\n#define MAX(x,y) (((x) > (y))?(x):(y))\n#endif\n\n#ifndef MIN\n#define MIN(x,y) (((x) < (y))?(x):(y))\n#endif\n\n#define SetTieBreak(a,b) a->C.tiebreak=b \n\n#define MSG1 \"Cannot allocate memory space\"\n#define MSG2 \"Cannot open file\"\n#define SIZE 64\n#define PI 3.1415926536\n#define ROUND(x) ((x < 0)?(int)(x-0.5):(int)(x+0.5))\n#define NIL 
-1\n#define WHITE 0 \n#define GRAY 1\n#define BLACK 2\n#define INCREASING 1\n#define DECREASING 0\n#define HISTOGRAMSIZE 180\n#define LIFOBREAK 1\n#define FIFOBREAK 0\n#define INTERIOR 0\n#define EXTERIOR 1\n#define HEAP_DAD(i) ((i - 1) / 2)\n#define HEAP_LEFTSON(i) (2 * i + 1)\n#define HEAP_RIGHTSON(i) (2 * i + 2)\n#define BOTH 2\n\n#include <stdio.h>\n#include <malloc.h>\n#include <stdlib.h>\n#include <string.h>\n#include <math.h>\n#include <limits.h>\n\n/* Common operations functions ************************/\nint NCFgets(char *s, int m, FILE *f); \nint *AllocIntArray(int n);\ndouble *AllocDoubleArray(int n);\nfloat *AllocFloatArray(int n); \nchar *AllocCharArray(int n); /* It allocates 1D array of n characters */\n\n/* Errors functions *********************************/\nvoid Error(char *msg,char *func); \n\n/* Data structures **********************************/\n/* BIC */\ntypedef struct {\n int color;\n int frequency;\n}Property;\n\ntypedef struct {\n unsigned long colorH[SIZE];\n unsigned long lowH[SIZE];\n unsigned long highH[SIZE];\n}VisualFeature;\n\ntypedef struct {\n unsigned char colorH[SIZE];\n unsigned char lowH[SIZE];\n unsigned char highH[SIZE];\n}CompressedVisualFeature;\n\n/* pgm images */\ntypedef struct _image {\n int *val;\n int ncols,nrows;\n int *tbrow;\n} Image;\n\n/* ppm images */\ntypedef struct cimage {\n Image *C[3];\n} CImage;\n\ntypedef struct _FeatureVector1D {\n double *X;\n int n; \n} FeatureVector1D;\n\ntypedef struct _FeatureVector2D {\n double *X;\n double *Y;\n int n; \n} FeatureVector2D;\n\ntypedef struct _curve {\n double *X;\n double *Y;\n int n; \n} Curve;\n\ntypedef struct _curve3d { /* 3D Curve */\n double *X;\n double *Y;\n double *Z;\n int n; \n} Curve3D;\n\ntypedef struct _adjrel {\n int *dx;\n int *dy;\n int n;\n} AdjRel;\n\ntypedef struct _pixel {\n int x,y;\n} Pixel;\n\ntypedef struct{\n int length; /* length of the boundary */\n int *X; /* X values of each boundary point from 0 to length-1 */\n int *Y; /* Y values 
of each boundary point from 0 to length-1 */\n} boundary_type;\n\ntypedef struct{\n int length; /* length of the boundary */\n int *mean; /* mean value BAS function */\n int *second; \n int *third;\n} representation_type;\n\ntypedef struct _DImage{\n double *val;\n int ncols,nrows;\n int *tbrow;\n} DImage;\n\ntypedef struct _tensorscale{\n DImage *anisotropy;\n DImage *orientation;\n DImage *thickness;\n\n int m_pairs;\n}TensorScale;\n\ntypedef struct _vector{\n float x;\n float y;\n float z;\n} Vector, Point, Vertex;\n\ntypedef struct _node { \n int next; /* next node */\n int prev; /* prev node */\n char color; /* WHITE=0, GRAY=1, BLACK=2 */\n} Node;\n\ntypedef struct _doublylinkedlists {\n Node *elem; /* all possible doubly-linked lists of the circular queue */\n int nelems; /* total number of elements */\n} DoublyLinkedLists; \n\ntypedef struct _circularqueue { \n int *first; /* list of the first elements of each doubly-linked list */\n int *last; /* list of the last elements of each doubly-linked list */\n int nbuckets; /* number of buckets in the circular queue */\n int current; /* current bucket */\n char tiebreak; /* 1 is LIFO, 0 is FIFO (default) */\n} CircularQueue;\n\ntypedef struct _queue { /* Priority queue by Dial implemented as\n proposed by A. Falcao */\n CircularQueue C;\n DoublyLinkedLists L;\n} Queue;\n\ntypedef struct _polynom { /* Polynomial */\n double *coef; /* a0*x^0 + a1*x^1 + ... 
+ an*x^n */ \n int n; /* degree n */\n} Polynom;\n\ntypedef struct _set {\n int elem;\n struct _set *next;\n} Set;\n\ntypedef struct _annimg {\n Image *img;\n Image *grad;\n Image *cost;\n Image *label;\n Image *pred;\n Image *root;\n Set *seed;\n} AnnImg;\n\ntypedef struct _heap {\n int *cost;\n char *color;\n int *pixel;\n int *pos;\n int last;\n int n;\n} Heap;\n\ntypedef struct _adjpxl {\n int *dp;\n int n;\n} AdjPxl;\n\n/*Working with images *****************************/\n\n/* pgm images */\nImage *CreateImage(int ncols,int nrows);\nvoid DestroyImage(Image **img);\nImage *ReadImage(char *filename);\nImage *MBB(Image *img);\nImage *ROI(Image *img, int xl, int yl, int xr, int yr);\nImage *AddFrame(Image *img, int sz, int value);\nvoid SetImage(Image *img, int value);\nvoid WriteImage(Image *img,char *filename);\n\n/* ppm images */\nCImage *CreateCImage(int ncols, int nrows);\nvoid DestroyCImage(CImage **cimg);\nCImage *ReadCImage(char *filename);\n\n/* auxiliary functions ****************************/\nCurve *CreateCurve(int n);\nvoid DestroyCurve(Curve **curve);\nFeatureVector1D *CurveTo1DFeatureVector(Curve *curve);\nFeatureVector1D *CreateFeatureVector1D(int n);\nvoid DestroyFeatureVector1D(FeatureVector1D **desc);\nvoid WriteFeatureVector1D(FeatureVector1D *desc,char *filename);\nAdjRel *CreateAdjRel(int n);\nvoid DestroyAdjRel(AdjRel **A);\nAdjRel *LeftSide(AdjRel *A);\nAdjRel *RightSide(AdjRel *A);\nAdjRel *Circular(float r);\nbool ValidPixel(Image *img, int x, int y);\nbool ValidContPoint(Image *bin, AdjRel *L, AdjRel *R, int p);\nImage *LabelContPixel(Image *bin);\nCurve3D *CreateCurve3D(int n);\nvoid DestroyCurve3D(Curve3D **curve);\nvoid SortCurve3D(Curve3D *curve, int left, int right, char order);\nint PartCurve3D (Curve3D *curve, int left, int right, char order);\nint FFT(int dir, long nn, double *x, double *y);\nImage *LabelContour(Image *bin);\nvoid Warning(char *msg,char *func); \nImage *Scale(Image *img, float Sx, float Sy) ;\nDImage 
*CreateDImage(int ncols, int nrows);\nvoid DestroyDImage(DImage **dimg);\nvoid DestroyImage(Image **img);\nint MaximumValue(Image *img);\nQueue *CreateQueue(int nbuckets, int nelems);\nvoid DestroyQueue(Queue **Q);\nint EmptyQueue(Queue *Q);\nvoid InsertQueue(Queue *Q, int bucket, int elem);\nint RemoveQueue(Queue *Q);\nvoid UpdateQueue(Queue *Q, int elem, int from, int to);\nvoid ResetQueue(Queue *Q);\nvoid RemoveQueueElem(Queue *Q, int elem, int bucket);\nCurve *SamplePolynom(Polynom *P, double from, double to, int nbins);\nPolynom *CreatePolynom(int degree);\nvoid DestroyPolynom(Polynom **P);\nPolynom *DerivPolynom(Polynom *P);\nPolynom *Regression(Curve *curve, int degree);\nPolynom *MSFractal(Image *bin,int maxdist,int degree,double lower,double higher,int reg,double from,double to);\nAnnImg *Annotate(Image *img, Image *cost, Image *label);\nAdjRel *ComplAdj(AdjRel *A1, AdjRel *A2);\nvoid InsertSet(Set **S, int elem);\nint RemoveSet(Set **S);\nint Seed(Image *pred, int p);\nImage *CompPaths(Image *pred);\nImage *Perimeter(Image *bin);\nCurve3D *Saliences(Image *bin, int maxdist);\nImage *MSSkel(Image *bin, char side);\nvoid DestroyAdjPxl(AdjPxl **N);\nCurve3D *CompSaliences(AnnImg *aimg, int maxcost);\nImage *RemFrame(Image *fimg, int sz);\nAdjPxl *AdjPixels(Image *img, AdjRel *A);\nImage *Abs(Image *img);\nCurve3D *RemSaliencesByAngle(Curve3D *curve,int radius, int angle);\nImage *LabelBinComp(Image *bin, AdjRel *A);\nCurve3D *SkelSaliences(Image *skel, int maxdist, int angle) ;\nImage *Skeleton(Image *msskel, float perc);\nImage *CompMSSkel(AnnImg *aimg);\nvoid iftDilation(AnnImg *aimg, AdjRel *A); /* by Dial */\nvoid InvertXY(Curve *curve);\nCurve *CopyCurve(Curve *curve);\nvoid SortCurve(Curve *curve, int left, int right, char order);\nint PartCurve (Curve *curve, int left, int right, char order);\nCurve *Histogram(Image *img);\nvoid DestroySet(Set **S);\nvoid DeAnnotate(AnnImg **aimg);\nint FrameSize(AdjRel *A);\nvoid Change(int *a, int *b);\nHeap 
*CreateHeap(int n, int *cost);\nvoid DestroyHeap(Heap **H);\nbool InsertHeap(Heap *H, int pixel);\nbool RemoveHeap(Heap *H, int *pixel);\nvoid GoUpHeap(Heap *H, int i);\nbool IsEmptyHeap(Heap *H);\nbool HeapIsEmpty(Heap *H);\nvoid GoDownHeap(Heap *H, int i);\nbool IsFullHeap(Heap *H);\nvoid GoUpHeap(Heap *H, int i);\nFeatureVector2D *CurveTo2DFeatureVector(Curve *curve);\n/* Descriptor functions ***************************/\n\n/* BIC */\n/* auxiliary functions */\nCurve *BIC(CImage *img);\nvoid Write_visual_features(char *filename,char *dbname, CompressedVisualFeature *cvf);\nCompressedVisualFeature *Extract_visual_features(CImage *img);\ndouble gray_level_BIC(Image *img1, Image *img2);\n\n/* Fourier Descriptor */\n/* auxiliary functions */\ndouble Cabs(double x, double y);\nCurve *Image2Curve(Image *img);\nCurve *FourierDescriptor(Image *img);\n\n/* Moments Invariant */\n/* auxiliary functions */\ndouble MomentPQ(int p, int q, Image *img, int max);\nCurve *MomentInv(Image *img);\nCurve *MomentInvariant(Image *img); // contorno e objeto inteiro\n\n/* BAS */\n/* auxiliary functions */\nCurve *BAS(Image *in,int rsp,int nsamples);\n\n/* Tensor Scale */\n/* auxiliary functions */\nTensorScale *CreateBinaryTensorScale(Image *bin, int m_pairs);\nvoid DestroyTensorScale(TensorScale **ts);\nfloat *TSOrientationHistogram(TensorScale *ts);\nImage *TSEDistTrans(Image *bin);\n\n/* MultiScale Fractal Dimension*/\n/* auxiliary functions */\nCurve *PolynomToFractalCurve(Polynom *P, double lower, double higher, int nbins);\nCurve *ContourMSFractal(Image *in);\n\n/* Contour Saliences */\n/* auxiliary functions */\nCurve3D *SkelCont(Image *bin, int maxdist, int threshold, int angle, char side);\nCurve3D *iftContourSaliences(Image *bin,int threshold_in,int threshold_out,int angle_in,int angle_out);\nCurve *ContourSaliences(Image *in);\nvoid DescInvertXY(FeatureVector2D *desc);\nFeatureVector2D *CreateFeatureVector2D(int n);\ndouble ContSalieDistance(FeatureVector2D *D1, 
FeatureVector2D *D2);\nFeatureVector2D *CircularRotation(FeatureVector2D *descriptor, double delta);\nvoid DestroyFeatureVector2D(FeatureVector2D **desc);\nvoid SortFeatureVector2D(FeatureVector2D *desc, int left, int right, char order);\nint PartFeatureVector2D (FeatureVector2D *desc, int left, int right, char order);\nvoid WriteFeatureVector2D(FeatureVector2D *desc,char *filename);\nFeatureVector2D *CopyFeatureVector2D(FeatureVector2D *desc);\nvoid DestroyFeatureVector2D(FeatureVector2D **desc);\ndouble Matching(FeatureVector2D *descriptor1, FeatureVector2D *descriptor2, int order);\n\n/* Segment Saliences */\n/* auxiliary functions */\nCurve *SS_ExtractionAlgorithm_(Image *in, int maxdist, int nsamples, int side);\ndouble SS_OCSMatching(FeatureVector1D *fv_1, FeatureVector1D *fv_2);\ndouble SS_OCS(FeatureVector1D *fv1, FeatureVector1D *fv2);\ndouble SS_getMin(double Dist1, double Dist2, double Dist3);\n\n/* call functions *********************************/\n/* BIC */\nFeatureVector1D *BIC_ExtractionAlgorithm(CImage *in); /*BIC extractor*/\ndouble BIC_DistanceAlgorithm(FeatureVector1D *fv1, FeatureVector1D *fv2); /*BIC similarity*/\n\n/* Fourier Descriptor */\nFeatureVector1D *FourierDescriptor_ExtractionAlgorithm(Image *in);/* in is a binary image*/\ndouble Fourier_DistanceAlgorithm(FeatureVector1D *fv1, FeatureVector1D *fv2); /*fourier similarity*/\n\n/* Moments Invariant */\nFeatureVector1D *MomentInvariant_ExtractionAlgorithm(Image *in);/*in is a binary image*/\ndouble MomentInvariant_DistanceAlgorithm(FeatureVector1D *fv1, FeatureVector1D *fv2);\n\n/* BAS */\nFeatureVector1D *BAS_ExtractionAlgorithm(Image *in,int rsp,int nsamples);\ndouble BAS_DistanceAlgorithm(FeatureVector1D *c1, FeatureVector1D *c2);\n\n/* Tensor Scale */\nFeatureVector1D *TensorScale_ExtractionAlgorithm(Image *in);\ndouble TensorScale_DistanceAlgorithm(FeatureVector1D *c1, FeatureVector1D *c2);\n\n/* Multiscale Fractal Dimension */\nFeatureVector1D *MS_ExtractionAlgorithm(Image 
*img);\ndouble MS_DistanceAlgorithm(FeatureVector1D *fv1, FeatureVector1D *fv2); \n\n/* Contour Saliences */\nFeatureVector2D *CS_ExtractionAlgorithm(Image *img);\ndouble CS_DistanceAlgorithm(FeatureVector2D *descriptor1, FeatureVector2D *descriptor2);\n\n/* Segment Saliences */\nFeatureVector1D *SS_ExtractionAlgorithm(Image *img);\ndouble SS_DistanceAlgorithm(FeatureVector1D *fv1d1, FeatureVector1D *fv1d2);\n\n/* Metrics to measure the similarity between feature vectors*/\ndouble EuclideanDistance(FeatureVector1D *v1, FeatureVector1D *v2);\ndouble L1_Distance(FeatureVector1D *v1, FeatureVector1D *v2);\nfloat TSHistogramMatch(FeatureVector1D *fv1, FeatureVector1D *fv2, int *offset);\ndouble dLog(FeatureVector1D *fv1, FeatureVector1D *fv2);\n\n#endif\n" }, { "alpha_fraction": 0.7578834891319275, "alphanum_fraction": 0.770176351070404, "avg_line_length": 38.787235260009766, "blob_id": "0ab1611960a6bc72f0c2c01f46fd8663ea559b1f", "content_id": "395b4797c9a810e4972e450f822aedc154f4bf8a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1871, "license_type": "permissive", "max_line_length": 88, "num_lines": 47, "path": "/MO445-descriptors/readme.txt", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "The purpose of this package is to provide the source code of some\nimage descriptors for the image analysis course (MO445). Each\ndescriptor is represented by a feature vector and a similarity\n(distance) function, and its corresponding paper can be found in the\nsubdirectory docs.\n\nFollow the steps:\n\n1 - Extract the files in MO445.tar.gz.\n\n2 - Go to the folder MO445 and execute the Makefile\n\n3 - Go to the folder examples (MO445/examples), which contains one\nexample file \"test.c\" and two images to execute the program.\n\n4 - Execute the Makefile. 
It will generate a executable file \"test\"\n\n5 - Run ./test\n\n6 - Some files corresponding to the descriptors will be generated, such as:\n\n- bas_<figureName.txt> -> Descriptor Beam Angle Statistics (BAS) (SHAPE)\n- bic_<figureName.txt>.txt -> Border/Interior Pixel Classification (BIC) (COLOR/TEXTURE)\n- moments_<figureName.txt>.txt -> Moment Invariants (MI) (SHAPE)\n- fourier_<figureName.txt>.txt -> Fourier Descriptor (SHAPE)\n- tensorscale_<figureName.txt>.txt -> Tensor Scale Descriptor (SHAPE)\n- multiscales_<figureName.txt>.txt -> Multiscale Fractal Dimension (SHAPE)\n- contoursaliences_<figureName.txt>.txt -> Contour Saliences (SHAPE)\n- segmentsaliences_<figureName.txt>.txt -> Segment Saliences (SHAPE)\n\nAll descriptors have two basic functions: extraction and\nsimilarity (distance). Example for BAS:\n\n- FeatureVector1D *BAS_ExtractionAlgorithm(Image *in,int rsp,int nsamples);\n- double BAS_DistanceAlgorithm(FeatureVector1D *c1, FeatureVector1D *c2);\n\nThe extraction algorithm extracts the feature vector for an image and\nthe similarity function returns the distance between two images. 
Fell\nfree to modify the feature vector size.\n\nThe figures and the results obtained can be found in ./figs and\n./results directories, respectively.\n\nIf you have questions, please contact Joao Paulo Papa\n([email protected]) or Alexandre Falcao ([email protected])\n\nEnjoy!\n\n" }, { "alpha_fraction": 0.7488822937011719, "alphanum_fraction": 0.7809240221977234, "avg_line_length": 30.20930290222168, "blob_id": "338c081ca6007fced7188e51229c067e05c790c7", "content_id": "18c50f51f4c9b90763e183ba2015fea74f1463ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1342, "license_type": "permissive", "max_line_length": 226, "num_lines": 43, "path": "/README.md", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "# Precision vs Recall\nThis repo implements the assignment 7 of [MO805](http://www.ic.unicamp.br/~rtorres/mo805A_19s1/07-assignment.pdf) at UNICAMP.\n\n![](precision_recall.png?raw=true)\n\n# Implemented/Modified scripts\n\n* `convert_mpeg_pgm.py` converts the `.gif` images of `mpeg7/`dataset to `.pgm` and stores them inside `MO445-descriptors/examples/mpeg7_pgm`\n\n* `MO445-descriptors/examples/test.c` extracts Multiscale Fractal Dimension and Segment Saliences features and stores them inside `MO445-descriptors/examples/mpeg7_features`\n\n* `MO445-descriptors/examples/file_name.py` is a small scripts that creates files with the paths and names of `MO445-descriptors/examples/mpeg7_pgm`.\n\n* `precision_recall.py` computes the precision x recall curve and plots it in `precision_recall.png`\n\n# Replicate Results\n\nClone the repository\n\n```\ngit clone https://github.com/RQuispeC/mo805-assignment7.git\n```\n\nThen run all the scripts using the available Makefile\n\n```\ncd mo805-assignment7\nmake\n```\n\n# Prerequisites\n\nThe code was tested under a linux distribution with :\n\n* GCC\n* Python3\n* Numpy\n* MatplotLib\n* PIL\n\n# Notes\n\nI used euclidean distance to 
compare feacture vectors for both Multiscale fractal and Segment salience descriptors. MO455 code offers a different distance function for Segment salience but euclidean may get better performance.\n" }, { "alpha_fraction": 0.5056179761886597, "alphanum_fraction": 0.6029962301254272, "avg_line_length": 18.071428298950195, "blob_id": "d9662fa559a8c53b9f411caafca67e27c4c94b3b", "content_id": "1e513a0fdab141c0f9bc14fa409f07345a522af2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 267, "license_type": "permissive", "max_line_length": 61, "num_lines": 14, "path": "/MO445-descriptors/examples/Makefile", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "CFLAGS = -Wall -O3\nMO445LIB = -L../lib\nMO445FLAGS = -I../include\nCC=gcc\nFLAGS= -O3 -Wall\n\nall: libMO445 test\n\nlibMO445:\n\t$(MAKE) -C ../\ntest: libMO445 \n\t$(CC) $(MO445FLAGS) test.c $(MO445LIB) -o test -lMO445 -lm\nclean:\n\trm -f *~ test.o test ../lib/*.a ../obj/*.o\n" }, { "alpha_fraction": 0.605215311050415, "alphanum_fraction": 0.6264402866363525, "avg_line_length": 19.873416900634766, "blob_id": "543bab6b6825af966db55c15acc121cf76e72131", "content_id": "3db79fe9c47bedbb86032481856591b86d25966c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1649, "license_type": "permissive", "max_line_length": 52, "num_lines": 79, "path": "/MO445-descriptors/examples/test.c", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "#include \"MO445.h\"\n#include <stdio.h>\n#include <string.h>\n\nconst int LIMIT = 1400;\n\nchar* paths[1400];\nchar* names[1400];\nint max_len = 50;\nvoid chomp(char *s){\n\ts[strcspn(s, \"\\n\")] = '\\0';\n}\n\nvoid read_dataset_data(){\n\tFILE *fp;\n\tchar *line = NULL;\n\tsize_t len = 255;\n\tline = malloc(sizeof(char) * max_len);\n\n\tfp = fopen(\"paths.txt\", \"r\");\n\n\tint ind = 0;\n\twhile ((fgets(line, len, 
fp)) != NULL)\n\t{\n\t\tpaths[ind] = malloc(sizeof(char) * max_len);\n\t\tchomp(line);\n\t\tstrcpy(paths[ind], line);\n\t\t//printf(\"%s\\n\", paths[ind]);\n\t\tind += 1;\n\t}\n\n\tfp = fopen(\"names.txt\", \"r\");\n\n\tind = 0;\n\twhile ((fgets(line, len, fp)) != NULL)\n\t{\n\t\tnames[ind] = malloc(sizeof(char) * max_len);\n\t\tchomp(line);\n\t\tstrcpy(names[ind], line);\n\t\t//printf(\"%s\\n\", names[ind]);\n\t\tind += 1;\n\t}\n\tfree(line);\n}\n\nint main(int argc,char **argv){\n\n\tread_dataset_data();\n\tprintf(\"Loaded data for files\\n\");\n\n\tfor (int i = 0; i < LIMIT; i++){\n\t\tprintf(\"Extracting features of %s\\n\", names[i]);\n\n\t\tImage *img = NULL;\n\t\tFeatureVector1D *fvMS = NULL, *fvSS = NULL;\n\t\tchar *outfile_name = NULL;\n\t\toutfile_name = malloc(sizeof(char) * 3 * max_len);\n\n\t\timg = ReadImage(paths[i]);\n\t\tfvMS = MS_ExtractionAlgorithm(img);\n\t\tstrcpy(outfile_name,\"mpeg7_features/\");\n\t\tstrcat(outfile_name, names[i]);\n\t\tstrcat(outfile_name, \"_MS.txt\");\n\t\tWriteFeatureVector1D(fvMS, outfile_name);\n\n\t\tfvSS = SS_ExtractionAlgorithm(img);\n\t\tstrcpy(outfile_name, \"mpeg7_features/\");\n\t\tstrcat(outfile_name, names[i]);\n\t\tstrcat(outfile_name, \"_SS.txt\");\n\t\tWriteFeatureVector1D(fvSS, outfile_name);\n\n\t\tDestroyFeatureVector1D(&fvSS);\n\t\tDestroyFeatureVector1D(&fvMS);\n\t\tDestroyImage(&img);\n\t}\n\tfprintf(stderr,\"\\n\");\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6238044500350952, "alphanum_fraction": 0.6376195549964905, "avg_line_length": 23.102563858032227, "blob_id": "8f555ac62b0b9bf7aa6ea5537272da24455c6189", "content_id": "64a10c4fb9cc22fc013002fbe075d6ad892161ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 941, "license_type": "permissive", "max_line_length": 76, "num_lines": 39, "path": "/convert_mpeg_pgm.py", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "from PIL import Image\nimport numpy as 
np\nimport os\nimport os.path as osp\n\ndef to_string(x):\n\treturn [str(i) for i in x]\n\ndef check_binary(x):\n\tfor pixel in x:\n\t\tif pixel != \"0\" and pixel != \"1\":\n\t\t\tprint(\"ERROR\")\n\t\t\treturn\n\ndef convert_numpy_str(x):\n\tstr_x = to_string(x)\n\tcheck_binary(str_x)\n\tans = \" \".join(str_x)\n\treturn ans\n\ndef convert_image_pgm(img, file_path):\n\timg = np.array(img)\n\timg = img // np.max(img)\n\tfile = open(file_path, \"w\")\n\tfile.write(\"P2\\n\")\n\tfile.write(\"{} {}\\n\".format(img.shape[1], img.shape[0]))\n\tfile.write(\"1\\n\")\n\tfor l in img:\n\t\tfile.write(convert_numpy_str(l)+\" \\n\")\n\tfile.close()\n\nif __name__ == '__main__':\n\tfile_names = os.listdir(\"mpeg7/\")\n\tfor file_name in file_names:\n\t\timg = Image.open(osp.join(\"mpeg7/\", file_name))\n\t\tprint(\"Converting \", file_name)\n\t\tout_file_name = file_name[:-3] + \"pgm\"\n\t\tout_dir = osp.join(\"MO445-descriptors/examples/mpeg7_pgm/\", out_file_name)\n\t\tconvert_image_pgm(img, out_dir)\n\n" }, { "alpha_fraction": 0.4759206771850586, "alphanum_fraction": 0.5722379684448242, "avg_line_length": 15.571428298950195, "blob_id": "283275dbcac9add0090c21817039915bdf658146", "content_id": "f809368cd2a2213890e1b388059a1db11ddd3756", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 353, "license_type": "permissive", "max_line_length": 46, "num_lines": 21, "path": "/MO445-descriptors/Makefile", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "LIB=./lib\nINCLUDE=./include\nSRC=./src\nOBJ=./obj\nFLAGS= -O3 -Wall\n\nlibMO445: $(LIB)/libMO445.a\n\techo \"libMO445.a built...\"\n\n$(LIB)/libMO445.a: \\\n$(OBJ)/MO445.o \\\n\n\tar csr $(LIB)/libMO445.a \\\n$(OBJ)/MO445.o \\\n\n$(OBJ)/MO445.o: $(SRC)/MO445.c\n\tgcc $(FLAGS) -c $(SRC)/MO445.c -I$(INCLUDE) \\\n\t-o $(OBJ)/MO445.o\n\nclean: \n\trm $(LIB)/lib*.a; rm $(OBJ)/*.o\n\n\n\n\n\n" }, { "alpha_fraction": 0.5110331773757935, 
"alphanum_fraction": 0.5371482968330383, "avg_line_length": 20.825843811035156, "blob_id": "af40a8a3fccf703da8d6f73a316e4bc3f922f025", "content_id": "3afcb9e1b54d1ee03943ed29a89c8ec9c480f514", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 101514, "license_type": "permissive", "max_line_length": 106, "num_lines": 4651, "path": "/MO445-descriptors/src/MO445.c", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "#include \"MO445.h\"\n\n/* Common operations functions ************************/\nint NCFgets(char *s, int m, FILE *f) {\n while(fgets(s,m,f)!=NULL)\n if (s[0]!='#') return 1;\n return 0;\n}\n\nint *AllocIntArray(int n)\n{\n int *v=NULL;\n v = (int *) calloc(n,sizeof(int));\n if (v == NULL)\n Error(MSG1,\"AllocIntArray\");\n return(v);\n}\n\nfloat *AllocFloatArray(int n)\n{\n float *v=NULL;\n v = (float *) calloc(n,sizeof(float));\n if (v == NULL)\n Error(MSG1,\"AllocFloatArray\");\n return(v);\n}\n\ndouble *AllocDoubleArray(int n)\n{\n double *v=NULL;\n v = (double *) calloc(n,sizeof(double));\n if (v == NULL)\n Error(MSG1,\"AllocDoubleArray\");\n return(v);\n}\n\nchar *AllocCharArray(int n)\n{\n char *v=NULL;\n v = (char *) calloc(n,sizeof(char));\n if (v == NULL)\n Error(MSG1,\"AllocCharArray\");\n return(v);\n}\n\n/* Errors functions *********************************/\nvoid Error(char *msg,char *func){ \n fprintf(stderr,\"Error:%s in %s\\n\",msg,func);\n exit(-1);\n}\n\n/*Working with images *****************************/\n/* pgm images */\nImage *CreateImage(int ncols, int nrows)\n{\n Image *img=NULL;\n int i;\n\n img = (Image *) calloc(1,sizeof(Image));\n if (img == NULL){\n Error(MSG1,\"CreateImage\");\n }\n\n img->val = AllocIntArray(nrows*ncols);\n img->tbrow = AllocIntArray(nrows);\n\n img->tbrow[0]=0;\n for (i=1; i < nrows; i++)\n img->tbrow[i]=img->tbrow[i-1]+ncols;\n img->ncols = ncols;\n img->nrows = nrows;\n \n return(img);\n}\n\nvoid DestroyImage(Image 
**img){\n Image *aux;\n\n aux = *img;\n if(aux != NULL){\n if (aux->val != NULL) free(aux->val); \n if (aux->tbrow != NULL) free(aux->tbrow);\n free(aux); \n *img = NULL;\n }\n}\n\nImage *ReadImage(char *filename)\n{\n FILE *fp=NULL;\n unsigned char *value=NULL;\n char type[10];\n int i,ncols,nrows,n;\n Image *img=NULL;\n char z[256];\n\n fp = fopen(filename,\"rb\");\n if (fp == NULL){\n fprintf(stderr,\"Cannot open %s\\n\",filename);\n exit(-1);\n }\n fscanf(fp,\"%s\\n\",type);\n if((strcmp(type,\"P5\")==0)){\n NCFgets(z,255,fp);\n sscanf(z,\"%d %d\\n\",&ncols,&nrows);\n n = ncols*nrows;\n NCFgets(z,255,fp);\n sscanf(z,\"%d\\n\",&i);\n value = (unsigned char *)calloc(n,sizeof(unsigned char));\n if (value != NULL){\n fread(value,sizeof(unsigned char),n,fp);\n }else{\n fprintf(stderr,\"Insufficient memory in ReadImage\\n\");\n exit(-1);\n }\n fclose(fp);\n img = CreateImage(ncols,nrows);\n for (i=0; i < n; i++)\n img->val[i]=(int)value[i];\n free(value);\n }else{\n if((strcmp(type,\"P2\")==0)){\n NCFgets(z,255,fp);\n sscanf(z,\"%d %d\\n\",&ncols,&nrows);\n n = ncols*nrows;\n NCFgets(z,255,fp);\n sscanf(z,\"%d\\n\",&i);\n img = CreateImage(ncols,nrows);\n for (i=0; i < n; i++)\n\tfscanf(fp,\"%d\",&img->val[i]);\n fclose(fp);\n }else{\n fprintf(stderr,\"Input image must be P2 or P5\\n\");\n exit(-1);\n }\n }\n\n return(img);\n}\n\nImage *MBB(Image *img)\n{\n int x,y;\n Pixel left,right;\n Image *mbb=NULL;\n \n left.x = img->ncols-1;\n left.y = img->nrows-1;\n right.x = 0;\n right.y = 0;\n \n for (y=0; y < img->nrows; y++)\n for (x=0; x < img->ncols; x++){\n if (img->val[x+img->tbrow[y]] > 0){\n\tif (x < left.x)\n\t left.x = x;\n\tif (y < left.y)\n\t left.y = y;\n\tif (x > right.x)\n\t right.x = x;\n\tif (y > right.y)\n\t right.y = y;\t\n }\n }\n \n mbb = ROI(img,left.x,left.y,right.x,right.y);\n\n return(mbb);\t\n}\n\nImage *ROI(Image *img, int xl, int yl, int xr, int yr)\n{\n int x,y,p,i;\n Image *roi=NULL;\n \n if (ValidPixel(img,xl,yl)&&ValidPixel(img,xr,yr)&&\n 
(xl <= xr)&&(yl <= yr) )\n {\n roi = CreateImage(xr-xl+1,yr-yl+1);\n i=0;\n for (y=yl; y <= yr; y++)\n\tfor (x=xl; x <= xr; x++){\n\t p = x + img->tbrow[y];\n\t roi->val[i] = img->val[p];\n\t i++;\n\t}\n } \n \n return(roi);\t\n}\n\nImage *AddFrame(Image *img, int sz, int value)\n{\n Image *fimg;\n int y,*dst,*src,nbytes,offset;\n\n fimg = CreateImage(img->ncols+(2*sz),img->nrows+(2*sz));\n SetImage(fimg,value);\n nbytes = sizeof(int)*img->ncols;\n offset = sz+fimg->tbrow[sz];\n for (y=0,src=img->val,dst=fimg->val+offset; y < img->nrows;y++,src+=img->ncols,dst+=fimg->ncols){\n memcpy(dst,src,nbytes);\n }\n return(fimg);\n}\n\nvoid SetImage(Image *img, int value)\n{ \n int i,n;\n n = img->ncols*img->nrows;\n for (i=0; i < n; i++){\n img->val[i]=value;\n }\n}\n\nvoid WriteImage(Image *img,char *filename)\n{\n FILE *fp;\n int i, n, Imax;\n\n fp = fopen(filename,\"wb\");\n if (fp == NULL){\n fprintf(stderr,\"Cannot open %s\\n\",filename);\n exit(-1);\n }\n n = img->ncols*img->nrows;\n if ((Imax=MaximumValue(img))==INT_MAX){\n Warning(\"Image with infinity values\",\"WriteImage\");\n Imax = INT_MIN;\n for (i=0; i < n; i++) \n if ((img->val[i] > Imax)&&(img->val[i]!=INT_MAX))\n\tImax = img->val[i];\n fprintf(fp,\"P2\\n\");\n fprintf(fp,\"%d %d\\n\",img->ncols,img->nrows);\n fprintf(fp,\"%d\\n\",Imax+1);\n } else {\n fprintf(fp,\"P2\\n\");\n fprintf(fp,\"%d %d\\n\",img->ncols,img->nrows);\n if (Imax==0) Imax++;\n fprintf(fp,\"%d\\n\",Imax);\n }\n \n for (i=0; i < n; i++) {\n if (img->val[i]==INT_MAX)\n fprintf(fp,\"%d \",Imax+1);\n else\n fprintf(fp,\"%d \",img->val[i]);\n if (((i+1)%17) == 0)\n fprintf(fp,\"\\n\");\n }\n\n fclose(fp);\n}\n\n/* ppm images */\nCImage *CreateCImage(int ncols, int nrows)\n{\n CImage *cimg=NULL;\n int i;\n\n cimg = (CImage *) calloc(1, sizeof(CImage));\n for (i=0; i < 3; i++) \n cimg->C[i] = CreateImage(ncols,nrows);\n return(cimg);\n}\n\nvoid DestroyCImage(CImage **cimg)\n{\n CImage *tmp;\n int i;\n\n tmp = *cimg;\n if (tmp != NULL) {\n for 
(i=0; i < 3; i++) \n DestroyImage(&(tmp->C[i]));\n free(tmp);\n *cimg = NULL;\n }\n}\n\nCImage *ReadCImage(char *filename)\n{\n CImage *cimg=NULL;\n FILE *fp=NULL;\n char type[10];\n int i,ncols,nrows,n;\n char z[256];\n\n fp = fopen(filename,\"rb\");\n if (fp == NULL){\n fprintf(stderr,\"Cannot open %s\\n\",filename);\n exit(-1);\n }\n fscanf(fp,\"%s\\n\",type);\n if((strcmp(type,\"P6\")==0)){\n NCFgets(z,255,fp);\n sscanf(z,\"%d %d\\n\",&ncols,&nrows);\n n = ncols*nrows;\n NCFgets(z,255,fp);\n sscanf(z,\"%d\\n\",&i);\n cimg = CreateCImage(ncols,nrows);\n for (i=0; i < n; i++){\n cimg->C[0]->val[i] = fgetc(fp);\n cimg->C[1]->val[i] = fgetc(fp);\n cimg->C[2]->val[i] = fgetc(fp);\n }\n fclose(fp);\n }else{\n fprintf(stderr,\"Input image must be P6\\n\");\n exit(-1);\n }\n\n return(cimg);\n}\n\n/* auxiliary functions ****************************/\nCurve *CreateCurve(int n)\n{\n Curve *curve=NULL;\n\n curve = (Curve *) calloc(1,sizeof(Curve));\n if (curve != NULL) {\n curve->X = AllocDoubleArray(n);\n curve->Y = AllocDoubleArray(n);\n curve->n = n;\n } else {\n Error(MSG1,\"CreateCurve\");\n }\n return(curve);\n}\n\nvoid DestroyCurve(Curve **curve)\n{\n Curve *aux;\n\n aux = *curve;\n if (aux != NULL){\n if (aux->X != NULL) free(aux->X);\n if (aux->Y != NULL) free(aux->Y);\n free(aux);\n *curve = NULL;\n }\n}\n\nFeatureVector1D *CreateFeatureVector1D(int n)\n{\n FeatureVector1D *desc=NULL;\n \n desc = (FeatureVector1D *) calloc(1,sizeof(FeatureVector1D));\n if (desc != NULL) {\n desc->X = AllocDoubleArray(n);\n desc->n = n;\n } else {\n Error(MSG1,\"CreateFeatureVector\");\n }\n return(desc);\n}\n\nFeatureVector1D *CurveTo1DFeatureVector(Curve *curve){\n FeatureVector1D *fv;\n\n fv = CreateFeatureVector1D(curve->n);\n memcpy(fv->X,curve->Y,curve->n*sizeof(double));\n\n return fv;\n}\n\nFeatureVector2D *CurveTo2DFeatureVector(Curve *curve){\n FeatureVector2D *fv;\n\n fv = CreateFeatureVector2D(curve->n);\n memcpy(fv->X,curve->X,curve->n*sizeof(double));\n 
memcpy(fv->Y,curve->Y,curve->n*sizeof(double));\n\n return fv;\n}\n\nvoid DestroyFeatureVector1D(FeatureVector1D **desc)\n{\n FeatureVector1D *aux;\n \n aux = *desc;\n if (aux != NULL){\n if (aux->X != NULL) {\n free(aux->X);\n }\n free(aux);\n *desc = NULL;\n }\n}\n\nvoid WriteFeatureVector1D(FeatureVector1D *desc,char *filename)\n{\n FILE *fp;\n int i;\n \n fp = fopen(filename,\"w\");\n if (fp == NULL){\n fprintf(stderr,\"Cannot open %s\\n\",filename);\n exit(-1);\n }\n for (i=0; i < desc->n; i++)\n fprintf(fp,\"%f\\n\",desc->X[i]);\n \n fclose(fp);\n}\n\n\nAdjRel *CreateAdjRel(int n)\n{\n AdjRel *A=NULL;\n\n A = (AdjRel *) calloc(1,sizeof(AdjRel));\n if (A != NULL){\n A->dx = AllocIntArray(n);\n A->dy = AllocIntArray(n);\n A->n = n;\n } else {\n Error(MSG1,\"CreateAdjRel\");\n }\n\n return(A);\n}\n\nvoid DestroyAdjRel(AdjRel **A)\n{\n AdjRel *aux;\n\n aux = *A;\n if (aux != NULL){\n if (aux->dx != NULL) free(aux->dx);\n if (aux->dy != NULL) free(aux->dy);\n free(aux);\n *A = NULL;\n } \n}\n\nAdjRel *Circular(float r)\n{\n AdjRel *A=NULL;\n int i,j,k,n,dx,dy,r0,r2,d,i0=0;\n float *da,*dr,aux;\n\n n=0;\n\n r0 = (int)r;\n r2 = (int)(r*r);\n for(dy=-r0;dy<=r0;dy++)\n for(dx=-r0;dx<=r0;dx++)\n if(((dx*dx)+(dy*dy)) <= r2)\n\tn++;\n\t\n A = CreateAdjRel(n);\n i=0;\n for(dy=-r0;dy<=r0;dy++)\n for(dx=-r0;dx<=r0;dx++)\n if(((dx*dx)+(dy*dy)) <= r2){\n\tA->dx[i]=dx;\n\tA->dy[i]=dy;\n\tif ((dx==0)&&(dy==0))\n\t i0 = i;\n\ti++;\n }\n\n da = AllocFloatArray(A->n);\n dr = AllocFloatArray(A->n);\n for (i=0; i < A->n; i++) {\n dx = A->dx[i];\n dy = A->dy[i];\n dr[i] = (float)sqrt((dx*dx) + (dy*dy));\n if (i != i0){ \n da[i] = atan2(-dy,-dx)*180.0/PI;\n if (da[i] < 0.0)\n\tda[i] += 360.0;\n }\n }\n da[i0] = 0.0;\n dr[i0] = 0.0;\n\n aux = da[i0];\n da[i0] = da[0];\n da[0] = aux;\n aux = dr[i0];\n dr[i0] = dr[0];\n dr[0] = aux;\n d = A->dx[i0];\n A->dx[i0] = A->dx[0];\n A->dx[0] = d;\n d = A->dy[i0];\n A->dy[i0] = A->dy[0];\n A->dy[0] = d;\n\n for (i=1; i < A->n-1; i++){\n k = i;\n 
for (j=i+1; j < A->n; j++)\n if (da[j] < da[k]){\n\tk = j;\n }\n aux = da[i];\n da[i] = da[k];\n da[k] = aux;\n aux = dr[i];\n dr[i] = dr[k];\n dr[k] = aux;\n d = A->dx[i];\n A->dx[i] = A->dx[k];\n A->dx[k] = d;\n d = A->dy[i];\n A->dy[i] = A->dy[k];\n A->dy[k] = d;\n }\n\n for (i=1; i < A->n-1; i++){\n k = i;\n for (j=i+1; j < A->n; j++)\n if ((dr[j] < dr[k])&&(da[j]==da[k])){\n\tk = j;\n }\n aux = dr[i];\n dr[i] = dr[k];\n dr[k] = aux;\n d = A->dx[i];\n A->dx[i] = A->dx[k];\n A->dx[k] = d;\n d = A->dy[i];\n A->dy[i] = A->dy[k];\n A->dy[k] = d;\n }\n\n free(dr);\n free(da);\n\n return(A);\n}\n\nAdjRel *LeftSide(AdjRel *A)\n{\n AdjRel *L=NULL;\n int i;\n float d;\n\n L = CreateAdjRel(A->n);\n for (i=0; i < L->n; i++){\n d = sqrt(A->dx[i]*A->dx[i] + A->dy[i]*A->dy[i]);\n if (d != 0){\n L->dx[i] = ROUND(((float)A->dx[i]/2.0)+((float)A->dy[i]/d));\n L->dy[i] = ROUND(((float)A->dy[i]/2)-((float)A->dx[i]/d));\n }\n }\n \n return(L);\n}\n\n\nAdjRel *RightSide(AdjRel *A)\n{\n AdjRel *R=NULL;\n int i;\n float d;\n\n R = CreateAdjRel(A->n);\n for (i=0; i < R->n; i++){\n d = sqrt(A->dx[i]*A->dx[i] + A->dy[i]*A->dy[i]);\n if (d != 0){\n R->dx[i] = ROUND(((float)A->dx[i]/2.0)-((float)A->dy[i]/d));\n R->dy[i] = ROUND(((float)A->dx[i]/d)+((float)A->dy[i]/2.0));\n }\n }\n\n return(R);\n}\n\nbool ValidContPoint(Image *bin, AdjRel *L, AdjRel *R, int p)\n{\n int i,q,n,left,right;\n Pixel u,v,l,r;\n bool found=false;\n\n u.x = p%bin->ncols;\n u.y = p/bin->ncols;\n n = L->n;\n\n for (i=0; i < n; i++) {\n v.x = u.x + L->dx[i];\n v.y = u.y + L->dy[i];\n if (ValidPixel(bin,v.x,v.y)){\n q = v.x + bin->tbrow[v.y];\n if ((bin->val[q]==1)&&(p!=q)){\n\tl.x = u.x + L->dx[i]; \n\tl.y = u.y + L->dy[i];\n\tr.x = u.x + R->dx[i]; \n\tr.y = u.y + R->dy[i];\t\n\tif (ValidPixel(bin,l.x,l.y))\n\t left = l.x + bin->tbrow[l.y];\n\telse\n\t left = -1;\n\tif (ValidPixel(bin,r.x,r.y))\n\t right = r.x + bin->tbrow[r.y];\n\telse\n\t right = -1;\n\tif 
/* True iff (x,y) lies inside img's bounds. */
bool ValidPixel(Image *img, int x, int y)
{
  if ((x >= 0)&&(x < img->ncols)&&
      (y >= 0)&&(y < img->nrows))
    return(true);
  else
    return(false);
}

/* Label each contour pixel of the binary image with its (1-based) position
   along its contour.  Phase 1 marks boundary pixels (object pixels with a
   4-neighbor background or image-border neighbor) into bndr.  Phase 2
   follows each contour with a DFS over a 1.5-radius adjacency, constrained
   by the left/right side test (same predicate as ValidContPoint) so the walk
   stays on the boundary; the predecessor chain is then unwound to assign
   sequential position labels. */
Image *LabelContPixel(Image *bin)
{
  Image *bndr=NULL;
  Image *color=NULL,*pred=NULL,*label=NULL;
  int p=0,q,r,i,j,n,left=0,right=0,*LIFO,last,l;
  AdjRel *A,*L,*R;
  Pixel u,v,w;

  A = Circular(1.0);
  n = bin->ncols*bin->nrows;
  bndr = CreateImage(bin->ncols,bin->nrows);
  for (p=0; p < n; p++){
    if (bin->val[p]==1){
      u.x = p%bin->ncols;
      u.y = p/bin->ncols;
      for (i=1; i < A->n; i++){
        v.x = u.x + A->dx[i];
        v.y = u.y + A->dy[i];
        if (ValidPixel(bin,v.x,v.y)){
          q = v.x + bin->tbrow[v.y];
          if (bin->val[q]==0){
            bndr->val[p]=1;
            break;
          }
        } else {
          /* touching the image border also makes p a boundary pixel */
          bndr->val[p]=1;
          break;
        }
      }
    }
  }
  DestroyAdjRel(&A);

  A = Circular(1.5);
  L = LeftSide(A);
  R = RightSide(A);
  label = CreateImage(bndr->ncols,bndr->nrows);
  color = CreateImage(bndr->ncols,bndr->nrows);
  pred = CreateImage(bndr->ncols,bndr->nrows);
  n = bndr->ncols*bndr->nrows;
  LIFO = AllocIntArray(n);
  last = NIL;
  for (j=0; j < n; j++){
    if ((bndr->val[j]==1)
        &&(color->val[j]!=BLACK)
        &&ValidContPoint(bin,L,R,j)){
      last++;
      LIFO[last] = j;
      color->val[j] = GRAY;
      pred->val[j] = j;
      while(last != NIL){
        p = LIFO[last]; last--;
        color->val[p]=BLACK;
        u.x = p%bndr->ncols;
        u.y = p/bndr->ncols;
        for (i=1; i < A->n; i++){
          v.x = u.x + A->dx[i];
          v.y = u.y + A->dy[i];
          if (ValidPixel(bndr,v.x,v.y)){
            q = v.x + bndr->tbrow[v.y];
            /* reached the start again (not coming straight from it):
               the contour is closed, stop this DFS */
            if ((q==j)&&(pred->val[p]!=j)){
              last = NIL;
              break;
            }

            w.x = u.x + L->dx[i];
            w.y = u.y + L->dy[i];
            if (ValidPixel(bndr,w.x,w.y))
              left = w.x + bndr->tbrow[w.y];
            else
              left = -1;
            w.x = u.x + R->dx[i];
            w.y = u.y + R->dy[i];
            if (ValidPixel(bndr,w.x,w.y))
              right = w.x + bndr->tbrow[w.y];
            else
              right = -1;

            if ((bndr->val[q]==1)&&
                (color->val[q] != BLACK)&&
                (((left!=-1)&&(right!=-1)&&(bin->val[left]!=bin->val[right]))||
                 ((left==-1)&&(right!=-1)&&(bin->val[right]==1)) ||
                 ((right==-1)&&(left!=-1)&&(bin->val[left]==1)))){
              pred->val[q] = p;
              if (color->val[q] == WHITE){
                last++;
                LIFO[last] = q;
                color->val[q]=GRAY;
              }
            }
          }
        }
      }
      /* unwind predecessors: positions are numbered backwards from the
         last visited pixel toward the starting pixel */
      r = p;
      l = 1;
      while(pred->val[p]!=p){
        label->val[p] = l;
        p = pred->val[p];
        l++;
      }
      if (r != p) {
        label->val[p] = l;
      }
    }
  }

  DestroyAdjRel(&A);
  DestroyAdjRel(&L);
  DestroyAdjRel(&R);
  DestroyImage(&bndr);
  DestroyImage(&color);
  DestroyImage(&pred);
  free(LIFO);
  return(label);
}
/* Allocate a 3-D curve (parallel X/Y/Z arrays of length n).
   Calls Error() on allocation failure. */
Curve3D *CreateCurve3D(int n)
{
  Curve3D *curve=NULL;

  curve = (Curve3D *) calloc(1,sizeof(Curve3D));
  if (curve != NULL) {
    curve->X = AllocDoubleArray(n);
    curve->Y = AllocDoubleArray(n);
    curve->Z = AllocDoubleArray(n);
    curve->n = n;
  } else {
    Error(MSG1,"CreateCurve3D");
  }
  return(curve);
}

/* Free a 3-D curve and reset the caller's pointer; safe on NULL. */
void DestroyCurve3D(Curve3D **curve)
{
  Curve3D *aux;

  aux = *curve;
  if (aux != NULL){
    if (aux->X != NULL) free(aux->X);
    if (aux->Y != NULL) free(aux->Y);
    if (aux->Z != NULL) free(aux->Z);
    free(aux);
    *curve = NULL;
  }
}

/* Quicksort curve entries [left,right] by Z, keeping X/Y/Z in step.
   order is INCREASING or DECREASING. */
void SortCurve3D(Curve3D *curve, int left, int right, char order)
{
  int pivot;

  if (left < right) {
    pivot = PartCurve3D(curve,left,right,order);
    SortCurve3D(curve,left,pivot-1,order);
    SortCurve3D(curve,pivot+1,right,order);
  }
}

/* Quicksort partition for SortCurve3D: pivot is Z[left]; swaps whole
   (X,Y,Z) triples.  Returns the pivot's final index.
   NOTE(review): the inner "left++" scan checks the value before the bound,
   so it may read one element past "right" before stopping — original
   behavior preserved; verify callers always leave headroom. */
int PartCurve3D (Curve3D *curve, int left, int right, char order)
{
  double z;
  int i;
  double X,Y,Z;

  z = curve->Z[left];
  i = left;

  do {
    if (order == INCREASING){
      while ((curve->Z[left] <= z)&&(left <= right)) left++;
      while (curve->Z[right] > z) right--;
    } else { /* order = DECREASING */
      while ((curve->Z[left] >= z)&&(left <= right)) left++;
      while (curve->Z[right] < z) right--;
    }
    if (left < right){
      X = curve->X[left];
      Y = curve->Y[left];
      Z = curve->Z[left];
      curve->X[left] = curve->X[right];
      curve->Y[left] = curve->Y[right];
      curve->Z[left] = curve->Z[right];
      curve->X[right] = X;
      curve->Y[right] = Y;
      curve->Z[right] = Z;
      left++; right--;
    }
  } while (left <= right);

  left = i;

  /* place the pivot into its final slot */
  if (left != right){
    X = curve->X[left];
    Y = curve->Y[left];
    Z = curve->Z[left];
    curve->X[left] = curve->X[right];
    curve->Y[left] = curve->Y[right];
    curve->Z[left] = curve->Z[right];
    curve->X[right] = X;
    curve->Y[right] = Y;
    curve->Z[right] = Z;
  }

  return (right);
}
/* In-place radix-2 Cooley-Tukey FFT of the complex sequence (x[i], y[i]),
   i = 0..nn-1.  nn must be a power of two.  dir == 1 computes the forward
   transform; dir == -1 the inverse (scaled by 1/nn).  Always returns 0. */
int FFT(int dir, long nn, double *x, double *y)
{
  int m;
  long i,i1,j,k,i2,l,l1,l2;
  double c1,c2,tx,ty,t1,t2,u1,u2,z;

  /* number of butterfly stages: log2(nn); the epsilon guards against
     floating-point log() landing just below the integer */
  m = (int)(log(nn)/log(2)+.00001);

  /* bit-reversal permutation of the input */
  i2 = nn >> 1;
  j = 0;
  for (i=0;i<nn-1;i++) {
    if (i < j) {
      tx = x[i];
      ty = y[i];
      x[i] = x[j];
      y[i] = y[j];
      x[j] = tx;
      y[j] = ty;
    }
    k = i2;
    while (k <= j) {
      j -= k;
      k >>= 1;
    }
    j += k;
  }

  /* butterfly stages; twiddle factors generated by repeated half-angle */
  c1 = -1.0;
  c2 = 0.0;
  l2 = 1;
  for (l=0;l<m;l++) {
    l1 = l2;
    l2 <<= 1;
    u1 = 1.0;
    u2 = 0.0;
    for (j=0;j<l1;j++) {
      for (i=j;i<nn;i+=l2) {
        i1 = i + l1;
        t1 = u1 * x[i1] - u2 * y[i1];
        t2 = u1 * y[i1] + u2 * x[i1];
        x[i1] = x[i] - t1;
        y[i1] = y[i] - t2;
        x[i] += t1;
        y[i] += t2;
      }
      z = u1 * c1 - u2 * c2;
      u2 = u1 * c2 + u2 * c1;
      u1 = z;
    }
    c2 = sqrt((1.0 - c1) / 2.0);
    if (dir == 1)
      c2 = -c2;
    c1 = sqrt((1.0 + c1) / 2.0);
  }

  /* inverse transform carries the 1/nn normalization */
  if (dir == -1) {
    for (i=0;i<nn;i++) {
      x[i] /= (double)nn;
      y[i] /= (double)nn;
    }
  }

  return(0);
}

/* Label each contour of the binary image with a distinct id (1,2,...):
   all pixels of one contour share one label.  Same boundary-detection and
   constrained DFS as LabelContPixel, but the unwinding assigns the contour
   id l instead of per-pixel positions. */
Image *LabelContour(Image *bin)
{
  Image *bndr=NULL;
  Image *color=NULL,*pred=NULL,*label=NULL;
  int p=0,q,r,i,j,left=0,right=0,n,*LIFO,last,l=1;
  AdjRel *A,*L,*R;
  Pixel u,v,w;

  A = Circular(1.0);
  n = bin->ncols*bin->nrows;
  bndr = CreateImage(bin->ncols,bin->nrows);
  for (p=0; p < n; p++){
    if (bin->val[p]==1){

      u.x = p%bin->ncols;
      u.y = p/bin->ncols;
      for (i=1; i < A->n; i++){
        v.x = u.x + A->dx[i];
        v.y = u.y + A->dy[i];
        if (ValidPixel(bin,v.x,v.y)){
          q = v.x + bin->tbrow[v.y];
          if (bin->val[q]==0){
            bndr->val[p]=1;
            break;
          }
        } else {
          bndr->val[p]=1;
          break;
        }
      }
    }
  }
  DestroyAdjRel(&A);

  A = Circular(1.5);
  L = LeftSide(A);
  R = RightSide(A);
  label = CreateImage(bndr->ncols,bndr->nrows);
  color = CreateImage(bndr->ncols,bndr->nrows);
  pred = CreateImage(bndr->ncols,bndr->nrows);
  LIFO = AllocIntArray(n);
  last = NIL;
  for (j=0; j < n; j++){
    if ((bndr->val[j]==1)&&
        (color->val[j]!=BLACK)&&
        ValidContPoint(bin,L,R,j)){
      last++; LIFO[last] = j;
      color->val[j] = GRAY;
      pred->val[j] = j;
      while(last != NIL){
        p = LIFO[last]; last--;
        color->val[p]=BLACK;
        u.x = p%bndr->ncols;
        u.y = p/bndr->ncols;
        for (i=1; i < A->n; i++){
          v.x = u.x + A->dx[i];
          v.y = u.y + A->dy[i];
          if (ValidPixel(bndr,v.x,v.y)){
            q = v.x + bndr->tbrow[v.y];
            if ((q==j)&&(pred->val[p]!=j)){
              last = NIL;
              break;
            }
            w.x = u.x + L->dx[i];
            w.y = u.y + L->dy[i];
            if (ValidPixel(bndr,w.x,w.y))
              left = w.x + bndr->tbrow[w.y];
            else
              left = -1;
            w.x = u.x + R->dx[i];
            w.y = u.y + R->dy[i];
            if (ValidPixel(bndr,w.x,w.y))
              right = w.x + bndr->tbrow[w.y];
            else
              right = -1;

            if ((bndr->val[q]==1)&&
                (color->val[q] != BLACK)&&
                (((left!=-1)&&(right!=-1)&&(bin->val[left]!=bin->val[right]))||
                 ((left==-1)&&(right!=-1)&&(bin->val[right]==1)) ||
                 ((right==-1)&&(left!=-1)&&(bin->val[left]==1))) ) {
              pred->val[q] = p;
              if (color->val[q] == WHITE){
                last++; LIFO[last] = q;
                color->val[q]=GRAY;
              }
            }
          }
        }
      }
      /* paint the whole predecessor chain with this contour's id */
      r = p;
      while(pred->val[p]!=p){
        label->val[p] = l;
        p = pred->val[p];
      }
      if (r != p){
        label->val[p] = l;
        l++;           /* next contour gets the next id */
      }
    }
  }

  DestroyAdjRel(&A);
  DestroyAdjRel(&L);
  DestroyAdjRel(&R);
  DestroyImage(&bndr);
  DestroyImage(&color);
  DestroyImage(&pred);
  free(LIFO);
  return(label);
}

/* Print a warning message tagged with the reporting function's name. */
void Warning(char *msg,char *func){
  fprintf(stdout,"Warning:%s in %s\n",msg,func);
}

/* Scale img by factors (Sx,Sy) about its center using the inverse mapping
   and bilinear interpolation.  Zero factors are treated as 1.0.
   For each destination pixel the source coordinate is computed with the
   inverse scale matrix S; the four surrounding source pixels are blended
   with weights d1..d4, falling back to the nearest pixel at the border. */
Image *Scale(Image *img, float Sx, float Sy)
{
  float S[2][2],x,y,d1,d2,d3,d4,Ix1,Ix2,If;
  Image *scl;
  Pixel u,v,prev,next;

  if (Sx == 0.0) Sx = 1.0;
  if (Sy == 0.0) Sy = 1.0;

  /* inverse scaling matrix (destination -> source) */
  S[0][0] = 1.0/Sx;
  S[0][1] = 0;
  S[1][0] = 0;
  S[1][1] = 1.0/Sy;

  scl = CreateImage((int)(img->ncols*fabs(Sx) + 0.5),(int)(img->nrows*fabs(Sy) + 0.5));

  for (v.y=0; v.y < scl->nrows; v.y++)
    for (v.x=0; v.x < scl->ncols; v.x++){
      x = ((v.x-scl->ncols/2.)*S[0][0] + (v.y-scl->nrows/2.)*S[0][1])
        + img->ncols/2.;
      y = ((v.x-scl->ncols/2.)*S[1][0] + (v.y-scl->nrows/2.)*S[1][1])
        + img->nrows/2.;
      u.x = (int)(x+0.5);
      u.y = (int)(y+0.5);
      if (ValidPixel(img,u.x,u.y)){
        if (x < u.x) {
          next.x = u.x;
          prev.x = u.x - 1;
        } else {
          next.x = u.x + 1;
          prev.x = u.x;
        }
        d1 = next.x - x;
        d2 = x - prev.x;
        if (y < u.y) {
          next.y = u.y;
          prev.y = u.y - 1;
        } else {
          next.y = u.y + 1;
          prev.y = u.y;
        }
        d3 = next.y - y;
        d4 = y - prev.y;

        /* horizontal blends on the two source rows, then vertical blend;
           at the image border fall back to the rounded nearest pixel */
        if (ValidPixel(img,prev.x,prev.y)&&ValidPixel(img,next.x,prev.y))
          Ix1 = d1*img->val[prev.x+img->tbrow[prev.y]] +
            d2*img->val[next.x+img->tbrow[prev.y]];
        else
          Ix1 = img->val[u.x+img->tbrow[u.y]];

        if (ValidPixel(img,prev.x,next.y)&&ValidPixel(img,next.x,next.y))
          Ix2 = d1*img->val[prev.x+img->tbrow[next.y]] +
            d2*img->val[next.x+img->tbrow[next.y]];
        else
          Ix2 = img->val[u.x+img->tbrow[u.y]];

        If = d3*Ix1 + d4*Ix2;

        scl->val[v.x+scl->tbrow[v.y]] = (int)If;
      }
    }

  return(scl);
}

/* Allocate a double-precision image of ncols x nrows, with the usual
   tbrow[] row-offset table.  Calls Error() on allocation failure. */
DImage *CreateDImage(int ncols, int nrows)
{
  DImage *dimg=NULL;
  int i;

  dimg = (DImage *) calloc(1,sizeof(DImage));
  if (dimg == NULL){
    Error(MSG1,"CreateDImage");
  }

  dimg->val = AllocDoubleArray(nrows*ncols);
  dimg->tbrow = AllocIntArray(nrows);

  dimg->tbrow[0]=0;
  for (i=1; i < nrows; i++)
    dimg->tbrow[i]=dimg->tbrow[i-1]+ncols;
  dimg->ncols = ncols;
  dimg->nrows = nrows;

  return(dimg);
}
/* Free a DImage and reset the caller's pointer; safe on NULL. */
void DestroyDImage(DImage **dimg)
{
  DImage *aux;

  aux = *dimg;
  if(aux != NULL){
    if (aux->val != NULL) free(aux->val);
    if (aux->tbrow != NULL) free(aux->tbrow);
    free(aux);
    *dimg = NULL;
  }
}

/* Deep-copy an Image (pixel buffer duplicated with memcpy). */
Image *CopyImage(Image *img)
{
  Image *imgc;

  imgc = CreateImage(img->ncols,img->nrows);
  memcpy(imgc->val,img->val,img->ncols*img->nrows*sizeof(int));

  return(imgc);
}

/* Maximum pixel value of img.  The main loop is unrolled by four;
   the remainder loop picks up the last (n-1) % 4 pixels. */
int MaximumValue(Image *img)
{
  unsigned int i, n, r;
  int max;

  max = img->val[0];
  n = img->ncols*img->nrows - 1;
  r = n%4;
  n -= r;
  for (i=1; i < n; i+=4) {
    if (img->val[i] > max)
      max = img->val[i];
    if (img->val[i+1] > max)
      max = img->val[i+1];
    if (img->val[i+2] > max)
      max = img->val[i+2];
    if (img->val[i+3] > max)
      max = img->val[i+3];
  }
  while (r != 0) {
    if (img->val[i+r-1] > max)
      max = img->val[i+r-1];
    --r;
  }

  return(max);
}

/* Allocate a bucket queue with nbuckets circular cost buckets over nelems
   elements (used by the IFT).  Calls Error() on any allocation failure. */
Queue *CreateQueue(int nbuckets, int nelems)
{
  Queue *Q=NULL;

  Q = (Queue *) malloc(1*sizeof(Queue));

  if (Q != NULL) {
    Q->C.first = (int *)malloc(nbuckets * sizeof(int));
    Q->C.last = (int *)malloc(nbuckets * sizeof(int));
    Q->C.nbuckets = nbuckets;
    if ( (Q->C.first != NULL) && (Q->C.last != NULL) ){
      Q->L.elem = (Node *)malloc(nelems*sizeof(Node));
      Q->L.nelems = nelems;
      if (Q->L.elem != NULL){
        ResetQueue(Q);
      } else
        Error(MSG1,"CreateQueue");
    } else
      Error(MSG1,"CreateQueue");
  } else
    Error(MSG1,"CreateQueue");

  return(Q);
}

/* Free a bucket queue and reset the caller's pointer; safe on NULL. */
void DestroyQueue(Queue **Q)
{
  Queue *aux;

  aux = *Q;
  if (aux != NULL) {
    if (aux->C.first != NULL) free(aux->C.first);
    if (aux->C.last != NULL) free(aux->C.last);
    if (aux->L.elem != NULL) free(aux->L.elem);
    free(aux);
    *Q = NULL;
  }
}

/* Append elem to the doubly-linked list of the given bucket and mark it
   GRAY (in queue). */
void InsertQueue(Queue *Q, int bucket, int elem)
{
  if (Q->C.first[bucket] == NIL){
    Q->C.first[bucket] = elem;
    Q->L.elem[elem].prev = NIL;
  }else {
    Q->L.elem[Q->C.last[bucket]].next = elem;
    Q->L.elem[elem].prev = Q->C.last[bucket];
  }

  Q->C.last[bucket] = elem;
  Q->L.elem[elem].next = NIL;
  Q->L.elem[elem].color = GRAY;
}
/* Pop the next element from the current (lowest-cost) non-empty bucket,
   advancing the circular bucket pointer as needed.  Tie-breaking within a
   bucket is LIFO or FIFO per Q->C.tiebreak.  Returns NIL if the queue is
   empty.  The removed element is marked BLACK (processed). */
int RemoveQueue(Queue *Q)
{
  int elem=NIL, next, prev;
  int last;

  /** moves to next element or returns EMPTY queue **/
  if (Q->C.first[Q->C.current] == NIL) {
    last = Q->C.current;

    Q->C.current = (Q->C.current + 1) % (Q->C.nbuckets);

    while ((Q->C.first[Q->C.current] == NIL) && (Q->C.current != last)) {
      Q->C.current = (Q->C.current + 1) % (Q->C.nbuckets);
    }

    if (Q->C.first[Q->C.current] == NIL)
      return NIL;
  }

  if (Q->C.tiebreak == LIFOBREAK) {
    elem = Q->C.last[Q->C.current];

    prev = Q->L.elem[elem].prev;
    if (prev == NIL) { /* there was a single element in the list */
      Q->C.last[Q->C.current] = Q->C.first[Q->C.current] = NIL;
    }
    else {
      Q->C.last[Q->C.current] = prev;
      Q->L.elem[prev].next = NIL;
    }
  } else { /* Assume FIFO policy for breaking ties */
    elem = Q->C.first[Q->C.current];

    next = Q->L.elem[elem].next;
    if (next == NIL) { /* there was a single element in the list */
      Q->C.first[Q->C.current] = Q->C.last[Q->C.current] = NIL;
    }
    else {
      Q->C.first[Q->C.current] = next;
      Q->L.elem[next].prev = NIL;
    }
  }

  Q->L.elem[elem].color = BLACK;

  return elem;
}

/* Move elem from bucket "from" to bucket "to" (cost decrease in the IFT). */
void UpdateQueue(Queue *Q, int elem, int from, int to)
{
  RemoveQueueElem(Q, elem, from);
  InsertQueue(Q, to, elem);
}

/* Return nonzero iff no bucket holds an element; advances the circular
   bucket pointer to the next non-empty bucket as a side effect. */
int EmptyQueue(Queue *Q)
{
  int last;
  if (Q->C.first[Q->C.current] != NIL)
    return 0;

  last = Q->C.current;

  Q->C.current = (Q->C.current + 1) % (Q->C.nbuckets);

  while ((Q->C.first[Q->C.current] == NIL) && (Q->C.current != last)) {
    Q->C.current = (Q->C.current + 1) % (Q->C.nbuckets);
  }

  return (Q->C.first[Q->C.current] == NIL);
}

/* Reset all buckets and nodes to the empty/WHITE state and restore FIFO
   tie-breaking.  On i386 with NIL == -1 or 0 the pointers can be bulk-set
   with memset (every byte of -1 is 0xFF); otherwise fall back to explicit
   loops. */
void ResetQueue(Queue *Q)
{
  unsigned int i;

  Q->C.current = 0;
  SetTieBreak(Q,FIFOBREAK);
#if defined(__i386__) && (NIL==-1 || NIL==0)
  i = Q->C.nbuckets*sizeof(int);
  memset(Q->C.first, NIL, i);
  memset(Q->C.last, NIL, i);
  memset(Q->L.elem, NIL, Q->L.nelems*sizeof(Node));
  for (i=0; i<Q->L.nelems; ++i)
    Q->L.elem[i].color = WHITE;
#else
  for (i=0; i < Q->C.nbuckets; ++i)
    Q->C.first[i]=Q->C.last[i]=NIL;
  for (i=0; i < Q->L.nelems; ++i) {
    Q->L.elem[i].next = Q->L.elem[i].prev = NIL;
    Q->L.elem[i].color = WHITE;
  }
#endif
}
defined(__i386__) && (NIL==-1 || NIL==0)\n i = Q->C.nbuckets*sizeof(int);\n memset(Q->C.first, NIL, i);\n memset(Q->C.last, NIL, i);\n memset(Q->L.elem, NIL, Q->L.nelems*sizeof(Node));\n for (i=0; i<Q->L.nelems; ++i)\n Q->L.elem[i].color = WHITE;\n#else\n for (i=0; i < Q->C.nbuckets; ++i)\n Q->C.first[i]=Q->C.last[i]=NIL;\n for (i=0; i < Q->L.nelems; ++i) {\n Q->L.elem[i].next = Q->L.elem[i].prev = NIL;\n Q->L.elem[i].color = WHITE;\n }\n#endif\n}\n\nvoid RemoveQueueElem(Queue *Q, int elem, int bucket)\n{\n int prev,next;\n\n prev = Q->L.elem[elem].prev;\n next = Q->L.elem[elem].next;\n \n /* if elem is the first element */\n if (Q->C.first[bucket] == elem) {\n Q->C.first[bucket] = next;\n if (next == NIL) /* elem is also the last one */\n Q->C.last[bucket] = NIL;\n else\n Q->L.elem[next].prev = NIL;\n }\n else{ /* elem is in the middle or it is the last */\n Q->L.elem[prev].next = next;\n if (next == NIL) /* if it is the last */\n Q->C.last[bucket] = prev;\n else \n Q->L.elem[next].prev = prev;\n }\n\n Q->L.elem[elem].color = BLACK;\n}\n\nCurve *SamplePolynom(Polynom *P, double from, double to, int nbins)\n{\n Curve *curve=NULL;\n double x = from,val;\n double inc = (to-from)/nbins;\n int i,p;\n \n if ((from <= to)&&(nbins > 0)) {\n curve = CreateCurve(nbins);\n for (p=0; p < nbins; p++){\n val=0.0;\n for (i=0; i <= P->n; i++)\n\tval += pow(x,i)*P->coef[i];\n curve->X[p] = x;\n curve->Y[p] = val;\n x +=inc;\n }\n }\n return(curve);\n}\n\nPolynom *CreatePolynom(int degree)\n{\n Polynom *P=NULL;\n\n P = (Polynom *) calloc(1,sizeof(Polynom)); \n P->coef = AllocDoubleArray(degree + 1);\n P->n = degree;\n return(P);\n}\n\nvoid DestroyPolynom(Polynom **P)\n{\n Polynom *aux=NULL;\n\n aux = *P;\n if(aux != NULL){\n if(aux->coef != NULL) free(aux->coef);\n free(aux);\n *P = NULL;\n }\n}\n\nPolynom *DerivPolynom(Polynom *P)\n{\n Polynom *D;\n int i,j;\n\n D = CreatePolynom(P->n-1);\n j = 0;\n for (i=1; i <= P->n; i++){\n D->coef[j] = i*P->coef[i];\n j++;\n }\n 
/* Least-squares polynomial fit of the given degree to (X[k],Y[k]).
   Builds the normal equations A*P = B (A filled along anti-diagonals with
   sums of powers of X) and solves them by Gaussian elimination without
   pivoting followed by back-substitution.
   NOTE(review): no pivoting — ill-conditioned data can divide by a tiny
   diagonal element; acceptable for the small degrees used here. */
Polynom *Regression(Curve *curve, int degree)
{
  Polynom *P=NULL;
  double *A=NULL,*B=NULL;
  int i,j,k;
  double sum, m;

  /* Compute Non-Linear System: A*P=B, where P are the coefficients of
     the polynomial */

  A = AllocDoubleArray((degree+1)*(degree+1));
  B = AllocDoubleArray(degree+1);

  for (i=1; i<= 2*degree; i++){
    sum = 0.0;
    for (k=0; k < curve->n; k++){
      sum += pow(curve->X[k], i);
    }
    /* each power-sum fills one anti-diagonal of the (degree+1)^2 matrix */
    if (i<=degree){
      for (j=0; j<=i; j++){
        A[(i-j) + j*(degree+1)] = sum;
      }
    }
    else {
      for (j= (i-degree); j<= degree; j++){
        A[(i-j) + j*(degree+1)] = sum;
      }
    }
  }
  A[0]= curve->n;   /* sum of X^0 over all samples */

  for (i=0; i<=degree; i++){
    sum = 0.0;
    for (k=0; k < curve->n; k++){
      sum += pow(curve->X[k], i)*curve->Y[k];
    }
    B[i] = sum;
  }

  /* Gauss's Regression Method */

  for(k = 0; k < degree; k++){ /* Triangulation of A */
    for(i = k+1; i<= degree; i++){
      m = A[i*(degree+1)+k]/A[k*(degree+1)+k];
      A[i*(degree+1)+k] = 0.0;
      for (j = k+1; j<= degree; j++){
        A[i*(degree+1)+j] = A[i*(degree+1)+j] - m *A[k*(degree+1)+j];
      }
      B[i] = B[i] - m * B[k];
    }
  }

  P = CreatePolynom(degree);

  /* back-substitution */
  P->coef[degree] = B[degree]/A[degree*(degree+1) + degree];
  for(k=degree-1; k>=0; k--){
    sum= 0.0;
    for(j=k+1; j<=degree; j++){
      sum += A[k*(degree+1)+j]*P->coef[j];
    }
    P->coef[k] = (B[k]-sum)/A[k*(degree+1)+k];
  }

  free(A);
  free(B);

  return(P);
}

/* Release an annotated image (cost/label/pred maps and seed set); the
   original input image is NOT freed. */
void DeAnnotate(AnnImg **aimg)
{
  AnnImg *aux;

  aux = *aimg;
  if (aux != NULL){
    DestroyImage(&(aux->cost));
    DestroyImage(&(aux->label));
    DestroyImage(&(aux->pred));
    DestroySet(&(aux->seed));
    free(aux);
    *aimg = NULL;
  }
}

/* Multiscale fractal dimension of the binary shape: an exact Euclidean
   distance map (via iftDilation) yields the cumulative log-log area curve,
   which is fitted with a degree-"degree" polynomial over (from,to); the
   returned polynomial is the derivative of that fit.
   NOTE(review): loglog stays NULL when reg == 0 and is passed to
   DestroyCurve anyway — presumably DestroyCurve is NULL-safe; confirm. */
Polynom *MSFractal(Image *bin,
                   int maxdist,
                   int degree,
                   double lower,
                   double higher,
                   int reg,
                   double from,
                   double to)
{
  Curve *hist=NULL,*haux=NULL,*ahist=NULL, *aux_ahist=NULL,*loglog=NULL;
  AnnImg *aimg=NULL;
  AdjRel *A=NULL;
  Image *mbb=NULL,*nbin=NULL;
  Polynom *P=NULL,*D=NULL;
  int n,i,j,maxcost=maxdist*maxdist;

  mbb = MBB(bin);
  nbin = AddFrame(mbb,maxdist,0);
  DestroyImage(&mbb);

  /* Compute Euclidean IFT */

  A = Circular(1.5);
  aimg = Annotate(nbin,NULL,nbin);
  iftDilation(aimg, A);
  DestroyAdjRel(&A);

  /* Compute MS Fractal */

  hist = Histogram(aimg->cost);

  /* Compute non-zero points */

  n = 0;
  for (i=1; i < maxcost; i++)
    if (hist->Y[i] != 0)
      n++;

  haux = CreateCurve(n);
  j=0;
  for (i=1; i < maxcost; i++)
    if (hist->Y[i] != 0){
      haux->X[j] = log(sqrt((double)i));  /* cost is squared distance */
      haux->Y[j] = hist->Y[i];
      j++;
    }

  /* Accumulate values */
  ahist = CreateCurve(n);
  ahist->X[0] = haux->X[0];
  ahist->Y[0] = haux->Y[0];
  for (i=1; i < n; i++) {
    ahist->X[i] = haux->X[i];
    ahist->Y[i] = ahist->Y[i-1] + haux->Y[i];
  }

  /* Compute log(Y) */
  for (i=0; i < n; i++)
    ahist->Y[i] = log((double)ahist->Y[i]);

  j=0;

  for (i=0; i < n; i++)
    if ((ahist->X[i]>from)&&((ahist->X[i]<to)))
      j++;

  aux_ahist = CreateCurve(j);

  j=0;
  for (i=0; i < n; i++)
    if ((ahist->X[i]>from)&&((ahist->X[i]<to))){
      aux_ahist->X[j] = ahist->X[i];
      aux_ahist->Y[j] = ahist->Y[i];
      j++;
    }


  /* Compute Regression */
  P = Regression(/*ahist*/aux_ahist,degree);

  /* Print loglog curve */
  if (reg){
    loglog = SamplePolynom(P,lower, higher, 100);
  }

  /* Compute Fractal Curve */
  D = DerivPolynom(P);

  DestroyCurve(&hist);
  DestroyCurve(&haux);
  DestroyCurve(&ahist);
  DestroyCurve(&aux_ahist);
  DestroyCurve(&loglog);
  DestroyPolynom(&P);
  DeAnnotate(&aimg);
  DestroyImage(&nbin);
  return(D);
}
/* Build an annotated image for the IFT: cost, label and predecessor maps
   plus the seed set.  The four cases combine optional cost/label inputs:
   - no cost, no label: everything unseeded (cost INT_MAX, label 0);
   - label only: positive-label pixels become zero-cost seeds;
   - cost only: every pixel is a seed with its own label (p) and given cost;
   - both: positive-label pixels are seeds with the given cost/label. */
AnnImg *Annotate(Image *img, Image *cost, Image *label)
{
  AnnImg *aimg=NULL;
  int p,n;

  aimg = (AnnImg *) calloc(1,sizeof(AnnImg));
  if (aimg == NULL)
    Error(MSG1,"Annotate");

  aimg->img = img;
  aimg->cost = CreateImage(img->ncols,img->nrows);
  aimg->label = CreateImage(img->ncols,img->nrows);
  aimg->pred = CreateImage(img->ncols,img->nrows);
  aimg->root = NULL;
  aimg->seed = NULL;

  n = img->ncols*img->nrows;

  if ((cost == NULL)&&(label == NULL))
    for (p=0; p < n; p++){
      aimg->cost->val[p] = INT_MAX;
      aimg->label->val[p] = 0;
      aimg->pred->val[p] = p;
    }
  else
    if ((cost == NULL)&&(label != NULL))
      for (p=0; p < n; p++){
        aimg->pred->val[p] = p;
        if (label->val[p] > 0) {
          aimg->cost->val[p] = 0;
          aimg->label->val[p] = label->val[p];
          InsertSet(&(aimg->seed),p);
        } else {
          aimg->cost->val[p] = INT_MAX;
          aimg->label->val[p] = 0;
        }
      }
    else
      if ((cost != NULL)&&(label == NULL))
        for (p=0; p < n; p++){
          aimg->cost->val[p] = cost->val[p];
          aimg->label->val[p] = p;
          aimg->pred->val[p] = p;
          InsertSet(&(aimg->seed),p);
        }
      else
        if ((cost != NULL)&&(label != NULL))
          for (p=0; p < n; p++){
            aimg->pred->val[p] = p;
            aimg->label->val[p] = label->val[p];
            if (label->val[p] > 0) {
              aimg->cost->val[p] = cost->val[p];
              InsertSet(&(aimg->seed),p);
            } else {
              aimg->cost->val[p] = INT_MAX;
            }
          }

  return(aimg);
}
NULL;\n\n n = img->ncols*img->nrows;\n\n if ((cost == NULL)&&(label == NULL))\n for (p=0; p < n; p++){\n aimg->cost->val[p] = INT_MAX;\n aimg->label->val[p] = 0;\n aimg->pred->val[p] = p;\n }\n else\n if ((cost == NULL)&&(label != NULL))\n for (p=0; p < n; p++){\n\taimg->pred->val[p] = p;\n\tif (label->val[p] > 0) {\n\t aimg->cost->val[p] = 0;\n\t aimg->label->val[p] = label->val[p];\n\t InsertSet(&(aimg->seed),p);\n\t} else {\n\t aimg->cost->val[p] = INT_MAX;\n\t aimg->label->val[p] = 0;\n\t}\n }\n else\n if ((cost != NULL)&&(label == NULL))\n\tfor (p=0; p < n; p++){\n\t aimg->cost->val[p] = cost->val[p];\n\t aimg->label->val[p] = p;\n\t aimg->pred->val[p] = p;\n\t InsertSet(&(aimg->seed),p);\n\t}\n else\n\tif ((cost != NULL)&&(label != NULL))\t\n\t for (p=0; p < n; p++){\n\t aimg->pred->val[p] = p;\n\t aimg->label->val[p] = label->val[p];\n\t if (label->val[p] > 0) {\n\t aimg->cost->val[p] = cost->val[p];\n\t InsertSet(&(aimg->seed),p);\n\t } else { \n\t aimg->cost->val[p] = INT_MAX;\n\t }\n\t }\n \n return(aimg);\n}\n\nvoid InsertSet(Set **S, int elem)\n{\n Set *p=NULL;\n\n p = (Set *) calloc(1,sizeof(Set));\n if (p == NULL) Error(MSG1,\"InsertSet\");\n if (*S == NULL){\n p->elem = elem;\n p->next = NULL;\n }else{\n p->elem = elem;\n p->next = *S;\n }\n *S = p;\n}\n\nint RemoveSet(Set **S)\n{\n Set *p;\n int elem=NIL;\n \n if (*S != NULL){\n p = *S;\n elem = p->elem;\n *S = p->next;\n //printf(\"RemoveSet before free\");\n free(p);\n //printf(\" RemoveSet after free: elem is %d\\n\",elem);\n //if(*S != NULL) printf(\" *S->elem is %d\\n\",(*S)->elem);\n }\n return(elem);\n}\n\nvoid DestroySet(Set **S)\n{\n Set *p;\n while(*S != NULL){\n p = *S;\n *S = p->next;\n free(p);\n }\n}\n\nint FrameSize(AdjRel *A)\n{\n int sz=INT_MIN,i=0;\n\n for (i=0; i < A->n; i++){\n if (fabs(A->dx[i]) > sz) \n sz = fabs(A->dx[i]);\n if (fabs(A->dy[i]) > sz) \n sz = fabs(A->dy[i]);\n }\n return(sz);\n}\n\nAdjRel *ComplAdj(AdjRel *A1, AdjRel *A2)\n{\n AdjRel *A;\n int i,j,n;\n char 
/* Allocate a binary min-heap over n elements whose priorities live in the
   external array "cost" (the heap stores indices, not costs).  Returns
   NULL if cost is NULL; calls Error() on allocation failure. */
Heap *CreateHeap(int n, int *cost){
  Heap *H=NULL;
  int i;

  if (cost == NULL){
    Warning("Cannot create heap without cost map","CreateHeap");
    return(NULL);
  }

  H = (Heap *) calloc(1,sizeof(Heap));
  if (H != NULL) {
    H->n = n;
    H->cost = cost;
    H->color = (char *) calloc(n,sizeof(char));
    H->pixel = (int *) calloc(n,sizeof(int));
    H->pos = (int *) calloc(n,sizeof(int));
    H->last = -1;
    if ((H->color == NULL) || (H->pos == NULL) || (H->pixel == NULL))
      Error(MSG1,"CreateHeap");
    for (i=0; i < H->n; i++){
      H->color[i]=WHITE;
      H->pos[i]=-1;
      H->pixel[i]=-1;
    }
  }
  else
    Error(MSG1,"CreateHeap");

  return(H);
}

/* Free a heap and reset the caller's pointer; the external cost array is
   NOT freed.  Safe on NULL. */
void DestroyHeap(Heap **H){
  Heap *aux;

  aux = *H;
  if (aux != NULL) {
    if (aux->pixel != NULL) free(aux->pixel);
    if (aux->color != NULL) free(aux->color);
    if (aux->pos != NULL) free(aux->pos);
    free(aux);
    *H = NULL;
  }
}

/* Insert pixel (marking it GRAY) and sift it up; false if the heap is full. */
bool InsertHeap(Heap *H, int pixel){
  if (!IsFullHeap(H)){
    H->last ++;
    H->pixel[H->last] = pixel;
    H->color[pixel] = GRAY;
    H->pos[pixel] = H->last;
    GoUpHeap(H,H->last);
    return(true);
  } else
    return(false);
}

/* Pop the minimum-cost pixel into *pixel (marking it BLACK) and restore the
   heap property; false if the heap is empty. */
bool RemoveHeap(Heap *H, int *pixel){
  if (!IsEmptyHeap(H)){
    *pixel = H->pixel[0];
    H->pos[*pixel]=-1;
    H->color[*pixel] = BLACK;
    H->pixel[0] = H->pixel[H->last];
    H->pos[H->pixel[0]] = 0;
    H->last--;
    GoDownHeap(H,0);
    return(true);
  } else
    return(false);
}

/* True iff the heap holds no elements. */
bool IsEmptyHeap(Heap *H){
  if (H->last == -1)
    return(true);
  else
    return(false);
}
return(true);\n else\n return(false);\n}\n\nvoid GoDownHeap(Heap *H, int i){\n int least, left=HEAP_LEFTSON(i), right=HEAP_RIGHTSON(i);\n\n if ((left <= H->last)&&(H->cost[H->pixel[left]] < H->cost[H->pixel[i]]))\n least = left;\n else\n least = i;\n\n if ((right <= H->last)&&(H->cost[H->pixel[right]] < H->cost[H->pixel[least]]))\n least = right;\n\n if (least != i){\n Change(&H->pixel[least],&H->pixel[i]);\n H->pos[H->pixel[i]]=i;\n H->pos[H->pixel[least]]=least;\n GoDownHeap(H,least);\n }\n}\n\nbool IsFullHeap(Heap *H){\n if (H->last == (H->n-1))\n return(true);\n else\n return(false);\n}\n\nbool HeapIsEmpty(Heap *H){\n return IsEmptyHeap(H);\n}\n\nvoid Change(int *a, int *b){\n int c; \n c = *a;\n *a = *b;\n *b = c;\n}\n\nvoid GoUpHeap(Heap *H, int i){\n int j = HEAP_DAD(i);\n\n while((i > 0) && (H->cost[H->pixel[j]] > H->cost[H->pixel[i]])){\n Change(&H->pixel[j],&H->pixel[i]);\n H->pos[H->pixel[i]]=i;\n H->pos[H->pixel[j]]=j;\n i = j;\n j = HEAP_DAD(i);\n }\n}\n\nvoid DestroyAdjPxl(AdjPxl **N)\n{\n AdjPxl *aux;\n\n aux = *N;\n if (aux != NULL){\n if (aux->dp != NULL) free(aux->dp);\n free(aux);\n *N = NULL;\n }\n}\n\nImage *CompPaths(Image *pred)\n{\n Image *seed=NULL;\n int p,n;\n\n seed = CopyImage(pred);\n\n n = seed->ncols*seed->nrows;\n for (p=0; p < n; p++) \n seed->val[p] = Seed(seed,p);\n return(seed);\n}\n\nint Seed(Image *pred, int p)\n{\n if (pred->val[p]==p)\n return(p);\n else \n return(Seed(pred,pred->val[p])); \n}\n\nImage *Perimeter(Image *bin)\n{\n int p,n;\n Image *cont,*perim;\n Curve *hist;\n\n cont = LabelContour(bin);\n n = cont->ncols*cont->nrows;\n perim = CreateImage(cont->ncols,cont->nrows);\n hist = Histogram(cont);\n for (p=0; p < n; p++)\n if (cont->val[p] > 0)\n perim->val[p] = hist->Y[cont->val[p]];\n\n DestroyCurve(&hist);\n DestroyImage(&cont);\n\n return(perim);\n}\n\nCurve3D *Saliences(Image *bin, int maxdist) \n{ \n Image *cont=NULL;\n AdjRel *A=NULL;\n AnnImg *aimg=NULL;\n Curve3D *saliences=NULL;\n \n /* Compute Euclidean 
/* Multiscale skeleton of the binary image.  side selects which skeleton
   survives: INTERIOR zeroes exterior costs (keeping the inside skeleton),
   EXTERIOR the reverse, BOTH keeps both. */
Image *MSSkel(Image *bin, char side)
{
  Image *msskel,*cont;
  AdjRel *A;
  AnnImg *aimg;
  int p,n;

  /* Compute Euclidean IFT */

  cont = LabelContPixel(bin);
  aimg = Annotate(bin,NULL,cont);
  A = Circular(1.5);
  n = aimg->img->ncols*aimg->img->nrows;
  switch (side) {
  case INTERIOR:
    /* zero-cost background pixels stop propagation outside the object */
    for(p = 0; p < n; p++)
      if (aimg->img->val[p] == 0){
        aimg->cost->val[p] = 0;
      }
    break;
  case EXTERIOR:
    for(p = 0; p < n; p++)
      if (aimg->img->val[p] != 0){
        aimg->cost->val[p] = 0;
      }
    break;
  case BOTH:
  default:
    ;
  }
  iftDilation(aimg,A);
  DestroyAdjRel(&A);
  DestroyImage(&cont);

  /* Compute MS Skeletons */

  msskel = CompMSSkel(aimg);
  DeAnnotate(&aimg);

  return(msskel);
}

/* From a propagated IFT, compute per-contour-pixel saliences: for each
   seed (pixel that is its own predecessor) count the pixels it influenced
   inside vs. outside the object (within cost <= maxcost).  The result is a
   3-D curve with (X,Y) the seed position and Z the signed dominant area
   (positive = exterior dominates/convex, negative = interior/concave,
   zero = balanced). */
Curve3D *CompSaliences(AnnImg *aimg, int maxcost)
{
  Image *cont=NULL;
  double *inter=NULL,*exter=NULL;
  int p,n,i,Lmax;
  Curve3D *saliences=NULL;

  n = aimg->img->ncols*aimg->img->nrows;
  cont = CreateImage(aimg->img->ncols,aimg->img->nrows);
  for(p=0;p<n; p++){
    if (aimg->pred->val[p]==p){
      cont->val[p]=aimg->label->val[p];
    }
  }

  Lmax = MaximumValue(aimg->label);
  inter = AllocDoubleArray(Lmax);
  exter = AllocDoubleArray(Lmax);


  /* Compute influence areas */

  for (p=0; p < n; p++){
    if ((aimg->label->val[p] > 0)&&(aimg->cost->val[p] <= maxcost)) {
      if (aimg->img->val[p] != 0){
        inter[aimg->label->val[p]-1]++;
      } else {
        exter[aimg->label->val[p]-1]++;
      }
    }
  }

  /* Compute saliences */
  saliences = CreateCurve3D(Lmax);

  for (p=0; p < n; p++){
    if (cont->val[p] > 0){
      i = cont->val[p]-1;
      saliences->X[i] = (double)(p%cont->ncols);
      saliences->Y[i] = (double)(p/cont->ncols);
      if (exter[i] > inter[i]){
        saliences->Z[i] = exter[i];
      }else{
        if (exter[i] < inter[i]){
          saliences->Z[i] = -inter[i];   /* interior dominates: negative */
        }else{
          saliences->Z[i] = 0.0;
        }
      }
    }
  }

  DestroyImage(&cont);
  free(inter);
  free(exter);

  return(saliences);
}

/* Remove a frame of width sz from every side of fimg (inverse of AddFrame);
   rows are copied with one memcpy each. */
Image *RemFrame(Image *fimg, int sz)
{
  Image *img;
  int y,*dst,*src,nbytes,offset;

  img = CreateImage(fimg->ncols-(2*sz),fimg->nrows-(2*sz));
  nbytes = sizeof(int)*img->ncols;
  offset = sz+fimg->tbrow[sz];
  for (y=0,src=fimg->val+offset,dst=img->val; y < img->nrows;y++,src+=fimg->ncols,dst+=img->ncols){
    memcpy(dst,src,nbytes);
  }
  return(img);
}

/* Convert an (dx,dy) adjacency into linear pixel offsets for img's width,
   so neighbors can be reached as p + dp[i] without x/y arithmetic. */
AdjPxl *AdjPixels(Image *img, AdjRel *A)
{
  AdjPxl *N;
  int i;

  N = (AdjPxl *) calloc(1,sizeof(AdjPxl));
  if(N != NULL){
    N->dp = AllocIntArray(A->n);
    N->n = A->n;
    for (i=0; i < N->n; i++)
      N->dp[i] = A->dx[i] + img->ncols*A->dy[i];
  }else{
    Error(MSG1,"AdjPixels");
  }

  return(N);
}

/* Pixel-wise absolute value of img. */
Image *Abs(Image *img)
{
  Image *absimg=NULL;
  int p,n;

  n = img->ncols*img->nrows;
  absimg = CreateImage(img->ncols,img->nrows);
  for (p=0; p < n; p++)
    absimg->val[p] = abs(img->val[p]);
  return(absimg);
}

/* Zero out saliences whose |Z| does not exceed the area of a circular
   sector of the given radius and angle (noise suppression).
   NOTE(review): the copy is allocated with n+1 entries but only n are
   filled, leaving a trailing zero entry — original behavior preserved;
   verify downstream consumers expect it. */
Curve3D *RemSaliencesByAngle(Curve3D *curve,int radius, int angle)
{
  Curve3D *scurve;
  double area;
  int i;

  scurve = CreateCurve3D(curve->n+1);
  for (i=0; i < curve->n; i++){
    scurve->X[i] = curve->X[i];
    scurve->Y[i] = curve->Y[i];
    scurve->Z[i] = curve->Z[i];
  }

  area = ((double)angle*PI*radius*radius/360.0);
  for (i=0; i < scurve->n; i++){
    if (fabs(scurve->Z[i]) <= area)
      scurve->Z[i] = 0.0;
  }

  return(scurve);
}

/* Connected-component labeling of a binary image under adjacency A, by
   BFS with a FIFO.  The image is temporarily framed (frame value INT_MIN)
   so neighbor accesses need no bounds checks; labels start at 1. */
Image *LabelBinComp(Image *bin, AdjRel *A)
{
  Image *label=NULL,*flabel=NULL,*fbin=NULL;
  int i,j,n,sz,p,q,l=1;
  AdjPxl *N=NULL;
  int *FIFO=NULL;
  int first=0,last=0;

  sz = FrameSize(A);
  fbin = AddFrame(bin,sz,INT_MIN);
  flabel = CreateImage(fbin->ncols,fbin->nrows);
  N = AdjPixels(fbin,A);
  n = fbin->ncols*fbin->nrows;
  FIFO = AllocIntArray(n);
  for (j=0; j < n; j++){
    if ((fbin->val[j]==1)&&(flabel->val[j]==0)){
      flabel->val[j]=l;
      FIFO[last]=j;
      last++;
      while(first != last){
        p = FIFO[first];
        first++;
        for (i=1; i < N->n; i++){
          q = p + N->dp[i];
          if ((fbin->val[q]==1)&&(flabel->val[q] == 0)){
            flabel->val[q] = flabel->val[p];
            FIFO[last] = q;
            last++;
          }
        }
      }
      l++;
      first=last=0;   /* reuse the FIFO for the next component */
    }
  }

  label = RemFrame(flabel,sz);
  DestroyAdjPxl(&N);
  DestroyImage(&fbin);
  DestroyImage(&flabel);
  free(FIFO);

  return(label);
}

/* Saliences of a skeleton image: seed the IFT with the skeleton's
   connected components, propagate to maxdist, then suppress saliences
   below the angular-sector area threshold. */
Curve3D *SkelSaliences(Image *skel, int maxdist, int angle)
{
  Image *cont=NULL;
  AdjRel *A=NULL;
  AnnImg *aimg=NULL;
  Curve3D *saliences=NULL;
  Curve3D *auxsalie=NULL;

  /* Compute Euclidean IFT */
  A = Circular(0.0);
  cont = LabelBinComp(skel, A);
  aimg = Annotate(skel,NULL,cont);
  DestroyAdjRel(&A);

  A = Circular(1.5);
  iftDilation(aimg,A);
  auxsalie = CompSaliences(aimg, maxdist*maxdist);
  saliences = RemSaliencesByAngle(auxsalie,maxdist,angle);
  DestroyImage(&cont);
  DestroyAdjRel(&A);
  DeAnnotate(&aimg);
  DestroyCurve3D(&auxsalie);
  return(saliences);
}

/* Threshold a multiscale skeleton at perc% of its maximum |value|,
   producing a binary skeleton image. */
Image *Skeleton(Image *msskel, float perc)
{
  Image *skel = NULL;
  int p ,n, thres;

  skel = Abs(msskel);
  thres = (int)((MaximumValue(skel)*perc)/100.0);
  n = skel->ncols*skel->nrows;
  for (p=0; p < n; p++)
    if (skel->val[p] >= thres)
      skel->val[p]=1;
    else
      skel->val[p]=0;

  return(skel);
}

/* Compute the multiscale skeleton from a propagated IFT: for each
   non-contour pixel, the maximum difference between the contour positions
   of the roots of its neighbors measures how "medial" the pixel is.
   Differences are folded to the shorter way around the contour (perimeter
   complement, with sign).  Pixels whose neighbors come from different
   contours are SKIZ pixels, temporarily marked INT_MAX and finally set to
   MaxD+1 so they exceed every skeleton value. */
Image *CompMSSkel(AnnImg *aimg)
{
  int i,p,q,n,maxd1,maxd2,d1,d2,MaxD;
  Pixel u,v;
  int sign=1,s2;
  Image *msskel,*cont=NULL,*perim=NULL,*seed=NULL;
  AdjRel *A;

  /* Compute MS Skeletons */

  cont = LabelContour(aimg->img);
  perim = Perimeter(aimg->img);
  seed = CompPaths(aimg->pred);
  A = Circular(1.0);
  n = aimg->label->ncols*aimg->label->nrows;
  msskel = CreateImage(aimg->label->ncols,aimg->label->nrows);

  MaxD = INT_MIN;
  for (p=0; p < n; p++) {
    if (aimg->pred->val[p] != p) { /* It eliminates the countors and
                                      already takes into account the
                                      side option */
      u.x = p%aimg->label->ncols;
      u.y = p/aimg->label->ncols;
      maxd1 = maxd2 = INT_MIN;
      for (i=1; i < A->n; i++){
        v.x = u.x + A->dx[i];
        v.y = u.y + A->dy[i];
        if (ValidPixel(aimg->label,v.x,v.y)){
          q = v.x + aimg->label->tbrow[v.y];
          if (cont->val[seed->val[p]] == cont->val[seed->val[q]]){
            d2 = aimg->label->val[q]-aimg->label->val[p];
            s2 = 1;
            /* take the shorter way around the closed contour */
            //	   if (d2 > (perim->val[seed->val[p]]-1-d2)){
            if (d2 > (perim->val[seed->val[p]]-d2)){
              s2 = -1;
              //	     d2 = (perim->val[seed->val[p]]-1-d2);
              d2 = (perim->val[seed->val[p]]-d2);
            }
            if (d2 > maxd2){
              maxd2 = d2;
              sign = s2;
            }
          } else {
            d1 = cont->val[seed->val[q]] - cont->val[seed->val[p]];
            if (d1 > maxd1)
              maxd1 = d1;
          }
        }
      }
      if (maxd1 > 0) {
        msskel->val[p] = INT_MAX;   /* SKIZ: neighbors from different contours */
      } else {
        msskel->val[p] = sign*maxd2;
        if (msskel->val[p] > MaxD)
          MaxD = msskel->val[p];
      }
    }
  }

  for (p=0; p < n; p++) { /* Set up SKIZ */
    if (msskel->val[p] == INT_MAX)
      msskel->val[p] = MaxD + 1;
  }

  DestroyImage(&cont);
  DestroyImage(&perim);
  DestroyImage(&seed);
  DestroyAdjRel(&A);

  return(msskel);
}
/* Exact Euclidean distance propagation (IFT dilation) from the seeds of
   aimg using adjacency A and a circular bucket queue.  Costs are squared
   Euclidean distances, maintained incrementally via per-pixel |dx|,|dy|
   displacement maps and a precomputed table of squares; cost, pred and
   label maps are updated as shorter paths are found.  No-op if there are
   no seeds. */
void iftDilation(AnnImg *aimg, AdjRel *A)
{
  Image *Dx=NULL,*Dy=NULL;
  Queue *Q=NULL;
  int i,p,q,n,sz;
  Pixel u,v;
  int *sq=NULL,tmp=INT_MAX,dx,dy;
  char *color=NULL;

  if (aimg->seed == NULL)
    return;

  /* lookup table of squares up to the larger image dimension */
  n = MAX(aimg->img->ncols,aimg->img->nrows);
  sq = AllocIntArray(n);
  for (i=0; i < n; i++)
    sq[i]=i*i;

  Dx = CreateImage(aimg->img->ncols,aimg->img->nrows);
  Dy = CreateImage(aimg->img->ncols,aimg->img->nrows);
  n = aimg->img->ncols*aimg->img->nrows;
  color = AllocCharArray(n);
  sz = FrameSize(A);
  /* bucket count bounds the largest possible cost increment per step */
  Q = CreateQueue(2*sz*(sz+aimg->img->ncols+aimg->img->nrows),n);

  while (aimg->seed != NULL){
    p=RemoveSet(&(aimg->seed));
    InsertQueue(Q,aimg->cost->val[p]%Q->C.nbuckets,p);
    color[p]=GRAY;
  }

  while(!EmptyQueue(Q)) {
    p=RemoveQueue(Q);
    color[p]=BLACK;
    u.x = p%aimg->img->ncols;
    u.y = p/aimg->img->ncols;
    for (i=1; i < A->n; i++){
      v.x = u.x + A->dx[i];
      v.y = u.y + A->dy[i];
      if (ValidPixel(aimg->img,v.x,v.y)){
        q = v.x + aimg->img->tbrow[v.y];
        if (color[q] != BLACK){
          /* candidate squared distance through p */
          dx = Dx->val[p] + abs(v.x-u.x);
          dy = Dy->val[p] + abs(v.y-u.y);
          tmp = sq[dx] + sq[dy];
          if (tmp < aimg->cost->val[q])
            {
              if (color[q] == WHITE){
                InsertQueue(Q,tmp%Q->C.nbuckets,q);
                color[q]=GRAY;
              }else
                UpdateQueue(Q,q,aimg->cost->val[q]%Q->C.nbuckets,tmp%Q->C.nbuckets);
              aimg->cost->val[q] = tmp;
              aimg->pred->val[q] = p;
              aimg->label->val[q] = aimg->label->val[p];
              Dx->val[q] = dx;
              Dy->val[q] = dy;
            }
        }
      }
    }
  }
  free(color);
  free(sq);
  DestroyQueue(&Q);
  DestroyImage(&Dx);
  DestroyImage(&Dy);
}

/* Deep-copy a 2-D curve. */
Curve *CopyCurve(Curve *curve)
{
  Curve *curvec;

  curvec = CreateCurve(curve->n);
  memcpy(curvec->X,curve->X,curve->n*sizeof(double));
  memcpy(curvec->Y,curve->Y,curve->n*sizeof(double));
  return(curvec);
}

/* Gray-level histogram of img: Y[v] = number of pixels with value v,
   X[v] = v, for v in [0, MaximumValue(img)]. */
Curve *Histogram(Image *img)
{
  int i,p,n,nbins;
  Curve *hist=NULL;

  nbins = MaximumValue(img)+1;
  hist = CreateCurve(nbins);
  n = img->ncols*img->nrows;
  for (p=0; p < n; p++)
    hist->Y[img->val[p]]++;
  for (i=0; i < nbins; i++)
    hist->X[i] = i;

  return(hist);
}

/* Quicksort curve entries [left,right] by Y, keeping X/Y in step.
   order is INCREASING or DECREASING. */
void SortCurve(Curve *curve, int left, int right, char order)
{
  int pivot;

  if (left < right) {
    pivot = PartCurve(curve,left,right,order);
    SortCurve(curve,left,pivot-1,order);
    SortCurve(curve,pivot+1,right,order);
  }
}

/* Quicksort partition for SortCurve: pivot is Y[left]; swaps (X,Y) pairs.
   Returns the pivot's final index.  Same boundary caveat as PartCurve3D:
   the left scan may probe one element past "right" before stopping. */
int PartCurve (Curve *curve, int left, int right, char order)
{
  double y;
  int i;
  double X,Y;

  y = curve->Y[left];
  i = left;
  do {
    if (order == INCREASING){
      while ((curve->Y[left] <= y)&&(left <= right)) left++;
      while (curve->Y[right] > y) right--;
    } else { /* order = DECREASING */
      while ((curve->Y[left] >= y)&&(left <= right)) left++;
      while (curve->Y[right] < y) right--;
    }
    if (left < right){
      X = curve->X[left];
      Y = curve->Y[left];
      curve->X[left] = curve->X[right];
      curve->Y[left] = curve->Y[right];
      curve->X[right] = X;
      curve->Y[right] = Y;
      left++; right--;
    }
  } while (left <= right);

  left = i;
curve->Y[right];\n curve->X[right] = X;\n curve->Y[right] = Y;\n left++; right--;\n }\n } while (left <= right);\n\n left = i;\n if (left != right){\n X = curve->X[left];\n Y = curve->Y[left];\n curve->X[left] = curve->X[right];\n curve->Y[left] = curve->Y[right];\n curve->X[right] = X;\n curve->Y[right] = Y;\n }\n\n return (right);\n}\n\nvoid InvertXY(Curve *curve)\n{\n double tmp;\n int i;\n for (i=0; i<curve->n; i++){\n tmp = curve->X[i];\n curve->X[i] = curve->Y[i];\n curve->Y[i] = tmp;\n }\n}\n\n/* Descriptor functions ***************************/\n\n/* BIC */\nint *Quantize_colors(CImage *img, int color_dim){\n unsigned long i;\n unsigned long r, g, b;\n unsigned long fator_g, fator_b;\n int *color, n;\n \n n = img->C[0]->nrows * img->C[0]->ncols; \n\n color = (int *) calloc(n, sizeof(int));\n if(color==NULL){\n printf(\"\\nOut of memory \\n\");\n exit(-1);\n }\n \n fator_g = color_dim;\n fator_b = fator_g*color_dim;\n \n for(i=0; i<n; i++){\n r = color_dim*img->C[0]->val[i]/256;\n g = color_dim*img->C[1]->val[i]/256;\n b = color_dim*img->C[2]->val[i]/256;\n \n color[i] = (r + fator_g*g + fator_b*b);\n }\n return color;\n}\n\nunsigned char Compute_log(float value){\n unsigned char result;\n \n value = 255. * value;\n if(value==0.) result=0;\n else if(value<1.) result=1;\n else if(value<2.) result=2;\n else if(value<4.) result=3;\n else if(value<8.) result=4;\n else if(value<16.) result=5;\n else if(value<32.) result=6;\n else if(value<64.) result=7;\n else if(value<128.) 
result=8;\n else result=9;\n \n return result;\n}\n\nvoid Compress_histogram(unsigned char *ch, unsigned long *h, \n\t\t\tunsigned long max, int size){\n int i;\n unsigned char v;\n \n for(i=0; i<size; i++){\n v = Compute_log((float) h[i] / (float) max);\n ch[i] = (unsigned char)(48 + v);\n }\n}\n\nvoid Compute_frequency_property(Image *img, Property *ppt){\n \n unsigned long x, y, p, q;\n int i, border;\n AdjRel *A;\n Pixel v;\n \n A = Circular(1.0);\n \n for(y=0L; y<img->nrows; y++){\n for(x=0L; x<img->ncols; x++){\n p = x + img->tbrow[y];\n border=FALSE;\n for (i=1; i < A->n; i++){\n\tv.x = x + A->dx[i];\n\tv.y = y + A->dy[i];\n\tif (ValidPixel(img,v.x,v.y)){\n\t q = v.x + img->tbrow[v.y];\n\t if(ppt[p].color!=ppt[q].color){ \n\t border=TRUE;\n\t break;\n\t }\n\t}\n }\n if(border==FALSE) \n\tppt[p].frequency=LOW;\n else ppt[p].frequency=HIGH;\n }\n }\n DestroyAdjRel(&A);\n}\n\nProperty *Compute_pixels_properties(CImage *img)\n{\n Property *p;\n int *color, i, n;\n \n n = img->C[0]->nrows * img->C[0]->ncols; \n \n p = (Property *) calloc(n, sizeof(Property));\n if(p==NULL){\n printf(\"\\nOut of memory \\n\");\n exit(-1);\n }\n \n color = Quantize_colors(img, 4);\n for(i=0; i<n; i++) \n p[i].color=color[i];\n Compute_frequency_property(img->C[0], p);\n \n free(color);\n return p;\n}\n\nVisualFeature *Compute_histograms(Property *p, int n){\n VisualFeature *vf = (VisualFeature *) calloc(1,sizeof(VisualFeature));\n unsigned long i;\n \n for(i=0; i<SIZE; i++){\n vf->colorH[i] = 0;\n vf->lowH[i] = 0;\n vf->highH[i] = 0;\n }\n \n for(i=0; i<n; i++){\n vf->colorH[p[i].color]++;\n \n if(p[i].frequency==LOW) \n vf->lowH[p[i].color]++;\n else \n vf->highH[p[i].color]++;\n }\n return vf;\n}\n\nCompressedVisualFeature *Compress_histograms(VisualFeature *vf, int npixels)\n{\n CompressedVisualFeature *cvf = (CompressedVisualFeature *) calloc(1,sizeof(CompressedVisualFeature));\n \n Compress_histogram(cvf->colorH, vf->colorH, npixels, SIZE);\n Compress_histogram(cvf->lowH, 
vf->lowH, npixels, SIZE);\n Compress_histogram(cvf->highH, vf->highH, npixels, SIZE);\n \n return cvf;\n}\n\nvoid Write_visual_features(char *filename,char *dbname, CompressedVisualFeature *cvf)\n{\n FILE *file;\n int i;\n \n if((file = fopen(dbname, \"a+t\")) == NULL)\n {\n fprintf(stderr, \"Can't open %s \\n\", dbname);\n exit(-1);\n }\n \n fprintf(file, \"%s\\t\", filename);\n for(i=0;i<SIZE;i++)\n {\n fprintf(file, \"%c%c\", cvf->lowH[i], cvf->highH[i]);\n }\n fprintf(file, \"\\n\");\n \n fclose(file);\n}\n\nCompressedVisualFeature *Extract_visual_features(CImage *img)\n{\n Property *p;\n VisualFeature *vf;\n CompressedVisualFeature *cvf;\n int npixels;\n \n npixels = img->C[0]->nrows * img->C[0]->ncols;\n \n p = Compute_pixels_properties(img);\n vf = Compute_histograms(p, npixels);\n cvf = Compress_histograms(vf, npixels);\n \n free(p);\n return cvf;\n}\n\nCurve *BIC(CImage *img)\n{\n Property *p;\n VisualFeature *vf;\n CompressedVisualFeature *cvf;\n int i, npixels;\n Curve *curve = CreateCurve(2*SIZE);\n \n npixels = img->C[0]->nrows * img->C[0]->ncols;\n \n p = Compute_pixels_properties(img);\n vf = Compute_histograms(p, npixels);\n cvf = Compress_histograms(vf, npixels);\n \n for (i=0; i<SIZE; i++){\n curve->X[i] = i;\n curve->Y[i] = cvf->lowH[i];\n curve->X[i+SIZE] = i+SIZE;\n curve->Y[i+SIZE] = cvf->highH[i];\n }\n\n free(p);\n free(vf);\n free(cvf);\n return curve;\n}\n\n\ndouble gray_level_BIC(Image *img1, Image *img2){\n Property *p1, *p2;\n VisualFeature *vf1, *vf2;\n CompressedVisualFeature *cvf1, *cvf2;\n int i, n1, n2;\n double dist;\n\n n1 = img1->nrows * img1->ncols; \n n2 = img2->nrows * img2->ncols; \n\n p1 = (Property *) calloc(n1, sizeof(Property));\n p2 = (Property *) calloc(n2, sizeof(Property));\n\n if((p1==NULL)||(p2==NULL)){\n printf(\"\\nOut of memory \\n\");\n exit(-1);\n }\n \n for(i=0; i<n1; i++){\n p1[i].color=img1->val[i];\n }\n for(i=0; i<n2; i++){\n p2[i].color=img2->val[i];\n }\n \n Compute_frequency_property(img1,p1);\n 
Compute_frequency_property(img2,p2);\n \n vf1 = Compute_histograms(p1, n1);\n vf2 = Compute_histograms(p2, n2);\n \n cvf1 = Compress_histograms(vf1, n1);\n cvf2 = Compress_histograms(vf2, n2);\n \n dist = 0.0;\n for (i=0; i<SIZE; i++){\n dist += fabs(cvf1->lowH[i] - cvf2->lowH[i]);\n dist += fabs(cvf1->highH[i] - cvf2->highH[i]);\n }\n \n free(p1);\n free(p2);\n free(vf1);\n free(vf2);\n free(cvf1);\n free(cvf2);\n return dist;\n}\n\nFeatureVector1D *BIC_ExtractionAlgorithm(CImage *in){\n Curve *curve = NULL;\n FeatureVector1D *fv = NULL;\n curve = BIC(in);\n fv = CurveTo1DFeatureVector(curve);\n \n DestroyCurve(&curve);\n return fv;\n}\n\ndouble BIC_DistanceAlgorithm(FeatureVector1D *fv1, FeatureVector1D *fv2)\n{\n\treturn L1_Distance(fv1, fv2);\n}\n\n/* Fourier Descriptor */\ndouble Cabs(double x, double y) {\n return (sqrt( (x*x) + (y*y)));\n}\n\nCurve *Image2Curve(Image *img) { /* img = binary image */\n Curve3D *curve; //ponto (x,y) e z = ordem no contorno\n Curve *curve2;\n Image *contour = NULL;\n int i;\n int npixels;\n int count = 0;\n int j = 0 ;\n \n contour = LabelContPixel(img);\n npixels = contour->ncols * contour->nrows;\n \n for (i = 0; i < npixels; i++){\n if (contour->val[i]!=0)\n count++;\n }\n \n curve = CreateCurve3D(count+1);\n \n for (i = 0; i < npixels; i++){\n if (contour->val[i]!=0){\n curve->X[j] = i % contour->ncols;\n curve->Y[j] = i / contour->ncols;\n curve->Z[j] = contour->val[i];\n j++;\n }\n } \n \n SortCurve3D(curve, 0, (curve->n - 2), INCREASING);\n \n curve2 = CreateCurve(curve->n);\n for (i=0;i<curve->n;i++){\n curve2->X[i]=curve->X[i];\n curve2->Y[i]=curve->Y[i];\n }\n \n DestroyCurve3D(&curve);\n DestroyImage(&contour);\n return (curve2);\n}\n\nCurve *FourierDescriptor(Image *img) {\n Curve *c = NULL; \n Curve *curve = NULL;\n Curve *mag = NULL;\n int i;\n int z; /* lixo */\n int tam;\n int nn = 0;\n double normfactor = 0;\n\n curve = Image2Curve(img);\n \n tam = curve->n;\n i = 1;\n nn = tam;\n while(nn != 1) {\n nn >>= 1;\n 
++i;\n }\n \n for(; i; i--)\n nn <<= 1;\n \n if (nn < 128)\n nn = 128;\n \n c = CreateCurve(nn);\n \n for (i = 0; i < tam ; i++)\n {\n c->X[i] = (double) curve->X[i];\t\n c->Y[i] = (double) curve->Y[i];\n }\n \n for (i = tam ; i < nn ; i++)\n {\n c->X[i] = 0;\n c->Y[i] = 0;\n }\n \n z = FFT(1, nn,c->X ,c->Y);\n\n mag = CreateCurve(126);\n normfactor = Cabs(c->X[0],c->Y[0]);\n \n for (i = 1; i <= 126; i++)\n {\n mag->X[i-1] = i - 1;\n mag->Y[i-1] = (double) (Cabs(c->X[i],c->Y[i])/normfactor);\n }\n \n DestroyCurve(&c); \n DestroyCurve(&curve); \n return (mag);\n}\n\nFeatureVector1D *FourierDescriptor_ExtractionAlgorithm(Image *in){\n Curve *curve = NULL;\n FeatureVector1D *fv = NULL;\n \n curve = FourierDescriptor(in);\n fv = CurveTo1DFeatureVector(curve);\n \n DestroyCurve(&curve);\n return fv;\n}\n\ndouble Fourier_DistanceAlgorithm(FeatureVector1D *fv1, FeatureVector1D *fv2){\n\treturn EuclideanDistance(fv1, fv2);\n}\n\n/* Moments Invariant */\ndouble MomentPQ(int p, int q, Image *img, int max){\n int i, x, y;\n double sum = 0.0;\n \n for (i = 0; i < img->ncols * img->nrows; i++)\n {\n if (img->val[i] != 0 )\n\t{\n\t x = 1 + i%img->ncols; /* don't do 0^0 */\n\t y = 1 + i/img->ncols; \n\t sum = sum + (pow(x, p) * pow(y,q) * ((double) img->val[i]/max));\n\t}\n }\n \n return (sum);\n}\n\nCurve *MomentInv(Image *img) {\n Curve * curve;\n int i;\n int max;\n \n /* mPQ momentes of order (P+Q) */\n double m00, m01, m10, m11, m20, m02, m12, m21, m03, m30;\n \n /* center moments */\n double xc, yc;\n \n /* normalized central moments (Eta) */\n double n00, n10, n01, n02, n20, n11, n12, n21, n03, n30; \n \n /* invariant moments (Phi) */\n double f1, f2, f3, f4, f5, f6, f7;\n \n float g; /* gamma = (p+q)/2 + 1 */\n \n /* n = Upq/U00 */\n \n max = 0;\n \n for (i = 0; i < img->ncols * img->nrows ; i++)\n {\n if (img->val[i] > max)\n\tmax = img->val[i];\n }\n \n m00 = MomentPQ(0,0, img, max);\n m01 = MomentPQ(0,1, img, max);\n m10 = MomentPQ(1,0, img, max);\n m11 = MomentPQ(1,1, 
img, max); \n m12 = MomentPQ(1,2, img, max); \n m21 = MomentPQ(2,1, img, max); \n m02 = MomentPQ(0,2, img, max);\n m20 = MomentPQ(2,0, img, max); \n m03 = MomentPQ(0,3, img, max);\n m30 = MomentPQ(3,0, img, max); \n \n xc = (double) m10 / m00;\n yc = (double) m01 / m00;\n \n n00 = 1.0; \n \n n10 = 0.0;\n \n n01 = 0.0;\n \n g = 2.0; \n \n n20 = (double) (m20 - (xc * m10)) / (pow(m00, g));\n \n n02 = (double) (m02 - (yc * m01)) / (pow(m00, g));\n \n n11 = (double) (m11 - (yc * m10)) / (pow(m00, g));\n \n g = 2.5;\n \n n30 = (double) (m30 - (3 * xc * m20) + (2 * (pow(xc,2)) * m10) ) / (pow(m00, g)); \n \n n12 = (double) (m12 - (2 * yc * m11) - (xc * m02) + (2 * pow(yc,2) * m10) ) / (pow(m00, g)) ;\n \n n21 = (double) (m21 - (2 * xc * m11) - (yc * m20) + (2 * pow(xc,2) * m01) ) / (pow(m00, g));\n \n n03 = (double) (m03 - (3 * yc * m02) + (2 * (pow(yc,2)) * m01)) / (pow(m00, g)); \n \n f1 = (double) n20 + n02;\n \n f2 = (double) (pow((n20 - n02),2)) + (4 * (pow(n11,2)));\n \n f3 = (double) (pow((n30 - (3 * n12)),2)) + (pow( ( (3 * n21) - n03),2));\n \n f4 = (double) (pow((n30 + n12),2)) + (pow((n21 + n03),2));\n \n f5 = (double) ((n30 - (3 * n12)) * (n30 + n12) * ((pow((n30 + n12),2)) - (3 * (pow((n21 + n03),2))))) + \n (((3 * n21) - n03) * (n21 + n03) * ((3 * (pow((n30 + n12),2))) - (pow((n21 + n03),2))));\n \n f6 = (double) ((n20 + n02) * ((pow((n30 + n12),2)) - (pow((n21 + n03),2)))) +\n ((4 * n11) * (n30 + n12) * (n21 + n03)); \n \n f7 = (double) (((3 * n21) - n03) * (n30 + n12) * ((pow((n30 + n12),2)) - (3 * (pow((n21 + n03),2))))) +\n (((3 * n12) - n30) * (n21 + n03) * ((3 * (pow((n30 + n12),2))) - (pow((n21 + n03),2))));\n \n curve = CreateCurve(7);\n \n curve->X[0] = 0.0;\n curve->Y[0] = f1;\n \n curve->X[1] = 1.0;\n curve->Y[1] = f2;\n \n curve->X[2] = 2.0; \n curve->Y[2] = f3;\n \n curve->X[3] = 3.0; \n curve->Y[3] = f4;\n \n curve->X[4] = 4.0; \n curve->Y[4] = f5;\n \n curve->X[5] = 5.0; \n curve->Y[5] = f6;\n \n curve->X[6] = 6.0; \n curve->Y[6] = f7;\n\n 
return (curve);\n}\n\nCurve *MomentInvariant(Image *img) { // contorno e objeto inteiro\n Image *contour = NULL;\n Curve *c1 = NULL;\n Curve *c2 = NULL;\n Curve *curve = NULL;\n int i;\n \n contour = LabelContour(img);\n c1 = MomentInv(contour);\n c2 = MomentInv(img);\n \n curve = CreateCurve(c1->n + c2->n);\n for (i=0; i<c1->n; i++){\n curve->X[i] = i;\n curve->Y[i] = c1->Y[i];\n }\n for (i=0; i<c2->n; i++){\n curve->X[i+c1->n] = i + c1->n;\n curve->Y[i+c1->n] = c2->Y[i];\n }\n \n DestroyCurve(&c1);\n DestroyCurve(&c2);\n DestroyImage(&contour);\n \n return (curve); \n}\n\nFeatureVector1D *MomentInvariant_ExtractionAlgorithm(Image *in){\n Curve *curve = NULL;\n FeatureVector1D *fv = NULL;\n \n curve = MomentInvariant(in);\n fv = CurveTo1DFeatureVector(curve);\n\n DestroyCurve(&curve);\n return fv;\n}\n\ndouble MomentInvariant_DistanceAlgorithm(FeatureVector1D *fv1, FeatureVector1D *fv2){\n\treturn 1-EuclideanDistance(fv1, fv2);\n}\n\n/* BAS */\n/*resample curve*/\nrepresentation_type *resample(representation_type *curve, int nsamples)\n{\n representation_type *rcurve;\n Image *img1, *img2;\n int x;\n \n img1= CreateImage(curve->length, 3);\n \n for (x=0; x<curve->length; x++){\n img1->val[x] = curve->mean[x];\n img1->val[x+img1->tbrow[1]] = curve->second[x];\n img1->val[x+img1->tbrow[2]] = curve->third[x];\n }\n \n img2 = Scale(img1, (((float) nsamples)/curve->length), 1);\n \n rcurve = (representation_type *) calloc(1, sizeof(representation_type));\n rcurve->length = nsamples;\n rcurve->mean = (int *) calloc(nsamples, sizeof(int));\n rcurve->second =(int *) calloc(nsamples, sizeof(int));\n rcurve->third = (int *) calloc(nsamples, sizeof(int));\n for (x=0; x<nsamples; x++){\n rcurve->mean[x] = img2->val[x];\n rcurve->second[x] = img2->val[x+img2->tbrow[1]];\n rcurve->third[x] = img2->val[x+img2->tbrow[2]];\n }\n \n DestroyImage(&img1);\n DestroyImage(&img2);\n return(rcurve);\n}\n\ndouble find_angle(deltax,deltay)\n int deltax;\n int deltay;\n{\n double angle;\n 
double pi;\n \n pi=22.0/7.0; \n \n if((deltax==0) && (deltay==0))\n angle=0.0;\n else{\n angle=atan((10.0*abs(deltax))/(10.0*abs(deltay)));\n angle=angle*180.0/pi; \n if((deltax <= 0) && (deltay >= 0)) \n angle=360.0-angle;\n else if((deltax <= 0) && (deltay <=0)) \n angle=180.0 + angle; \n else if((deltax >= 0) && (deltay <=0)) \n angle=180.0 - angle;\n }\n \n return(angle);\n}\n\nrepresentation_type *extract_feature(boun)\n boundary_type *boun;\n{\n representation_type *curve_feature; \n int i,j,x1,x2,x3,y1,y2,y3,curvelength;\n double angle_1,angle_2,curve,total,previous;\n int delta_x, delta_y,mean,second,third;\n int *bearing_array;\n \n curve_feature = (representation_type *) calloc(1, sizeof(representation_type));\n curve_feature->length=boun->length;\n curve_feature->mean= (int *) calloc(boun->length, sizeof(int));\n curve_feature->second=(int *) calloc(boun->length, sizeof(int));\n curve_feature->third=(int *) calloc(boun->length, sizeof(int));\n \n \n curvelength=(int)(boun->length/2);\n bearing_array=(int *) calloc((curvelength-1), sizeof(int));\n for(i=0; i<boun->length; i++){\n total=0.0;\n x1=boun->X[((i-1)+boun->length)%boun->length];\n y1=boun->Y[((i-1)+boun->length)%boun->length];\n x2=boun->X[i];\n y2=boun->Y[i];\n x3=boun->X[((i+1)+boun->length)%boun->length];\n y3=boun->Y[((i+1)+boun->length)%boun->length];\n \n delta_x=x1-x2;\n delta_y=-(y1-y2);\n angle_1=find_angle(delta_x,delta_y);\n delta_x=x3-x2;\n delta_y=-(y3-y2);\n angle_2=find_angle(delta_x,delta_y);\n if(angle_1 >= angle_2)\n curve=angle_1-angle_2;\n else\n curve=360.0 + angle_1-angle_2;\n \n total+=curve;\n bearing_array[0]=(int)curve;\n previous=curve;\n for(j=2; j<curvelength; j++){\n x1=boun->X[((i-j)+boun->length)%boun->length];\n y1=boun->Y[((i-j)+boun->length)%boun->length];\n x2=boun->X[i];\n y2=boun->Y[i];\n x3=boun->X[((i+j)+boun->length)%boun->length];\n y3=boun->Y[((i+j)+boun->length)%boun->length];\n delta_x=x1-x2;\n delta_y=-(y1-y2);\n 
angle_1=find_angle(delta_x,delta_y);\n delta_x=x3-x2;\n delta_y=-(y3-y2);\n angle_2=find_angle(delta_x,delta_y);\n if(angle_1 >= angle_2)\n\tcurve=angle_1-angle_2;\n else\n\tcurve=360.0 + angle_1-angle_2;\n \n if(j > 3){\n\tif(((curve-previous) > 180))\n\t curve=curve-360.0;\n\telse\n\t if(((previous-curve) > 180))\n\t curve=curve+360.0;\n }\n \n bearing_array[j-1]=(int)curve; \n total+=curve;\n previous=curve;\n }\n \n mean=(int)(total/(double)(curvelength-1));\n total=0.0;\n for(j=0;j<curvelength-1; j++)\n total+=pow((bearing_array[j]-mean),2.0);\n second=pow(total/(double)(curvelength-2),0.5);\n total=0.0;\n for(j=0;j<curvelength-1; j++)\n total+=pow(abs(bearing_array[j]-mean),3.0);\n third=pow(total/(double)(curvelength-2),(1.0/3.0)); \n \n curve_feature->mean[i]=mean;\n curve_feature->second[i]=second;\n curve_feature->third[i]=third;\n } \n free(bearing_array);\n return(curve_feature);\n}\n\nCurve *BAS(Image *in,int rsp,int nsamples){\n Curve *featurevector = NULL;\n Curve *contour = NULL;\n Curve *moment1 = NULL;\n Curve *moment2 = NULL;\n Curve *moment3 = NULL;\n \n boundary_type *bound;\n representation_type *curve, *rcurve;\n int i;\n\n contour = Image2Curve(in);\n bound = (boundary_type *) calloc(1, sizeof(boundary_type));\n bound->length = contour->n;\n bound->X = (int *)calloc(bound->length, sizeof(int));\n bound->Y = (int *)calloc(bound->length, sizeof(int));\n for (i=0; i<bound->length; i++){\n bound->X[i] = (int)contour->X[i];\n bound->Y[i] = (int)contour->Y[i];\n }\n \n curve = extract_feature(bound);\n\n if(rsp == 0)\n {\n \trcurve = resample(curve, curve->length);\n\tnsamples = curve->length;\n }\n else\n\trcurve = resample(curve, nsamples);\n\n moment1 = CreateCurve(nsamples);\n moment2 = CreateCurve(nsamples);\n moment3 = CreateCurve(nsamples);\n \n featurevector = CreateCurve(3*nsamples);\n for (i=0; i<3*nsamples; i++){\n featurevector->X[i] = (double)i;\n }\n \n for (i=0; i<nsamples; i++){\n moment1->X[i]= moment2->X[i]= 
moment3->X[i]=(double)i;\n featurevector->Y[i] = moment1->Y[i]= (double) rcurve->mean[i];\n featurevector->Y[nsamples+i] = moment2->Y[i]= (double) rcurve->second[i];\n featurevector->Y[2*nsamples+i] = moment3->Y[i]= (double) rcurve->third[i];\n }\n\n free(bound->X);\n free(bound->Y);\n free(bound);\n free(curve->mean);\n free(curve->second);\n free(curve->third);\n free(curve);\n free(rcurve->mean);\n free(rcurve->second);\n free(rcurve->third);\n free(rcurve);\n DestroyCurve(&contour);\n DestroyCurve(&moment1);\n DestroyCurve(&moment2);\n DestroyCurve(&moment3);\n\n return featurevector;\n}\n\nFeatureVector1D *BAS_ExtractionAlgorithm(Image *in,int rsp,int nsamples){\n Curve *curve = NULL;\n FeatureVector1D *fv = NULL;\n \n curve = BAS(in,rsp,nsamples);\n fv = CurveTo1DFeatureVector(curve);\n \n DestroyCurve(&curve);\n return fv;\n}\n\n\nlong min(Dist1,Dist2,Dist3)\n long Dist1;\n long Dist2;\n long Dist3;\n{\n if((Dist1<=Dist2) && (Dist1<=Dist3)) \n return(Dist1);\n else if((Dist2<=Dist1) && (Dist2<=Dist3)) \n return(Dist2);\n /*else if((Dist3<=Dist1) && (Dist3<=Dist2)) */\n return(Dist3);\n}\n\n\nlong Cum_Dist_Optimal(fv1,fv2,dim1,dim2,DISTANCE)\n representation_type fv1;\n representation_type fv2;\n int dim1;\n int dim2;\n long *DISTANCE;\n{\n long temp_dist;\n int i,j;\n int penalty;\n \n penalty=300;\n /* OPTIMAL CORRESPONDENCE OF STRINGS\n */\n DISTANCE[0*(dim2+1)+0]=0;\n for(j=1;j<=dim2;j++)\n DISTANCE[0*(dim2+1)+j]=j * penalty;\n \n for(i=1;i<=dim1;i++)\n DISTANCE[i*(dim2+1)+0]=i * penalty;\n \n for(i=1;i<=dim1;i++)\n for(j=1;j<=dim2;j++)\n if(abs(i-j) < (5)){\n\ttemp_dist=abs(fv1.mean[i-1]-fv2.mean[j-1]) +\n\t abs(fv1.second[i-1]-fv2.second[j-1]) +\n\t abs(fv1.third[i-1]-fv2.third[j-1]);\n\t\n\tDISTANCE[i*(dim2+1)+j]= temp_dist +\n\t min(DISTANCE[(i-1)*(dim2+1)+(j-1)],\n\t DISTANCE[(i-1)*(dim2+1)+(j)] + penalty,\n\t DISTANCE[(i)*(dim2+1)+(j-1)] + penalty); \n }\n return(DISTANCE[(dim1)*(dim2+1)+(dim2)]/dim2);\n}\n\nlong find_distance(representation_type 
fv1, representation_type fv2,\n\t\t int dim1, int dim2)\n{\n long distance,k,i,j,temp_dist;\n representation_type temp_list1, temp_list2;\n long *DISTANCE;\n \n\tDISTANCE=(long *) calloc((dim1+1)*(dim2+1),sizeof(long));\n \n temp_list1.mean=(int *) calloc(dim2,sizeof(int));\n temp_list1.second=(int *) calloc(dim2,sizeof(int));\n temp_list1.third=(int *) calloc(dim2,sizeof(int));\n temp_list2.mean=(int *) calloc(dim2,sizeof(int));\n temp_list2.second=(int *) calloc(dim2,sizeof(int));\n temp_list2.third=(int *) calloc(dim2,sizeof(int));\n \n temp_dist=10000000;\n \n for(i=0;i<dim1+1;i++)\n for(j=0;j<dim2+1;j++)\n DISTANCE[i*(dim2+1)+j]=10000000;\n \n for(k=0; k<dim2; k++){\n for(i=0;i<dim2;i++){\n temp_list1.mean[i]=fv2.mean[(i+k)%dim2];\n temp_list1.second[i]=fv2.second[(i+k)%dim2];\n temp_list1.third[i]=fv2.third[(i+k)%dim2]; \n } \n distance=Cum_Dist_Optimal(fv1,temp_list1,dim1,dim2,DISTANCE);\n if(temp_dist>distance) temp_dist=distance;\n }\n /***Taking the mirror of fv2 *****/\n \n for(i=0;i<dim2;i++){\n temp_list2.mean[i]=fv2.mean[(dim2-1)-i];\n temp_list2.second[i]=fv2.second[(dim2-1)-i];\n temp_list2.third[i]=fv2.third[(dim2-1)-i]; \n }\n \n for(k=0; k<dim2; k++){\n for(i=0;i<dim2;i++){\n temp_list1.mean[i]=temp_list2.mean[(i+k)%dim2];\n temp_list1.second[i]=temp_list2.second[(i+k)%dim2];\n temp_list1.third[i]=temp_list2.third[(i+k)%dim2]; \n }\n distance=Cum_Dist_Optimal(fv1,temp_list1,dim1,dim2,DISTANCE);\n if(temp_dist>distance) temp_dist=distance;\n }\n \n distance=temp_dist;\n \n free(temp_list1.mean);\n free(temp_list1.second);\n free(temp_list1.third);\n free(temp_list2.mean);\n free(temp_list2.second);\n free(temp_list2.third);\n free(DISTANCE);\n return(distance);\n}\n\ndouble BAS_DistanceAlgorithm(FeatureVector1D *c1, FeatureVector1D *c2){\n \n representation_type fv1, fv2;\n int n, m, i;\n long dist;\n \n n = c1->n/3;\n m = c2->n/3;\n fv1.mean=(int *) calloc(n,sizeof(int));\n fv1.second=(int *) calloc(n,sizeof(int));\n fv1.third=(int *) 
calloc(n,sizeof(int));\n fv2.mean=(int *) calloc(m,sizeof(int));\n fv2.second=(int *) calloc(m,sizeof(int));\n fv2.third=(int *) calloc(m,sizeof(int)); \n \n for (i=0; i<n; i++){\n fv1.mean[i] = (int)c1->X[i];\n fv1.second[i] = (int)c1->X[n + i];\n fv1.third[i] = (int)c1->X[2*n + i];\n }\n\n for (i=0; i<m; i++){\n fv2.mean[i] = (int)c2->X[i];\n fv2.second[i] = (int)c2->X[m + i];\n fv2.third[i] = (int)c2->X[2*m + i];\n }\n \n dist = find_distance(fv1,fv2, n, m);\n \n free(fv1.mean);\n free(fv1.second);\n free(fv1.third);\n free(fv2.mean);\n free(fv2.second);\n free(fv2.third);\n \n return dist;\n}\n\n/* Tensor Scale */\nFeatureVector1D *TensorScale_ExtractionAlgorithm(Image *in){\n\tTensorScale *ts = NULL;\n\tFeatureVector1D *fv = NULL;\n\tfloat *hist = NULL;\n\tint i;\n\t\n\tts = CreateBinaryTensorScale(in,24);\n\thist = TSOrientationHistogram(ts);\n\n\t/*convertendo hist para featurevector1D*/\n\tfv = CreateFeatureVector1D(HISTOGRAMSIZE+1);\n\tfor(i = 0; i < HISTOGRAMSIZE+1; i++)\n\t\tfv->X[i] = hist[i];\n\t\n\tfree(hist);\n\tDestroyTensorScale(&ts);\n\n\treturn fv;\n}\n\nTensorScale *CreateBinaryTensorScale(Image *bin, int m_pairs){\n Image *edt,*edt2,*bin2;\n Point *epsilon;\n TensorScale *ts = NULL;\n Vector *tau;\n int i,j,k,p,v,vi,n;\n float x,y,xc,yc,taux, tauy, aux;\n float a2,b2,b1,teta,u1,v1,u2,v2,aa,acc,wt,w;\n float gSxy, gSy2_x2;\n float sin_teta, cos_teta;\n float *lt_sqrt;\n int d,d1,d2,dmax;\n int ncols,nrows;\n\n ncols = bin->ncols;\n nrows = bin->nrows;\n n = ncols*nrows;\n\n bin2 = CopyImage(bin);\n p = bin2->tbrow[nrows-1];\n for(i=0; i<ncols; i++){\n bin2->val[i] = 0;\n bin2->val[p+i] = 0;\n }\n for(i=0; i<nrows; i++){\n p = bin2->tbrow[i];\n bin2->val[p] = 0;\n bin2->val[p+ncols-1] = 0;\n }\n\n //------ Euclidean distance transform ----------------\n edt2 = TSEDistTrans(bin2);\n\n dmax = MaximumValue(edt2);\n lt_sqrt = (float *)malloc((dmax+1)*sizeof(float));\n for(i=0;i<=dmax;i++)\n lt_sqrt[i] = sqrtf((float)i);\n\n edt = 
CreateImage(ncols,nrows);\n for(p=0; p<n; p++){\n d = edt2->val[p];\n d = ROUND(lt_sqrt[d]);\n edt->val[p] = d;\n }\n\n //---------------------------------------------------\n\n ts = (TensorScale *)malloc(sizeof(TensorScale));\n ts->orientation = CreateDImage(ncols,nrows);\n ts->anisotropy = CreateDImage(ncols,nrows);\n ts->thickness = CreateDImage(ncols,nrows);\n ts->m_pairs = m_pairs;\n\n tau = (Vector *)malloc(sizeof(Vector)*m_pairs);\n epsilon = (Point *)malloc(sizeof(Point)*m_pairs);\n\n teta = 0.0;\n for(i=0;i<m_pairs;i++){\n tau[i].x = cosf(teta);\n tau[i].y = sinf(teta);\n tau[i].z = 0.0;\n\n teta += ((float)PI/m_pairs); \n }\n\n for(i=1;i<nrows-1;i++){\n for(j=1;j<ncols-1;j++){\n p = bin2->tbrow[i]+j;\n if(bin2->val[p] == 0) continue;\n\n vi = edt->val[p];\n //--------- Sample lines --------------------------------------\n gSxy = gSy2_x2 = 0.0;\n xc = j+0.5; yc = i+0.5;\n for(k=0;k<m_pairs;k++){\n\ttaux = tau[k].x;\n\ttauy = tau[k].y;\n\tv = vi;\n\td1 = d2 = 0;\n\n\twhile(1){\n\t x = v*taux;\n\t y = v*tauy;\n\n\t if(d1==0){\n\t d1 = edt->val[(int)(xc+x) + edt->tbrow[(int)(yc+y)]];\n\t if(d1 == 0)\n\t break;\n\t }\n\t \n\t if(d2==0){\n\t d2 = edt->val[(int)(xc-x) + edt->tbrow[(int)(yc-y)]];\n\t if(d2 == 0)\n\t break;\n\t }\n\n\t d = (d1<d2)?d1:d2;\n\t d1 -= d;\n\t d2 -= d;\n\t v += d;\n\t}\n\n\tepsilon[k].x = x;\n\tepsilon[k].y = -y;\n\n\tgSxy -= x*y; //gSxy += x*(-y);\n\tgSy2_x2 += (y+x)*(y-x); //(y*y-x*x);\n }\n\n //-------------------- TETA -----------------------------------\n \n if(gSy2_x2==0.0){ \n\tif(gSxy>0.0) teta=PI/2.0;\n\telse teta=-PI/2.0;\n }\n else{\n\tteta = atanf((gSxy+gSxy)/gSy2_x2);\n\t\n\tif(gSxy<0.0 && teta<0.0) teta+=PI;\n\telse if(gSxy>0.0 && teta>0.0) teta-=PI;\n\telse if(teta==0.0 && gSy2_x2>0.0) teta=PI;\n }\n teta /= 2.0;\n\n //----------------- A & B ---------------------------------\n b2 = (float)edt2->val[p];\n b1 = lt_sqrt[(int)b2];\n \n acc = wt = 0.0;\n sin_teta = sinf(teta);\n cos_teta = cosf(teta);\n 
for(k=0;k<m_pairs;k++){\n\tx = epsilon[k].x;\n\ty = epsilon[k].y;\n\t\n\tv1 = y*cos_teta + x*sin_teta;\n\tu1 = x*cos_teta - y*sin_teta;\n\t\n\tv2 = v1*v1;\n\tu2 = u1*u1;\n\n\tif(v2<b2){\n\t aa = b2*u2/(b2-v2);\n\t if(aa>=b2){\n\t w = (v1<0.0)?(b1+v1):(b1-v1);\n\t acc += w*aa;\n\t wt += w;\n\t }\n\t}\n }\n\n if(wt>0.0)\n\ta2 = acc/wt;\n else\n\ta2 = b2;\n\n aux = 1.0-b2/a2;\n if(aux<0.0) aux = 0.0;\n ts->anisotropy->val[p] = sqrtf(aux);\n ts->thickness->val[p] = b1; //sqrtf(b2);\n\n if(teta<0.0) teta+=(float)PI;\n if(teta>PI) teta = PI;\n teta = PI-teta;\n\n ts->orientation->val[p] = teta;\n }\n }\n\n free(tau);\n free(epsilon);\n free(lt_sqrt);\n DestroyImage(&edt);\n DestroyImage(&edt2);\n DestroyImage(&bin2);\n\n return ts;\n}\n\nvoid DestroyTensorScale(TensorScale **ts){\n if(*ts != NULL){\n DestroyDImage(&((*ts)->orientation));\n DestroyDImage(&((*ts)->anisotropy));\n DestroyDImage(&((*ts)->thickness));\n free(*ts);\n }\n *ts = NULL;\n}\n\nfloat *TSOrientationHistogram(TensorScale *ts){\n float *hist;\n float ratio,sum;\n double an,th;\n int w,h,i,j,p,bin;\n \n ratio = (float)HISTOGRAMSIZE/PI;\n hist = (float *)malloc(sizeof(float)*(HISTOGRAMSIZE+1));\n memset(hist, 0, sizeof(float)*(HISTOGRAMSIZE+1));\n w = ts->anisotropy->ncols;\n h = ts->anisotropy->nrows;\n\n for(i=0; i<h; i++){\n for(j=0; j<w; j++){\n p = ts->anisotropy->tbrow[i]+j;\n an = ts->anisotropy->val[p];\n th = ts->thickness->val[p];\n\n if(th>0.0){\n\tbin = ROUND(ts->orientation->val[p]*ratio); \n\thist[bin] += an;\n }\n }\n }\n hist[0] += hist[HISTOGRAMSIZE];\n hist[HISTOGRAMSIZE] = 0.0;\n\n //Normaliza�ao do histograma\n sum = 0.0;\n for(i=0;i<HISTOGRAMSIZE;i++)\n sum += hist[i];\n for(i=0;i<HISTOGRAMSIZE;i++)\n hist[i] /= sum;\n\n return hist;\n}\n\nImage *TSEDistTrans(Image *bin){\n Image *Dx=NULL,*Dy=NULL,*cost;\n Queue *Q=NULL;\n int i,p,q,n;\n Pixel u,v;\n int *sq=NULL,tmp=INT_MAX,dx,dy;\n AdjRel *A;\n\n n = MAX(bin->ncols,bin->nrows);\n sq = AllocIntArray(n);\n for (i=0; i < n; i++) \n 
sq[i]=i*i;\n\n A = Circular(1.5);\n cost = CreateImage(bin->ncols, bin->nrows);\n Dx = CreateImage(bin->ncols,bin->nrows);\n Dy = CreateImage(bin->ncols,bin->nrows); \n n = bin->ncols*bin->nrows;\n Q = CreateQueue(bin->ncols+bin->nrows,n);\n\n for(p = 0; p < n; p++){\n if(bin->val[p] > 0)\n cost->val[p] = INT_MAX;\t \n else{\n cost->val[p]=0; \n InsertQueue(Q,cost->val[p]%Q->C.nbuckets,p);\n }\n }\n \n while(!EmptyQueue(Q)) {\n p = RemoveQueue(Q);\n u.x = p%bin->ncols;\n u.y = p/bin->ncols;\n for (i=1; i < A->n; i++){\n v.x = u.x + A->dx[i];\n v.y = u.y + A->dy[i];\n if(ValidPixel(bin,v.x,v.y)){\n\tq = v.x + bin->tbrow[v.y];\n\tif (cost->val[p] < cost->val[q]){\n\t dx = Dx->val[p] + abs(v.x-u.x);\n\t dy = Dy->val[p] + abs(v.y-u.y);\n\t tmp = sq[dx] + sq[dy];\n\t if (tmp < cost->val[q]){\n\t if (cost->val[q] == INT_MAX)\n\t InsertQueue(Q,tmp%Q->C.nbuckets,q);\n\t else\n\t UpdateQueue(Q,q,cost->val[q]%Q->C.nbuckets,tmp%Q->C.nbuckets);\n\t cost->val[q] = tmp;\n\t Dx->val[q] = dx;\n\t Dy->val[q] = dy;\n\t }\n\t}\n }\n }\n }\n\n free(sq);\n DestroyQueue(&Q);\n DestroyAdjRel(&A);\n DestroyImage(&Dx);\n DestroyImage(&Dy);\n \n return(cost);\n}\n\ndouble TensorScale_DistanceAlgorithm(FeatureVector1D *fv1, FeatureVector1D *fv2){\n\tdouble result;\n\tint offset;\n\n\tresult = (double) TSHistogramMatch(fv1, fv2, &offset);\n\n\treturn (1-result);\n}\n\nfloat TSHistogramMatch(FeatureVector1D *fv1, FeatureVector1D *fv2, int *offset){\n float *newhist;\n float *newh1,*newh2, *hist1, *hist2;\n int i,j,p;\n float max,correlacao;\n float score;\n float dabs,aux;\n //int maxoffset;\n FILE *file1,*file2,*file3;\n\n newhist = (float *)malloc(sizeof(float)*2*HISTOGRAMSIZE);\n newh1=newhist;\n newh2=newhist+HISTOGRAMSIZE;\n\n hist1 = (float *)malloc(sizeof(float)*HISTOGRAMSIZE+1);\n hist2 = (float *)malloc(sizeof(float)*HISTOGRAMSIZE+1);\n \n for(i = 0; i < HISTOGRAMSIZE+1; i++)\n {\n\t hist1[i] = fv1->X[i];\n\t hist2[i] = fv2->X[i];\n }\n\n\n //Ajuste no histograma\n newh1[0] = 
(2.0*hist1[0]+hist1[HISTOGRAMSIZE-1]+hist1[1])/4.0;\n newh2[0] = (2.0*hist2[0]+hist2[HISTOGRAMSIZE-1]+hist2[1])/4.0;\n for(i=1;i<HISTOGRAMSIZE-1;i++){\n newh1[i] = (2.0*hist1[i]+hist1[i-1]+hist1[i+1])/4.0;\n newh2[i] = (2.0*hist2[i]+hist2[i-1]+hist2[i+1])/4.0;\n }\n newh1[HISTOGRAMSIZE-1] = (2.0*hist1[HISTOGRAMSIZE-1]+hist1[HISTOGRAMSIZE-2]+hist1[0])/4.0;\n newh2[HISTOGRAMSIZE-1] = (2.0*hist2[HISTOGRAMSIZE-1]+hist2[HISTOGRAMSIZE-2]+hist2[0])/4.0;\n\n //Correlacao\n //maxoffset = ROUND((24.0*HISTOGRAMSIZE)/180.0); // 24 graus.\n *offset=0;\n max=0.0;\n for(i=0;i<HISTOGRAMSIZE;i++){\n correlacao=0.0;\n //if(i==maxoffset) i=HISTOGRAMSIZE-maxoffset; //angulo entre -24 e 24.\n for(p=i,j=0;j<HISTOGRAMSIZE;j++,p++){\n if(p==HISTOGRAMSIZE) p=0;\n correlacao+=(newh1[p]*newh2[j]);\n }\n\n if(correlacao>max){\n max=correlacao;\n *offset=i;\n }\n }\n\n file1 = fopen(\"histogram1d.txt\",\"w\");\n file2 = fopen(\"histogram2.txt\",\"w\");\n file3 = fopen(\"histogram1.txt\",\"w\");\n dabs = 0.0;\n for(p=*offset,j=0;j<HISTOGRAMSIZE;j++,p++){\n if(p==HISTOGRAMSIZE) p=0;\n\n fprintf(file1,\"%d %f\\n\",j,newh1[p]);\n fprintf(file2,\"%d %f\\n\",j,newh2[j]);\n fprintf(file3,\"%d %f\\n\",j,newh1[j]);\n\n aux=(newh1[p]-newh2[j]);\n aux=(aux<0.0)?(-aux):(aux);\n dabs+=aux;\n }\n score = 1.0 - dabs;\n\n free(newhist);\n free(hist1);\n free(hist2);\n fclose(file1);\n fclose(file2);\n fclose(file3);\n\n return score;\n}\n\n/* Multiscale Fractal Dimension*/\nCurve *PolynomToFractalCurve(Polynom *P, double lower, double higher, int nbins){\n int i;\n Curve *descriptor = NULL;\n\n descriptor = SamplePolynom(P, lower, higher, nbins);\n for (i=0; i < descriptor->n; i++) \n descriptor->Y[i] = 2.0 - descriptor->Y[i];\n \n return descriptor;\n}\n\nCurve *ContourMSFractal(Image *in)\n{\n Image *cont = NULL;\n Curve *descriptor = NULL;\n Polynom *P;\n double lower = 1.0;\n double higher = 5.0;\n int nbins = 100;\n int degree = 10;\n \n cont = LabelContour(in);\n P = MSFractal(cont, 256, degree, lower, 
higher, 0, 0.0, 6.0);\n descriptor = PolynomToFractalCurve(P, lower, higher, nbins);\n \n DestroyPolynom(&P);\n DestroyImage(&cont);\n return (descriptor);\n}\n\nFeatureVector1D *MS_ExtractionAlgorithm(Image *img){\n Curve *curve = NULL;\n FeatureVector1D *fv = NULL;\n\n curve = ContourMSFractal(img);\n fv = CurveTo1DFeatureVector(curve);\n \n DestroyCurve(&curve);\n return fv;\n}\n\ndouble MS_DistanceAlgorithm(FeatureVector1D *fv1, FeatureVector1D *fv2){\n\treturn EuclideanDistance(fv1, fv2);\n}\n\n/* Contour Saliences */\nvoid DescInvertXY(FeatureVector2D *desc)\n{\n double tmp;\n int i;\n for (i=0; i<desc->n; i++){\n tmp = desc->X[i];\n desc->X[i] = desc->Y[i];\n desc->Y[i] = tmp;\n }\n}\n\nFeatureVector2D *CreateFeatureVector2D(int n)\n{\n FeatureVector2D *desc=NULL;\n \n desc = (FeatureVector2D *) calloc(1,sizeof(FeatureVector2D));\n if (desc != NULL) {\n desc->X = AllocDoubleArray(n);\n desc->Y = AllocDoubleArray(n);\n desc->n = n;\n } else {\n Error(MSG1,\"CreateFeatureVector\");\n }\n return(desc);\n}\n\ndouble ContSalieDistance(FeatureVector2D *D1, FeatureVector2D *D2){\n \n int i;\n int n = MIN(D1->n, D2->n);\n double deltaX, dist, eps = 0.2;\n \n dist = 0.0;\n for (i=0; i<n; i++){\n deltaX = fabs(D1->X[i]-D2->X[i]);\n if (deltaX<=eps){\n dist = dist + sqrt(pow(D1->X[i] - D2->X[i],2)+\n\t\t\t pow(D1->Y[i] - D2->Y[i],2));\n }else\n dist = dist + fabs(D1->Y[i]) + fabs(D2->Y[i]); \n }\n \n for (i=n; i<D1->n; i++){\n dist = dist + fabs(D1->Y[i]);\n }\n \n for (i=n; i<D2->n; i++){\n dist = dist + fabs(D2->Y[i]);\n }\n \n return dist;\n}\n\nFeatureVector2D *CircularRotation(FeatureVector2D *descriptor, double delta){\n \n FeatureVector2D *c = CreateFeatureVector2D(descriptor->n);\n int i;\n \n for (i=0; i<descriptor->n; i++){\n c->Y[i] = descriptor->X[i] + delta;\n if (c->Y[i]<0.0)\n c->Y[i] = 1.0 + c->Y[i];\n if (c->Y[i]>1.0)\n c->Y[i] = 1.0 - c->Y[i];\n c->X[i] = descriptor->Y[i];\n }\n SortFeatureVector2D(c, 0, (c->n-1), INCREASING);\n DescInvertXY(c);\n 
return c;\n}\n\nvoid DestroyFeatureVector2D(FeatureVector2D **desc)\n{\n FeatureVector2D *aux;\n \n aux = *desc;\n if (aux != NULL){\n if (aux->X != NULL) free(aux->X);\n if (aux->Y != NULL) free(aux->Y);\n free(aux);\n *desc = NULL;\n }\n}\n\nvoid SortFeatureVector2D(FeatureVector2D *desc, int left, int right, char order)\n{\n int pivot;\n \n if (left < right) {\n pivot = PartFeatureVector2D(desc,left,right,order);\n SortFeatureVector2D(desc,left,pivot-1,order);\n SortFeatureVector2D(desc,pivot+1,right,order); \n }\n}\n\nint PartFeatureVector2D (FeatureVector2D *desc, int left, int right, char order)\n{\n double y;\n int i;\n double X,Y;\n \n y = desc->Y[left];\n i = left;\n \n do {\n if (order == INCREASING){\n while ((desc->Y[left] <= y)&&(left <= right)) left++;\n while (desc->Y[right] > y) right--;\n } else { /* order = DECREASING */\n while ((desc->Y[left] >= y)&&(left <= right)) left++;\n while (desc->Y[right] < y) right--;\n }\n if (left < right){\n X = desc->X[left];\n Y = desc->Y[left];\n desc->X[left] = desc->X[right];\n desc->Y[left] = desc->Y[right];\n desc->X[right] = X;\n desc->Y[right] = Y;\n left++; right--;\n }\n } while (left <= right);\n \n left = i;\n \n if (left != right){\n X = desc->X[left];\n Y = desc->Y[left];\n desc->X[left] = desc->X[right];\n desc->Y[left] = desc->Y[right];\n desc->X[right] = X;\n desc->Y[right] = Y;\n }\n \n return (right);\n}\n\nFeatureVector2D *CopyFeatureVector2D(FeatureVector2D *desc)\n{\n FeatureVector2D *descc;\n \n descc = CreateFeatureVector2D(desc->n);\n memcpy(descc->X,desc->X,desc->n*sizeof(double));\n memcpy(descc->Y,desc->Y,desc->n*sizeof(double));\n \n return(descc);\n}\n\ndouble Matching(FeatureVector2D *descriptor1, FeatureVector2D *descriptor2, int order)\n{\n FeatureVector2D *d1 = NULL;\n FeatureVector2D *d2 = NULL;\n FeatureVector2D *D1 = NULL;\n FeatureVector2D *D2 = NULL;\n double max1, max2;\n double dist, distance = INT_MAX;\n int i,j;\n \n d1 = CopyFeatureVector2D(descriptor1);\n d2 = 
CopyFeatureVector2D(descriptor2);\n SortFeatureVector2D(d1, 0, (d1->n - 1), order);\n SortFeatureVector2D(d2, 0, (d2->n - 1), order);\n \n max1 = fabs(d1->Y[0]);\n max2 = fabs(d2->Y[0]);\n \n i = 0;\n while ((i<d1->n)&&\n\t ((fabs(d1->Y[i]) - max1)<=(fabs(0.2* max1)))){\n j = 0;\n while((j<d2->n)&&\n\t ((fabs(d2->Y[j]) - max2)<=(fabs(0.2 * max2)))){\n if (d1->Y[i]*d2->Y[j]>0.0){\n\tD1 = CircularRotation(descriptor1, -d1->X[i]);\n\tD2 = CircularRotation(descriptor2, -d2->X[j]);\n\tdist = ContSalieDistance(D1, D2);\n\t//WriteInstance(i, j, descriptor1, descriptor2, d1, d2, D1, D2, -d1->X[i], -d2->X[j], dist);\n\tif (dist < distance)\n\t distance = dist;\n\tDestroyFeatureVector2D(&D1);\n\tDestroyFeatureVector2D(&D2);\n }\n j++;\n }\n i++;\n }\n \n DestroyFeatureVector2D(&d1);\n DestroyFeatureVector2D(&d2);\n return distance;\n}\n\nvoid iftFastDilation(AnnImg *aimg, AdjRel *A)\n{\n Image *Dx=NULL,*Dy=NULL;\n Queue *Q=NULL;\n Heap *H=NULL;\n int i,p,q,n,sz;\n Pixel u,v;\n int *sq=NULL,tmp=INT_MAX,dx,dy;\n bool cuisenaire;\n AdjRel *A8=Circular(1.5),*CA=NULL;\n char *color=NULL;\n\n if (aimg->seed == NULL)\n return;\n \n n = MAX(aimg->img->ncols,aimg->img->nrows);\n sq = AllocIntArray(n);\n for (i=0; i < n; i++) \n sq[i]=i*i;\n \n Dx = CreateImage(aimg->img->ncols,aimg->img->nrows);\n Dy = CreateImage(aimg->img->ncols,aimg->img->nrows);\n n = aimg->img->ncols*aimg->img->nrows;\n color = AllocCharArray(n);\n sz = FrameSize(A);\n Q = CreateQueue(2*sz*(sz+aimg->img->ncols+aimg->img->nrows),n);\n \n /* Compute IFT with 8-Adjacency */\n \n while (aimg->seed != NULL){\n p=RemoveSet(&(aimg->seed));\n InsertQueue(Q,aimg->cost->val[p]%Q->C.nbuckets,p);\n color[p]=GRAY;\n }\n \n while(!EmptyQueue(Q)) {\n p=RemoveQueue(Q);\n color[p]=BLACK;\n u.x = p%aimg->img->ncols;\n u.y = p/aimg->img->ncols;\n cuisenaire=true;\n for (i=1; i < A8->n; i++){\n v.x = u.x + A8->dx[i];\n v.y = u.y + A8->dy[i];\n if (ValidPixel(aimg->img,v.x,v.y)){\n\tq = v.x + aimg->img->tbrow[v.y];\n\tif (color[q] 
!= BLACK){\n\t dx = Dx->val[p] + abs(v.x-u.x);\n\t dy = Dy->val[p] + abs(v.y-u.y);\n\t tmp = sq[dx] + sq[dy];\n\t if (tmp < aimg->cost->val[q]){\n\t if (color[q] == WHITE){\n\t InsertQueue(Q,tmp%Q->C.nbuckets,q);\n\t color[q] = GRAY;\n\t }else\n\t\tUpdateQueue(Q,q,aimg->cost->val[q]%Q->C.nbuckets,tmp%Q->C.nbuckets);\n\t aimg->cost->val[q] = tmp;\n\t aimg->pred->val[q] = p;\n\t aimg->label->val[q] = aimg->label->val[p];\n\t Dx->val[q] = dx;\n\t Dy->val[q] = dy;\n\t cuisenaire = false;\n\t }\n\t} \n } \n }\n if (cuisenaire) \n InsertSet(&(aimg->seed),p); \n }\n \n DestroyQueue(&Q);\n free(color);\n \n /* Compute IFT with Complementary Adjacency */\n \n if (A8->n < A->n) {\n \n CA = ComplAdj(A8,A);\n H = CreateHeap(n,aimg->cost->val);\n\n while (aimg->seed != NULL){\n p=RemoveSet(&(aimg->seed));\n InsertHeap(H,p);\n }\n\n while(!HeapIsEmpty(H)) {\n RemoveHeap(H,&p);\n u.x = p%aimg->img->ncols;\n u.y = p/aimg->img->ncols;\n for (i=0; i < CA->n; i++){\n\tv.x = u.x + CA->dx[i];\n\tv.y = u.y + CA->dy[i];\n\tif (ValidPixel(aimg->img,v.x,v.y)){\n\t q = v.x + aimg->img->tbrow[v.y];\n\t if (color[q]!=BLACK){\n\t dx = Dx->val[p] + abs(v.x-u.x);\n\t dy = Dy->val[p] + abs(v.y-u.y);\n\t tmp = sq[dx] + sq[dy];\n\t if (tmp < aimg->cost->val[q]) \n\t {\n\t\taimg->cost->val[q] = tmp;\n\t\taimg->pred->val[q] = p;\n\t\taimg->label->val[q] = aimg->label->val[p];\n\t\tDx->val[q] = dx;\n\t\tDy->val[q] = dy;\n\t\tif (color[q] == WHITE){\n\t\t InsertHeap(H,q);\n\t\t}else\n\t\t GoUpHeap(H,H->pos[q]);\n\t }\n\t }\n\t} \n }\n }\n DestroyAdjRel(&CA);\n DestroyHeap(&H);\n }\n\n DestroyAdjRel(&A8);\n\n free(sq);\n DestroyImage(&Dx);\n DestroyImage(&Dy);\n}\n\nCurve3D *SkelCont(Image *bin, int maxdist, int threshold, int angle, char side) { \n Image *contour=NULL;\n Image *msskel=NULL;\n Image *skel=NULL;\n Image *bin_skel=NULL;\n AdjRel *A=NULL;\n AnnImg *aimg=NULL;\n Curve3D *contour_salie = NULL;\n Curve3D *skelsaliences = NULL;\n Curve3D *saliences = NULL;\n Pixel left, right;\n int i, j, p, 
q, n, label, imax, maxcont, max, min, imin, x, y, ne, ni, delta = 3; \n double sum;\n \n contour = LabelContPixel(bin);\n aimg = Annotate(bin,NULL,contour); \n A = Circular(1.5);\n iftFastDilation(aimg,A);\n \n msskel = MSSkel(bin, side);\n skel = Skeleton(msskel, threshold);\n bin_skel = Skeleton(msskel, threshold);\n n = bin->ncols*bin->nrows;\n contour_salie = Saliences(bin, maxdist);\n\n maxcont = MaximumValue(contour);\n\n for (p=0; p<n; p++){\n if (skel->val[p]!=0){\n q = Seed(aimg->pred, p);\n label = (aimg->label->val[q] + msskel->val[p]/2 + maxcont)%maxcont;\n skel->val[p] = label;\n if (side == INTERIOR){\n\tif (contour_salie->Z[label-1]<0.0){\n\t max = INT_MIN;\n\t imax = 0;\n\t for (j=-5; j<5; j++){\n\t if (contour_salie->Z[(label-1+j+contour_salie->n)%contour_salie->n] > max){\n\t imax = (label-1+j+contour_salie->n)%contour_salie->n;\n\t max = contour_salie->Z[imax];\n\t }\n\t }\n\t skel->val[p] = imax + 1;\n\t}\n\telse {\n\t skel->val[p] = MAX(label,1);\n\t}\n }\n else{ \n\tif (side == EXTERIOR){\n\t if (contour_salie->Z[label-1]>0.0){\n\t min = INT_MAX;\n\t imin = 0;\n\t for (j=-5; j<5; j++){\n\t if (contour_salie->Z[(label-1+j+contour_salie->n)%contour_salie->n] < min){\n\t\timin = (label-1+j+contour_salie->n)%contour_salie->n;\n\t\tmin = contour_salie->Z[imin];\n\t }\n\t }\n\t skel->val[p] = imin + 1;\n\t }\n\t else {\n\t skel->val[p] = MAX(label, 1);\n\t }\n\t}\n }\n }\n }\n \n skelsaliences = SkelSaliences(bin_skel, maxdist, angle); \n \n if (side==EXTERIOR){\n left.x = bin->ncols-1;\n left.y = bin->nrows-1;\n right.x = 0;\n right.y = 0;\n for (y=0; y < bin->nrows; y++)\n for (x=0; x < bin->ncols; x++){\n\tif (bin->val[x+bin->tbrow[y]] > 0){\n\t if (x < left.x)\n\t left.x = x;\n\t if (y < left.y)\n\t left.y = y;\n\t if (x > right.x)\n\t right.x = x;\n\t if (y > right.y)\n\t right.y = y;\t\n\t}\n }\n \n for (i=0; i<skelsaliences->n; i++){\n if ((skelsaliences->X[i]<left.x)||\n\t (skelsaliences->X[i]>right.x)||\n\t 
(skelsaliences->Y[i]<left.y)||\n\t (skelsaliences->Y[i]>right.y))\n\tskelsaliences->Z[i] = 0.0;\n }\n }\n \n SortCurve3D(skelsaliences, 0, (skelsaliences->n - 2), DECREASING);\n i=0;\n while (skelsaliences->Z[i]!=0.0)\n i++;\n saliences = CreateCurve3D(i); \n for (i=0; i< saliences->n; i++){\n if (skelsaliences->Z[i]!=0.0){\n p = (int)skelsaliences->X[i]+bin->tbrow[(int)skelsaliences->Y[i]];\n saliences->X[i] = contour_salie->X[skel->val[p]-1];\n saliences->Y[i] = contour_salie->Y[skel->val[p]-1];\n if (side==INTERIOR){\n\tsum = 0.0;\n\tfor (j=-delta; j<=delta; j++){\n\t q = ((skel->val[p]-1) + j + maxcont) % maxcont;\n\t if (contour_salie->Z[q]>0.0)\n\t sum += contour_salie->Z[q];\n\t}\n\tsaliences->Z[i] = sum;\n } \n else{\n\tsum = 0.0;\n\tfor (j=-delta; j<=delta; j++){\n\t q = ((skel->val[p]-1) + j + maxcont) % maxcont;\n\t if (contour_salie->Z[q]<0.0)\n\t sum += contour_salie->Z[q];\n\t}\n\tsaliences->Z[i] = sum;\n }\n } \n }\n\n ne = 0;\n ni = 0;\n for (i=0; i<saliences->n; i++){\n if (saliences->Z[i]>0.0) \n ni += saliences->Z[i];\n else\n if (saliences->Z[i]<0.0) \n\tne += fabs(saliences->Z[i]);\n }\n\n for (i=0; i<saliences->n; i++){\n if (saliences->Z[i]>0.0) \n saliences->Z[i]/=ni;\n else\n if (saliences->Z[i]<0.0) \n\tsaliences->Z[i]/=ne;\n }\n\n \n DestroyImage(&contour);\n DestroyImage(&msskel);\n DestroyImage(&skel);\n DestroyImage(&bin_skel);\n DestroyCurve3D(&contour_salie);\n DestroyCurve3D(&skelsaliences);\n DestroyAdjRel(&A);\n DeAnnotate(&aimg);\n return(saliences);\n\n}\n\nCurve3D *iftContourSaliences(Image *bin,int threshold_in,int threshold_out,int angle_in,int angle_out)\n{\n\n Curve3D *saliences = NULL;\n Curve3D *convex_saliences = NULL;\n Curve3D *concave_saliences = NULL;\n int i;\n int maxdist = 10;\n \n convex_saliences = SkelCont(bin,maxdist,threshold_in, angle_in, INTERIOR);\n concave_saliences = SkelCont(bin, maxdist, threshold_out, angle_out, EXTERIOR);\n saliences = CreateCurve3D(convex_saliences->n + concave_saliences->n);\n for 
(i=0; i<convex_saliences->n; i++){\n saliences->X[i] = convex_saliences->X[i];\n saliences->Y[i] = convex_saliences->Y[i];\n saliences->Z[i] = convex_saliences->Z[i];\n }\n for (i=convex_saliences->n; i<saliences->n;i++){\n saliences->X[i] = concave_saliences->X[i-convex_saliences->n];\n saliences->Y[i] = concave_saliences->Y[i-convex_saliences->n];\n saliences->Z[i] = concave_saliences->Z[i-convex_saliences->n];\n }\n \n DestroyCurve3D(&convex_saliences);\n DestroyCurve3D(&concave_saliences);\n return saliences;\n}\n\nCurve *ContourSaliences(Image *in)\n{\n Curve3D *saliences = NULL;\n Curve *descriptor = NULL;\n Image *contour = NULL;\n int i, p, max;\n \n contour = LabelContPixel(in);\n saliences = iftContourSaliences(in, 5, 20, 50, 110); \n \n descriptor = CreateCurve(saliences->n);\n max = MaximumValue(contour);\n for (i=0; i<saliences->n; i++){\n descriptor->X[i] = saliences->Z[i];\n p = (int)saliences->X[i]+contour->tbrow[(int)saliences->Y[i]];\n descriptor->Y[i] = (double)(((contour->val[p])-1))/max;\n }\n SortCurve(descriptor, 0, (descriptor->n - 1), INCREASING);\n InvertXY(descriptor); \n \n DestroyCurve3D(&saliences);\n DestroyImage(&contour);\n \n return (descriptor);\n}\n\nvoid WriteFeatureVector2D(FeatureVector2D *desc,char *filename)\n{\n FILE *fp;\n int i;\n \n fp = fopen(filename,\"w\");\n if (fp == NULL){\n fprintf(stderr,\"Cannot open %s\\n\",filename);\n exit(-1);\n }\n for (i=0; i < desc->n; i++)\n fprintf(fp,\"%f\\t%f\\n\",desc->X[i],desc->Y[i]);\n \n fclose(fp);\n}\n\nFeatureVector2D *CS_ExtractionAlgorithm(Image *img){\n Curve *curve = NULL;\n FeatureVector2D *fv = NULL;\n \n curve = ContourSaliences(img);\n fv = CurveTo2DFeatureVector(curve);\n \n DestroyCurve(&curve);\n return fv;\n}\n\ndouble CS_DistanceAlgorithm(FeatureVector2D *descriptor1, FeatureVector2D *descriptor2){\n double convex_distance = INT_MIN;\n double concave_distance = INT_MIN;\n \n convex_distance = Matching(descriptor1, descriptor2, DECREASING);\n concave_distance = 
Matching(descriptor1, descriptor2, INCREASING);\n return(MIN(convex_distance, concave_distance));\n}\n\n/* Segment Saliences */\nCurve *SS_ExtractionAlgorithm_(Image *in, int maxdist, int nsamples, int side){\n Curve *inner = NULL;\n Curve *outer = NULL;\n Curve *diff = NULL;\n Curve *ninner = NULL;\n Curve *nouter = NULL;\n Curve *ndiff = NULL;\n Curve *output = NULL;\n\n Image *mbb = NULL;\n Image *bin = NULL;\n Image *contour = NULL;\n Image *segments = NULL;\n \n AdjRel *A=NULL;\n AnnImg *aimg= NULL;\n \n int p,i,Lmax, maxcost = maxdist*maxdist;\n double nin, nout, maxin, maxout;\n\n mbb = MBB(in);\n bin = AddFrame(mbb,maxdist,0);\n\n DestroyImage(&mbb);\n \n segments = LabelContPixel(bin);\n\n /* Compute Euclidean IFT */\n contour = LabelContPixel(bin);\n \n aimg = Annotate(bin,NULL,contour); \n A = Circular(1.5);\n iftDilation(aimg,A); \n \n Lmax = MaximumValue(aimg->label);\n //printf(\"Lmax = %d\\n\", Lmax);\n inner = CreateCurve(Lmax);\n outer = CreateCurve(Lmax);\n diff = CreateCurve(Lmax);\n \n for (i=0; i<Lmax; i++){\n diff->X[i] = inner->X[i] = outer->X[i]= (double)(i*nsamples)/Lmax;\n } \n \n /* Compute influence areas */ \n nin = nout = 0.0;\n for (p=0; p < bin->ncols*bin->nrows; p++){\n if (segments->val[p] != 0){\n segments->val[p]=((((segments->val[p]*nsamples)/Lmax))/*%2*/)+1;\n }\n if ((aimg->label->val[p] > 0)&&(aimg->cost->val[p] <= maxcost)) {\n if (aimg->img->val[p] != 0){\n\tnin++;\n\tinner->Y[aimg->label->val[p]-1]++;\n } else {\n\tnout++;\n\touter->Y[aimg->label->val[p]-1]++;\n }\n }\n }\n \n maxin = INT_MIN;\n maxout = INT_MIN;\n for (i=0; i<Lmax; i++){\n if (inner->Y[i] > maxin){\n maxin = inner->Y[i];\n }\n if (outer->Y[i] > maxout){\n maxout = outer->Y[i];\n }\n }\n \n for (i=0; i<Lmax; i++){\n inner->Y[i] /= nin;\n outer->Y[i] /= nout;\n diff->Y[i] = outer->Y[i] - inner->Y[i];\n }\n \n ninner = CreateCurve(nsamples);\n nouter = CreateCurve(nsamples);\n ndiff = CreateCurve(nsamples);\n \n for (i=0; i<nsamples; i++){\n ninner->X[i] = 
nouter->X[i] = ndiff->X[i] = i;\n }\n for (i=0; i<Lmax; i++){\n ninner->Y[(int)inner->X[i]] += inner->Y[i];\n nouter->Y[(int)outer->X[i]] += outer->Y[i];\n }\n for (i=0; i<nsamples; i++){\n ndiff->Y[i] = nouter->Y[i] - ninner->Y[i];\n }\n \n \n if (side == INTERIOR){\n output = CopyCurve(ninner);\n }\n else if (side==EXTERIOR){\n output = CopyCurve(nouter);\n }\n else if (side == BOTH){\n output = CopyCurve(ndiff);\n }\n else{\n printf(\"Invalid \\\"side\\\" option <%d>\\n\", side);\n exit(-1);\n }\n \n DestroyImage(&segments);\n DestroyCurve(&ninner);\n DestroyCurve(&nouter);\n DestroyCurve(&ndiff);\n\n DestroyImage(&contour);\n DestroyAdjRel(&A);\n DeAnnotate(&aimg);\n\n DestroyImage(&bin);\n DestroyImage(&mbb);\n DestroyCurve(&inner);\n DestroyCurve(&outer);\n DestroyCurve(&diff);\n\n return output;\n}\n\nFeatureVector1D *SS_ExtractionAlgorithm(Image *img){\n Curve *curve = NULL;\n FeatureVector1D *fv = NULL;\n \n curve = SS_ExtractionAlgorithm_(img, 5, 100, BOTH);\n fv = CurveTo1DFeatureVector(curve);\n \n DestroyCurve(&curve);\n return fv;\n}\n\n/***************SS SIMILARITY ALGORITHM*********************/\ndouble SS_getMin(double Dist1, double Dist2, double Dist3){\n if((Dist1<=Dist2) && (Dist1<=Dist3)) \n return(Dist1);\n else if((Dist2<=Dist1) && (Dist2<=Dist3)) \n return(Dist2);\n //else if((Dist3<=Dist1) && (Dist3<=Dist2)) \n return(Dist3);\n}\n\ndouble SS_OCS(FeatureVector1D *fv1, FeatureVector1D *fv2){\n \n int i,j, dim1 = fv1->n, dim2 = fv2->n;\n double temp_dist;\n double penalty;\n double *DISTANCE = NULL;\n \n DISTANCE=(double *) calloc((dim1+1)*(dim2+1),sizeof(double));\n\n penalty=20.0;\n /* OPTIMAL CORRESPONDENCE OF STRINGS\n */\n DISTANCE[0*(dim2+1)+0]=0;\n for(j=1;j<=dim2;j++)\n DISTANCE[0*(dim2+1)+j]=j * penalty;\n \n for(i=1;i<=dim1;i++)\n DISTANCE[i*(dim2+1)+0]=i * penalty;\n \n for(i=1;i<=dim1;i++)\n for(j=1;j<=dim2;j++)\n if(abs(i-j) < (5)){\n\ttemp_dist=abs(fv1->X[i-1]-fv2->X[j-1]);\n\t\t\n\tDISTANCE[i*(dim2+1)+j]= temp_dist +\n\t 
SS_getMin(DISTANCE[(i-1)*(dim2+1)+(j-1)],\n\t\t DISTANCE[(i-1)*(dim2+1)+(j)] + penalty,\n\t\t DISTANCE[(i)*(dim2+1)+(j-1)] + penalty); \n }\n \n temp_dist = DISTANCE[(dim1)*(dim2+1)+(dim2)]/dim2;\n free(DISTANCE);\n \n return temp_dist;\n}\n\ndouble SS_OCSMatching(FeatureVector1D *fv_1, FeatureVector1D *fv_2){\n double distance,temp_dist;\n int i,k;\n FeatureVector1D *temp1, *temp2, *fv1, *fv2;\n \n fv1 = CreateFeatureVector1D(fv_1->n);\n fv2 = CreateFeatureVector1D(fv_2->n);\n for (i = 0; i<fv1->n; i++){\n fv1->X[i] = 100*fv_1->X[i];\n fv2->X[i] = 100*fv_2->X[i];\n }\n \n temp1 = CreateFeatureVector1D(fv2->n);\n temp2 = CreateFeatureVector1D(fv2->n);\n \n temp_dist=INT_MAX; \n for(k=0; k<fv2->n; k++){\n for(i=0;i<fv2->n;i++){\n temp2->X[i]=fv2->X[(i+k)%fv2->n];\n } \n distance= SS_OCS(fv1,temp2);\n if(temp_dist>distance) \n temp_dist=distance;\n }\n /***Taking the mirror of fv2 *****/\n for(i=0;i<fv2->n;i++){\n temp2->X[i]=fv2->X[(fv2->n-1)-i];\n }\n \n for(k=0; k<fv2->n; k++){\n for(i=0;i<fv2->n;i++){\n temp1->X[i]= temp2->X[(i+k)%fv2->n];\n }\n distance=SS_OCS(fv1,temp1);\n if(temp_dist>distance) \n temp_dist=distance;\n }\n \n distance=temp_dist;\n DestroyFeatureVector1D(&temp1);\n DestroyFeatureVector1D(&temp2);\n DestroyFeatureVector1D(&fv1);\n DestroyFeatureVector1D(&fv2);\n return(distance);\n}\n\ndouble SS_DistanceAlgorithm(FeatureVector1D *fv1d1, FeatureVector1D *fv1d2){\n \n double dist;\n\n dist = SS_OCSMatching(fv1d1, fv1d2);\n \n return dist;\n}\n\n/* Metrics to measure the similarity between feature vectors*/\ndouble EuclideanDistance(FeatureVector1D *v1, FeatureVector1D *v2) { \n int i;\n double sum = 0.0;\n double z = 0.0;\n \n for (i = 0; i < v1->n ; i++){\n z = v1->X[i] - v2->X[i]; \n sum += z*z;\n }\n sum = sqrtf(sum);\n return (sum);\n}\n\ndouble L1_Distance(FeatureVector1D *v1, FeatureVector1D *v2) { \n int i;\n double sum = 0.0;\n \n for (i = 0; i < v1->n ; i++){\n sum += fabs(v1->X[i] - v2->X[i]); \n }\n return (sum);\n}\n\ndouble 
dLog(FeatureVector1D *fv1, FeatureVector1D *fv2)\n{\n\tint i;\n\tdouble sum = 0.0;\n\tdouble q,d;\n\t\n\tfor(i = 0; i < fv1->n; i++)\n\t{\n\t\n\t\tif(fv1->X[i] == 0)\n\t\t\tq = 0;\n\t\telse\n\t\t\tif((fv1->X[i] > 0) && (fv1->X[i] <= 1))\n\t\t\t\tq = 1;\n\t\t\telse\n\t\t\t\tq = log10(fv1->X[i])/log10(2);\n\t\tif(fv2->X[i] == 0)\n\t\t\td = 0;\n\t\telse\n\t\t\tif((fv2->X[i] > 0) && (fv2->X[i] <= 1))\n\t\t\t\td = 1;\n\t\t\telse\n\t\t\t\td = log10(fv2->X[i])/log10(2);\n\t\t\n\t\tsum = sum + fabs(q-d);\n\t}\n\n\t\n\treturn sum;\n}\n" }, { "alpha_fraction": 0.6212624311447144, "alphanum_fraction": 0.6378737688064575, "avg_line_length": 17.8125, "blob_id": "a6549da711eddf4db3975ba3a5d9bef25c38eb02", "content_id": "8c9bbf96d1d4919aecba5b58b14ce72d9f3336a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "permissive", "max_line_length": 34, "num_lines": 16, "path": "/MO445-descriptors/examples/file_name.py", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "import os\nimport os.path as osp\nimport numpy as np\n\nbase = \"mpeg7_pgm/\"\nfiles_names = os.listdir(base)\nfiles_names.sort()\nf = open(\"names.txt\", \"w\")\nfor l in files_names:\n\tf.write(l[:-4]+ '\\n')\nf.close()\n\nf2 = open(\"paths.txt\", \"w\")\nfor l in files_names:\n\tf2.write(osp.join(base, l)+ '\\n')\nf2.close()\n" }, { "alpha_fraction": 0.646030068397522, "alphanum_fraction": 0.6603707671165466, "avg_line_length": 32.64706039428711, "blob_id": "927be9ad849e7f4bcb9b18aaeae8dcd9b9a1ae98", "content_id": "5e4518f89ebac6d9f78b6662404a7a457b764f22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2859, "license_type": "permissive", "max_line_length": 136, "num_lines": 85, "path": "/precision_recall.py", "repo_name": "RQuispeC/mo805-assignment7", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os.path as osp\nimport 
matplotlib.pyplot as plt\n\ndef read_file(file_path):\n f = open(file_path, \"r\")\n data = []\n for line in f:\n data.append(line)\n return np.array(data)\n\ndef load_data(names_file, features_dir):\n ms_features = []\n ss_features = []\n ids = []\n names = read_file(names_file)\n names.sort()\n last_class = \"\"\n class_id = 0\n for name in names:\n name = name[:-1] #remove new line end\n ms_file_path = osp.join(features_dir, name + \"_MS.txt\")\n ss_file_path = osp.join(features_dir, name + \"_SS.txt\")\n ms_data = read_file(ms_file_path)\n ss_data = read_file(ss_file_path)\n ms_data = ms_data.astype(np.float)\n ss_data = ss_data.astype(np.float)\n ms_features.append(ms_data)\n ss_features.append(ss_data)\n data_class = name.split(\"-\")[0]\n if data_class != last_class:\n last_class = data_class\n class_id += 1\n ids.append(class_id)\n\n ids = np.array(ids)\n names = np.array(names)\n ms_features = np.array(ms_features)\n ss_features = np.array(ss_features)\n return ids, names, ms_features, ss_features\n\ndef dist(query_features, gallery_features):\n matrix = np.zeros((len(query_features), len(gallery_features)))\n q_pow = np.sum(query_features * query_features, axis = 1)\n g_pow = np.sum(gallery_features * gallery_features, axis = 1)\n prod = 2 * np.dot(query_features, np.transpose(gallery_features))\n matrix += np.transpose(np.tile(q_pow, (len(g_pow), 1)))\n matrix += np.tile(g_pow, (len(q_pow), 1))\n matrix -= prod\n return matrix\n\ndef precision_recall(distmat, q_ids, g_ids, max_rank = 20, gt_samples = 20):\n indices = np.argsort(distmat, axis=1)\n matches = (g_ids[indices] == q_ids[:, np.newaxis]).astype(np.int32)\n cnt = matches.cumsum(axis = 1)[:, :max_rank]\n\n den_precision = np.tile(np.arange(1, max_rank + 1), (len(q_ids), 1))\n den_recall = np.full((len(q_ids), max_rank), gt_samples)\n precision = cnt / den_precision\n recall = cnt / den_recall\n\n precision = np.average(precision, axis = 0)\n recall = np.average(recall, axis = 0)\n return 
precision, recall\n\nif __name__ == '__main__':\n ids, names, ms_features, ss_features = load_data(\"MO445-descriptors/examples/names.txt\", \"MO445-descriptors/examples/mpeg7_features/\")\n\n max_rank = 1400\n dismat_MS = dist(ms_features, ms_features)\n pms, rms = precision_recall(dismat_MS, ids, ids, max_rank=max_rank)\n\n dismat_SS = dist(ss_features, ss_features)\n pss, rss = precision_recall(dismat_SS, ids, ids, max_rank=max_rank)\n\n print(\"Generating Precision x Recall curve\")\n plt.title('Methods comparison - MO805 - Assignment 7')\n plt.plot(rms, pms, label = 'Multiscale fractal dimension')\n plt.plot(rss, pss, label = 'Segment saliences')\n plt.legend(loc = 'upper right')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('precision')\n plt.xlabel('recall')\n plt.savefig(\"precision_recall.png\")" } ]
11
poonambisht07/Weatherapp
https://github.com/poonambisht07/Weatherapp
025ea11eaea3f01f616d54a00451dca9d2ceb50b
07ec37e4b8f40decec9f6db1cf333266ba55414f
69093d516c0ce0c61f0288ffadb86512baab14ea
refs/heads/main
2023-02-15T03:34:42.184696
2021-01-11T02:59:07
2021-01-11T02:59:07
328,532,851
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5224999785423279, "alphanum_fraction": 0.5400000214576721, "avg_line_length": 32.349998474121094, "blob_id": "a4bd17a3d60dd13a78cac9d281cae0f575b27f8f", "content_id": "0d67e230ad855c5f17d0e4b848ea23bd9459a5fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "no_license", "max_line_length": 117, "num_lines": 60, "path": "/myapp/views.py", "repo_name": "poonambisht07/Weatherapp", "src_encoding": "UTF-8", "text": "import requests\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom .models import *\nfrom datetime import datetime\n\n\n# Create your views here.\ndef index(request):\n url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid=56a3270c46d91192ec67911b0a20de10'\n \n context = {}\n context['date_time'] = datetime.now().strftime(\" %A, %d %b %Y | %I:%M:%S %p\")\n\n if request.method == \"POST\":\n new_city = request.POST.get('cname').title()\n \n exist_city = City.objects.filter(name=new_city).count()\n if exist_city == 0:\n r = requests.get(url.format(new_city)).json()\n if r['cod'] == 200:\n nr = City.objects.create(name=new_city) \n nr.save()\n \n else:\n context['msg'] = \"City does not exist in the World!\"\n context['col'] = \"alert-danger\"\n else:\n context['msg'] = \"City Already Exists.\"\n context['col'] = \"alert-danger\"\n \n cities = City.objects.all().order_by('-id')\n \n weather_data = []\n \n for city in cities:\n r = requests.get(url.format(city)).json()\n \n city_weather = {\n 'id' : city.id,\n 'city' : city.name,\n 'temperature' : ((r['main']['temp']-32)*5//9),\n 'description' : (r['weather'][0]['description']).title(),\n 'icon' : r['weather'][0]['icon'],\n 'sunrise' :datetime.fromtimestamp(int(r['sys']['sunrise'])).strftime('%I:%M:%S %p'),\n 'sunset' :datetime.fromtimestamp(int(r['sys']['sunset'])).strftime('%I:%M:%S %p') \n }\n \n weather_data.append(city_weather) 
\n\n context['city_weather'] = weather_data \n print(context)\n \n return render(request, 'index.html', context)\n \n\ndef del_city(request):\n id = request.GET.get('id')\n City.objects.filter(id=id).delete()\n return redirect ('/')" } ]
1
LJJ-CDD/Python_work
https://github.com/LJJ-CDD/Python_work
ed2821e6682453052b0ab587492ce6f5c6dff670
cf4a565d07bf07756be492db801c0c78736b0b46
41f1b81ce6ce1cdb11b951b68ab1eadcb196d179
refs/heads/master
2023-06-01T13:20:28.687438
2021-06-16T11:15:38
2021-06-16T11:15:38
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5863128900527954, "alphanum_fraction": 0.6032794117927551, "avg_line_length": 43.57868194580078, "blob_id": "941029810340767d39de84ccb2832b32b5256a42", "content_id": "c24d4b2fea19635f11cde6b0dc29bff616819e27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9576, "license_type": "no_license", "max_line_length": 118, "num_lines": 197, "path": "/3.RSA_work/main.py", "repo_name": "LJJ-CDD/Python_work", "src_encoding": "UTF-8", "text": "from logging import root\nimport tkinter # 窗体相关\nimport tkinter.messagebox # 实现提示框的组件\nimport tkinter.simpledialog # 对话框\nimport os # 路径\nimport sys # 操作系统交互\nfrom primes import Primes # 生成素数\nimport random\nimport rsa_test\n\ndef get_resource_path(relative_path): # 利用此函数实现资源路径的定位\n if getattr(sys, \"frozen\", False):\n base_path = sys._MEIPASS # 获取临时资源\n else:\n base_path = os.path.abspath(\".\") # 获取当前路径\n return os.path.join(base_path, relative_path) # 绝对路径\n\n#LOGO_PATH = get_resource_path(os.path.join(\"Python_work\\\\3.RSA_work\\\\resources\", \"RSA.ico\")) # 图标文件路径\n#IMAGES_PATH = get_resource_path(os.path.join(\"Python_work\\\\3.RSA_work\\\\resources\", \"Rsa_Icon.png\")) # 图片路径\nLOGO_PATH = get_resource_path(os.path.join(\"resources\", \"RSA.ico\")) # 图标文件路径\nIMAGES_PATH = get_resource_path(os.path.join(\"resources\", \"Rsa_Icon.png\")) # 图片路径\n\nimport tkinter # 导入相关的窗体模块\nclass MainForm: # 定义窗体类\n def __init__(self):\n self.root = tkinter.Tk() # 创建一个窗体\n self.root.title(\"CalculatorText\") # 设置标题\n self.root.iconbitmap(LOGO_PATH) # 设置logo的资源\n self.root.geometry(\"505x195\") # 设置初始化尺寸\n self.root[\"background\"] = \"LightSlateGray\" # 背景色\n #self.photo = tkinter.PhotoImage(file=IMAGES_PATH) # 定义图片组件\n self.root.protocol(\"WM_DELETE_WINDOW\", self.close_handle) # 窗体关闭事件\n\n self.content_P = tkinter.StringVar() # 素数P\n self.content_Q = tkinter.StringVar() # 素数Q\n self.content_expressly = tkinter.StringVar() # 明文\n self.content_ciphertext = 
tkinter.StringVar() # 密文\n self.content_PK = tkinter.StringVar() # 公钥\n self.content_SK = tkinter.StringVar() # 私钥\n self.content_N1 = tkinter.StringVar() # N1\n self.content_N2 = tkinter.StringVar() # N2\n self.content_E = tkinter.StringVar() # 公钥 E\n self.content_D = tkinter.StringVar() # 私钥 D\n\n self.font_button = (\"微软雅黑\", 10)\n self.font_entry = (\"微软雅黑\", 15)\n self.label_width = 6\n\n ''' 组件 '''\n self.input_frame_1()\n self.input_frame_2()\n self.input_frame_3()\n self.root.mainloop() # 显示窗体\n\n def input_frame_1(self): # 生成公私钥框\n input_frame = tkinter.Frame(self.root, width=50) # 创建容器\n label_P = tkinter.Label(input_frame, text=\"素数P: \", width=self.label_width)\n entry_P = tkinter.Entry(input_frame, \n width=15, font=self.font_entry, textvariable=self.content_P)\n label_Q = tkinter.Label(input_frame, text=\"素数Q: \", width=self.label_width)\n entry_Q = tkinter.Entry(input_frame, \n width=15, font=self.font_entry, textvariable=self.content_Q)\n button_random = tkinter.Button(input_frame, text=\"随机\", fg=\"black\", width=4, font=self.font_button)\n button_random.bind(\"<Button-1>\", lambda event: self.button_handle_random(event)) # 绑定按钮(随机素数)事件\n\n label_PK = tkinter.Label(input_frame, text=\"公钥:\", width=self.label_width)\n entry_pk = tkinter.Entry(input_frame, \n width=22, font=(\"微软雅黑\", 10), textvariable=self.content_PK)\n label_SK = tkinter.Label(input_frame, text=\"私钥:\", width=self.label_width)\n entry_SK = tkinter.Entry(input_frame, \n width=22, font=(\"微软雅黑\", 10), textvariable=self.content_SK)\n button_create = tkinter.Button(input_frame, text=\"生成\", fg=\"black\", width=4, font=self.font_button)\n button_create.bind(\"<Button-1>\", lambda event: self.button_handle_create(event)) # 绑定按钮(生成公私钥)事件\n\n label_P.grid(row=0, column=0)\n entry_P.grid(row=0, column=1)\n label_Q.grid(row=0, column=2)\n entry_Q.grid(row=0, column=3)\n button_random.grid(row=0, column=4)\n\n label_PK.grid(row=1, column=0)\n entry_pk.grid(row=1, column=1)\n 
label_SK.grid(row=1, column=2)\n entry_SK.grid(row=1, column=3)\n button_create.grid(row=1, column=4)\n\n input_frame.pack()\n\n def input_frame_2(self): # 加密解密框\n input_frame = tkinter.Frame(self.root, width=50) # 创建容器\n label_N1 = tkinter.Label(input_frame, text=\"N1: \", width=self.label_width)\n entry_N1 = tkinter.Entry(input_frame, \n width=15, font=self.font_entry, textvariable=self.content_N1)\n label_E = tkinter.Label(input_frame, text=\"E: \", width=self.label_width)\n entry_E = tkinter.Entry(input_frame, \n width=15, font=self.font_entry, textvariable=self.content_E)\n label_N2 = tkinter.Label(input_frame, text=\"N2: \", width=self.label_width)\n entry_N2 = tkinter.Entry(input_frame, \n width=15, font=self.font_entry, textvariable=self.content_N2)\n label_D = tkinter.Label(input_frame, text=\"D: \", width=self.label_width)\n entry_D = tkinter.Entry(input_frame, \n width=15, font=self.font_entry, textvariable=self.content_D)\n\n button_encryption = tkinter.Button(input_frame, text=\"加密\", fg=\"black\", width=4, font=self.font_button) # 加密\n button_decrypt = tkinter.Button(input_frame, text=\"解密\", fg=\"black\", width=4, font=self.font_button) # 解密\n button_encryption.bind(\"<Button-1>\", lambda event: self.button_handle_encryption(event)) # 绑定按钮(加密)事件\n button_decrypt.bind(\"<Button-1>\", lambda event: self.button_handle_decrypt(event)) # 绑定按钮(解密)事件\n\n label_N1.grid(row=0, column=0)\n entry_N1.grid(row=0, column=1)\n label_E.grid(row=0, column=2)\n entry_E.grid(row=0, column=3)\n button_encryption.grid(row=0, column=4)\n\n label_N2.grid(row=1, column=0)\n entry_N2.grid(row=1, column=1)\n label_D.grid(row=1, column=2)\n entry_D.grid(row=1, column=3)\n button_decrypt.grid(row=1, column=4)\n\n input_frame.pack()\n\n def input_frame_3(self): # 明文和密文框\n input_frame = tkinter.Frame(self.root, width=50) # 创建一个内部容器\n label_expressly = tkinter.Label(input_frame, text=\"明文: \", width=self.label_width)\n label_ciphertext = tkinter.Label(input_frame, text=\"密文: \", 
width=self.label_width)\n entry_expressly = tkinter.Entry(input_frame, \n width=38, font=self.font_entry, textvariable=self.content_expressly) # 明文\n entry_ciphertext = tkinter.Entry(input_frame, \n width=38, font=self.font_entry, textvariable=self.content_ciphertext) # 密文\n \n label_expressly.grid(row=0, column=0)\n entry_expressly.grid(row=0, column=1)\n label_ciphertext.grid(row=1, column=0)\n entry_ciphertext.grid(row=1, column=1)\n input_frame.pack()\n\n def button_handle_random(self, event): # 按钮(随机素数)事件\n a= Primes(2, 5000, 1000)\n p = random.choice(a)\n q = random.choice(a)\n self.content_P.set(\"%s\" % p)\n self.content_Q.set(\"%s\" % q)\n #print(\"素数p=%s, 素数q=%s\" % (p,q))\n\n def button_handle_create(self, event): # 按钮(生成公私钥)事件\n try:\n content_P = int(self.content_P.get())\n content_Q = int(self.content_Q.get())\n except ValueError:\n tkinter.messagebox.showinfo(title=\"消息提示(只能素数)\", message=\"请输入正确的素数 P 和 Q \")\n else:\n n = content_P * content_Q\n s = (content_P-1) * (content_Q-1)\n e = rsa_test.co_prime(s)\n #print(\"根据e和(p-1)*(q-1))互质得到: e=%s s=%s\" % (e,s))\n d = rsa_test.find_d(e,s)\n #print(\"根据(e*d) 模 ((p-1)*(q-1)) 等于 1 得到 d=\", d)\n\n self.content_PK.set(\"(N1=%s E=%s)\" % (n,e))\n self.content_SK.set(\"(N2=%s D=%s)\" % (n,d))\n self.content_N1.set(\"%s\" % n)\n self.content_N2.set(\"%s\" % n)\n self.content_E.set(\"%s\" % e)\n self.content_D.set(\"%s\" % d)\n\n def button_handle_encryption(self, event): # 按钮(加密)事件\n try:\n content_expressly = int(self.content_expressly.get())\n content_N1= int(self.content_N1.get())\n content_E = int(self.content_E.get())\n except ValueError:\n tkinter.messagebox.showinfo(title=\"消息提示(只能整数)\", message=\"请输入正确的 N1、E、明文\")\n else:\n B = pow(content_expressly,content_E) % content_N1 # 加密\n self.content_ciphertext.set(\"%s\" % B) \n \n def button_handle_decrypt(self, event): # 按钮(解密)事件\n try:\n content_ciphertext = int(self.content_ciphertext.get())\n content_N2= int(self.content_N2.get())\n content_D = 
int(self.content_D.get())\n except ValueError:\n tkinter.messagebox.showinfo(title=\"消息提示(只能整数)\", message=\"请输入正确的 N2、D、密文\")\n else:\n C = pow(content_ciphertext,content_D) % content_N2 # 解密\n self.content_expressly.set(\"%s\" % C)\n\n def close_handle(self):\n if tkinter.messagebox.askyesnocancel(\"程序关闭确认!\", \"是否确认关闭程序?\"):\n self.root.destroy() # 关闭程序\n\ndef main(): # 主函数 \n MainForm() # 实例化窗体类\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5285714268684387, "alphanum_fraction": 0.5532467365264893, "avg_line_length": 17.35714340209961, "blob_id": "8b3f9ad50677be3d1fa89cd36c10bbe7466f48a7", "content_id": "2875efbfa46b52f41df49a981731cb9558ce81ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1134, "license_type": "no_license", "max_line_length": 41, "num_lines": 42, "path": "/3.RSA_work/primes.py", "repo_name": "LJJ-CDD/Python_work", "src_encoding": "UTF-8", "text": "'''\n埃氏筛法\n(1)先把1删除(现今数学界1既不是质数也不是合数)\n(2)读取队列中当前最小的数2,然后把2的倍数删去\n(3)读取队列中当前最小的数3,然后把3的倍数删去\n(4)读取队列中当前最小的数5,然后把5的倍数删去\n(5)如上所述直到需求的范围内所有的数均删除或读取\n'''\n\n# 生成一个奇数生成器。\ndef odd_iter():\n n = 1\n while True:\n n = n + 2\n yield n\n\n# 过滤掉n的倍数的数。\ndef not_divisible(n):\n return lambda x: x % n > 0\n\n# 获取当前序列的第一个元素,然后删除后面序列该元素倍数的数,然后构造新序列。\ndef count():\n yield 2\n it = odd_iter()\n while True:\n n = next(it)\n yield n\n it = filter(not_divisible(n), it)\n\n# 获取 start 到 stop 之间的 num 个素数\ndef Primes(start , stop, num):\n primes = []\n k = 0\n for n in count():\n if n > start and n < stop:\n primes.append(n)\n k += 1\n elif n > stop:\n break\n if k==num:\n break\n return primes" }, { "alpha_fraction": 0.5874528288841248, "alphanum_fraction": 0.6075044870376587, "avg_line_length": 48.382354736328125, "blob_id": "e6847f32da8ff57452cecc37b4fadae5fd56f641", "content_id": "8af312705659d5bf673ca77c84e78993a7d62f4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5727, 
"license_type": "no_license", "max_line_length": 119, "num_lines": 102, "path": "/3.RSA_work/CalculatorText.py", "repo_name": "LJJ-CDD/Python_work", "src_encoding": "UTF-8", "text": "from logging import root\nimport tkinter # 窗体相关\nimport tkinter.messagebox # 实现提示框的组件\nimport tkinter.simpledialog # 对话框\nimport os # 路径\nimport sys # 操作系统交互\nimport re\nfrom typing import Pattern # 正则\n\nimport rsa_test\n\ndef get_resource_path(relative_path): # 利用此函数实现资源路径的定位\n if getattr(sys, \"frozen\", False):\n base_path = sys._MEIPASS # 获取临时资源\n else:\n base_path = os.path.abspath(\".\") # 获取当前路径\n return os.path.join(base_path, relative_path) # 绝对路径\n\nLOGO_PATH = get_resource_path(os.path.join(\"Python_work\\\\3.RSA_work\\\\resources\", \"RSA.ico\")) # 图标文件路径\nIMAGES_PATH = get_resource_path(os.path.join(\"Python_work\\\\3.RSA_work\\\\resources\", \"Rsa_Icon.png\")) # 图片路径\nEMAIL_PATTERN = r\"[a-zA-Z0-9]\\w+@\\w+\\.(cn|com|com.cn|gov|net)\" # 正则表达式\n#LOGO_PATH = get_resource_path(os.path.join(\"resources\", \"RSA.ico\")) # 图标文件路径\n#IMAGES_PATH = get_resource_path(os.path.join(\"resources\", \"Rsa_Icon.png\")) # 图片路径\n\nimport tkinter # 导入相关的窗体模块\nclass MainForm: # 定义窗体类\n def __init__(self):\n self.root = tkinter.Tk() # 创建一个窗体\n self.root.title(\"CalculatorText\") # 设置标题\n self.root.iconbitmap() # 设置logo的资源\n self.root.geometry(\"231x280\") # 设置初始化尺寸\n self.root[\"background\"] = \"LightSlateGray\" # 背景色\n #self.photo = tkinter.PhotoImage(file=IMAGES_PATH) # 定义图片组件\n #self.root.protocol(\"WM_DELETE_WINDOW\", self.close_handle) # 窗体关闭事件\n\n ''' 组件 '''\n self.button_frame()\n self.input_frame()\n self.root.mainloop() # 显示窗体\n\n def input_frame(self): # 定义输入组\n self.input_frame = tkinter.Frame(self.root, width=20) # 创建一个内部容器\n self.content = tkinter.StringVar() # 修改标签文字\n self.entry = tkinter.Entry(self.input_frame, \n width=14, font=(\"微软雅黑\", 20), textvariable=self.content) # 用entry 控制单行输入\n self.entry.pack(fill=\"x\", expand=1) # x轴全填充\n self.clean = False # 清除标记,每一次计算完成之后清除\n 
self.input_frame.pack(side=\"top\")\n\n def button_frame(self): # 定义按钮组\n self.button_frame = tkinter.Frame(self.root, width=50) # 创建容器\n self.button_list = [[], [], [], []] # 一共定义了4组组件\n self.button_list[0].append(tkinter.Button(self.button_frame, text=\"1\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[0].append(tkinter.Button(self.button_frame, text=\"2\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[0].append(tkinter.Button(self.button_frame, text=\"3\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[0].append(tkinter.Button(self.button_frame, text=\"+\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n \n self.button_list[1].append(tkinter.Button(self.button_frame, text=\"4\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[1].append(tkinter.Button(self.button_frame, text=\"5\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[1].append(tkinter.Button(self.button_frame, text=\"6\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[1].append(tkinter.Button(self.button_frame, text=\"-\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n \n self.button_list[2].append(tkinter.Button(self.button_frame, text=\"7\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[2].append(tkinter.Button(self.button_frame, text=\"8\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[2].append(tkinter.Button(self.button_frame, text=\"9\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[2].append(tkinter.Button(self.button_frame, text=\"*\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n\n self.button_list[3].append(tkinter.Button(self.button_frame, text=\".\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[3].append(tkinter.Button(self.button_frame, text=\"0\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n self.button_list[3].append(tkinter.Button(self.button_frame, text=\"=\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n 
self.button_list[3].append(tkinter.Button(self.button_frame, text=\"/\", fg=\"black\", width=3, font=(\"微软雅黑\", 20)))\n \n self.row = 0 # 进行行数的控制\n for group in self.button_list:\n self.column = 0 # 进行列的控制\n for button in group:\n button.bind(\"<Button-1>\", lambda event: self.button_handle(event)) # 绑定事件\n button.grid(row=self.row, column=self.column)\n self.column += 1 # 每次列+1\n self.row += 1\n \n self.button_frame.pack(side=\"bottom\")\n\n def button_handle(self, event):\n oper = event.widget[\"text\"] # 获取组件中的文本\n if self.clean: # 第二次计算\n self.content.set(\"\") # 清除标记中的数据\n self.clean = False # 留给下一次计算输入\n if oper != \"=\": # 意味着计算\n self.entry.insert(\"end\", oper)\n elif oper == \"=\": # 执行运算\n result = 0 # 保存程序的计算结果\n exp = self.entry.get()\n result = eval(exp)\n self.entry.insert(\"end\", \"=%s\" % result) \n self.clean = True\n\ndef main(): # 主函数 \n MainForm() # 实例化窗体类\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.49096208810806274, "alphanum_fraction": 0.559183657169342, "avg_line_length": 34, "blob_id": "59d7fd92afbcea116c691d2dcfef095dc2b9dad0", "content_id": "255e81daf5c242b1030a945bda8fc44887078ffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1847, "license_type": "no_license", "max_line_length": 194, "num_lines": 49, "path": "/1.spider_maoyan/spider_board.py", "repo_name": "LJJ-CDD/Python_work", "src_encoding": "UTF-8", "text": "from urllib import request,parse\nimport random\nimport time\n\nclass MaoYanSpider(object):\n ''' 爬取网页 '''\n def __init__(self, root):\n self.url=\"https://maoyan.com/board/4?offset={}\"\n self.ua_list = [\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',\n 'Mozilla/4.0 
(compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)'\n ]\n #用于记录页数\n self.page = 1\n self.root = root\n\n '''发送请求函数'''\n def get_page(self,url):\n #定义一个headers\n headers={\n 'User-Agent':random.choice(self.ua_list),\n }\n #构造请求头\n req=request.Request(url=url,headers=headers)\n res=request.urlopen(req)\n html=res.read().decode(\"utf-8\")\n # 将获取的html写到本地\n self.parse_page(html)\n\n '''定义一个函数写到本地'''\n def parse_page(self,html):\n filename = self.root + '/第{}页.html'.format(self.page)\n print(filename)\n with open(filename,'w+',encoding='utf-8') as f:\n f.write(html)\n\n '''主函数'''\n def main(self):\n '''拼接url地址'''\n for offset in range(0,60,10):\n url=self.url.format(offset)\n print(url)\n #发送请求 获取响应\n self.get_page(url)\n time.sleep(random.randint(5,8))\n print(\"第%d页爬取完成\"%self.page)\n self.page+=1\n" }, { "alpha_fraction": 0.5963488817214966, "alphanum_fraction": 0.5983772873878479, "avg_line_length": 21.454545974731445, "blob_id": "da19e47dcb432ee7d78c28d8e2fac4485f70e7de", "content_id": "7420e5a49153bffb664e80d2feba0010149ccfa6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 41, "num_lines": 22, "path": "/2.JSON_analysis/main.py", "repo_name": "LJJ-CDD/Python_work", "src_encoding": "UTF-8", "text": "import os\nimport time\nfrom analysis import JXSpider #解析\nfrom spider_board import TXSpider # 爬取网页\n\ndef judge_os():\n root = os.getcwd()\n root = root + '/招聘信息'\n root = root.replace(os.sep, '/')\n if(not os.path.exists(root)):\n os.mkdir(root) \n return root\n\nif __name__ == '__main__':\n root = judge_os()\n start=time.time()\n spider=TXSpider(root)\n spider.main()\n spider=JXSpider(root)\n spider.main()\n end=time.time()\n print('执行时间%.2f'%(end-start))" }, { "alpha_fraction": 0.4467253088951111, "alphanum_fraction": 0.45698925852775574, 
"avg_line_length": 36.21818161010742, "blob_id": "8e806fe5b3ea78c3fff3c581210d482969823755", "content_id": "1bfe2ea47882b00484c00b584b6063f437ddcfd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2174, "license_type": "no_license", "max_line_length": 110, "num_lines": 55, "path": "/1.spider_maoyan/analysis.py", "repo_name": "LJJ-CDD/Python_work", "src_encoding": "UTF-8", "text": "from urllib import request\nimport requests\nimport re\nimport csv\nimport os\n\nclass JieXiBangDanSpider(object):\n ''' 解析榜单 '''\n def __init__(self, root):\n self.root = root\n\n def parse_page(self, html):\n ''' 正则匹配 '''\n pattern = re.compile('<dd>.*?<a.*?class=\"image-link\".*?<img.*?data-src=\"(.*?)\".*?</a>.*?' # 图片\n '<div class=\"movie-item-info\">.*?class=\"name\">.*?' # 电影名称\n '<a.*?title=\"(.*?)\".*?class=\"star\">(.*?)</p>.*?' #主演\n 'class=\"releasetime\">(.*?)</p>.*?' # 上映时间\n 'class=\"score\">.*?<i.*?>(.*?)</i>.*?<i.*?>(.*?)</i></p>' # 评分\n , re.S) \n r_list = pattern.findall(html)\n self.write_page(r_list)\n\n def write_page(self, r_list):\n ''' 写入csv '''\n filename = self.root + '/maoyan.csv'\n film_list = []\n with open(filename, 'a', encoding='utf-8') as f:\n writer = csv.writer(f)\n for rt in r_list:\n one_film = [\n rt[1].strip(), rt[2].strip()[5:15],rt[3].strip()[5:],rt[4].strip()+rt[5].strip(),rt[0]\n ]\n film_list.append(one_film)\n self.download_image(rt[0], rt[1]) # 下载图片\n writer.writerows(film_list)\n \n def save_image(self, content, name):\n ''' 下载图片 '''\n file_path = '{0}/{1}.{2}'.format(self.root, name, 'jpg')#注意斜杠是/\n if not os.path.exists(file_path):#os.path.exists(file_path)判断文件是否存在,存在返回1,不存在返回0\n with open(file_path, 'wb') as f:\n f.write(content)\n f.close()\n\n def download_image(self, url, name):#保存图片链接\n r = requests.get(url)\n r.raise_for_status()\n self.save_image(r.content, name)\n\n def main(self):\n for i in range(1,6):\n filename = self.root + '/第{}页.html'.format(i)\n 
f=open(filename,'r',encoding='utf-8')\n html=f.read()\n self.parse_page(html)" }, { "alpha_fraction": 0.5727580189704895, "alphanum_fraction": 0.5922166109085083, "avg_line_length": 32.79999923706055, "blob_id": "5ca47b024bb52afcfba767b2148fd1e4bc009983", "content_id": "1d7427e815a9a6e7ac092575ac615d6950e18b48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1224, "license_type": "no_license", "max_line_length": 168, "num_lines": 35, "path": "/2.JSON_analysis/spider_board.py", "repo_name": "LJJ-CDD/Python_work", "src_encoding": "UTF-8", "text": "from fake_useragent import UserAgent\nimport requests\nimport time\nimport random\nclass TXSpider(object):\n '''爬取和写入到本地json'''\n def __init__(self,root):\n self.url='https://careers.tencent.com/tencentcareer/api/post/Query?timestamp=1619657326691&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=' \\\n '&attrId=&keyword=&pageIndex={}&pageSize=10&language=zh-cn&area=cn'\n self.headers={\n 'user-agent':UserAgent().random\n }\n self.page=1\n self.root = root\n\n def get_page(self,url):\n res=requests.get(url=url,headers=self.headers)\n html=res.content.decode('utf-8')\n return html\n\n def write_page(self,html):\n '''将json写到本地'''\n filename = self.root + '/第{}页.json'.format(self.page)\n print(filename)\n with open(filename,'w+',encoding='utf-8') as f:\n f.write(html)\n\n def main(self):\n for page in range(1,6):\n url=self.url.format(page)\n html=self.get_page(url)\n self.write_page(html)\n time.sleep(random.randint(3,5))\n print(\"第{}页爬取成功\".format(self.page))\n self.page+=1" }, { "alpha_fraction": 0.6177605986595154, "alphanum_fraction": 0.6196911334991455, "avg_line_length": 22.545454025268555, "blob_id": "7c84141679b1ce7d9b9ce8befdcdce8fe5d073b0", "content_id": "9f4e84dfccbf7f83a15af0b8a35d955d56fd0fdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "no_license", 
"max_line_length": 47, "num_lines": 22, "path": "/1.spider_maoyan/main.py", "repo_name": "LJJ-CDD/Python_work", "src_encoding": "UTF-8", "text": "import os\nimport time\nfrom analysis import JieXiBangDanSpider\nfrom spider_board import MaoYanSpider # 爬取榜单网页\n\ndef judge_os():\n root = os.getcwd()\n root = root + '/榜单'\n root = root.replace(os.sep, '/')\n if(not os.path.exists(root)):\n os.mkdir(root) \n return root\n\nif __name__ == '__main__':\n root = judge_os()\n start=time.time()\n #spider=MaoYanSpider(root)\n #spider.main()\n spider=JieXiBangDanSpider(root)\n spider.main()\n end=time.time()\n print('执行时间%.2f'%(end-start))\n" }, { "alpha_fraction": 0.5414551496505737, "alphanum_fraction": 0.5448392629623413, "avg_line_length": 32.79999923706055, "blob_id": "d3567ce844c22a0dbcf2250ea1599043b95cc33e", "content_id": "306da3ca37c259090babd41b5b0e9983570be00b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1226, "license_type": "no_license", "max_line_length": 104, "num_lines": 35, "path": "/2.JSON_analysis/analysis.py", "repo_name": "LJJ-CDD/Python_work", "src_encoding": "UTF-8", "text": "import json\nimport csv\n\nclass JXSpider(object):\n '''将本地json转换为CSV'''\n def __init__(self, root):\n self.root = root\n\n def parse_page(self,json_str):\n '''将json字符串转成python字符串'''\n python_dict=json.loads(json_str)\n python_list=python_dict['Data']['Posts']\n for car in python_list:\n career_dict={}\n career_dict['RecruitPostName']=car['RecruitPostName']\n career_dict['LocationName']=car['LocationName']\n career_dict['Responsibility']= car['Responsibility'].replace('\\n','').replace('\\r','') # 格式化\n self.write_page(career_dict)\n\n def write_page(self, career_dict):\n ''' 写入csv '''\n filename = self.root + '/TX.csv'\n film_list = []\n with open(filename, 'a', encoding='utf-8') as f: \n writer = csv.writer(f)\n for K,V in career_dict.items():\n film_list.append(V)\n writer.writerow(film_list)\n\n def main(self):\n for 
i in range(1,6):\n filename = self.root + '/第{}页.json'.format(i)\n f=open(filename,'r',encoding='utf-8')\n json_str=f.read()\n self.parse_page(json_str)" }, { "alpha_fraction": 0.5891246795654297, "alphanum_fraction": 0.6010609865188599, "avg_line_length": 35.25, "blob_id": "8627baa98307d8bfb8a22ed0e028c0b373f01a71", "content_id": "40ba2b1cc945036da9e973561946eb9d8a77a9dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4414, "license_type": "no_license", "max_line_length": 106, "num_lines": 104, "path": "/3.RSA_work/main_test.py", "repo_name": "LJJ-CDD/Python_work", "src_encoding": "UTF-8", "text": "from logging import root\nimport tkinter # 窗体相关\nimport tkinter.messagebox # 实现提示框的组件\nimport tkinter.simpledialog # 对话框\nimport os # 路径\nimport sys # 操作系统交互\nimport re # 正则\n\nimport rsa_test\n\ndef get_resource_path(relative_path): # 利用此函数实现资源路径的定位\n if getattr(sys, \"frozen\", False):\n base_path = sys._MEIPASS # 获取临时资源\n else:\n base_path = os.path.abspath(\".\") # 获取当前路径\n return os.path.join(base_path, relative_path) # 绝对路径\n\nLOGO_PATH = get_resource_path(os.path.join(\"Python_work\\\\3.RSA_work\\\\resources\", \"RSA.ico\")) # 图标文件路径\nIMAGES_PATH = get_resource_path(os.path.join(\"Python_work\\\\3.RSA_work\\\\resources\", \"Rsa_Icon.png\")) # 图片路径\nEMAIL_PATTERN = r\"[a-zA-Z0-9]\\w+@\\w+\\.(cn|com|com.cn|gov|net)\" # 正则表达式\n#LOGO_PATH = get_resource_path(os.path.join(\"resources\", \"RSA.ico\")) # 图标文件路径\n#IMAGES_PATH = get_resource_path(os.path.join(\"resources\", \"Rsa_Icon.png\")) # 图片路径\n\nimport tkinter # 导入相关的窗体模块\nclass MainForm: # 定义窗体类\n def __init__(self):\n self.root = tkinter.Tk() # 创建一个窗体\n self.root.title(\"Liu_RsaText\") # 设置标题\n self.root.iconbitmap() # 设置logo的资源\n self.root.geometry(\"500x300\") # 设置初始化尺寸\n self.root.maxsize(1000, 400) # 设置窗体的最大尺寸\n self.root[\"background\"] = \"LightSlateGray\" # 背景色\n\n self.photo = tkinter.PhotoImage(file=IMAGES_PATH) # 定义图片组件\n self.content = 
tkinter.StringVar() # 修改标签文字\n self.root.protocol(\"WM_DELETE_WINDOW\", self.close_handle) # 窗体关闭事件\n\n ''' 组件 '''\n #self.label_photo()\n #self.TK_text()\n #self.label_text()\n self.TK_button()\n\n self.root.mainloop() # 显示窗体\n\n def label_text(self):\n # 进行文本标签定义\n Lambda_text = tkinter.Label(self.root, textvariable=self.content, width=200, height=200, \n bg=\"#223011\", fg=\"#ffffff\",font=(\"微软雅黑\", 10), justify=\"right\")\n Lambda_text.pack() # 组件显示\n\n def label_photo(self):\n # 进行文本图片标签定义\n Label_photo = tkinter.Label(self.root, image=self.photo) # 图片标签\n Label_photo.pack() # 标签显示 \n\n def TK_text(self):\n # 定义文本组件窗口\n text = tkinter.Text(self.root, width=50, height=10, font=(\"微软雅黑\", 10))\n #text.image_create(\"end\", image=self.photo)\n text.insert(\"current\", \"请输入正确的Email信息....\") # 默认提示信息\n text.bind(\"<Button-1>\", lambda event :\n self.event_handle_text(event, text, \" \"))\n text.bind(\"<KeyPress>\",lambda even: \n self.event_handle_keyboard(even, text))\n text.bind(\"<KeyRelease>\",lambda even: \n self.event_handle_keyboard(even, text))\n text.pack() # 显示文本组件\n\n def TK_button(self):\n # 定义按钮组件\n button = tkinter.Button(self.root, text=\"Liu\" , image=self.photo,\n compound=\"bottom\", fg=\"black\", font=(\"微软雅黑\", 10)) # 图片文本混合按钮\n button.bind(\"<Button-1>\", lambda event :\n self.event_handle(event, \"Hello World\")) # 事件\n button.pack()\n\n def event_handle(self, event, info):\n # 消息框事件\n input_message = tkinter.simpledialog.askstring(\"提示信息\", \"请输入要显示的信息: \")\n self.label_text()\n tkinter.messagebox.showinfo(title=\"Liu消息提示\", message=input_message)\n \n def event_handle_text(self, event, text, info):\n # 文本删除事件\n text.delete(\"0.0\", \"end\")\n\n def event_handle_keyboard(self, event, text):\n # 获得文本框信息输入事件\n email = text.get(\"0.0\", \"end\")\n if re.match(EMAIL_PATTERN, email, re.I | re.X):\n self.content.set(\"Email邮箱输入正确,内容为: %s\" % email)\n else:\n self.content.set(\"Email数据输入错误!\")\n\n def close_handle(self):\n if 
tkinter.messagebox.askyesnocancel(\"程序关闭确认!\", \"是否确认关闭程序?\"):\n self.root.destroy() # 关闭程序\n \ndef main(): # 主函数 \n MainForm() # 实例化窗体类\n\nif __name__ == \"__main__\":\n main()\n" } ]
10
rikenshah/Well-thy
https://github.com/rikenshah/Well-thy
80536111c41c69eff4730db00782912bcee930c8
20462a880313f839147795bb414da4927424e126
d8a2a85398ece4ed192deee4616fc9f500df59c5
refs/heads/master
2022-12-10T09:13:26.372684
2019-10-21T20:28:27
2019-10-21T20:28:47
125,094,461
0
0
MIT
2018-03-13T18:08:29
2019-10-21T20:28:49
2022-12-07T23:50:28
Python
[ { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 16, "blob_id": "1f0d7178c1302cd49ad3f7e2a3af3e1eecc42249", "content_id": "9db719110b79436214e1918571b0f2833261b807", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "permissive", "max_line_length": 35, "num_lines": 5, "path": "/pyScripts/analysis.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "import pandas as pd\n\ndatapath = \"../datasets/merged.csv\"\n\ndf = pd.read_csv(datapath)\n" }, { "alpha_fraction": 0.6770293712615967, "alphanum_fraction": 0.7081174254417419, "avg_line_length": 30.581817626953125, "blob_id": "5fb9bc817afb3dfd1081945de96c61e17e31689f", "content_id": "30efff522f35abaaad20dcab3b84b5944f892ff4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1737, "license_type": "permissive", "max_line_length": 131, "num_lines": 55, "path": "/pyScripts/gov.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "# This is a magic script that transforms two datasets into one smartly by comparison of resonse parameters\n\nimport pandas as pd\n\ndatapath = \"../datasets/healthcaregov/data.csv\"\ndatapath2 = \"../datasets/prudentialLifeInsurance/train.csv\"\nsavepath1 = \"../datasets/merged1.csv\"\n\ndf = pd.read_csv(datapath)\n\ndf2 = df[df[\"Individual Tobacco Rate\"].notnull()]\n\nindividual_rate = df2[\"Individual Rate\"]\n# normalized_individual_rate = ((individual_rate-individual_rate.mean())/individual_rate.std())*4+4\n# normalized_individual_rate = (individual_rate-individual_rate.min())/(individual_rate.max()-individual_rate.min())\n\nindividual_tobacco_rate = df2[\"Individual Tobacco Rate\"]\n# normalized_individual_tobacco_rate = ((individual_tobacco_rate-individual_tobacco_rate.mean())/individual_tobacco_rate.std())*4+4\n\n# here multiplying by 8 does not give 
a good range\nrate_diff = (individual_tobacco_rate-individual_rate)*16/individual_rate\ndf2[\"rate_diff\"] = pd.Series(rate_diff)\n\nresponse = 1\n\nmapping_dict = {}\n\ndef init_map(df2,i):\n\tif i == 0:\n\t\tmapping_dict[0] = df2.loc[(df2.rate_diff < 0.5)].iterrows()\n\telif i == 1:\n\t\tmapping_dict[1] = df2.loc[(df2.rate_diff < 1) & (df2.rate_diff >0.5)].iterrows()\n\telif i in range(2,8):\n\t\tmapping_dict[i] = df2.loc[(df2.rate_diff < i) & (df2.rate_diff >(i-1))].iterrows()\n\telif i == 8:\n\t\tmapping_dict[8] = df2.loc[(df2.rate_diff >7)].iterrows()\n\telse:\n\t\treturn\n\nfor i in range(9):\n\tinit_map(df2,i)\n\n## Loading second dataset\ndf3 = pd.read_csv(datapath2)\ndf3 = df3[df3.Response.notnull()]\n\nfor i, row in df3.iterrows():\n\ttry:\n\t\tnew_tuple = next(mapping_dict[row.Response])\n\texcept:\n\t\tinit_map(df2,row.Response)\n\tfor key,value in new_tuple[1].iteritems():\n\t\tdf3.set_value(i,key,value)\n\ndf3.to_csv(savepath1)\n" }, { "alpha_fraction": 0.4479283392429352, "alphanum_fraction": 0.4616461396217346, "avg_line_length": 27.12598419189453, "blob_id": "717887825ed0b266b29b04471b581d799fc52da5", "content_id": "2b374e42f0f6adc1a0dacc3b3d42a1db74bced54", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3572, "license_type": "permissive", "max_line_length": 86, "num_lines": 127, "path": "/templates/health/profile.html", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "\n{% extends 'base.html' %}\n\n{% block content %}\n\t<ul>\n\t{% if health_profile %}\n\t<!-- <a class=\"btn btn-success\" href=\"{% url 'health:handle' %}\">Edit Profile</a> -->\n\t<h3>Following is your health profile </h3>\n\t<br>\n\t\t<div class=\"row\">\n\t\t\t<div class=\"col-md-6\">\n\t\t\t\t<table class=\"table table-striped table-bordered\">\n\t\t\t\t <thead>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"col\">Attribute</th>\n\t\t\t\t <th scope=\"col\">Value</th>\n\t\t\t\t </tr>\n\t\t\t\t 
</thead>\n\t\t\t\t <tbody>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Age</th>\n\t\t\t\t <td>{{health_profile.age}}</td>\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Height</th>\n\t\t\t\t <td>{{health_profile.height}}</td>\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Weight</th>\n\t\t\t\t <td>{{health_profile.weight}}</td>\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Ailments</th>\n\t\t\t\t <td>{{health_profile.ailments}}</td>\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Health Care Costs (per year)</th>\n\t\t\t\t <td>{{health_profile.healthcare_costs}}</td>\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Tobacco</th>\n\t\t\t\t <td>{{health_profile.tobacco}}</td>\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Smoke</th>\n\t\t\t\t <td>{{health_profile.smoke}}</td>\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Drink</th>\n\t\t\t\t <td>{{health_profile.drink}}</td>\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Exercise<br></th>\n\t\t\t\t {% if health_profile.exercise == 2 %}\n\t\t\t\t \t<td>>15 hours/week</td>\n\t\t\t\t {% elif health_profile.exercise == 1 %}\n\t\t\t\t \t<td>6-15 hours/week</td>\n\t\t\t\t {% elif health_profile.exercise == 0 %}\n\t\t\t\t \t<td><6 hours/week</td>\n\t\t\t\t\t {% endif %}\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Travel</th>\n\t\t\t\t {% if health_profile.travel_time == 2 %}\n\t\t\t\t \t<td>>10 hours/week</td>\n\t\t\t\t {% elif health_profile.travel_time == 1 %}\n\t\t\t\t \t<td>5-10 hours/week</td>\n\t\t\t\t {% elif health_profile.travel_time == 0 %}\n\t\t\t\t \t<td><5 hours/week</td>\n\t\t\t\t\t {% endif %}\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Sleep</th>\n\t\t\t\t {% if health_profile.sleep_time == 2 %}\n\t\t\t\t \t<td>>8 hours/day</td>\n\t\t\t\t {% elif health_profile.sleep_time == 1 %}\n\t\t\t\t \t<td>6-8 hours/day</td>\n\t\t\t\t {% elif health_profile.sleep_time == 0 %}\n\t\t\t\t \t<td><6 
hours/day</td>\n\t\t\t\t\t {% endif %}\n\t\t\t\t </tr>\n\t\t\t\t <tr>\n\t\t\t\t <th scope=\"row\">Job Type</th>\n\t\t\t\t <td>{{health_profile.job_type}}</td>\n\t\t\t\t </tr>\n\t\t\t\t </tbody>\n\t\t\t\t</table>\n\t\t\t</div>\n\t\t\t<div class=\"col-md-6\">\n\t\t\t\t<div class=\"row\">\n\t\t\t\t\t<div class=\"col-md-6\">\n\t\t\t\t\t\t<div class=\"panel panel-success\">\n\t\t\t\t\t\t\t<div class=\"panel-heading\">\n\t\t\t\t\t\t\t\t<h3>Health Score <br><small>(out of 1000)</small></h3> \n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div class=\"panel-body\">\n\t\t\t\t\t\t\t\t<h2>{{health_score}}</h2>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t\t<div class=\"col-md-6\">\n\t\t\t\t\t\t<div class=\"panel panel-success\">\n\t\t\t\t\t\t\t<div class=\"panel-heading\">\n\t\t\t\t\t\t\t\t<h3>Possible Savings <br><small>(per year)</small></h3> \n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div class=\"panel-body\">\n\t\t\t\t\t\t\t\t<h2>${{savings}}</h2>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t</div>\n\t\t\t\t<div class=\"col-md-12\">\n\t\t\t\t\t<center><h3>Recommendations</h3></center>\n\t\t\t\t\t{% for r in recommendations%}\n\t\t\t\t\t<div class=\"jumbotron\">\n\t\t\t\t\t\t<p>{{r}}</p>\n\t\t\t\t\t</div>\n\t\t\t\t\t{% endfor %}\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t</div>\n\t{% else %}\n\t\t<h4>Please create your profile</h4>\n\t\t<a class=\"btn btn-success\" href=\"{% url 'health:handle' %}\">Create Profile</a>\n\t{% endif %}\n\t</ul>\n\n{% endblock %}" }, { "alpha_fraction": 0.6979513764381409, "alphanum_fraction": 0.7246307730674744, "avg_line_length": 52.846153259277344, "blob_id": "b81fb26dd58a784a475cbe34a123369dc01e1d37", "content_id": "fd018b670b2b0745d038d1a71dea8ded1e262d78", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2099, "license_type": "permissive", "max_line_length": 176, "num_lines": 39, "path": "/health/models.py", "repo_name": "rikenshah/Well-thy", 
"src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.contrib.auth.models import User\n\nclass HealthProfile(models.Model):\n\tuser = models.ForeignKey(User, on_delete=models.CASCADE)\n\tage = models.IntegerField(validators=[MaxValueValidator(100), MinValueValidator(0)],null=True,blank=True,help_text=\"Enter age :\")\n\theight = models.FloatField(validators=[MaxValueValidator(300), MinValueValidator(20)],null=True,blank=True,help_text=\"Enter height (In Inches) :\")\n\tweight = models.FloatField(validators=[MaxValueValidator(300), MinValueValidator(20)],null=True,blank=True,help_text=\"Enter weight (In Lbs) :\")\n\tailments = models.TextField(max_length=1000, null=True, blank=True, help_text='Enter comma separated list of pre-existing ailments :')\n\ttobacco = models.BooleanField(help_text=\"Do you consume tobacco?\", default=False)\n\tsmoke = models.BooleanField(help_text=\"Do you consume smoke?\", default=False)\n\tdrink = models.BooleanField(help_text=\"Do you consume drink?\", default=False)\n\thealthcare_costs = models.FloatField(validators=[MaxValueValidator(50000), MinValueValidator(0)],null=True,blank=True,help_text=\"Enter your total healthcare costs (per year)\")\n\tPOSS_EXERCISE = (\n\t (2, '>15 hours/week'),\n\t (1, '6-15 hours/week'),\n\t (0, '<6 hours/week'),\n\t)\n\texercise = models.IntegerField(choices=POSS_EXERCISE, default=1, help_text='Select how much do you exercise?')\n\tPOSS_TRAVEL = (\n\t\t(2, '>10 hours/week'),\n\t\t(1, '5-10 hours/week'),\n\t\t(0, '<5 hours/week'),\n\t)\n\ttravel_time = models.IntegerField(choices=POSS_TRAVEL, default=1, help_text='Select how much do you travel?')\n\tPOSS_SLEEP = (\n\t\t(2, '>8 hours/day'),\n\t\t(1, '6-8 hours/day'),\n\t\t(0, '<6 hours/day'),\n\t)\n\tsleep_time = models.IntegerField(choices=POSS_SLEEP, default=POSS_SLEEP[1], help_text='Select how much do you sleep?')\n\tjob_type = 
models.TextField(max_length=1000, null=True, blank=True, help_text='Enter your job description :')\n\n\tdef __str__(self):\n\t \"\"\"\n\t String for representing the Model object (in Admin site etc.)\n\t \"\"\"\n\t return self.user.first_name" }, { "alpha_fraction": 0.5141242742538452, "alphanum_fraction": 0.5913370847702026, "avg_line_length": 28.5, "blob_id": "ce6514a87d0459a235412d3878d0cde363ae443e", "content_id": "b709e62b07e8d6697f70c6678692ecd1013e74f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 531, "license_type": "permissive", "max_line_length": 185, "num_lines": 18, "path": "/health/migrations/0005_auto_20180426_1550.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.1 on 2018-04-26 15:50\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('health', '0004_auto_20180426_1547'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='healthprofile',\n name='sleep_time',\n field=models.IntegerField(choices=[(2, '>8 hours/day'), (1, '6-8 hours/day'), (0, '<6 hours/day')], default=(1, '6-8 hours/day'), help_text='Select how much do you sleep?'),\n ),\n ]\n" }, { "alpha_fraction": 0.624950647354126, "alphanum_fraction": 0.6510066986083984, "avg_line_length": 69.36111450195312, "blob_id": "b1cc2c7dd34f305a06c397c845d24e842214e898", "content_id": "a02a74af8b92a83d86064a97d43d57f454c2dd8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2533, "license_type": "permissive", "max_line_length": 219, "num_lines": 36, "path": "/health/migrations/0001_initial.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.1 on 2018-04-10 16:31\n\nfrom django.conf import settings\nimport django.core.validators\nfrom django.db import migrations, models\nimport 
django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='HealthProfile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('age', models.IntegerField(blank=True, help_text='Enter age :', null=True, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(0)])),\n ('height', models.FloatField(blank=True, help_text='Enter height (In Centimeter) :', null=True, validators=[django.core.validators.MaxValueValidator(300), django.core.validators.MinValueValidator(20)])),\n ('weight', models.FloatField(blank=True, help_text='Enter weight (In Lbs) :', null=True, validators=[django.core.validators.MaxValueValidator(300), django.core.validators.MinValueValidator(20)])),\n ('ailments', models.TextField(blank=True, help_text='Enter comma separated list of pre-existing ailments :', max_length=1000, null=True)),\n ('tobacco', models.BooleanField(default=False, help_text='Do you consume tobacco?')),\n ('smoke', models.BooleanField(default=False, help_text='Do you consume smoke?')),\n ('drink', models.BooleanField(default=False, help_text='Do you consume drink?')),\n ('exercise', models.IntegerField(blank=True, choices=[(2, '>15 hours/week'), (1, '6-15 hours/week'), (0, '<6 hours/week')], default=1, help_text='Select how much do you exercise?')),\n ('travel_time', models.IntegerField(blank=True, choices=[(2, '>10 hours/week'), (1, '5-10 hours/week'), (0, '<5 hours/week')], default=1, help_text='Select how much do you travel?')),\n ('sleep_time', models.IntegerField(blank=True, choices=[(2, '>8 hours/day'), (1, '6-8 hours/day'), (0, '<6 hours/day')], default=1, help_text='Select how much do you sleep?')),\n ('job_type', models.TextField(blank=True, help_text='Enter your job description :', max_length=1000, 
null=True)),\n ('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5242236256599426, "alphanum_fraction": 0.5875776410102844, "avg_line_length": 34, "blob_id": "60fedcdd3a8887350b136fa1f43ec33e18781b1e", "content_id": "60a36f78b987a1e46de8b24ff30845601e6d1ec9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 805, "license_type": "permissive", "max_line_length": 174, "num_lines": 23, "path": "/health/migrations/0006_auto_20180426_1551.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.1 on 2018-04-26 15:51\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('health', '0005_auto_20180426_1550'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='healthprofile',\n name='exercise',\n field=models.IntegerField(choices=[(2, '>15 hours/week'), (1, '6-15 hours/week'), (0, '<6 hours/week')], default=1, help_text='Select how much do you exercise?'),\n ),\n migrations.AlterField(\n model_name='healthprofile',\n name='travel_time',\n field=models.IntegerField(choices=[(2, '>10 hours/week'), (1, '5-10 hours/week'), (0, '<5 hours/week')], default=1, help_text='Select how much do you travel?'),\n ),\n ]\n" }, { "alpha_fraction": 0.6122112274169922, "alphanum_fraction": 0.6732673048973083, "avg_line_length": 30.894737243652344, "blob_id": "bef3ea9a48b34a9b1c8909cc46c30e9bb53f8943", "content_id": "5be1384a0ec9e531b8e1a9320987c38a7212b7b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "permissive", "max_line_length": 224, "num_lines": 19, "path": "/health/migrations/0003_auto_20180426_1527.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.1 on 2018-04-26 
15:27\n\nimport django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('health', '0002_auto_20180414_0114'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='healthprofile',\n name='healthcare_costs',\n field=models.FloatField(blank=True, help_text='Enter your total healthcare costs (per year)', null=True, validators=[django.core.validators.MaxValueValidator(50000), django.core.validators.MinValueValidator(0)]),\n ),\n ]\n" }, { "alpha_fraction": 0.7688679099082947, "alphanum_fraction": 0.7809224128723145, "avg_line_length": 56.818180084228516, "blob_id": "b406eac8d9e1a23bd5dc83b1a49207f263c0c455", "content_id": "2737c57ad0025cf932f9aa0b37287b242f2ede69", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1908, "license_type": "permissive", "max_line_length": 646, "num_lines": 33, "path": "/README.md", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "# Well-thy\n\nDDDM Project Spring 2018\n\n### Abstract\n\nHealthcare costs are one of the primary attributes that impact virtually everyone. The goal of our system is to analyze habits and attributes of users to produce recommendations to improve their health. These recommendations reduce health care costs and show the user how much they could save by making the suggested habit changes. There are current systems that give healthy habit suggestions and financial recommendations separately, but no current application can quantitatively define the health attributes with actual dollar value of savings. Wellthy fully integrates health and finances to help users save money while they become healthier.\n\n### Technology stack\n\n1. Python : We used python in the backend to build the model and other functionalities. The recommendation engine as well as the prediction model is developed in Python.\n\n2. 
Pandas : We used pandas to load the data and perform different operations on the data. The slicing and other data manipulation methods of pandas were very useful in preprocessing steps.\n\n3. Scikit learn : We used this library to make use of in-built machine learning packages in python.\n\n4. Django : We used django to build the UI and take user input. It provided a nice MVC architecture incorporating separation of concerns as well as faster development.\n\n5. Github : We used github to do version control and collaborate amongst each other.\n\n### Steps to run\n\n- Install dependencies using `pip install -r requirements.txt`.\n- Run server using `python manage.py runserver` (Make sure you are in this folder only).\n- Go to `127.0.0.1:8000` in your browser and the application should be running.\n\n### Team\n\n- [Riken Shah](https://github.com/rikenshah/)\n- [Ankit Jain](https://github.com/ankit13jain)\n- [James Henderson](https://github.com/Prohunt)\n- [Carolyn Thompson](https://github.com/Carolyn-May)\n- [Harry Aneja](https://github.com/hardik42)\n" }, { "alpha_fraction": 0.6422111392021179, "alphanum_fraction": 0.6622202396392822, "avg_line_length": 42.15121841430664, "blob_id": "88b8606b06f375ef3e8ba70201571a39a09ae4b8", "content_id": "340f98ab0055b7b3acc6cb8afff451a381979f11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8846, "license_type": "permissive", "max_line_length": 539, "num_lines": 205, "path": "/pyScripts/get_recommendations.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "'''\n Generates recommendation for the user based on\n bmi, smoking, tobacco usage, alcohol consumption, exercise\n travel time, sleep time, job type.\n'''\nimport csv, re\n\nfeatureWeights_dict={}\n\nhealthy_bmi = 0\nmoderate_travel = 1\nexcess_travel = 2\nlow_sleep = 0\nmoderate_sleep = 1\nno_exercise = 0\nmoderate_exercise = 1\noptimal_exercise = 2\n\ndef preprocessData(data):\n # 
print(\"Recoomendation preprocess\")\n # print(data)\n data[\"exercise\"] = [data[\"exercise\"],3]\n data[\"travel_time\"] = [data[\"travel_time\"],3]\n data[\"sleep_time\"] = [data[\"sleep_time\"],3]\n data[\"drink\"] = [1 if data[\"drink\"] else 0,2] \n data[\"tobacco\"] = [1 if data[\"tobacco\"] else 0,2]\n data[\"smoke\"] = [1 if data[\"smoke\"] else 0,2]\n\n \"\"\"Bag of words to identify past ailments and dangerous job types\"\"\"\n\n ailments=set(['heart','brain','kidney','liver','breating','asthema'])\n job_type=set(['army','defence','factory'])\n #pattern = re.compile(\"\\s+|^\\s+|\\s*,*\\s*|\\s+$\")\n pattern = re.compile(\"\\s+,*\\s*\")\n current_ailments = set([ x for x in pattern.split(data[\"ailments\"]) if x])\n current_jobtype = set([ x for x in pattern.split(data[\"job_type\"]) if x])\n data[\"ailments\"] = [1 if current_ailments.intersection(ailments) else 0,2]\n data[\"job_type\"] = [1 if current_jobtype.intersection(job_type) else 0,2]\n\n \"\"\"Identifying Healthy BMI & Age range\"\"\"\n \n data[\"age\"]=[0 if data[\"age\"]>18 and data[\"age\"]<45 else 1,2]\n data[\"bmi\"]=data[\"weight\"]/(data[\"height\"]*data[\"height\"])*703\n data[\"bmi\"]=[0 if data[\"bmi\"]>18.5 and data[\"bmi\"]<24.9 else 1,2]\n # print(\"preprocess\",data)\n return data\n\ndef initialize_feature_weights():\n reader = csv.reader(open('pyScripts/feature_weights.csv'))\n for row in reader:\n value=[]\n split_row= row[0].split('\\t')\n key=split_row[0]\n value=split_row[1:]\n featureWeights_dict[key]=value\n # print(featureWeights_dict)\n return featureWeights_dict\n\n#Calculates the number of points healthscore will improve, rounded to 2 decimals\ndef getPointsForImprovement(current,levels, weight, maxHealthScore):\n return (round((float(weight) * maxHealthScore * (current/levels)) , 2))\n \ndef getBmiRec(bmi_data):\n if bmi_number != healthy_bmi:\n return (\"If you get your bmi (body-mass-index) in the healthly range \"\n \"(18.5 - 24. 
9) your healthscore will improve 100 points.\")\n return None\n\ndef getDrinkRec(drinks):\n if drinks: #drinks alcohol\n return (\"If you stop drinking alcohol your healthscore will improve by \"\n \" 50 points.\")\n return None\n\ndef getExerciseRec(exercise):\n if exercise == no_exercise:\n return (\"If start exercising 6 hours a week your healthscore will improve \"\n \" 17 points.\")\n elif exercise == moderate_exercise:\n return (\"If you exercise more than 15 hours a week \"\n \" your healthscore will improve 17 points.\")\n return None\n\ndef getSmokeRec(smokes):\n if smokes:\n return (\"If you quit smoking your healthscore will improve 50 points.\")\n return None\n\ndef getTobaccoRec(uses_tobacco):\n if uses_tobacco:\n return (\"If you stop using tobacco your healthscore will improve 50 points.\")\n return None\n\ndef getTravelRec(travel_time):\n if travel_time == excess_travel:\n return (\"If you reduce your travel_time to under 10 hours \"\n \"your healthscore will improve 17 points.\")\n elif travel_time == moderate_travel:\n return (\"If you reduce your travel_time to under 5 hours \"\n \"your healthscore will improve 17 points.\")\n return None\n\ndef getSleepRec(sleep):\n if sleep == low_sleep:\n return (\"If you increase sleep to more than 6 hours a day \"\n \"your healthscore will improve 17 points.\")\n elif sleep == moderate_sleep:\n return (\"If you increase your sleep to more than 8 hours a day \"\n \"your healthscore will improve 17 points.\")\n return None\n\n#Calculates improvement for a key\ndef getRecommendationPointsForKey(data, featureWeight, maxHealthScore):\n if featureWeight[1] == 'negative':\n return getNegativeRecommendation(data, featureWeight[0], maxHealthScore)\n return getPositiveRecommendation(data, featureWeight[0], maxHealthScore)\n#Calculates improvement for a key that has a negative relationship\ndef getNegativeRecommendation(data, weight, maxHealthScore):\n if data[0] == 0:\n return None\n return 
getPointsForImprovement(data[0],data[1], weight, maxHealthScore)\n \n#Calculates improvement for a key that has a positive relationship\ndef getPositiveRecommendation(data, weight, maxHealthScore):\n if data[0] != 2:\n print(\"New method\")\n print(type(weight))\n print(weight)\n print(data)\n # return getPointsForImprovement(data[1], weight, maxHealthScore)\n return float(weight)*((data[1]-data[0]-1)/data[1])*maxHealthScore\n return None\n\n\ndef initializeStrDic():\n return{\"smoke\" : [\"\", \"stop smoking\"], \"exercise\" : [\"increase your exercise to atleast 6 hours a week\", \"increase your exercise to more than 15 hours a week\"], \"sleep_time\": [\"increase the amount you sleep to atleast 6 hours a day\",\"increase the amount you sleep to above 8 hours a day\"], \"bmi\": [\"\", \"get your bmi in the healthy range (18.5 - 24 .9)\"],\"drink\": [\"\", \"stop drinking\"], \"tobacco\": [\"\", \"stop using tobacco\"], \"travel_time\" : [\"\", \"reduce the travel time to less than 5 hours\",\"reduce the travel to less than 10 hours\"]}\n\ndef processRecommendations(data, maxHealthScore):\n '''\n recs = {}\n recs[\"bmi\"] = getBmiRec(data[\"bmi\"], featureWeights[\"bmi\"], maxHealthScore)\n recs[\"drink\"] = getDrinkRec(data[\"drink\"][0], featureWeights[\"drink\"], maxHealthScore)\n recs[\"exercise\"] = getExerciseRec(data[\"exercise\"][0], featureWeights[\"exercise\"], maxHealthScore)\n recs[\"smoke\"] = getSmokeRec(data[\"smoke\"][0], featureWeights[\"smoke\"], maxHealthScore)\n recs[\"tobacco\"] = getTobaccoRec(data[\"tobacco\"][0], featureWeights[\"tobacco\"], maxHealthScore)\n recs[\"travel_time\"] = getTravelRec(data[\"travel_time\"][0], \n featureWeights[\"travel_time\"], maxHealthScore)\n recs[\"sleep_time\"] = getSleepRec(data[\"sleep_time\"][0], featureWeights[\"sleep_time\"], maxHealthScore )\n '''\n\n print(\"processRecommendations\")\n data = preprocessData(data)\n print(data)\n all_recommendations = []\n print(\"end\")\n featureWeights = 
initialize_feature_weights()\n points = 0.0\n resultStrings = []\n recStrDic = initializeStrDic() \n print(\"recStrDict : \",recStrDic)\n for key in [\"exercise\",\"sleep_time\",\"drink\",\"tobacco\",\"smoke\",\"bmi\",\"travel_time\"]:\n result = getRecommendationPointsForKey(data[key], featureWeights[key], maxHealthScore)\n if result is not None:\n points += result\n print(\"Result is \",result);\n resultStrings.append(recStrDic[key][data[key][0]])\n all_recommendations.append(getRecommendationString([recStrDic[key][data[key][0]]],result))\n\n all_recommendations.append(getRecommendationString(resultStrings, points))\n\n # for key in [\"exercise\",\"sleep_time\",\"drink\",\"tobacco\",\"smoke\",\"bmi\",\"travel_time\"]:\n # all_recommendations.append(getRecommendationString([recStrDic[key][data[key][0]]],getRecommendationPointsForKey(data[key], featureWeights[key], maxHealthScore)))\n all_recommendations = [all_recommendations[-1]]+all_recommendations[0:len(all_recommendations)-1]\n return all_recommendations,round(((points/maxHealthScore)*data[\"healthcare_costs\"]),2)\n \ndef getRecommendationString(resultStrings, points):\n recommendationString = \"If you \"\n resultStringsLength = len(resultStrings)\n if resultStringsLength == 0:\n return [\"You are in good shape.\"]\n for index in (range(resultStringsLength - 1)):\n recommendationString += (resultStrings[index] + \", \")\n if len(resultStrings) == 1:\n recommendationString += (resultStrings[resultStringsLength -1] + \" your healthscore will improve by \" + str(round(points, 2)) + \" points.\")\n else:\n recommendationString += (\"and \" + resultStrings[resultStringsLength -1] + \" your healthscore will improve by \" + str(round(points, 2)) + \" points.\")\n return recommendationString\n \n \nif __name__ == \"__main__\":\n data = {}\n data[\"exercise\"] = [0,3]\n data[\"travel_time\"] = [0,3]\n data[\"sleep_time\"] = [0,3]\n data[\"drink\"] = [1,2] \n data[\"tobacco\"] = [1,2]\n data[\"smoke\"] = [1,2]\n 
data[\"bmi\"] = [1,2]\n \n featureWeights = {'age': ['0.1', 'negative'], 'bmi': ['0.2', 'negative'], \n 'ailments': ['0.2', 'negative'], 'tobacco': ['0.1', 'negative'], 'smoke': ['0.1', 'negative'],\n 'drink': ['0.1', 'negative'], 'exercise': ['0.05', 'positive'], 'travel_time': ['0.05', 'negative'], \n 'sleep_time': ['0.05', 'positive'], 'job_type': ['0.05', 'negative']}\n \n print(processRecommendations(data, 1000))\n" }, { "alpha_fraction": 0.60318922996521, "alphanum_fraction": 0.6249616742134094, "avg_line_length": 38.28915786743164, "blob_id": "10b3c35511d94164246b7d3f44becfab5d21f6d4", "content_id": "4724e1eb95c3acffb1f9381e866d16613add918c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3261, "license_type": "permissive", "max_line_length": 96, "num_lines": 83, "path": "/pyScripts/get_health_score.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "import csv\nimport json\nimport re\nimport os\n\nmaxHealthScore = 1000\nfeatureWeights_dict={}\n\ndef initialize():\n reader = csv.reader(open('pyScripts/feature_weights.csv'))\n for row in reader:\n value=[]\n split_row= row[0].split('\\t')\n key=split_row[0]\n value=split_row[1:]\n featureWeights_dict[key]=value\n print(featureWeights_dict)\n\ndef getHealthScore(input_dict):\n healthScore = 0\n for key in featureWeights_dict:\n weight = float(featureWeights_dict[key][0])\n value = maxHealthScore\n if featureWeights_dict[key][1]=='negative' :\n value = value - (value*input_dict[key][0]/input_dict[key][1])\n elif featureWeights_dict[key][1]=='positive' :\n value = value - (value*(input_dict[key][1]-input_dict[key][0]-1)/input_dict[key][1])\n value = value * weight\n input_dict[key] = value #optional\n healthScore = healthScore + value\n # savings = getCostSavings(healthScore,input_dict[\"healthcare_costs\"])\n # return round(healthScore,2),round(savings,2)\n return round(healthScore,2)\n \ndef 
getCostSavings(improvementPoints,healthcare_costs):\n savings = (improvementPoints)/maxHealthScore\n return savings*healthcare_costs\n\ndef preprocessData(data):\n # print(\"in preprocess\",data)\n initialize()\n # data[\"exercise\"] = [data[\"exercise\"],3]\n # data[\"travel_time\"] = [data[\"travel_time\"],3]\n # data[\"sleep_time\"] = [data[\"sleep_time\"],3]\n # data[\"drink\"] = [1 if data[\"drink\"] else 0,2] \n # data[\"tobacco\"] = [1 if data[\"tobacco\"] else 0,2]\n # data[\"smoke\"] = [1 if data[\"smoke\"] else 0,2]\n # \"\"\"Bag of words to identify past ailments and dangerous job types\"\"\"\n\n # ailments=set(['heart','brain','kidney','liver','breating','asthema'])\n # job_type=set(['army','defence','factory'])\n # #pattern = re.compile(\"\\s+|^\\s+|\\s*,*\\s*|\\s+$\")\n # pattern = re.compile(\"\\s+,*\\s*\")\n # current_ailments = set([ x for x in pattern.split(data[\"ailments\"]) if x])\n # current_jobtype = set([ x for x in pattern.split(data[\"job_type\"]) if x])\n # data[\"ailments\"] = [1 if current_ailments.intersection(ailments) else 0,2]\n # data[\"job_type\"] = [1 if current_jobtype.intersection(job_type) else 0,2]\n\n # \"\"\"Identifying Healthy BMI & Age range\"\"\"\n # data[\"age\"]=[0 if data[\"age\"]>18 and data[\"age\"]<45 else 1,2]\n # data[\"bmi\"]=data[\"weight\"]/(data[\"height\"]*data[\"height\"])\n # data[\"bmi\"]=[0 if data[\"bmi\"]>18.5 and data[\"bmi\"]<24.9 else 1,2]\n # print(\"preprocess\",data)\n return getHealthScore(data)\n\nif __name__ == \"__main__\":\n initialize()\n input_dict = {}\n input_dict['age']=45 #1 means out of healthy age range\n input_dict['height']= 1.8 #1 means out of healthy BMI range\n input_dict['weight']=80\n input_dict['ailments']=\"heart ailments\" #0 means no ailments \n input_dict['tobacco']=False #binary\n input_dict['smoke']=True\n input_dict['drink']= True\n input_dict['exercise']=1 #more exercise\n input_dict['travel_time']=1\n input_dict['sleep_time']=1\n input_dict['healthcare_costs']=500\n 
input_dict['job_type']=\"\" #moderate risky job\n result = preprocessData(input_dict)\n print(\"Health Score is \",result[0])\n print(\"Savings is \",result[1])\n" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 40, "blob_id": "136ab92198cafd02f4281987128d5fba2fe2f235", "content_id": "431cbb42903c975f7a9a420106059b5409e942b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 125, "license_type": "permissive", "max_line_length": 76, "num_lines": 3, "path": "/pyScripts/score_prediction.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "Ins_Age,Ht,Wt,Individual_Rate,Individual Tobacco Rate,rate_diff,BMI,Response\n\n# take all the inputs like Ht, Wt, Rate, Age\n\n\n" }, { "alpha_fraction": 0.75126051902771, "alphanum_fraction": 0.7703081369400024, "avg_line_length": 43.57500076293945, "blob_id": "7164384a41508b182d8b4263f8229788a9a7c0e7", "content_id": "9483a336e7cd70043211b54c75dbd5c32117c18f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1785, "license_type": "permissive", "max_line_length": 143, "num_lines": 40, "path": "/pyScripts/base-for-health-score.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\n#from sklearn.preprocessing import LabelEncoder\n\n## Loading data and preprocessing\ndata = pd.read_csv('../datasets/merged.csv')\ntrain_data=data.iloc[:,0:10]\ndata[\"Individual_Rate\"] = (data[\"Individual_Rate\"]-data[\"Individual_Rate\"].min())/(data[\"Individual_Rate\"].max()-data[\"Individual_Rate\"].min())\n\nY=data.iloc[:,10]\n\n#test_data = 
pd.read_csv('../datasets/merged.csv')\nfeatures=['Ins_Age','BMI','Individual_Rate']\n\n##Linear Regression\n\nLinReg_model = LinearRegression()\nLinReg_model.fit(train_data[features], Y)\nlinReg_score = cross_val_score(LinReg_model, train_data[features], Y, cv=10,scoring='r2').mean()\nprint(\"R2 score using Linear Regression is \",linReg_score*100)\nprint(\"Linear reg coef\",LinReg_model.coef_)\n##Random Forest Regressor\n##\n##RanForest_model = RandomForestRegressor( random_state=0)\n##RanForest_model.fit(train_data[features], Y)\n##ranForest_score = cross_val_score(RanForest_model, train_data[features], Y, cv=10,scoring='r2').mean()\n##print(\"R2 score using Random Forest Regression is \",ranForest_score*100)\n\n##Gradient Boosting Regressor\n\nGradBoost_model = GradientBoostingRegressor(max_depth=3, random_state=0,learning_rate=0.1,n_estimators=200)\nGradBoost_model.fit(train_data[features], Y)\nGradBoost_model.apply(train_data[features])\ngradBoost_score = cross_val_score(GradBoost_model, train_data[features], Y, cv=10,scoring='r2').mean()\nprint(\"Feature Importance \",GradBoost_model.feature_importances_)\nprint(\"R2 score using Gradient Boosting Regressor is \",gradBoost_score*100)\n\n\n" }, { "alpha_fraction": 0.519336998462677, "alphanum_fraction": 0.5948434472084045, "avg_line_length": 29.16666603088379, "blob_id": "e73490b6104729fab436d75cf1cd0e497f65b67c", "content_id": "5505a105a9e78a3357d0b5c7f9f1232ea5498ee4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "permissive", "max_line_length": 197, "num_lines": 18, "path": "/health/migrations/0004_auto_20180426_1547.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.1 on 2018-04-26 15:47\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('health', '0003_auto_20180426_1527'),\n ]\n\n operations = [\n 
migrations.AlterField(\n model_name='healthprofile',\n name='sleep_time',\n field=models.IntegerField(blank=True, choices=[(2, '>8 hours/day'), (1, '6-8 hours/day'), (0, '<6 hours/day')], default=(1, '6-8 hours/day'), help_text='Select how much do you sleep?'),\n ),\n ]\n" }, { "alpha_fraction": 0.6375952363014221, "alphanum_fraction": 0.6629974842071533, "avg_line_length": 37.09677505493164, "blob_id": "0de4dc9939c5e6c2aa2ec6fe873aa9764c9707c3", "content_id": "ff6ca20bf8896d8c590926e0fcef8f17fd86a976", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1181, "license_type": "permissive", "max_line_length": 213, "num_lines": 31, "path": "/health/migrations/0002_auto_20180414_0114.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.1 on 2018-04-14 01:14\n\nfrom django.conf import settings\nimport django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('health', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='healthprofile',\n name='healthcare_costs',\n field=models.FloatField(blank=True, help_text='Enter your total healthcare costs', null=True, validators=[django.core.validators.MaxValueValidator(50000), django.core.validators.MinValueValidator(0)]),\n ),\n migrations.AlterField(\n model_name='healthprofile',\n name='height',\n field=models.FloatField(blank=True, help_text='Enter height (In Inches) :', null=True, validators=[django.core.validators.MaxValueValidator(300), django.core.validators.MinValueValidator(20)]),\n ),\n migrations.AlterField(\n model_name='healthprofile',\n name='user',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n ),\n ]\n" }, { "alpha_fraction": 0.471615731716156, "alphanum_fraction": 0.688209593296051, "avg_line_length": 15.126760482788086, 
"blob_id": "e6f7e49ec19b234f5d38fa312afc6a6b17743f19", "content_id": "9862e05fb3a8d4e601e6419d21f43e4b894f65dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1145, "license_type": "permissive", "max_line_length": 27, "num_lines": 71, "path": "/requirements.txt", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "beautifulsoup4==4.6.0\nblessings==1.6.1\nbpython==0.17\ncertifi==2017.11.5\nchardet==3.0.4\nclick==6.7\ncurtsies==0.2.11\ncycler==0.10.0\ncymem==1.31.2\ncytoolz==0.8.2\ndill==0.2.7.1\nDjango==2.0.12\ndjango-widget-tweaks==1.4.2\ndocopt==0.6.2\nfeedparser==5.2.1\nFlask==1.0\nftfy==4.4.3\ngreenlet==0.4.12\ngunicorn==19.7.1\nhtml2text==2018.1.9\nhtml5lib==1.0.1\nidna==2.6\nitsdangerous==0.24\nJinja2==2.10\njson-tricks==3.11.3\nlxml==4.1.1\nMarkdown==2.6.11\nMarkupSafe==1.0\nmatplotlib==2.1.2\nmock==2.0.0\nmsgpack-numpy==0.4.1\nmsgpack-python==0.5.1\nmurmurhash==0.28.0\nnltk==3.4.5\nnumpy==1.14.0\noauthlib==2.0.6\npandas==0.22.0\npath-and-address==2.0.1\npathlib==1.0.1\npbr==3.1.1\nplac==0.9.6\npreshed==1.0.0\npsycopg2==2.7.4\nPygments==2.2.0\npyparsing==2.2.0\npython-dateutil==2.6.1\npytz==2017.3\nregex==2017.4.5\nrequests==2.20.0\nrequests-oauthlib==0.8.0\nscikit-criteria==0.2.9\nscikit-learn==0.19.1\nscipy==1.0.0\nselenium==3.8.1\nsix==1.11.0\nsklearn==0.0\nspacy==2.0.5\nSQLAlchemy==1.2.2\ntabulate==0.8.2\ntermcolor==1.1.0\ntextblob==0.15.0\nthinc==6.10.2\ntoolz==0.9.0\ntqdm==4.19.5\ntweepy==3.5.0\nujson==1.35\nurllib3==1.25.6\nwcwidth==0.1.7\nwebencodings==0.5.1\nWerkzeug==0.15.3\nwrapt==1.10.11\n" }, { "alpha_fraction": 0.6691879034042358, "alphanum_fraction": 0.674761176109314, "avg_line_length": 40.180328369140625, "blob_id": "1f24d29b7d03eda7e51bdb3562cfbcc5af21abbb", "content_id": "781ec9a672d18587ec68acbd168876c930507e3c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2512, "license_type": "permissive", 
"max_line_length": 165, "num_lines": 61, "path": "/health/views.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import HealthProfile\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import TemplateView,ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.models import User\nfrom .models import HealthProfile\nfrom django.shortcuts import redirect\nfrom pyScripts import get_health_score\nfrom pyScripts import get_recommendations\nimport json\nfrom django.core import serializers\n\n# Create your views here.\n\ndef profile(request):\n if(request.user):\n p = HealthProfile.objects.filter(user=request.user)\n if len(p)>0 :\n json_p = json.loads(serializers.serialize('json',[p[0]]))[0][\"fields\"]\n print(\"json p\")\n print(json_p)\n # json_p[\"healthcare_costs\"] = 100\n recommendations,savings = get_recommendations.processRecommendations(json_p, 1000)\n result = get_health_score.preprocessData(json_p)\n # recommendations = \"hello\"\n return render(request,'health/profile.html',{'health_profile' : p[0], 'health_score' : result, 'savings' : savings, 'recommendations' : recommendations})\n else:\n return render(request,'health/profile.html')\n\nclass HealthProfileDisplay(ListView):\n model = HealthProfile\n\ndef handle_profile(request):\n h = HealthProfile()\n if(request.user):\n p = HealthProfile.objects.filter(user=request.user)\n if len(p)>0 :\n print(\"Profile exists : Reditecting to edit\")\n s = \"/health/update/\"+str(p[0].id)\n return redirect(s)\n else:\n print(\"Creating new profile\")\n return redirect(\"/health/input\")\n\nclass HealthProfileCreate(CreateView):\n model = HealthProfile\n fields = ['age','height','weight','ailments','healthcare_costs','tobacco','smoke','drink','exercise','travel_time', 'sleep_time','job_type']\n success_url = 
reverse_lazy('health:health_profile')\n # initial = {'sleep_time':1}\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n print(\"+++++++++++++\",form.instance.sleep_time)\n return super().form_valid(form)\n\nclass HealthProfileUpdate(UpdateView):\n model = HealthProfile\n fields = ['age','height','weight','ailments','healthcare_costs','tobacco','smoke','drink','exercise','travel_time', 'sleep_time','job_type']\n success_url = reverse_lazy('health:health_profile')\n" }, { "alpha_fraction": 0.734133780002594, "alphanum_fraction": 0.734133780002594, "avg_line_length": 47.66666793823242, "blob_id": "acc7704f904a1db4585b57eaf58a3c540006f799", "content_id": "74e8e56df3dc524e22a10ffcfb02b69fa05a5ec0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 583, "license_type": "permissive", "max_line_length": 130, "num_lines": 12, "path": "/health/urls.py", "repo_name": "rikenshah/Well-thy", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\nfrom django.contrib.auth.decorators import login_required\n\napp_name = 'lesson'\nurlpatterns = [\n # path('', views.index, name='index'),\n path('profile',login_required(views.profile),name='health_profile'),\n\tpath('handle',login_required(views.handle_profile),name='handle'),\n path('input', login_required(views.HealthProfileCreate.as_view(template_name=\"health/form.html\")), name='health_new'),\n path('update/<pk>', login_required(views.HealthProfileUpdate.as_view(template_name=\"health/form.html\")), name='health_update')\n]" } ]
18
haikaroseworx/dsworks
https://github.com/haikaroseworx/dsworks
7bc13a9b6acf0c8f9742ba96951afc558e483d42
f7c142c5222ca075a8e7080b6f3b39768fa3d02b
c6144a072f3f662f06ef36971df56511ac6243db
refs/heads/master
2021-09-05T20:54:58.765384
2018-01-31T00:09:16
2018-01-31T00:09:16
119,610,786
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5752895474433899, "alphanum_fraction": 0.5830115675926208, "avg_line_length": 24.899999618530273, "blob_id": "fb46d3d3b78564424c718431febf64bd03d97b2e", "content_id": "1d702263a552c3e3bff66312118d85958bf2ffde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 259, "license_type": "no_license", "max_line_length": 56, "num_lines": 10, "path": "/camera_project.py", "repo_name": "haikaroseworx/dsworks", "src_encoding": "UTF-8", "text": "import sys\n\n\n## GET THE INPUT CONCERN CAMERA AND IMAGES ##\ncamera_no = sys.argv[1]\nimage_shots = sys.argv[2]\n############################################\n\nprint(\"Camera to use is camera : \" +camera_no)\nprint(\"Total number of images to capture: \"+image_shots)\n" }, { "alpha_fraction": 0.6291390657424927, "alphanum_fraction": 0.6490066051483154, "avg_line_length": 20.571428298950195, "blob_id": "c2b762eebb9e62fad8201b3b29abf79975509e9d", "content_id": "8612763c6ab94786b8555d96252cb8b9f54714a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 604, "license_type": "no_license", "max_line_length": 50, "num_lines": 28, "path": "/sedoyeka project/campy2.py", "repo_name": "haikaroseworx/dsworks", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Display the resulting frame\n cv2.imshow('Camera desktop recorder',gray)\n\n ##this bullshit bellow is mine##\n camera = cap.read()\n cv2.imwrite('test.png', cap.read())\n del camera \n break\n ##end of my bullshit##\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6691176295280457, 
"avg_line_length": 20.473684310913086, "blob_id": "cc1671c459c62eb1b1019099218ca2dd833ba444", "content_id": "b08a20adc010f3d7b4af886587f0663a709c7395", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 63, "num_lines": 19, "path": "/flask_serve/camera_server.py", "repo_name": "haikaroseworx/dsworks", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask import request\nimport sys\nimport os\n\n\napp = Flask(__name__)\n\[email protected](\"/signal\")\ndef signal():\n camera = request.args.get(\"camera\")\n shots = request.args.get(\"shots\")\n\n sysparams = \"python3 camera_util.py \"+camera+\" \"+str(shots)\n os.system(sysparams)\n\n return \"proximity sensor signal received \"+sysparams\n\nif __name__ ==\"__main__\": app.run(debug = False)\n" }, { "alpha_fraction": 0.6491228342056274, "alphanum_fraction": 0.7017543911933899, "avg_line_length": 26, "blob_id": "efea71dc6313be753e76f8b243d2cc87a98ca5fb", "content_id": "081474fbfd9de924546e13f4b727152d353e3290", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57, "license_type": "no_license", "max_line_length": 42, "num_lines": 2, "path": "/runner.py", "repo_name": "haikaroseworx/dsworks", "src_encoding": "UTF-8", "text": "import os \nos.system('python camera_project.py 1 12')\n\n\n\n" }, { "alpha_fraction": 0.591549277305603, "alphanum_fraction": 0.6291079521179199, "avg_line_length": 20.299999237060547, "blob_id": "8fd5a31c69f7765c3f85c0e84147a9078c6993a3", "content_id": "ba35303d7eb70fa78662fbcf633b66883caaf91f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/sedoyeka project/enter_capture.py", "repo_name": "haikaroseworx/dsworks", "src_encoding": "UTF-8", "text": "import cv2\n\ncamera = 
cv2.VideoCapture(0)\ni = 0\nwhile i < 10:\n #raw_input('Press Enter to capture')\n return_value, image = camera.read()\n cv2.imwrite('opencv'+str(i)+'.png', image)\n i = i+1\ndel(camera)\n" } ]
5
yorkurt/pygame_controllers
https://github.com/yorkurt/pygame_controllers
8ad3e3a2fedd727377fa2ae9a3c90897da45a5dd
9cf9a67e15cf319efd504db20e8adeb7b41cb625
2563b823df882f8b9dd65c8d8c2814d6fb7ffb54
refs/heads/master
2021-09-04T02:48:51.917479
2018-01-14T22:45:09
2018-01-14T22:45:09
110,867,165
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 35, "blob_id": "6425d8da6c05cb5631bd01fcdc8153d5965addae", "content_id": "c046d11836767f0c163aabc6bff23c29be12ea43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 144, "license_type": "no_license", "max_line_length": 80, "num_lines": 4, "path": "/README.md", "repo_name": "yorkurt/pygame_controllers", "src_encoding": "UTF-8", "text": "# pygame_controllers\nCrappy pygame code to run some joysticks\n\nIdeally this should work to control the rover. Full control schemes coming soon.\n" }, { "alpha_fraction": 0.48377305269241333, "alphanum_fraction": 0.5155264735221863, "avg_line_length": 29.382978439331055, "blob_id": "87ca61477d83fb56e738152464aff9b791582fe6", "content_id": "6fb85b7728c8ff065a5eddd79f70bb1834c37042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4283, "license_type": "no_license", "max_line_length": 96, "num_lines": 141, "path": "/joy.py", "repo_name": "yorkurt/pygame_controllers", "src_encoding": "UTF-8", "text": "import math\nimport pygame\n\nimport helpers\n\nclass Joystick_L:\n\n def __init__(self):\n pygame.joystick.init()\n numJoys = pygame.joystick.get_count()\n self.joyInitL = False\n self.x = 0\n self.y = 0\n self.rad = 0\n self.throttle = 0\n if (numJoys > 0):\n self.joystick = pygame.joystick.Joystick(0)\n self.joystick.init()\n self.joyInitL = True\n else:\n print(\"No left joystick found\")\n self.joyInitL = False\n self.numButtons = 0\n return\n\n self.numButtons = self.joystick.get_numbuttons()\n self.buttons = [0]*self.numButtons\n \n \n pygame.font.init()\n self.font = pygame.font.Font(pygame.font.get_default_font(),32)\n\n def compute(self):\n self.x = self.joystick.get_axis(0)\n self.y = self.joystick.get_axis(1)\n self.throttle = ((-1 * self.joystick.get_axis(2)) + 1) / 2\n self.rad = math.hypot(self.x,self.y)\n self.rad = 
helpers.limitToRange(self.rad,0,1)\n self.ang = math.atan2(self.y,self.x)\n self.x = self.rad*math.cos(self.ang)\n self.y = self.rad*math.sin(self.ang)\n #'clicks' to middle\n tab = .12\n if -tab < self.x < tab:\n self.x = 0\n if -tab < self.y < tab:\n self.y = 0\n\n for i in xrange(self.numButtons):\n self.buttons[i] = self.joystick.get_button(i)\n\n \n\n \n ''' def draw(self,surface):\n r = 200\n w = surface.get_width()\n h = surface.get_height()\n\n for i in xrange(self.numButtons):\n if self.buttons[i]:\n col = (0,255,0)\n else:\n col = (64,0,64)\n text = self.font.render(str(i),1,col)\n surface.blit(text,text.get_rect(centerx=w*(i+1)/(self.numButtons+1),centery=h/2))\n \n x = int(round(w/2+self.x*r))\n y = int(round(h/2+self.y*r))\n pygame.draw.aaline(surface,(128,128,128),(w/2,h/2),(x,y),1)\n pygame.draw.circle(surface,(0,0,0),(x,y),8,4)\n pygame.draw.circle(surface,(0,255,255),(w/2,h/2),r,2) '''\n\nclass Joystick_R:\n\n def __init__(self):\n pygame.joystick.init()\n numJoys = pygame.joystick.get_count()\n self.joyInitR = False\n self.x = 0\n self.y = 0\n self.rad = 0\n self.throttle = 0\n if (numJoys > 1):\n self.joystick = pygame.joystick.Joystick(1)\n self.joystick.init()\n self.joyInitR = True\n else:\n print(\"No right joystick found\")\n self.joyInitR = False\n self.numButtons = 0\n return\n\n self.numButtons = self.joystick.get_numbuttons()\n self.buttons = [0]*self.numButtons\n \n \n pygame.font.init()\n self.font = pygame.font.Font(pygame.font.get_default_font(),32)\n\n def compute(self):\n self.x = self.joystick.get_axis(0)\n self.y = self.joystick.get_axis(1)\n self.throttle = ((-1 * self.joystick.get_axis(2)) + 1) / 2\n self.rad = math.hypot(self.x,self.y)\n self.rad = helpers.limitToRange(self.rad,0,1)\n self.ang = math.atan2(self.y,self.x)\n self.x = self.rad*math.cos(self.ang)\n self.y = self.rad*math.sin(self.ang)\n #'clicks' to middle\n tab = .12\n if -tab < self.x < tab:\n self.x = 0\n if -tab < self.y < tab:\n self.y = 0\n\n for i in 
xrange(self.numButtons):\n self.buttons[i] = self.joystick.get_button(i)\n\n \n\n \n ''' def draw(self,surface):\n r = 200\n w = surface.get_width()\n h = surface.get_height()\n\n for i in xrange(self.numButtons):\n if self.buttons[i]:\n col = (0,255,0)\n else:\n col = (64,0,64)\n \n text = self.font.render(str(i),1,col)\n surface.blit(text,text.get_rect(centerx=w*(i+1)/(self.numButtons+1),centery=h/2-60))\n \n x = int(round(w/2+self.x*r))\n y = int(round(h/2+self.y*r))\n pygame.draw.aaline(surface,(128,0,0),(w/2,h/2),(x,y),1)\n pygame.draw.circle(surface,(0,0,0),(x,y),8,4)\n pygame.draw.circle(surface,(0,255,255),(w/2,h/2),r,2) '''" }, { "alpha_fraction": 0.3854166567325592, "alphanum_fraction": 0.3854166567325592, "avg_line_length": 14.666666984558105, "blob_id": "0d8ee4068d1eae7ca84bb1fc90bc5d4d5926daaf", "content_id": "52475857fc9b416159a5b17d4c0ab44a072410f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/helpers.py", "repo_name": "yorkurt/pygame_controllers", "src_encoding": "UTF-8", "text": "\n\ndef limitToRange(a,b,c):\n if a < b:\n a = b\n if a > c:\n a = c\n return a\n" }, { "alpha_fraction": 0.476492702960968, "alphanum_fraction": 0.5159849524497986, "avg_line_length": 29.826086044311523, "blob_id": "f6eaf19d1cc6a43597f61a0068a1af0bd060dc6e", "content_id": "e2bd1618541a19ace6ae6d70fd527decda277afe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4254, "license_type": "no_license", "max_line_length": 139, "num_lines": 138, "path": "/main.py", "repo_name": "yorkurt/pygame_controllers", "src_encoding": "UTF-8", "text": "import math\nimport pygame\n\nimport joy\n\nclass Main:\n def __init__(self):\n self.SCREEN_WIDTH = 800\n self.SCREEN_HEIGHT = 450\n self.screen = pygame.display.set_mode((self.SCREEN_WIDTH,self.SCREEN_HEIGHT))\n\n self.objects = []\n 
self.mode = 1\n\n pygame.font.init()\n self.font = pygame.font.Font(pygame.font.get_default_font(),32)\n\n\t\n\t\n\n def setupGame(self):\n self.clock = pygame.time.Clock()\t\n self.FPS = 60\n self.joy = joy.Joystick_L()\n self.joy2 = joy.Joystick_R()\n\n self.objects.append(self.joy)\n self.objects.append(self.joy2)\n\n\tl = self.objects[0].get_numbuttons()\n\tself.buttonArr1 = [0 for i in range(l)]\n\n def runGame(self):\n self.gameRunning = 1\n\n\n while self.gameRunning:\n\t buttonArr1 = [0 for i in range(len(buttonArr1))]\n\n self.getInput()\n self.compute()\n self.draw(self.screen)\n self.clock.tick(self.FPS)\n \n #self.leftX = pygame.joystick.Joystick(0).get_axis(0)\n #self.leftY = -1 * pygame.joystick.Joystick(0).get_axis(1)\n #self.rightX = pygame.joystick.Joystick(1).get_axis(0)\n #self.rightY = -1 * pygame.joystick.Joystick(1).get_axis(1)\n\n self.leftX = self.objects[0].get_axis(0)\n\t self.leftY = self.objects[0].get_axis(1)\n\t #self.rightX = self.objects[1].get_axis(0)\n\t #self.rightY = self.objects[1].get_axis(1)\n\n #handle buttons \n for event in pygame.event.get(pygame.JOYBUTTONUP): #event handling loop\n #handle mode switching - buttons 8/9 on both sticks\n print(event)\n if (event.button == 7): #button 8 increases mode\n\t\t buttonArr1[7] = 1\n if (self.mode == 3):\n self.mode = 1\n else:\n self.mode = self.mode + 1\n print(\"Mode is now: \" + str(self.mode))\n if (event.button == 8): #button 9 decreases mode\n\t\t buttonArr1[8] = 1\n if (self.mode == 1):\n self.mode = 3\n else:\n self.mode = self.mode - 1\n print(\"Mode is now: \" + str(self.mode))\n\n\n def getInput(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.gameRunning = 0\n\n #if(self.joy.joyInitL == True):\n pygame.display.set_caption(str(self.joy.x) + ', ' + str(self.joy.y) + ', ' + str(self.joy.rad) + ', ' + str(self.joy.throttle))\n\n def draw(self,surface):\n self.screen.fill((255,255,255))\n\n #for o in self.objects:\n # o.draw(self.screen)\n\n 
\n r = 200\n w = surface.get_width()\n h = surface.get_height()\n\n for i in xrange(self.joy.numButtons):\n if self.joy.buttons[i]:\n col = (0,255,0)\n else:\n col = (64,0,64)\n \n text = self.font.render(str(i),1,col)\n surface.blit(text,text.get_rect(centerx=w*(i+1)/(self.joy.numButtons+1),centery=h/2-30))\n\n x = int(round(w/2+self.joy.x*r))\n y = int(round(h/2+self.joy.y*r))\n pygame.draw.aaline(surface,(128,128,128),(w/2,h/2),(x,y),1)\n pygame.draw.circle(surface,(0,0,0),(x,y),8,4)\n #pygame.draw.circle(surface,(0,255,255),(w/2,h/2),r,2)\n\n for i in xrange(self.joy2.numButtons):\n if self.joy2.buttons[i]:\n col = (0,255,0)\n else:\n col = (64,0,64)\n \n text = self.font.render(str(i),1,col)\n surface.blit(text,text.get_rect(centerx=w*(i+1)/(self.joy2.numButtons+1),centery=h/2+30))\n \n x1 = int(round(w/2+self.joy2.x*r))\n y1 = int(round(h/2+self.joy2.y*r))\n pygame.draw.aaline(surface,(128,0,0),(w/2,h/2),(x1,y1),1)\n pygame.draw.circle(surface,(0,0,0),(x1,y1),8,4)\n pygame.draw.circle(surface,(0,255,255),(w/2,h/2),r,2)\n\n pygame.display.flip()\n\n def compute(self):\n i = 0\n while i < len(self.objects):\n self.objects[i].compute()\n i += 1\n\n\n\nm = Main()\nm.setupGame()\nm.runGame()\n\npygame.quit()\n" }, { "alpha_fraction": 0.6373276710510254, "alphanum_fraction": 0.66171795129776, "avg_line_length": 29.419355392456055, "blob_id": "6b74bc89f6bc7af5b1a183f189284355f71daa12", "content_id": "fa7ac920e5ad41173676dc5b5350634761cca4d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 943, "license_type": "no_license", "max_line_length": 70, "num_lines": 31, "path": "/talker.py", "repo_name": "yorkurt/pygame_controllers", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import String#, Float64MultiArray\nfrom controller.msg import FloatList, IntList\n#import main\n\ndef talker():\n #float64[4] axes = [main.leftX,main.leftY,main.rightX,main.rightY]\n pub_axes = 
rospy.Publisher('controls', FloatList, queue_size=10)\n pub_buttons = rospy.Publisher('buttons', IntList, queue_size=10)\n rospy.init_node('controller_base', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n axes = FloatList()\n buttons = IntList()\n while not rospy.is_shutdown():\n #axes.data = [main.leftX,main.leftY,main.rightX,main.rightY]\n\taxes.data = [1,-1,0,1]\n\t#buttons.data = main.buttonArr1\n\tbuttons.data = [1,0,1,0,1]\n rospy.loginfo(axes)\n pub_axes.publish(axes)\n\trospy.loginfo(buttons)\n pub_buttons.publish(buttons)\n rate.sleep()\n\nif __name__ == '__main__':\n try:\n talker()\n except rospy.ROSInterruptException:\n pass\n" } ]
5
BramboraSK/osu-czsk-bot
https://github.com/BramboraSK/osu-czsk-bot
755caf48f790884eb6960e6bc6337a55b37e88c0
efa016f24eeab0e698ee53ff9fb7b3fd50035930
0669b18df848b2132c19b1d50cb6fc17d8cfb003
refs/heads/master
2020-09-12T17:43:56.093019
2019-11-18T21:18:58
2019-11-18T21:18:58
222,499,234
0
1
null
2019-11-18T16:55:58
2019-11-18T17:07:56
2019-11-18T19:34:45
null
[ { "alpha_fraction": 0.6193415522575378, "alphanum_fraction": 0.6419752836227417, "avg_line_length": 22.14285659790039, "blob_id": "07a752665e16019d9c1ad660eec8dd6b5bd7c598", "content_id": "024592b3751cc5399ec6e568b69596aa60b38736", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 119, "num_lines": 21, "path": "/index.py", "repo_name": "BramboraSK/osu-czsk-bot", "src_encoding": "UTF-8", "text": "import tweepy\nimport random\nimport threading\n\n//API\nauth = tweepy.OAuthHandler(\"...\", \"...\")\nauth.set_access_token(\"...\", \"...\")\n\napi = tweepy.API(auth)\n\n//Súbory\nhraci = open(\"hraci.txt\").readlines()\npridavne_mena = open(\"pridavne_mena.txt\").readlines()\n\ndef send():\n //Tweet každých 1200 sekúnd (20 minút)\n threading.Timer(1200.0, send).start()\n \n api.update_status(random.choice(hraci).replace(\"\\n\", \"\") + \" je \" + random.choice(pridavne_mena).replace(\"\\n\", \"\"))\n\nsend()\n" }, { "alpha_fraction": 0.6319444179534912, "alphanum_fraction": 0.6875, "avg_line_length": 23, "blob_id": "50a1e443d40a715ad60bb8c9cb788dddd4b7df49", "content_id": "36e5e9f65c088193cf2d78df1f8972f2b146c21f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 153, "license_type": "no_license", "max_line_length": 54, "num_lines": 6, "path": "/README.md", "repo_name": "BramboraSK/osu-czsk-bot", "src_encoding": "UTF-8", "text": "# Súbory\n**pridavne_mena.txt** - 37,138 českých prídavných mien\n\n**cz.txt** - Takmer milión českých slov\n\n**hraci.txt** - Ľudia z Top 100 CZ/SK\n" } ]
2
Zheng392/Python-Repository
https://github.com/Zheng392/Python-Repository
fe04a8ac232d31926228f3beb81faa415f5622b8
34b809383a985ed0040b3ccf8247b10fdb6ba7bf
f8711392cf9cefe379277a8cb535afa2a39cc9dc
refs/heads/master
2020-04-13T03:58:28.730211
2019-10-08T01:18:07
2019-10-08T01:18:07
162,945,854
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.68388432264328, "alphanum_fraction": 0.6859503984451294, "avg_line_length": 22.095237731933594, "blob_id": "896efde7319a1a45e01aff1cb3f3c72da82de728", "content_id": "68a30f24115f801551ea676631b5aa7824f01aab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 484, "license_type": "no_license", "max_line_length": 68, "num_lines": 21, "path": "/Spider/XieCheng Hotel Data/xiecheng selenium/text/xiechengservices/hotelInfotext.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n__author__ = 'LiuYang'\n\nfrom services.xiechengservices.hotelInfo import XiechenghotelService\n\nclass DriverServiceTest(object):\n\n def __init__(self):\n self.xiechenghotelService = XiechenghotelService()\n\n def crawlxiechengTest(self):\n self.xiechenghotelService.getdata()\n self.xiechenghotelService.depose()\n\n\n\nif __name__ == \"__main__\":\n\n driverServiceTest = DriverServiceTest()\n\n driverServiceTest.crawlxiechengTest()" }, { "alpha_fraction": 0.5402272939682007, "alphanum_fraction": 0.5747727155685425, "avg_line_length": 43.45454406738281, "blob_id": "45f337add63611ab6ba9b60535cfca4c203a3f0a", "content_id": "c62cdf683120b2298bd3ff418f42191f173c8e1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4566, "license_type": "no_license", "max_line_length": 113, "num_lines": 99, "path": "/Spider/Jindong review data by Scrapy/jd_spider-fundamental/jd_spider/spiders/jd_home.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom scrapy.spiders import Spider\nfrom jd_spider.items import goodsItem\nfrom scrapy.selector import Selector\nimport scrapy\nimport re\nimport json\n\nurls={'phone': 'https://list.jd.com/list.html?cat=9987,653,655&sort=sort_commentcount_desc&page=',\n 
'laptop':'https://list.jd.com/list.html?cat=670,671,672&sort=sort_commentcount_desc&page=',\n 'men_cloth':'https://list.jd.com/list.html?cat=1315,1342&sort=sort_commentcount_desc&page=',\n 'women_cloth':'https://list.jd.com/list.html?cat=1315,1343&sort=sort_commentcount_desc&page=',\n 'home_textiles':'https://list.jd.com/list.html?cat=1620,1621&sort=sort_commentcount_desc&page=',\n 'sporting_goods':'https://list.jd.com/list.html?cat=1318,1466&sort=sort_commentcount_desc&page=',\n 'food':'https://list.jd.com/list.html?cat=1320,1583&sort=sort_commentcount_desc&page='\n }\n\nclass jd_spider(Spider):\n name = \"jd\"\n\n start_urls = []\n for i in range(1, 121): # 这里请自己设置页数,目前只能抓取电子烟分类下前10页的商品\n url = 'https://list.jd.com/list.html?cat=1315,1342&sort=sort_commentcount_desc&page=' + str(i)#\n start_urls.append(url)\n\n # def start_requests(self):\n # urls = []\n # for i in range(1, 61): # 这里请自己设置页数,目前只能抓取电子烟分类下前10页的商品\n # url = 'https://list.jd.com/list.html?cat=1315,1342&page=' + str(i)\n # urls.append(url)\n # for url in urls:\n # yield scrapy.Request(url=url, callback=self.parse)\n # def parse_price(self, response):\n # item1 = response.meta['item']\n # temp1 = response.body.split(b'jQuery([')\n # s = temp1[1][:-4] # 获取到需要的json内容\n # js = json.loads(s.decode()) # js是一个list\n # if 'pcp' in js:\n # item1['price'] = js['pcp']\n # else:\n # item1['price'] = js['p']\n # return item1\n\n # def parse_getCommentnum(self, response):\n # item1 = response.meta['item']\n # # response.body是一个json格式的\n # js = json.loads(response.body.decode())\n # item1['score1count'] = js['CommentsCount'][0]['Score1Count']\n # item1['score2count'] = js['CommentsCount'][0]['Score2Count']\n # item1['score3count'] = js['CommentsCount'][0]['Score3Count']\n # item1['score4count'] = js['CommentsCount'][0]['Score4Count']\n # item1['score5count'] = js['CommentsCount'][0]['Score5Count']\n # item1['comment_num'] = js['CommentsCount'][0]['CommentCount']\n # num = item1['ID'] # 获得商品ID\n # s1 = str(num)\n # 
url = \"http://pm.3.cn/prices/pcpmgets?callback=jQuery&skuids=\" + s1[3:-2] + \"&origin=2\"\n # yield scrapy.Request(url, meta={'item': item1}, callback=self.parse_price)\n\n # def parse_detail(self, response):\n # item1 = response.meta['item']\n # sel = Selector(response)\n #\n # temp = response.body.split(b'commentVersion:')\n #\n # pattern = re.compile(b\"[\\'](\\d+)[\\']\")\n # if len(temp) < 2:\n # item1['commentVersion'] = -1\n # else:\n # match = pattern.match(temp[1][:10])\n # item1['commentVersion'] = match.group()\n #\n # url = \"http://club.jd.com/clubservice.aspx?method=GetCommentsCount&referenceIds=\" + str(item1['ID'][0])\n # yield scrapy.Request(url, meta={'item': item1}, callback=self.parse_getCommentnum)\n def __init__(self):\n self.allids=[]\n self.count=0\n def parse(self, response): # 解析搜索页\n sel = Selector(response) # Xpath选择器\n goods = sel.xpath('//li[@class=\"gl-item\"]')\n i=0\n for good in goods:\n i=i+1\n item1 = goodsItem()\n item1['ID'] = good.xpath('./div/@data-sku').extract()\n if item1['ID'] in self.allids:\n continue\n self.allids.append(item1['ID'])\n item1['name'] = good.xpath('./div/div[@class=\"p-name\"]/a/em/text()').extract()\n item1['shop_name'] = good.xpath('./div/div[@class=\"p-shop\"]/@data-shop_name').extract()\n try:\n item1['link'] = good.xpath('./div/div[@class=\"p-img\"]/a/@href').extract()\n url = \"http:\" + item1['link'][0] + \"#comments-list\"\n except:\n pass\n\n print(\"goodNum is\",i)\n yield item1#scrapy.Request(url, meta={'item': item1}, callback=self.parse_detail)\n self.count=self.count+1\n print('count is ',self.count,response)" }, { "alpha_fraction": 0.5977542996406555, "alphanum_fraction": 0.7014530897140503, "avg_line_length": 20.338027954101562, "blob_id": "f0b3c59edd792a595320c5f5e25fefbfebf20610", "content_id": "0c78ec6a3c072423d0b886fb8421ed3ad07433c7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1514, "license_type": "permissive", 
"max_line_length": 78, "num_lines": 71, "path": "/Foursquare research/Projects/Get venues/Foursquare api.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "'''\n#test the search function\nimport json, requests\nurl = 'https://api.foursquare.com/v2/venues/search'\n\nparams = dict(\n client_id='5MDTT4NQVXV3VQF3KEA50Z2T0DLQ5TJAF1C2UVERXOJD5V4E',\n client_secret='QK4AM22AD13GNIMNJLSHGEFX5VPBPH3JALD50L3P3CHBYHQF',\n v='20180323',\n ll='37.756803, -122.429917',\n radius=100000,\n intent='browse',\n limit=20\n)\nresp = requests.get(url=url, params=params)\ndata = json.loads(resp.text)\n\n'''\n\n#test explore\n'''\nimport json, requests\nurl = 'https://api.foursquare.com/v2/venues/explore'\n\nparams = dict(\n client_id='5MDTT4NQVXV3VQF3KEA50Z2T0DLQ5TJAF1C2UVERXOJD5V4E',\n client_secret='QK4AM22AD13GNIMNJLSHGEFX5VPBPH3JALD50L3P3CHBYHQF',\n near='San Francisco, CA',\n v='20180323',\n radius=100000,\noffset='100000',\n limit='50',\n time='any',\n day='any',\n)\nresp = requests.get(url=url, params=params)\ndata = json.loads(resp.text)\nprint(resp.text)\n'''\n\n# #test venue's photo\n# import json, requests\n# url = 'https://api.foursquare.com/v2/venues/445e36bff964a520fb321fe3/photos'\n#\n# params = dict(\n# client_id='5MDTT4NQVXV3VQF3KEA50Z2T0DLQ5TJAF1C2UVERXOJD5V4E',\n# client_secret='QK4AM22AD13GNIMNJLSHGEFX5VPBPH3JALD50L3P3CHBYHQF',\n# v='20180323',\n# limit='200',\n#\n# )\n# resp = requests.get(url=url, params=params)\n# data = json.loads(resp.text)\n# print(resp.text)\n\n\n#test venue's tips\nimport json, requests\nurl = 'https://api.foursquare.com/v2/venues/4c29567f9fb5d13aa2139b57/tips'\n\nparams = dict(\n client_id='5MDTT4NQVXV3VQF3KEA50Z2T0DLQ5TJAF1C2UVERXOJD5V4E',\n client_secret='QK4AM22AD13GNIMNJLSHGEFX5VPBPH3JALD50L3P3CHBYHQF',\n v='20180323',\n sort='popular',\n limit='500',\n\n)\nresp = requests.get(url=url, params=params)\ndata = json.loads(resp.text)\nprint(resp.text)" }, { "alpha_fraction": 0.6493775844573975, 
"alphanum_fraction": 0.680497944355011, "avg_line_length": 24.3157901763916, "blob_id": "e8d795942f91a3c6b3428a1de2cf849ba5584b4b", "content_id": "fa7292bc20be8a690f8afa4e03fdb3e8a8a81e50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 51, "num_lines": 19, "path": "/Foursquare research/Projects/Small Dataset creating/Get part pictures.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "pictures=[]\nwith open('all.tsv','r') as f:\n\tfor line in f.readlines():\n\t\tpictures.append(line.strip().split('\\t')[4])\nprint(pictures)\n\nphotos1=[]\nphotosfeature=[]\nallphotolines=[]\nwith open('name-fc8.txt','r') as f1:\n\tallphotolines=f1.readlines()\n\tfor line in allphotolines:\n\t\tphoto=eval(line.split(' ',1)[0])[0].split('.')[0]\n\t\tphotos1.append(photo)\n\nwith open('name-fc8_2.txt','w') as f2:\n\tfor picture in pictures:\n\t\tindex=photos1.index(picture)\n\t\tf2.write(allphotolines[index])\n\n" }, { "alpha_fraction": 0.5956472158432007, "alphanum_fraction": 0.6040473580360413, "avg_line_length": 30.14285659790039, "blob_id": "2e3c9efaae86c8c295bff9f9e585d17cbcd948f1", "content_id": "66534950642ee9d4041e6daade4514d367b6ae97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2619, "license_type": "no_license", "max_line_length": 124, "num_lines": 84, "path": "/Foursquare research/Projects/Data analysis/Create data.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport random\ndata=pd.read_csv('photo,user,venue,time.txt',names=(\"photo\",\"user\",\"venue\",\"time\"),encoding='utf-8')\ndata=data.sort_values('time')\n# data['venue']=pd.factorize(data['venue'])[0]\n# 
data['user']=pd.factorize(data['user'])[0]\nvenueNum=len(pd.factorize(data['venue'])[1])\ntrain=data.iloc[0:110000]\ntest=data.iloc[110000:]\n\n\n\nprint('start now')\n\ndropList=[]\nfor x in range(len(test)):\n if (test.iloc[x]['user'] not in list(train['user'].values) or test.iloc[x]['venue'] not in list(train['venue'].values)):\n dropList.append(x)\ntest = test.drop(test.index[dropList])\n\n\ntrain['venue'],venueIndex=pd.factorize(train['venue'])\ntrain['user'],userIndex=pd.factorize(train['user'])\n\ntemp=[]\nfor venue in test['venue']:\n temp.append(venueIndex.get_loc(venue))\ntest['venue']=temp\n\ntemp=[]\nfor user in test['user']:\n temp.append(userIndex.get_loc(user))\ntest['user']=temp\n\n\n\nwith open('train.tsv','w') as f:\n for x in range(len(train)):\n f.write(str(train.iloc[x]['user'])+'\\t'+str(train.iloc[x]['venue'])+'\\t'+'1'\n +'\\t'+str(train.iloc[x]['time'])+'\\t'+str(train.iloc[x]['photo'])+'\\n')\n\nprint('sucessfully save train.tsv')\nwith open('test.tsv','w') as f:\n for x in range(len(test)):\n f.write(str(test.iloc[x]['user']) + '\\t' + str(test.iloc[x]['venue']) + '\\t' + '1'\n + '\\t' + str(test.iloc[x]['time']) + '\\t' + str(test.iloc[x]['photo']) + '\\n')\n\nprint('sucessfully save test.tsv')\n\ntrainPlusTest=pd.concat([train,test])\n\nwith open('all.tsv','w') as f:\n for x in range(len(trainPlusTest)):\n f.write(str(trainPlusTest.iloc[x]['user'])+'\\t'+str(trainPlusTest.iloc[x]['venue'])+'\\t'+'1'\n +'\\t'+str(trainPlusTest.iloc[x]['time'])+ '\\t' + str(trainPlusTest.iloc[x]['photo'])+'\\n')\n\n\n\nuserVenueSet=set()\nuserVenueDict=dict()\nvenueSet=set([i for i in range(venueNum)])\nfor x in range(len(trainPlusTest)):\n if trainPlusTest.iloc[x]['user'] not in userVenueDict:\n userVenueDict[trainPlusTest.iloc[x]['user']]=set()\n userVenueDict[trainPlusTest.iloc[x]['user']].add(trainPlusTest.iloc[x]['venue'])\n\n\n\nwith open('test.negative.tsv','w') as f:\n for x in range(len(test)):\n if test.iloc[x]['user'] in userVenueDict:\n 
m=list(venueSet-userVenueDict[test.iloc[x]['user']])\n else:\n m=[]\n random.shuffle(m)\n r=50\n if len(m)<r:\n r=len(m)\n f.write('('+str(test.iloc[x]['user']) + ',' + str(test.iloc[x]['venue'])+')')\n for y in range(r):\n f.write('\\t'+str(m[y]))\n f.write('\\n')\n\nprint('sucessfully save test.negative.tsv')\n\n\n\n" }, { "alpha_fraction": 0.45390692353248596, "alphanum_fraction": 0.4591747224330902, "avg_line_length": 36.96666717529297, "blob_id": "3d8fc732ac9a3fc717e2bf0e86edbd673e5893ff", "content_id": "579a8257f85b0ca0cbd319215a7a63f35e86b59d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1139, "license_type": "no_license", "max_line_length": 83, "num_lines": 30, "path": "/download pdf/rename.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "from PyPDF2 import PdfFileReader\nimport os\n\n\nfor i in range(4):\n for j in range(6):\n path = 'vol%d/issue%d'%(51+i,1+j)\n for file in os.listdir(path):\n if '.pdf' in file:\n pdfFileReader = PdfFileReader(os.path.join(path,file))\n else:\n continue\n try:\n documentInfo = pdfFileReader.getDocumentInfo()\n except Exception:\n continue\n documentInfo = pdfFileReader.getDocumentInfo()\n if documentInfo.get('/Title'):\n name = documentInfo['/Title']\n name=name.replace('/',' ')\n name = name.replace(':', ' ')\n name = name.replace('\\\\', ' ')\n name = name.replace('*', ' ')\n name = name.replace('?', ' ')\n name = name.replace('\"', ' ')\n name = name.replace('<', ' ')\n name = name.replace('>', ' ')\n name = name.replace('|', ' ')\n print(os.path.join(path, file))\n os.rename(os.path.join(path, file), os.path.join(path,name+\".pdf\"))\n" }, { "alpha_fraction": 0.5977763533592224, "alphanum_fraction": 0.6000654101371765, "avg_line_length": 35.35714340209961, "blob_id": "50c483ac09faaec95f173745b384a53a3b319069", "content_id": "2c414c614807af7c8d6a506e41e796244d4b4d75", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 3152, "license_type": "no_license", "max_line_length": 529, "num_lines": 84, "path": "/Spider/XieCheng Hotel Data/xiecheng selenium/DAO/xiecheng.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n__author__ = 'LiuYang'\nimport pymysql\nimport uuid\nimport random\n\nclass xiechengDAO(object):\n def __init__(self,host= 'localhost',db='mysql',user='root',password='73628'):\n self.host = host\n self.db = db\n self.user=user\n self.password =password\n\n # 存储酒店基本信息\n def savehotelComment(self,items):\n db = pymysql.connect(self.host,self.user,self.password,self.db,charset='utf8')\n cursor = db.cursor()\n for item in items:\n try:\n cursor.execute(\"replace into hotelinfo(guid,city,title,price,score,recommend,area,havawifi,discussNum,common_facilities,activity_facilities,service_facilities,room_facilities,around_facilities)values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\" ,(item[\"guid\"],item[\"city\"],item[\"title\"],item[\"price\"],item[\"score\"],item[\"recommend\"],item[\"area\"],item[\"havawifi\"],item[\"discussNum\"],item[\"common_facilities\"],item[\"activity_facilities\"],item[\"service_facilities\"],item[\"room_facilities\"],item[\"around_facilities\"]))\n except Exception as e:\n print(e)\n db.commit()\n cursor.close()\n db.close()\n\n\n # 存储所有酒店的链接\n def savehotellink(self,listPageInfo):\n db = pymysql.connect(self.host,self.user,self.password,self.db,charset='utf8')\n cursor = db.cursor()\n for hotel in listPageInfo:\n try:\n id = uuid.uuid1()\n cursor.execute(\"replace into hotellianjie(guid,lianjie,city,comm_num)values(%s,%s,%s,%s)\" ,(id,hotel[\"url\"],hotel[\"city\"],hotel[\"comm_num\"]))\n except Exception as e:\n print(hotel[\"url\"])\n db.commit()\n cursor.close()\n db.close()\n\n\n # 从数据库中读取链接数据\n def _return(self):\n db = pymysql.connect(self.host,self.user,self.password,self.db,charset='utf8')\n cursor = db.cursor()\n\n 
cursor.execute(\"SELECT * FROM hotellianjie\")\n\n rows = cursor.fetchall()\n return rows\n\n db.commit()\n cursor.close()\n db.close()\n\n # 存储酒店评论信息\n def savehotelCommentinfo(self,items):\n db = pymysql.connect(self.host,self.user,self.password,self.db,charset='utf8')\n cursor = db.cursor()\n for item in items:\n\n try:\n cursor.execute(\"insert into hotelcommentinfo(hotelname,username,commentscore,intime,tourstyle,praisenum,commenttime,comment)values(%s,%s,%s,%s,%s,%s,%s,%s)\" ,(item[\"title\"],item[\"username\"],item[\"commentscore\"],item[\"intime\"],item[\"tourstyle\"],item[\"praisenum\"],item[\"commenttime\"],item[\"comment\"]))\n except :\n print(item)\n db.commit()\n cursor.close()\n db.close()\n\n\n # 从数据库中读取评论数据\n def _returncomment(self):\n db = pymysql.connect(self.host,self.user,self.password,self.db,charset='utf8')\n cursor = db.cursor()\n\n cursor.execute(\"SELECT * FROM hotelcommentinfo\")\n\n rows = cursor.fetchall()\n return rows\n\n db.commit()\n cursor.close()\n db.close()\n\n\n\n\n" }, { "alpha_fraction": 0.5355501770973206, "alphanum_fraction": 0.5528520941734314, "avg_line_length": 31.743362426757812, "blob_id": "f91616be7377d19912865f599c1a619eb8d2e6f6", "content_id": "d95ac368eb7b1d99157f281d0ef185d06fb9d903", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3699, "license_type": "no_license", "max_line_length": 109, "num_lines": 113, "path": "/Foursquare research/Projects/Venues details/tutorial/spiders/scraper.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import scrapy\nimport json\nimport re\nfrom scrapy import Selector\nfrom tutorial.items import *\nfrom urllib.parse import urlencode\nimport yaml\nimport json\nimport pandas as pd\nimport numpy as np\nimport re\n\ndef get_delta(lower, upper, length):\n return (upper - lower) / length\n\nclass scraper(scrapy.Spider):\n name = \"scraper\"\n # base_url = \"http://www.tripadvisor.cn\"\n # 
start_urls = [\n # base_url + \"/Hotels-g297463-Chengdu_Sichuan-Hotels.html\"\n # ]\n with open(\"config.yaml\", \"r\") as f:\n cfg = yaml.load(f)\n search_params = {\n 'client_id': cfg['client_id'],\n 'client_secret': cfg['client_secret'],\n 'v': '20180218'\n }\n start_urls = []\n\n df=pd.read_csv('data.csv',encoding='utf-8')\n df=df.sort_values(by='checkinsCount',ascending=False)\n ids=list(df['id'][0:500].values)\n for id in ids:\n url = 'https://api.foursquare.com/v2/venues/{}'.format(id) + '?' + urlencode(search_params)\n start_urls.append(url)\n\n\n def __init__(self):\n\n self.venue_ids = set()\n self.allVenues = []\n self.search_count=0\n with open(\"config.yaml\", \"r\") as f:\n cfg = yaml.load(f)\n self.photosParams = {\n 'client_id': cfg['client_id'],\n 'client_secret': cfg['client_secret'],\n 'v': '20180218',\n \"limit\": 200\n\n }\n self.tipsParams = {\n 'client_id': cfg['client_id'],\n 'client_secret': cfg['client_secret'],\n 'v': '20180218',\n \"limit\":200\n }\n\n\n def parse(self, response):\n\n if 'venue' in json.loads(response.body)['response']:\n venue = json.loads(response.body)['response']['venue']\n photosNum=venue['photos']['count']\n tipsNum=venue['stats']['tipCount']\n venuesItemInsta=venuesItem()\n venuesItemInsta['venue']=venue\n yield venuesItemInsta\n\n self.search_count += 1\n print(\"search count: {}; \".format(self.search_count))\n\n for photosCount in range(int(photosNum/200)+1):\n self.photosParams.update({\"offset\":200*photosCount})\n yield scrapy.Request(url=response.url.split('?')[0]+'/photos?'+ urlencode(self.photosParams),\n callback=self.ParsePhotos)\n\n\n for tipsCount in range(int(tipsNum/200)+1):\n self.tipsParams.update({\"offset\":200*tipsCount})\n yield scrapy.Request(url=response.url.split('?')[0]+'/tips?'+ urlencode(self.tipsParams),\n callback=self.ParseTips)\n\n\n\n def ParsePhotos(self, response):\n if 'photos' in json.loads(response.body)['response']:\n 
photos=json.loads(response.body)['response']['photos']['items']\n id=re.search('(venues/)(.*?)(/photo)',response.url).group(2)\n for photo in photos:\n photosItemInst=photosItem()\n photosItemInst['photo']=photo\n photosItemInst['id']=id\n yield photosItemInst\n\n self.search_count += 1\n print(\"search count: {}; \".format(self.search_count))\n\n\n\n def ParseTips(self, response):\n if 'tips' in json.loads(response.body)['response']:\n tips=json.loads(response.body)['response']['tips']['items']\n id=re.search('(venues/)(.*?)(/tips)',response.url).group(2)\n for tip in tips:\n tipsItemInst=tipsItem()\n tipsItemInst['tip']=tip\n tipsItemInst['id']=id\n yield tipsItemInst\n\n self.search_count += 1\n print(\"search count: {}; \".format(self.search_count))" }, { "alpha_fraction": 0.6243761777877808, "alphanum_fraction": 0.6401151418685913, "avg_line_length": 29.296510696411133, "blob_id": "41825925840ffb892c9040704d77eb2563550e5f", "content_id": "b710477c2ac06d1b0b48f8ae1e9d9669e57db93d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5472, "license_type": "no_license", "max_line_length": 191, "num_lines": 172, "path": "/Foursquare research/Projects/Data analysis/main.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport json\nimport pandas as pd\nimport numpy\nimport pickle\n\n#一个用户可能在一个景点发很多图片,后缀ByPhotos表示按图片统计,ByVenues表示按景点统计,\nvenue_id=[] #景点ID\ncategories=[] #标签的景点数\ncategories_num=[] #景点的标签数\nuserVenueByPhotos=[] #照片的user,景点\ntip_user=[] #tip的user,景点\nusrByPhotos=[]\nplaByPhotos=[]\nuserid=[] #大于10的用户ID\nvenueid=[] #大于10的地点ID\nfolders=['1-500','500-2000','2000-5000','5000-10000','Supplements']\nphotos=[]\n'''\nfor m in folders:\n # with open('venues'+str(m)+'.json','r',encoding=\"utf8\") as file:\n # for line in file.readlines():\n # venue=json.loads(line)['venue']\n # for i in range(0, len(venue['categories'])):\n # 
categories.append(venue['categories'][i]['id'])\n # categories_num.append(len(venue['categories']))\n # venue_id.append(venue['id'])\n\n with open(m+'/photosData.json','r',encoding=\"utf8\") as file:\n for line in file.readlines():\n userVenueByPhotos.append([json.loads(line)['photo']['user']['id'],json.loads(line)['id']])\n usrByPhotos.append(json.loads(line)['photo']['user']['id'])\n plaByPhotos.append(json.loads(line)['id'])\n photos.append(json.loads(line)['photo']['id'])\n print(\"load %s successfully\"%{m})\n\n'''\n\n# with open('params.sav', 'wb') as f:\n# pickle.dump([userVenueByPhotos,usrByPhotos,plaByPhotos,photos], f, -1)\n\nprint('save params successfully')\nparams=[]\nwith open('params.sav', 'rb') as f:\n tmp = pickle.load(f)\n userVenueByPhotos, usrByPhotos, plaByPhotos,photos=tmp\nuserVenueByVenues=set() # 用户景点set\nfor pu in userVenueByPhotos:\n pu=tuple(pu)\n userVenueByVenues.add(pu)\n\nusersByVenues=[]\nfor m in userVenueByVenues:\n usersByVenues.append(m[0])\n\n\nuserNumByVenues=list(zip(*numpy.unique(usersByVenues, return_counts=True)))\nuserNumByPhotos=list(zip(*numpy.unique(usrByPhotos, return_counts=True)))\nplaNumByPhotos=list(zip(*numpy.unique(plaByPhotos, return_counts=True)))\n\n#筛选出去过景点数大于20(即图片数肯定更多)的用户\nfor p in range(1,len(userNumByVenues)):\n if userNumByVenues[p][1] >=20:\n userid.append(userNumByVenues[p][0])\n\n\n#再筛选出图片数大于x的景点\nfor p in range(1,len(plaNumByPhotos)):\n if plaNumByPhotos[p][1] >=50:\n venueid.append(plaNumByPhotos[p][0])\n\n\n\n\n# print(dict(zip(*numpy.unique(categories, return_counts=True)))) #标签的景点数\n# print(dict(zip(*numpy.unique(categories_num, return_counts=True))))#景点的标签数\n# lab=dict(zip(*numpy.unique(categories, return_counts=True))).values()\n# print(lab)\n\n\n\nuserVenueByPhotos=numpy.array(userVenueByPhotos)\nuser_label=numpy.unique(userVenueByPhotos[:,0])\nvenue_label=numpy.unique(userVenueByPhotos[:,1])\na=list(zip(*numpy.unique(userVenueByPhotos[:,0], 
return_counts=True)))\nb=list(zip(*numpy.unique(userVenueByPhotos[:,1], return_counts=True)))\na=numpy.array(a)[:,1]\nb=numpy.array(b)[:,1]\na=list(zip(*numpy.unique(a, return_counts=True)))\nb=list(zip(*numpy.unique(b, return_counts=True)))\na=numpy.array(a)\nb=numpy.array(b)\na1=list(a[:,0])\na2=list(a[:,1])\nb1=list(b[:,0])\nb2=list(b[:,1])\n\n\n\nfile_obj = open(\"distribution.txt\", 'w')\nfor num in a1:\n file_obj.writelines(num)\n file_obj.writelines(', ')\nfile_obj.write('\\n\\n')\nfor num in a2:\n file_obj.writelines(num)\n file_obj.writelines(', ')\nfile_obj.write('\\n')\nfile_obj.write('\\n')\nfor num in b1:\n file_obj.writelines(num)\n file_obj.writelines(', ')\nfile_obj.write('\\n\\n')\nfor num in b2:\n file_obj.writelines(num)\n file_obj.writelines(', ')\nfile_obj.write('\\n')\n\nfile_obj.close()\n\n\n# with open('tipsData.json','r',encoding=\"utf8\") as file:\n# for line in file.readlines():\n# tip_user.append([json.loads(line)['tip']['user']['id'], json.loads(line)['id']])\n#\n# tip_user=numpy.array(tip_user)\n# user_label=numpy.unique(tip_user[:,0])\n# venue_label=numpy.unique(tip_user[:,1])\n# print('tip')\n# print(len(user_label))\n# print(len(venue_label))\n\n'''\nmatrix=numpy.zeros((len(user_label),len(venue_label)))\nprint(numpy.sum(matrix))\n\n\nfor k in range(0,len(tip_user)):\n data=tip_user[k]\n i=user_label.tolist().index(data[0])\n j=venue_label.tolist().index(data[1])\n matrix[i,j]=matrix[i,j]+1\nprint(numpy.sum(matrix))\n'''\n\n\n# df=pd.DataFrame(matrix,columns=venue_label,index=user_label)\n# df.to_csv('Data analysis result.csv')\n\n\n\nn=0\nnn=0\nfile_write_obj = open(\"photo,user,venue,time.txt\", 'w')\nfor m in folders:\n with open(m+'/photosData.json','r',encoding=\"utf8\") as file:\n for line in file.readlines():\n nn = nn + 1\n if json.loads(line)['photo']['user']['id'] in userid:\n if json.loads(line)['id'] in venueid:\n puvt = json.loads(line)['photo']['id'] +', '+ json.loads(line)['photo']['user']['id'] + ', '+ 
json.loads(line)['id'] + ', ' + str(json.loads(line)['photo']['createdAt'])\n file_write_obj.writelines(puvt)\n file_write_obj.write('\\n')\n n=n+1\n print(\"secondly load %s successfully\" % {m})\nprint('地点剩余 ',len(venueid),'/',len(plaNumByPhotos))\nprint('用户剩余 ',len(userid),'/',len(userNumByPhotos))\nprint('photo剩余 ',n,'/',nn)\nfile_write_obj.close()\nprint('稀疏性=',1-(n/(len(userid)*len(venueid))))" }, { "alpha_fraction": 0.4955223798751831, "alphanum_fraction": 0.5671641826629639, "avg_line_length": 29.363636016845703, "blob_id": "453b55f905a9119fbc50d92dd6022119826e91c3", "content_id": "cd3cf100b621f63d17179436b1433ebb3f7ef7f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "no_license", "max_line_length": 61, "num_lines": 11, "path": "/Spider/Jindong review data by Scrapy/jdcomment/jd_spider/test.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "from urllib.parse import urlencode\ndata = {\n 'callback': 'fetchJSON_comment98vv61',\n 'productId': '3555984',\n 'score': 0, # all 0 bad 1 mid 2 good 3 zhuiping 5\n 'sortType': 5, # time series rank 6 recommend rank 5\n 'pageSize': 10,\n 'isShadowSku': 0,\n 'page': 0\n }\nurlencode(data)\n\n" }, { "alpha_fraction": 0.8108108043670654, "alphanum_fraction": 0.8108108043670654, "avg_line_length": 36.5, "blob_id": "02795151a3dc1db3a058f14401ccffd488ab4fe6", "content_id": "a3ace824bd6fd39a3d39e0c2044f6838054ca10d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "permissive", "max_line_length": 47, "num_lines": 2, "path": "/Foursquare research/Projects/Get venues/execute.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "from scrapy import cmdline\ncmdline.execute('scrapy crawl scraper'.split())" }, { "alpha_fraction": 0.6770491600036621, "alphanum_fraction": 0.6811475157737732, "avg_line_length": 
30.30769157409668, "blob_id": "856e20f356dd19bd8f3dacd503e2f9155984ecc6", "content_id": "17ad6c538eb3e65bed741c25bcfee149589f2e08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1220, "license_type": "no_license", "max_line_length": 120, "num_lines": 39, "path": "/Foursquare research/Projects/Venues details/Data analysis.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport json\nimport pandas as pd\nimport numpy as np\nvenues=[]\nids=set()\nvenueLine=[]\nphotos=[]\ntips=[]\nwith open('venues.json','r',encoding=\"utf8\") as f:\n for line in f.readlines():\n venue=json.loads(line)['venue']\n venues.append([venue['id'],venue['stats']['tipCount'],venue['stats']['checkinsCount'],venue['photos']['count']])\n ids.add(venue['id'])\n\nwith open('photosData.json','r',encoding=\"utf8\") as f:\n for line in f.readlines():\n photo=json.loads(line)['id']\n photos.append(photo)\n\nwith open('tipsData.json','r',encoding=\"utf8\") as f:\n for line in f.readlines():\n tip=json.loads(line)['id']\n tips.append(tip)\n\nphotosSeries=pd.Series(photos)\nphotosScraperCount=photosSeries.value_counts()\n\ntipsSeries=pd.Series(tips)\ntipsScraperCount=tipsSeries.value_counts()\n\ndf=pd.DataFrame(np.array(venues),columns=['id','tipsCount','checkinsCount','photosCount'])\ndf=df.set_index('id')\ndf=pd.merge(df, pd.DataFrame(tipsScraperCount), left_index=True, right_index=True, how='outer')\ndf=pd.merge(df, pd.DataFrame(photosScraperCount), left_index=True, right_index=True, how='outer')\n\ndf.to_csv('Data analysis result.csv')\npass" }, { "alpha_fraction": 0.6166666746139526, "alphanum_fraction": 0.6166666746139526, "avg_line_length": 31.384614944458008, "blob_id": "d9d21fd5f44f61ae0a71eddbceda80537fbc265a", "content_id": "ae2c680449c4ba9a1b54e6a0f1da24c613b4f574", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Markdown", "length_bytes": 420, "license_type": "no_license", "max_line_length": 115, "num_lines": 13, "path": "/Foursquare research/Projects/Venues details/README.md", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# Venues details\n\nGet detailed data for venues, including tips and photos of venues.\n\n\n\n## Output\n\nphotosData.json: Every line stands for a photo and the format of a line is {\"photo\": {.........}, \"id\": \".......\"}\n\ntipsData.json: Every line stands for a tip and the format of a line is {\"tip\": {.........}, \"id\": \".......\"}\n\nvenues.json: Every line stands for a venue and the format of a line is {\"venue\": {.........}}" }, { "alpha_fraction": 0.593879222869873, "alphanum_fraction": 0.6253101825714111, "avg_line_length": 45.5, "blob_id": "a8ca9f2f118a5cf6b5c48581ff355b0497c5edc0", "content_id": "01a6db8835815a634e1e88d88f4f672cab76e182", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1209, "license_type": "no_license", "max_line_length": 184, "num_lines": 26, "path": "/Spider/XieCheng Hotel Data/hotel_index_data/hotel_index_data/spiders/hotel_index_data.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "\nfrom scrapy.spiders import Spider\nfrom scrapy import *\nfrom scrapy.selector import Selector\nimport re\nimport json\nimport xlrd\nimport sys\nimport re\nfrom urllib.parse import urlencode\nclass hotel_index_data(Spider):\n name='hotel_index_data'\n def start_requests(self):\n url = \"https://hotels.ctrip.com/Domestic/Tool/AjaxHotelList.aspx\"\n data = {'page': '1', 'cityPY': 'shanghai', 'cityId': '2', 'cityCode': '021',\n\n }\n headers = {'Referer': 'http://hotels.ctrip.com/hotel/shanghai2',\n 'Accept': '* / *', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'max-age=0',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'Host': 'hotels.ctrip.com', 
'Origin': 'http://hotels.ctrip.com',\n 'Proxy-Connection': 'keep-alive', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',\n }\n yield Request(url, self.parse, method=\"POST\", headers=headers, body=json.dumps(data))\n\n def parse(self, response):\n data = json.loads(response.body)\n print(data)" }, { "alpha_fraction": 0.747474730014801, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 15.5, "blob_id": "e10fd64787c467768c4bc097262d4007ed9a12c5", "content_id": "a8431a2ced2cd5b4a1423bff74287b37b595b8bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 42, "num_lines": 6, "path": "/Spider/Jindong review data by Scrapy/jd_spider-fundamental/jd_spider/exec.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# coding:utf-8\n\nfrom scrapy import cmdline\nimport json\n\ncmdline.execute(\"scrapy crawl jd\".split())\n" }, { "alpha_fraction": 0.5968858003616333, "alphanum_fraction": 0.5968858003616333, "avg_line_length": 35.1875, "blob_id": "26d3cc23ac27e9fe3359dd14d32fabfc5bc72af8", "content_id": "58c89c816a7f7baa72f383a5c846a01331e03934", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "permissive", "max_line_length": 107, "num_lines": 16, "path": "/Foursquare research/Projects/Get venues/Data analysis.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import json\nimport pandas as pd\nimport numpy as np\nvenues=[]\nids=set()\nwith open('venues.json','r') as f:\n for line in f.readlines():\n venue=json.loads(line)['venue']\n venues.append([venue['id'],venue['name'],venue['stats']['tipCount'],venue['stats']['usersCount'],\n venue['stats']['checkinsCount'],venue['location']['lat'],\n venue['location']['lng']])\n 
ids.add(venue['id'])\n\ndf=pd.DataFrame(np.array(venues),columns=['id','name','tipCount','usersCount','checkinsCount','lat','lng'])\ndf.to_csv('data.csv')\npass" }, { "alpha_fraction": 0.6331658363342285, "alphanum_fraction": 0.6633166074752808, "avg_line_length": 21.11111068725586, "blob_id": "a6e53ad428ec2ba53a5ae3926986896be3c7f846", "content_id": "ecb5c521ffc1b5b8e16da91bff4d38fce896f0ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 49, "num_lines": 9, "path": "/Foursquare research/Projects/Small Dataset creating/Get part lines.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "lines=[]\nwith open('photo,user,venue,time.txt','r') as f:\n\tfor i in range(5000):\n\t\tlines.append(f.readline())\n\n\nwith open('photo,user,venue,time.txt','w') as f2:\n\tfor line in lines:\n\t\tf2.write(line)\n" }, { "alpha_fraction": 0.7524116039276123, "alphanum_fraction": 0.7524116039276123, "avg_line_length": 17.235294342041016, "blob_id": "430c2484df751b2c57df444f72b3af9399a2dbdf", "content_id": "9a994ebe5d64a1cb60359b52804ab6d461ee433a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 365, "license_type": "no_license", "max_line_length": 76, "num_lines": 17, "path": "/README.md", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# Python Repository\n\nCollections of my code in python.\n\nNote:This repository is only used to backup my code,not for version control.\n\n## Contents\n\n*tutorial*: a Scrapy project for Build a spider project quickly\n\n*download pdf*: 自动批量下载文献\n\n*HTML-to-PDF*: 将HTML-to-PDF转成pdf\n\n*Spider*: 曾经用过的爬虫\n\n*Gamepad*: 游戏手柄控制器\n\n" }, { "alpha_fraction": 0.8072289228439331, "alphanum_fraction": 0.8072289228439331, "avg_line_length": 41, "blob_id": "00e3df1627d25380665c38bdc9f6163f2b9b877e", "content_id": 
"1d1aa085b42a66a14cd9a8a6b5d64bb58bc328fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, "license_type": "no_license", "max_line_length": 56, "num_lines": 2, "path": "/Spider/XieCheng Hotel Data/hotel_index_data/begin.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "from scrapy import cmdline\ncmdline.execute('scrapy crawl hotel_index_data'.split())" }, { "alpha_fraction": 0.529335081577301, "alphanum_fraction": 0.5874034762382507, "avg_line_length": 41.78969955444336, "blob_id": "1d000445cfdba6dfb231072aff6f1d47eb7b7b08", "content_id": "8e85ec38e9cf507e4413a8e3f5bc5edd82847ed9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10981, "license_type": "no_license", "max_line_length": 189, "num_lines": 233, "path": "/Spider/XieCheng Hotel Data/Requests_threading/hotel_data_threading.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import requests,json,random\nimport re,threading\nimport json\nfrom lxml import etree\nimport time\nimport random\nlock=threading.Lock()\n\ncity='shanghai'\nunsucesspage=[]\nproxies=[]\n# user_agent_list = [\n# \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\" ,\\\n# \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\", \\\n# \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\", \\\n# \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6\", \\\n# \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1\", \\\n# \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5\", \\\n# \"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like 
Gecko) Chrome/19.0.1084.36 Safari/536.5\", \\\n# \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\", \\\n# \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\", \\\n# \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\", \\\n# \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\", \\\n# \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\", \\\n# \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\", \\\n# \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\", \\\n# \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\", \\\n# \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3\", \\\n# \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\", \\\n# \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\"\n# ]\nuser_agent_list = [\n'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36'\n\n ]\n\ncount=0\ndef header_body_update(page):\n data = {'cityPY': 'shanghai', 'cityId': 2, 'cityCode': '021','page':page}\n headers = {'Referer': 'https://hotels.ctrip.com/hotel/shanghai2/sl1274819', \"Host\": \"hotels.ctrip.com\",\n 'Origin':'http://hotels.ctrip.com',\n \"Accept-Language\": \"en-US,en;q=0.9\",\n \"Connection\": \"keep-alive\",\n 'Accept': '* / *',\n \"Accept-Encoding\": \"gzip, deflate\",\n 'Cache-Control':'max-age=0',}\n \n headers['User-Agent'] = random.choice(user_agent_list)\n return headers,data\ndef proxy_update():\n 
ip=random.sample(proxies,1)[0].replace('\\n','')\n http='http://'+ip\n proxy={'http' : http,\n 'https': http}\n\n return proxy\ndef get_page():\n headers, data = header_body_update(1)\n se=[]\n while 1:\n try:\n proxy = proxy_update()\n hotel_list_requests = requests.post('https://hotels.ctrip.com/Domestic/Tool/AjaxHotelList.aspx', data=data,\n headers=headers,proxies=proxy)#\n hotel_info = eval(hotel_list_requests.text)\n text = hotel_info['paging']\n se = etree.HTML(text)\n page_number=json.loads(se.xpath('//a[@rel=\"nofollow\"]/text()')[0])\n\n break\n except Exception as e:\n print(e)\n\n \n return page_number\n\ndef fang_com(page): ##列表页\n # print('page is ',page)\n headers,data=header_body_update(page)\n prices=[]\n hotel_lists=[]\n fail_try=10\n while(fail_try): ###这个主要是,fang.com会随机返回几个10054或者10053,如果连页面都没读取到,提取就是后话了,这网站没有封杀,即使使用单ip只会很少时候随机来几个10054 ,('Connection aborted.', error(10054, ''))\n hotel_info={}\n try:\n proxy = proxy_update()\n hotel_list_requests=requests.post('https://hotels.ctrip.com/Domestic/Tool/AjaxHotelList.aspx', data=data,\n headers=headers)#,timeout=2,proxies=proxy\n text=hotel_list_requests.text\n \n hotel_info=eval(text)\n prices = json.loads(hotel_info['HotelMaiDianData']['value']['htllist'])\n hotel_lists = hotel_info['hotelPositionJSON']\n for hotel, price in zip(hotel_lists, prices):\n hotel.update(price)\n\n line = json.dumps(hotel) + '\\n'\n file.write(line)\n print(hotel_lists)\n print('success',page)\n break\n #print text\n except Exception as e:\n print(e)\n fail_try=fail_try-1\n if fail_try==0:\n print('unsuccess page',page)\n\n\n\n\n time.sleep(1)\n\n # lock.acquire()\n # print(hotel_lists)\n # lock.release()\n\n'''\n text=''\n se = etree.HTML(text) ###########为了利于大家学习,这段演示xpath提取信息\n all_dl=se.xpath('//dl[@class=\"list rel\"]')\n print(len(all_dl))\n for dl in all_dl:\n title=dl.xpath('.//dd[@class=\"info rel floatr\"]/p[@class=\"title\"]/a/text()')[0]\n url=dl.xpath('.//dd[@class=\"info rel 
floatr\"]/p[@class=\"title\"]/a/@href')[0]\n url='http://esf.sz.fang.com'+url\n\n info_list=dl.xpath('.//dd[@class=\"info rel floatr\"]/p[@class=\"mt12\"]/text()')\n #print json.dumps(info,ensure_ascii=False) #py2显示汉字,py3可以直接print mt12\n info=''\n for l in info_list:\n l2= re.findall('\\S*',l)[0] ###消除空白和换行\n #print m_str\n info+=l2+'|'\n\n time.sleep(1)\n # total_price,price_squere,huxin,cankao_shoufu,shiyong_mianji,jianzhu_mianji,years,discription=get_detail(url)\n\n\n lock.acquire() ###这里叫锁,一是保证count计数准确,而是不会导致多个线程乱print,导致看不清楚。加锁的目的是别的线程不能运行这段代码了。但我之前看到有的人乱加锁,把消耗时间很长的代码加锁,那样导致多线程就基本个废物\n global count\n count+=1\n print(time.strftime('%H:%M:%S', time.localtime(time.time())), ' ', count)\n print('列表页:')\n print(' title: %s\\n url: %s\\n info: %s\\n' % (title, url, info))\n\n print('详情页:')\n # print(\n # ' total_price: %s\\n price_squere: %s\\n huxin: %s\\n cankao_shoufu: %s\\n shiyong_mianji: %s\\n jianzhu_mianji: %s\\n years: %s \\n' % (\n # total_price, price_squere, huxin, cankao_shoufu, shiyong_mianji, jianzhu_mianji, years))\n print('**************************************************************')\n lock.release()\n\n'''\n\ndef get_detail(url): ###详情页\n\n header={'User-Agent':random.choice(user_agent_list)}\n header.update({\"Host\":\"esf.sz.fang.com\"})\n\n while(1):\n content=''\n try:\n content=requests.get(url,headers=header,timeout=10).content\n except Exception as e:\n print(e)\n pass\n if content!='':\n break\n\n content=content.decode('gbk').encode('utf8') ##查看网页源代码可看到是gbk编码,直接print的话,如果你在pycharm设置控制台是utf8编码,那么控制台的中文则会乱码,cmd是gbk的恰好可以显示。如果你在pycharm设置控制台是utf8编码,需要这样做\n #print content\n\n inforTxt=getlist0(re.findall('(<div class=\"inforTxt\">[\\s\\S]*?)<ul class=\"tool\">',content)) ###########为了利于大家学习,这段演示正则表达式提取信息,某些信息可能在有的房子界面没有,要做好判断\n #print inforTxt\n\n total_price=getlist0(re.findall('</span>价:<span class=\"red20b\">(.*?)</span>',inforTxt))\n\n price_squere=getlist0(re.findall('class=\"black\">万</span>\\((\\d+?)元[\\s\\S]*?<a 
id=\"agantesfxq_B02_03\"',inforTxt))\n huxin=getlist0(re.findall('<dd class=\"gray6\"><span class=\"gray6\">户<span class=\"padl27\"></span>型:</span>(.*?)</dd>',inforTxt))\n cankao_shoufu=getlist0(re.findall('参考首付:</span><span class=\"black floatl\">(.*?万)</span> </dd>',inforTxt))\n shiyong_mianji=getlist0(re.findall('>使用面积:<span class=\"black \">(.*?)</span></dd>',inforTxt))\n shiyong_mianji=getlist0(re.findall('\\d+',shiyong_mianji))\n jianzhu_mianji=getlist0(re.findall('建筑面积:<span class=\"black \">(.*?)</span></dd>',inforTxt))\n jianzhu_mianji=getlist0(re.findall('\\d+',jianzhu_mianji))\n years=getlist0(re.findall('<span class=\"gray6\">年<span class=\"padl27\"></span>代:</span>(.*?)</dd>',inforTxt))\n\n discription=getlist0(re.findall('style=\"-moz-user-select: none;\">([\\s\\S]*?)<div class=\"leftBox\"',content))\n #print discription\n #print total_price,price_squere,huxin,cankao_shoufu,shiyong_mianji,jianzhu_mianji,years\n\n return total_price,price_squere,huxin,cankao_shoufu,shiyong_mianji,jianzhu_mianji,years,discription\n\n\n\n\n#get_detail('http://esf.sz.fang.com/chushou/3_193928457.htm')\ndef getlist0(list):\n if list:\n return list[0]\n else:\n return '空'\n\nif __name__=='__main__':\n fr = open('proxy.txt', 'r')\n fr.readline() # skip the header\n fr.readline()\n proxies = fr.readlines()\n fr.close()\n\n\n page_num=get_page()\n print(page_num)\n file = open('%s.json'%city, 'w', encoding='utf-8')\n #''' ##这个是单线程,单线程爬很慢,3000个房子信息,一个5秒,那也得15000秒了,很耽误时间\n for i in range(1,page_num):\n data = {'page': i, 'cityPY': 'shanghai', 'cityId': 2, 'cityCode': '021'}\n fang_com(i)\n '''\n threads=[] ###这个是演示多线程爬取\n for i in range(1,page_num): #开了100线程,这样开100线程去爬100页面的详情页面,因为fang.com只能看100页\n\n t=threading.Thread(target=fang_com,args=(i,)) ###这样做没问题,但如果你是爬取1000页面,也这样做就不合适了,python开多了线程会导致线程创建失败,100线程已经很快了,网速是瓶颈了这时候,我开100线程时候网速是800KB左右的网速,我宽带才4M,运营商还算比较良心了,4M宽带400k\n\n threads.append(t)\n\n t.start()\n\n for t in threads:\n t.join()\n '''\n file.close()\n print('over')\n\n" 
}, { "alpha_fraction": 0.6246753334999084, "alphanum_fraction": 0.6844155788421631, "avg_line_length": 29.84000015258789, "blob_id": "cc3f3afd6b4db425bff007c980aba5d3c5f21854", "content_id": "02d36893673d54ce49b4033aefb2397bd9f8920d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 770, "license_type": "no_license", "max_line_length": 194, "num_lines": 25, "path": "/Spider/XieCheng Hotel Data/Requests_threading/hotel_data_simple.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import requests\nimport json\n\nheaders = {'Referer': 'http://hotels.ctrip.com/hotel/shanghai2','User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36'}\n\nfor i in range(1,384):\n\tdata={'page':i,'cityPY':'shanghai','cityId':2,'cityCode':'021' }\n\tr = requests.post('https://hotels.ctrip.com/Domestic/Tool/AjaxHotelList.aspx', data = data, headers=headers)\n\tprint(r.text)\n\tprint(i)\n\npass\n\n\n\n#\n# headers = {'Referer': 'http://hotels.ctrip.com/hotel/shanghai2',}\n#\n# for i in range(1000):\n# \tdata={'page':i,'cityPY':'shanghai','cityId':2,'cityCode':'021' }\n# \tr = requests.post('http://hotels.ctrip.com/Domestic/Tool/AjaxHotelList.aspx', data = data, headers=headers)\n# \tprint(r.text)\n# \tprint(i)\n#\n# pass" }, { "alpha_fraction": 0.676171064376831, "alphanum_fraction": 0.6782077550888062, "avg_line_length": 15.399999618530273, "blob_id": "4230a390ef22aa091e19460e5acb147e7e7b2e42", "content_id": "83a5b09fedf8d0889ec81d8c303c997f6fad44e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 51, "num_lines": 30, "path": "/Foursquare research/Projects/Venues details/tutorial/items.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for 
your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\nfrom scrapy import Field\n\n\nclass TutorialItem(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n pass\n\n\n\n\nclass venuesItem(scrapy.Item):\n venue=Field()\n\n\nclass photosItem(scrapy.Item):\n id=Field()\n photo=Field()\n\nclass tipsItem(scrapy.Item):\n id=Field()\n tip=Field()" }, { "alpha_fraction": 0.5796637535095215, "alphanum_fraction": 0.6341072916984558, "avg_line_length": 32.783782958984375, "blob_id": "2a18931199a7a1b01944ba93059381fd145f7d30", "content_id": "0c2ab9847998ecf11a4a84fed3140c1986c4357a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1249, "license_type": "no_license", "max_line_length": 108, "num_lines": 37, "path": "/Data Center/Share data/share data.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import tushare as ts\nimport pandas as pd\nimport numpy as np\nfrom pandas.tseries.offsets import *\nsz50=ts.get_sz50s()['code'].values\nnp.random.shuffle(sz50)\n# pd.date_range()\nrng=[]\n\nAlldata=[]\nallsigma=[]\nfor i,stock in enumerate(sz50):\n data=ts.get_k_data(stock,start='2010-01-01', end='2017-06-02',autype=None,index=False).set_index('date')\n data = data.set_index(pd.to_datetime(data.index, format=\"%Y-%m-%d\"))\n if(i==0):\n rng=pd.date_range('2010-01-01', '2017-06-02', freq=BDay())\n rng = pd.DataFrame(pd.Series(rng), columns=[\"date\"]).set_index(\"date\")\n\n merge=data.join(rng,how='outer')['close']\n merge=merge.ffill().bfill()\n for j in range(merge.shape[0]-1):\n merge[j]=np.log(merge[j+1]/merge[j])\n merge=merge[0:-1].values\n Alldata.append(merge)\n print(len(merge))\n if(i>0):\n matrix=np.array(Alldata)\n cov=np.cov(matrix)\n sigma=(1/(i+1))**2*np.sum(cov)\n allsigma.append(sigma)\n # rng = pd.date_range(start='7/19/2016', end='10/18/2016', freq='20min')\n # rng = 
pd.DataFrame(pd.Series(rng), columns=[\"time_window\"]).set_index(\"time_window\")\n pass\n # data.to_excel('stock of %s.xlsx' % stock)\nallsigma=pd.Series(allsigma)\nallsigma.to_csv('sigma4.csv')\npass" }, { "alpha_fraction": 0.6072629690170288, "alphanum_fraction": 0.614660382270813, "avg_line_length": 26.054546356201172, "blob_id": "1b191d19ec29a71e98b26779431e49c40408bfe2", "content_id": "9a38bd94cf96483ebe2e0764099e3e0ab2040f2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1815, "license_type": "no_license", "max_line_length": 51, "num_lines": 55, "path": "/Spider/Jindong review data by Scrapy/jdcomment/jd_spider/items.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\nfrom scrapy import Item, Field\n\n\nclass JdSpiderItem(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n pass\n\n\nclass goodsItem(Item):\n link = Field() # 商品链接\n ID = Field() # 商品ID\n name = Field() # 商品名字\n comment_num = Field() # 评论人数\n shop_name = Field() # 店家名字\n price = Field() # 价钱\n commentVersion = Field() # 为了得到评论的地址需要该字段\n score1count = Field() # 评分为1星的人数\n score2count = Field() # 评分为2星的人数\n score3count = Field() # 评分为3星的人数\n score4count = Field() # 评分为4星的人数\n score5count = Field() # 评分为5星的人数\n\n\nclass commentItem(Item):\n user_name = Field() # 评论用户的名字\n user_ID = Field() # 评论用户的ID\n userProvince = Field() # 评论用户来自的地区\n content = Field() # 评论内容\n good_ID = Field() # 评论的商品ID\n good_name = Field() # 评论的商品名字\n date = Field() # 评论时间\n replyCount = Field() # 回复数\n score = Field() # 评分\n status = Field() # 状态\n title = Field()\n userLevelId = Field()\n userRegisterTime = Field() # 用户注册时间\n productColor = Field() # 商品颜色\n productSize = Field() # 商品大小\n userLevelName = Field() # 
银牌会员,钻石会员等\n userClientShow = Field() # 来自什么 比如来自京东客户端\n isMobile = Field() # 是否来自手机\n days = Field() # 天数\n commentTags = Field() # 标签\n usefulVoteCount=Field()\n uselessVoteCount=Field()" }, { "alpha_fraction": 0.5719166398048401, "alphanum_fraction": 0.6498121023178101, "avg_line_length": 29.5, "blob_id": "059b82e001ecfb34e1ccba9ff37638b6210c5318", "content_id": "726e6e2e9f8e3845c404e7a3ec5d6c5019b88059", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2927, "license_type": "no_license", "max_line_length": 173, "num_lines": 96, "path": "/download pdf/test.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import requests\nfrom lxml import etree\nimport os\nimport re\nfrom selenium import webdriver\n\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport json\nimport time\n\ndef get_html(url):\n headers={\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n }\n\n r=requests.get(url,headers=headers)\n # print(r.text)\n html=etree.HTML(r.text)\n result=html.xpath('//*[@id=\"article-list\"]/form/div[3]/ol/li/dl/dd[4]/a/@href')\n if result==[]:\n result = html.xpath('//*[@id=\"article-list\"]/form/div[3]/ol//ol/li/dl/dd[4]/a/@href')\n return result\n\ndef save_pdf(url,folder,name):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n }\n if os.path.exists(folder+'/'+name+'.pdf'):\n return\n\n chrome_options = Options()\n # chrome_options.add_argument('--headless')\n # chrome_options.add_argument('--disable-gpu')\n browser = webdriver.Chrome(chrome_options=chrome_options)\n 
browser.get(url)\n # time.sleep(1)\n url=browser.current_url\n browser.close()\n\n r = requests.get(url, headers=headers)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\n with open(folder+'/'+name+'.pdf','wb') as f:\n f.write(r.content)\n\n'''test download pdf\nurl='https://ac.els-cdn.com/S0306457315000242/1-s2.0-S0306457315000242-main.pdf?_tid=44624573-386b-4ebf-921c-825259bf4488&acdnat=1545467755_f582eab50ca643aeeb8c4ecf4756a951'\nsave_pdf(url,'test','aaa')\n\n'''\n\n'''test selenium\n\n\n\nbrowser= webdriver.Chrome()\nurl='https://www.sciencedirect.com/science/article/pii/S0306457315000242/pdfft?md5=abece181b57cc15e01f82237124b92d9&pid=1-s2.0-S0306457315000242-main.pdf'\nbrowser.get(url)\nprint(browser.current_url)\nbrowser.close()\npass\n\n\n'''\n\n\n\nif __name__=='__main__':\n links={}\n # for i in range(4):\n # for j in range(6):\n # url = 'https://www.sciencedirect.com/journal/information-processing-and-management/vol/%d/issue/%d'%(51+i,j+1)\n # links['vol%dissue%d'%(51+i,j+1)]=get_html(url)\n # print('get links of vol%d issue%d '%(51+i,j+1))\n\n\n # with open('data2.json','w') as file:\n # file.write(json.dumps(links))\n with open('data2.json','r') as file:\n links=json.loads(file.read())\n\n\n\n for i in range(4):\n for j in range(6):\n part=links['vol%dissue%d'%(51+i,j+1)]\n for l,u in enumerate(part):\n url='https://www.sciencedirect.com'+u\n save_pdf(url, 'vol%d/issue%d'%(51+i,j+1),str(l))\n print('save pdf of vol%d issue%d %d'%(51+i,j+1,l))" }, { "alpha_fraction": 0.517257571220398, "alphanum_fraction": 0.5372152924537659, "avg_line_length": 37.36936950683594, "blob_id": "0e6973e42970ffc8522495a3390f8e7696b488dc", "content_id": "0ae5c4e8321d4e46010a291fc5ccae277e945aef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4435, "license_type": "no_license", "max_line_length": 119, "num_lines": 111, "path": "/Spider/Jindong review data by 
Scrapy/jdcomment/jd_spider/spiders/jd_comment.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom scrapy.spiders import Spider\nfrom jd_spider.items import goodsItem, commentItem\nfrom scrapy.selector import Selector\nimport re\nimport json\nimport xlrd\nimport sys\nimport re\nfrom urllib.parse import urlencode\nclass comment_spider(Spider):\n name = \"comment2\"\n IDs = []\n file = open(\"./data/women_cloth_data.json\", 'r')\n for line in file:\n line = line.strip()\n try: # try parsing to dict\n a = json.loads(line)\n IDs.append(a[\"ID\"])\n except:\n print(repr(line))\n print(sys.exc_info())\n print(\"fail\")\n file.close()\n url = 'https://club.jd.com/comment/skuProductPageComments.action'\n data = {\n 'callback': 'fetchJSON_comment98vv61',\n 'productId': '3555984',\n 'score': 0, # all 0 bad 1 mid 2 good 3 zhuiping 5\n 'sortType': 5, # time series rank 6 recommend rank 5\n 'pageSize': 10,\n 'isShadowSku': 0,\n 'page': 0\n }\n start_urls = []\n for ID in IDs:\n try:\n data[\"productId\"] = ID[0]\n except:\n pass\n for page in range(1,6):\n data['page']=page\n data['score'] = 1\n comb_url=url+'?'+urlencode(data)\n start_urls.append(comb_url)\n data['score'] = 3\n comb_url = url + '?' 
+ urlencode(data)\n start_urls.append(comb_url)\n\n # name = \"comment2\"\n # xlrd.Book.encoding = \"utf-8\"\n # data = xlrd.open_workbook(\"goods.xls\")\n # # goods为要抓取评论的商品信息,现提供一个goods.xls文件供参考,第1列:商品ID;第2列:商品评论数;第3列:商品的commentVersion\n # # test.xlsx也可以使用\n # table = data.sheets()[0]\n # nrows = table.nrows # 行数\n # ncols = table.ncols # 列数\n # good_id = table.col_values(0) # 商品ID\n # comment_n = table.col_values(1) # 商品评论数\n # comment_V = table.col_values(2) # 商品评论的commentVersion\n #\n # start_urls = []\n # for i in range(len(good_id)): # 一件商品一件商品的抽取\n # good_num = int(good_id[i])\n # comment_total = int(comment_n[i])\n # if comment_total % 10 == 0: # 算出评论的页数,一页10条评论\n # page = comment_total/10\n # else:\n # page = comment_total/10 + 1\n # for k in range(0, int(page)):\n # url = \"http://sclub.jd.com/productpage/p-\" + str(good_num) + \"-s-0-t-3-p-\" + str(k) \\\n # + \".html?callback=fetchJSON_comment98vv\" #+ str(comment_V[i])\n # start_urls.append(url)\n\n def parse(self, response):\n m = re.search(r'(?<=fetchJSON_comment98vv61\\().*(?=\\);)', response.body.decode('gbk',errors='ignore')).group(0)\n j = json.loads(m)\n commentSummary = j['comments']\n items = []\n for comment in commentSummary:\n item1 = commentItem()\n item1['user_name'] = comment['nickname']\n item1['user_ID'] = comment['id']\n item1['userProvince'] = comment['userProvince']\n item1['content'] = comment['content'].replace('\\n','')\n item1['good_ID'] = comment['referenceId']\n item1['userClientShow'] = comment['userClientShow']\n item1['good_name'] = comment['referenceName']\n item1['date'] = comment['referenceTime']\n item1['replyCount'] = comment['replyCount']\n item1['score'] = comment['score']\n item1['uselessVoteCount']=comment[\"uselessVoteCount\"]\n item1['usefulVoteCount']=comment['usefulVoteCount']\n # item1['status'] = comment['status']\n # title = \"\"\n # if 'title'in comment:\n # item1['title'] = comment['title']\n # item1['title'] = title\n # item1['userRegisterTime'] = 
comment['userRegisterTime']\n # item1['productColor'] = comment['productColor']\n # item1['productSize'] = comment['productSize']\n # item1['userLevelName'] = comment['userLevelName']\n # item1['isMobile'] = comment['isMobile']\n # item1['days'] = comment['days']\n # tags = \"\"\n # if 'commentTags' in comment:\n # for i in comment['commentTags']:\n # tags = tags + i['name'] + \" \"\n # item1['commentTags'] = tags\n #items.append(item1)\n yield item1\n" }, { "alpha_fraction": 0.6158690452575684, "alphanum_fraction": 0.6750629544258118, "avg_line_length": 36.85714340209961, "blob_id": "ec9616fbfd35c88556bfdec01702a20333d427e1", "content_id": "2d04b7d49c4aef5eff001761849af2330bc592b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 794, "license_type": "no_license", "max_line_length": 143, "num_lines": 21, "path": "/Spider/Shanghai Stock Exchange data by requests/Shanghai Stock Exchange data.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import requests\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',\\\n 'Referer': 'http: // www.sse.com.cn / disclosure / dealinstruc / suspension /'\n}\npayload = {'jsonCallBack': 'vajsonpCallback68485', 'isPagination': 'true', 'bgFlag': '1', \\\n 'pageHelp.pageNo': '1', 'pageHelp.beginPage': '1'}\nparams={\"isPagination\": 'true','searchDate': \"\",'bgFlag': 1,\n'searchDo': 1,\\\n\"pageHelp.pageSize\": 1500,\\\n\"pageHelp.pageNo\": 1,\\\n\"pageHelp.beginPage\": 1,\\\n\"pageHelp.cacheSize\": 1,\\\n\"pageHelp.endPage\": 3,\\\n'desc':\"\",\\\n'pageCache':1\n}\nr=requests.get(url='http://query.sse.com.cn/infodisplay/querySpecialTipsInfoByPage.do',params=params,headers=headers)\nprint(r.headers)\nprint(r.json())\npass" }, { "alpha_fraction": 0.7878788113594055, "alphanum_fraction": 0.7878788113594055, "avg_line_length": 32, "blob_id": 
"b8809293153bdefe1586f9af408c426cd4528adf", "content_id": "ca3f9ae8160644f4f026486100edc20477b818e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "no_license", "max_line_length": 32, "num_lines": 1, "path": "/download pdf/README.md", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "*rename.pdf*: 读取pdf获得文章名然后重命名文件。\n" }, { "alpha_fraction": 0.49803921580314636, "alphanum_fraction": 0.505591869354248, "avg_line_length": 39.26315689086914, "blob_id": "c47671c66f750881afcf4826adcb4c62293c4a59", "content_id": "95f59b9533bb9430b50c9369d22f14ef1346eb19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6885, "license_type": "no_license", "max_line_length": 246, "num_lines": 171, "path": "/Spider/Jindong review data by Scrapy/jdcomment/jd_spider/pipelines.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymysql.cursors\nimport json\n\nfrom twisted.enterprise import adbapi\n\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\nfrom scrapy.utils.project import get_project_settings\nfrom scrapy import log\n# import chardet\n\nSETTINGS = get_project_settings()\n\n#\n# class MySQLPipeline(object):\n# @classmethod\n# def from_crawler(cls, crawler):\n# return cls(crawler.stats)\n#\n# def __init__(self, stats):\n# # Instantiate DB\n# self.file=open('phone_data.json', 'w', encoding='utf-8')\n# # self.dbpool = adbapi.ConnectionPool('pymysql',\n# # host=SETTINGS['DB_HOST'],\n# # user=SETTINGS['DB_USER'],\n# # passwd=SETTINGS['DB_PASSWD'],\n# # port=SETTINGS['DB_PORT'],\n# # db=SETTINGS['DB_DB'],\n# # charset='utf8',\n# # use_unicode=True,\n# # 
cursorclass=pymysql.cursors.DictCursor\n# # )\n# # self.stats = stats\n# # dispatcher.connect(self.spider_closed, signals.spider_closed)\n#\n# def spider_closed(self, spider):\n# print(\"done!!!\")\n#\n# \"\"\" Cleanup function, called after crawing has finished to close open\n# objects.\n# Close ConnectionPool. \"\"\"\n# # self.dbpool.close()\n#\n# def process_item(self, item, spider):\n# line=json.dumps(dict(item,ensure_ascii=False))+'\\n'\n# self.file.write(line)\n#\n# # query = self.dbpool.runInteraction(self._insert_record, item)\n# # query.addErrback(self._handle_error)\n# return item\n#\n# def _insert_record(self, tx, item):\n# ID = item['ID'][0]\n# name = item['name'][0]\n# comment_num = str(item['comment_num'])\n# shop_name = item['shop_name'][0]\n# link = item['link'][0]\n# commentVersion = str(item['commentVersion'])\n# commentVersion = commentVersion[1:-1]\n#\n# score1count = str(item['score1count'])\n# score2count = str(item['score2count'])\n# score3count = str(item['score3count'])\n# score4count = str(item['score4count'])\n# score5count = str(item['score5count'])\n#\n# price = str(item['price'])\n#\n# ID = ID.encode('utf-8')\n# name = name.encode('utf-8')\n# comment_num = comment_num.encode('utf-8')\n# shop_name = shop_name.encode('utf-8')\n# link = link.encode('utf-8')\n# commentVersion = commentVersion.encode('utf-8')\n# score1count = score1count.encode('utf-8')\n# score2count = score2count.encode('utf-8')\n# score3count = score3count.encode('utf-8')\n# score4count = score4count.encode('utf-8')\n# score5count = score5count.encode('utf-8')\n# price = price.encode('utf-8')\n#\n# sql = \"INSERT INTO jd_goods VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')\" % \\\n# (ID, name, comment_num, shop_name, link, commentVersion, score1count, score2count, score3count,\n# score4count, score5count, price)\n# tx.execute(sql)\n# print(\"yes\")\n#\n# def _handle_error(self, e):\n# log.err(e)\n\n\nclass CommentPipeline(object):\n # 
@classmethod\n # def from_crawler(cls, crawler):\n # return cls(crawler.stats)\n\n def __init__(self):\n self.count=0\n self.allline=[]\n self.file = open('women_cloth_comment.csv', 'w', encoding='utf-8')\n # Instantiate DB\n # self.dbpool = adbapi.ConnectionPool('pymysql',\n # host=SETTINGS['DB_HOST'],\n # user=SETTINGS['DB_USER'],\n # passwd=SETTINGS['DB_PASSWD'],\n # port=SETTINGS['DB_PORT'],\n # db=SETTINGS['DB_DB'],\n # charset='utf8',\n # use_unicode=True,\n # cursorclass=pymysql.cursors.DictCursor\n # )\n # self.stats = stats\n # dispatcher.connect(self.spider_closed, signals.spider_closed)\n\n def spider_closed(self, spider):\n \"\"\" Cleanup function, called after crawing has finished to close open\n objects.\n Close ConnectionPool. \"\"\"\n # self.dbpool.close()\n print(\"done2!!!\")\n def process_item(self, item, spider):\n # query = self.dbpool.runInteraction(self._insert_record, item)\n # query.addErrback(self._handle_error)\n self.count=self.count+1\n line = '0'+','+ str(item['score'])+','+ str(item['user_ID'])+','+ item['content'] +','+ str(item['good_ID']) +','+item['userClientShow'] +','+ str(item['replyCount'])+','+str(item['uselessVoteCount'])+','+str(item['usefulVoteCount'])+'\\n'\n if line not in self.allline:\n self.file.write(line)\n self.allline.append(line)\n else:\n print('repetition!!')\n\n print(\"scrapy item num is\",self.count)\n return item\n\n # def _insert_record(self, tx, item):\n # user_name = item['user_name']\n # user_ID = item['user_ID']\n # userProvince = item['userProvince']\n # content = item['content']\n # good_ID = item['good_ID']\n # good_name = item['good_name']\n # date = item['date']\n # replyCount = item['replyCount']\n # score = item['score']\n # status = item['status']\n # title = item['title']\n # userRegisterTime = item['userRegisterTime']\n # productColor = item['productColor']\n # productSize = item['productSize']\n # userLevelName = item['userLevelName']\n # isMobile = item['isMobile']\n # days = item['days']\n 
# tags = item['commentTags']\n #\n # sql = \"INSERT INTO jd_comment VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s',\" \\\n # \"'%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')\" % \\\n # (user_name, user_ID, userProvince, content, good_ID, good_name, date, replyCount, score,\n # status, title, userRegisterTime, productColor, productSize, userLevelName,\n # isMobile, days, tags)\n #\n # tx.execute(sql)\n # print(\"yes\")\n\n def _handle_error(self, e):\n log.err(e)\n" }, { "alpha_fraction": 0.6912065148353577, "alphanum_fraction": 0.6932515501976013, "avg_line_length": 24.789474487304688, "blob_id": "26aae946ac43fbb6332cd83cd9d7fe97eb7c8116", "content_id": "e8c2df4c789bb58cf88cced36a894ea507a466a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", "max_line_length": 73, "num_lines": 19, "path": "/Spider/XieCheng Hotel Data/xiecheng selenium/text/xiechengservices/DriveServices.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n__author__ = 'LiuYang'\nfrom services.xiechengservices.DriveServices import XiechengDriverService\n\nclass DriverServiceTest(object):\n\n def __init__(self):\n self.xiechengDriverService = XiechengDriverService()\n\n def crawlxiechengTest(self):\n self.xiechengDriverService.start()\n self.xiechengDriverService.depose()\n\n\nif __name__ == \"__main__\":\n\n driverServiceTest = DriverServiceTest()\n\n driverServiceTest.crawlxiechengTest()" }, { "alpha_fraction": 0.6729399561882019, "alphanum_fraction": 0.7105798721313477, "avg_line_length": 25.945205688476562, "blob_id": "1dbf999423708fabeedc917859a42432eddfa9f3", "content_id": "6a08ccaa9cf3a6d9e38e091733d8e392078af975", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2384, "license_type": "no_license", "max_line_length": 74, "num_lines": 73, "path": "/Spider/XieCheng Hotel 
Data/xiecheng selenium/README.md", "repo_name": "Zheng392/Python-Repository", "src_encoding": "GB18030", "text": "xiecheng\n=================================\n\n众源时空信息聚合平台\n--------------------------\n\n##基本信息:\n\n基于scrapy+selenium的爬去策略,以南京市为例,抽取南京市酒店的基本信息数据与酒店点评数据<br />\n\n##使用Python 库:\n1.scrapy,网上安装方法许多,可自行下载相关依赖<br />\n\n2.selenium<br />\n可以直接使用pip进行安装<br />\n\nSelenium也是一个用于Web应用程序测试的工具。Selenium测试直接运行在浏览器中,就像真正的用户在操作一样。<br />\n\n\n##使用驱动:\n1.Chrome驱动<br />\n下载地址:http://npm.taobao.org/mirrors/chromedriver<br />\n\nselenium调用需要,需下载系统对应版本,将其放置到系统能直接访问的文件夹,如放在{PYTHON_HOME}/Scripts文件夹中<br />\n\n\n##数据库:\n\n###数据库名:xiecheng\n1.hotellianjie(存储酒店url)<br />\n```sql\nDROP TABLE IF EXISTS `hotellianjie`;\nCREATE TABLE `hotellianjie` (\n `guid` varchar(255) DEFAULT NULL,\n `lianjie` varchar(255) DEFAULT NULL,\n `city` varchar(30) DEFAULT NULL,\n `comm_num` int(30) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n```\n2.hotelinfo(酒店基本信息数据)<br />\n```sql\nDROP TABLE IF EXISTS `hotelinfo`;\nCREATE TABLE `hotelinfo` (\n `guid` varchar(255) DEFAULT NULL,\n `city` varchar(30) DEFAULT NULL,\n `title` varchar(60) DEFAULT NULL,\n `price` decimal(10,1) DEFAULT NULL,\n `score` int(20) DEFAULT NULL,\n `recommend` varchar(120) DEFAULT NULL,\n `area` varchar(120) DEFAULT NULL,\n `havawifi` varchar(20) DEFAULT NULL,\n `discussNum` int(11) DEFAULT NULL,\n `common_facilities` varchar(500) DEFAULT NULL,\n `activity_facilities` varchar(255) DEFAULT NULL,\n `service_facilities` varchar(255) DEFAULT NULL,\n `room_facilities` varchar(255) DEFAULT NULL,\n `around_facilities` varchar(255) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n```\n3.hotelcommentinfo(存储酒店评论数据)<br />\n```sql\nDROP TABLE IF EXISTS `hotelcommentinfo`;\nCREATE TABLE `hotelcommentinfo` (\n `hotelname` varchar(50) DEFAULT NULL,\n `username` varchar(40) DEFAULT NULL,\n `commentscore` varchar(40) DEFAULT NULL,\n `intime` varchar(40) DEFAULT NULL,\n `tourstyle` varchar(40) DEFAULT NULL,\n `praisenum` int(11) DEFAULT 
NULL,\n `commenttime` varchar(60) DEFAULT NULL,\n `comment` varchar(1000) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n```" }, { "alpha_fraction": 0.46783626079559326, "alphanum_fraction": 0.4938271641731262, "avg_line_length": 27.165138244628906, "blob_id": "40cbb623bd7a6e0658e92668be6d67c464eca3aa", "content_id": "cc44b77ed77b623ecf41436d85bae434ce4fad66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3244, "license_type": "no_license", "max_line_length": 85, "num_lines": 109, "path": "/Foursquare research/Projects/Small Dataset creating/Data_scraping.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\ndef get_tuple(f,max = 10):#获得(user,time,id)的元组\n n = 1\n while n<= max:\n line = f.readline().split('\\t')\n user = line[0]\n venue = line[1]\n # 1 = line[2]\n time = line[3]\n photo_id = line[4].split('\\n')[0]\n TUPLE = (user,time,photo_id)\n yield TUPLE\n n += 1\n #print(TUPLE)\n\ndef divide_time(start,end,num=10):#均分时间段\n list = []\n gap = (end - start)/num\n for i in range(num+1):\n list.append(start+i*gap)\n return list\n\ndef find_time_index(time,list):#在均分的时间点list里找到时间的位置\n for i in range(len(list)):\n if i == len(list)-1 and time == list[i]:\n return len(list)-1\n break\n if list[i]<=time<list[i+1]:\n return i+1\n break\n else:\n pass\n\nif __name__ == '__main__':\n #这边改成train.tsv的path\n path = r'all.tsv'\n #这边改成想读多少行数据\n line_num = 4888\n\n photo_dict = {}\n user_dict = {}\n time_dict = {}\n\n f = open(path, 'r')\n\n\n flag= 0\n for i in get_tuple(f,line_num):\n flag+=1\n print(flag)\n if user_dict.__contains__(i[0]):\n temp = user_dict[i[0]]\n #print(temp,i)\n temp.append([i[2], int(i[1])])\n user_dict[i[0]]=temp\n #print(user_dict)\n\n else:\n user_dict[i[0]]=[]\n #print(user_dict)\n #print(i)\n temp = user_dict[i[0]]\n #print(temp,i)\n temp.append([i[2], int(i[1])])\n user_dict[i[0]]=temp\n 
#print(user_dict)\n#以上是为了获得{user:[[photoid,time],...],...}的字典,以便下面使用\n\n for user in user_dict:\n time_set = []\n for photo in user_dict[user]:\n time_set.append(photo[1])#photo[1] 为时间,[0] 为id\n #print(time_set)\n start_time = min(time_set)\n end_time = max(time_set)\n div_10 = divide_time(start_time,end_time,10)\n #print(div_10)\n#对每个用户的时长十等分,得到的是list:div_10\n\n\n for index, host_photo in enumerate(user_dict[user]):\n host_photo_id = host_photo[0]\n host_photo_time = host_photo[1]\n photo_dict[(user, host_photo_time, host_photo_id)] =\\\n {10:[],1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[],9:[]}\n\n time_dict[(user, host_photo_time, host_photo_id)] =\\\n [0,0,0,0,0,0,0,0,0,0]\n\n for guest_photo in user_dict[user]:\n guest_photo_id = guest_photo[0]\n guest_photo_time = guest_photo[1]\n if guest_photo_time < host_photo_time:\n time_index = find_time_index(guest_photo_time,div_10)\n photo_dict[(user,host_photo_time,host_photo_id)][time_index]\\\n .append((user,guest_photo_time,guest_photo_id))\n time_dict[(user,host_photo_time,host_photo_id)][time_index-1] = 1\n#得到两个字典\n\n #print(photo_dict)\n #print(time_dict)\n\n with open('dict1.txt','w') as f1:\n f1.write(str(photo_dict))\n\n with open('dict2.txt','w') as f1:\n f1.write(str(time_dict))\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.4829443395137787, "alphanum_fraction": 0.4947422444820404, "avg_line_length": 34.733943939208984, "blob_id": "cd0f0c5fd6f7f6bab8a934b56d855ec980af3113", "content_id": "25dd16e931bff82c78d0878066240ae2f7e5ea5c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3899, "license_type": "permissive", "max_line_length": 125, "num_lines": 109, "path": "/Foursquare research/Projects/Get venues/tutorial/spiders/scraper.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import scrapy\nimport json\nimport re\nfrom scrapy import Selector\nfrom tutorial.items import *\nfrom urllib.parse import urlencode\nimport 
yaml\nimport json\n\n\ndef get_delta(lower, upper, length):\n return (upper - lower) / length\n\nclass scraper(scrapy.Spider):\n name = \"scraper\"\n # base_url = \"http://www.tripadvisor.cn\"\n # start_urls = [\n # base_url + \"/Hotels-g297463-Chengdu_Sichuan-Hotels.html\"\n # ]\n def __init__(self):\n\n self.venue_ids = set()\n self.allVenues = []\n self.search_count=0\n with open(\"config.yaml\", \"r\") as f:\n self.cfg = yaml.load(f)\n #-\n self.lat_delta = get_delta(self.cfg['top_bound'], self.cfg['bottom_bound'], self.cfg['grid_size'])\n #+\n self.long_delta = get_delta(self.cfg['left_bound'], self.cfg['right_bound'], self.cfg['grid_size'])\n def start_requests(self):\n\n\n\n\n search_params = {\n 'client_id': self.cfg['client_id'],\n 'client_secret': self.cfg['client_secret'],\n 'intent': 'browse',\n 'limit': 50,\n 'v': '20180218'\n }\n\n\n\n\n for lat in range(self.cfg['grid_size']):\n for long in range(self.cfg['grid_size']):\n ne_lat = self.cfg['top_bound'] + lat * self.lat_delta\n ne_long = self.cfg['left_bound'] + (long + 1) * self.long_delta\n\n search_params.update({'ne': '{},{}'.format(ne_lat, ne_long),\n 'sw': '{},{}'.format(ne_lat + self.lat_delta,\n ne_long - self.long_delta)})\n\n\n url='https://api.foursquare.com/v2/venues/search'+'?'+urlencode(search_params)\n\n yield scrapy.Request(url=url, callback=self.parse,meta={'ne':[ne_lat,ne_long],'cut':0})\n\n\n\n def parse(self, response):\n\n search_params = {\n 'client_id': self.cfg['client_id'],\n 'client_secret': self.cfg['client_secret'],\n 'intent': 'browse',\n 'limit': 50,\n 'v': '20180218'\n }\n\n # hotel_urls = response.xpath('//h2[@class=\"listing_title\"]//a[contains(@class, \"property_title\")]/@href').extract()\n #\n # if hotel_urls:\n # for hotel_url in hotel_urls:\n # hotel_completed_url = self.base_url + hotel_url\n # yield scrapy.Request(url=hotel_completed_url,\n # callback=self.parse_fetch_hotel)\n # break\n\n if 'venues' in json.loads(response.body)['response']:\n venues = 
json.loads(response.body)['response']['venues']\n if len(venues)<50:\n\n for venue in venues:\n venuesItemInsta=venuesItem()\n venuesItemInsta['venue']=venue\n yield venuesItemInsta\n else:\n cut=response.meta['cut']\n lat_delta=(0.5**(cut+1))*self.lat_delta\n long_delta=(0.5**(cut+1))*self.long_delta\n for i in range(2):\n for j in range(2):\n ne_lat = response.meta['ne'][0] + i * lat_delta\n ne_long = response.meta['ne'][1] - j * long_delta\n\n search_params.update({'ne': '{},{}'.format(ne_lat, ne_long),\n 'sw': '{},{}'.format(ne_lat + lat_delta,\n ne_long - long_delta)})\n\n url = 'https://api.foursquare.com/v2/venues/search' + '?' + urlencode(search_params)\n\n yield scrapy.Request(url=url, callback=self.parse, meta={'ne': [ne_lat, ne_long], 'cut': cut+1})\n\n\n self.search_count += 1\n print(\"search count: {}; Venues number: {} cut: {}\".format(self.search_count, len(venues),response.meta['cut']))\n\n\n\n\n" }, { "alpha_fraction": 0.7121770977973938, "alphanum_fraction": 0.7195571660995483, "avg_line_length": 17.133333206176758, "blob_id": "1af8c6a9d765b2d4b4ac791e560793a73ebc05c2", "content_id": "b2b0981188155e7987d7c65c4b6530c79b8eb291", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 271, "license_type": "no_license", "max_line_length": 117, "num_lines": 15, "path": "/Foursquare research/Projects/Data analysis/README.md", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# Data analysis\n\nAnalyse the foursquare data from San francisco.\n\n## Create data.py\n\nCreat *all.tsv*, *test.negative.tsv*, *test.tsv* and *train.tsv files*. 
The input file is *photo,user,venue,time.txt*\n\n\n\n## Data_scraping.py\n\nInput: all.tsv\n\nOutput: dict1.txt, dict2.txt" }, { "alpha_fraction": 0.6595098376274109, "alphanum_fraction": 0.7024508714675903, "avg_line_length": 33.7746467590332, "blob_id": "01e19e8f56785e5a8b395b88c3cc8da1dd254326", "content_id": "9e18969cf05709d582bb6a7f49bd7945471cee2c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5057, "license_type": "permissive", "max_line_length": 109, "num_lines": 142, "path": "/Foursquare research/Projects/Get venues/tutorial/settings.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Scrapy settings for tutorial project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. You can find more settings consulting the documentation:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'tutorial'\n\nSPIDER_MODULES = ['tutorial.spiders']\nNEWSPIDER_MODULE = 'tutorial.spiders'\n\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = False\n\n# Configure maximum concurrent requests performed by Scrapy (default: 16)\n#CONCURRENT_REQUESTS = 32\n\n# Configure a delay for requests for the same website (default: 0)\n# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay\n# See also autothrottle settings and docs\nDOWNLOAD_DELAY = 0\n# The download delay setting will honor only one of:\n#CONCURRENT_REQUESTS_PER_DOMAIN = 16\n#CONCURRENT_REQUESTS_PER_IP = 16\n\n# Disable cookies (enabled by default)\n#COOKIES_ENABLED = False\n\n# Disable Telnet Console (enabled by 
default)\n#TELNETCONSOLE_ENABLED = False\n\n# Override the default request headers:\n#DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n#}\n\n# Enable or disable spider middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n#SPIDER_MIDDLEWARES = {\n# 'tutorial.middlewares.TutorialSpiderMiddleware': 543,\n#}\n\n# Enable or disable downloader middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n#DOWNLOADER_MIDDLEWARES = {\n# 'tutorial.middlewares.MyCustomDownloaderMiddleware': 543,\n#}\n\n# Enable or disable extensions\n# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html\n#EXTENSIONS = {\n# 'scrapy.extensions.telnet.TelnetConsole': None,\n#}\n\n# Configure item pipelines\n# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html\nITEM_PIPELINES = {\n 'tutorial.pipelines.TutorialPipeline': 300,\n}\n\n# Enable and configure the AutoThrottle extension (disabled by default)\n# See http://doc.scrapy.org/en/latest/topics/autothrottle.html\n#AUTOTHROTTLE_ENABLED = True\n# The initial download delay\n#AUTOTHROTTLE_START_DELAY = 5\n# The maximum download delay to be set in case of high latencies\n#AUTOTHROTTLE_MAX_DELAY = 60\n# The average number of requests Scrapy should be sending in parallel to\n# each remote server\n#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n# Enable showing throttling stats for every response received:\n#AUTOTHROTTLE_DEBUG = False\n\n# Enable and configure HTTP caching (disabled by default)\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\n#HTTPCACHE_ENABLED = True\n#HTTPCACHE_EXPIRATION_SECS = 0\n#HTTPCACHE_DIR = 'httpcache'\n#HTTPCACHE_IGNORE_HTTP_CODES = []\n#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\n\n\n\n# 
如果不想使用代理IP,可以将下面这段DOWNLOADER_MIDDLEWARES代码注释掉\nDOWNLOADER_MIDDLEWARES = {\n 'tutorial.middlewares.RandomUserAgent': 1,\n # 不用ip代理的话就把下面注释掉\n # 'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 110,\n # 'tutorial.middlewares.ProxyMiddleware': 100,\n\n #pause Scrapy and resume after x minutes\n 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,\n 'tutorial.middlewares.SleepRetryMiddleware': 100,\n}\n\n#你需要去定义重试次数和上面的连接失败暂停一会配合使用\nRETRY_TIMES=40\n\nUSER_AGENTS=[]\nwith open('./User agent.txt','r') as f:\n for line in f.readlines():\n USER_AGENTS.append(line.strip())\n\n\nPROXIES = [\n {'ip_port': '120.83.103.119:808', 'user_pass': ''},\n {'ip_port': '110.73.0.38:8123', 'user_pass': ''},\n {'ip_port': '183.32.88.18:808', 'user_pass': ''},\n {'ip_port': '113.121.254.192:808', 'user_pass': ''},\n {'ip_port': '180.110.6.6:808', 'user_pass': ''},\n {'ip_port': '61.191.173.31:808', 'user_pass': ''},\n {'ip_port': '110.72.33.106:8123', 'user_pass': ''},\n\n # {'ip_port': '202.108.2.42:80', 'user_pass': ''},\n # {'ip_port': '122.96.59.104:80', 'user_pass': ''},\n # {'ip_port': '120.76.243.40:80', 'user_pass': ''},\n # {'ip_port': '139.196.108.68:80', 'user_pass': ''},\n # {'ip_port': '60.194.100.51:80', 'user_pass': ''},\n # {'ip_port': '202.171.253.72:80', 'user_pass': ''},\n # {'ip_port': '123.56.74.13:8080', 'user_pass': ''},\n]\n\n# DEFAULT_REQUEST_HEADERS={\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n# 'Accept - Encoding':'gzip, deflate, br',\n# 'Accept-Language':'en-US,en;q=0.9',\n# 'Connection':'Keep-Alive',\n# \t 'Cache-Control': 'max-age=0',\n# }\n\n#let scrapy not show every content of responses\nLOG_LEVEL = 'INFO'" }, { "alpha_fraction": 0.5602836608886719, "alphanum_fraction": 0.6643025875091553, "avg_line_length": 46.11111068725586, "blob_id": "7f36e059bf817f1eb2021e62b3f9457a1a0e3f41", "content_id": "fecf41571b848e3fa460ba0dc551948f3a08e1e4", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "no_license", "max_line_length": 107, "num_lines": 9, "path": "/Data Center/Share data/data_download.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import tushare as ts\nsz50=ts.get_sz50s()['code'].values\n# for stock in sz50:\n# data = ts.get_k_data(stock, start='2010-01-01', end='2017-06-02', autype=None, index=False,ktype='M')\n# data.to_excel('Month data/stock of %s.xlsx' % stock)\n# pass\nstock='000016'\ndata = ts.get_k_data(stock, start='2010-01-01', end='2017-06-02', autype=None, index=True,ktype='M')\ndata.to_excel('Month data/stock of %s.xlsx' % stock)" }, { "alpha_fraction": 0.7879580855369568, "alphanum_fraction": 0.8089005351066589, "avg_line_length": 16.904762268066406, "blob_id": "02fc04de0bc0f643ec11772345eb7cbb307c2073", "content_id": "56c5c4b0d8903c76942f8fdbd706d1aff3aa5e15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 734, "license_type": "no_license", "max_line_length": 66, "num_lines": 21, "path": "/Spider/Proxy Pool/multithread-proxy/README.md", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# multithread-proxy\n\npython 2.7\n\nRequirements:见requirements.txt\n使用 \n pip install requirements -r\n 安装所有依赖包\n \n 需要使用的同学直接git clone下来用就好了,记得要下载对应版本的PhantomJS,并在第90行处修改一下可执行文件的路径!\n \n 新功能:\n多线程代理池:\n1. 新增西刺代理(xicidaili)+快代理(kuaidaili)两个来源的上千个免费代理\n2. 保证代理的时效性和质量,增加国内外代理的有效性认证(validation)\n3. 认证过程采用多线程操作\n4. 
为了方便配置,不使用数据库,改成文件操作,认证成功的保存在本地的txt文档中。\n\n\n\n注:kuaidaili未调试成功,所以该代码中仅有xicidaili\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.772382378578186, "alphanum_fraction": 0.8019726872444153, "avg_line_length": 19.200000762939453, "blob_id": "a8a6dc63dcc9044550da7b3899e7827b5b10134f", "content_id": "e53f7f565a6129e325ca945db110b8f37c06075b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3090, "license_type": "no_license", "max_line_length": 199, "num_lines": 65, "path": "/Foursquare research/Research thoughts/研究思路.md", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "[TOC]\n\n# 研究\n\n## 问题\n\n1. **用户的temporal dynamics:在协同过滤的推荐模型中,如何加入用户的图片和用户的temporal dynamics可以使模型有更好的效果?**\n\n A picture is worth a thousand words. 图片放映的信息很多时候不是一两句话所能描述,将图片加入推荐系统,也许能较好改善推荐效果。图片+协同过滤的模型已经被许多人所研究,但他们的研究中大都忽视了用户的偏好变动,不同的时期用户表现出不同的偏好,探索过去的偏好,甚至在一定程度上可以预测未来的偏好。\n\n ​\n\n2. **可解释性:如何让模型具有可解释性,能够直观地解释某个时期地用户偏好是怎样的,以及用户的偏好是如何变动的?**\n\n 图片+协同过滤的模型都是简单地将图片特征输入到矩阵分解模型中,使矩阵分解推荐缺乏可解释性,即我们不知道为什么要将这个商品推荐给用户。图片有着丰富的视觉特征,一个用户过去的图片经常出现的特征,很可能就是用户偏好这个商品(或POI)的原因。基于这样的思路,我们考虑如何利用这些图片的特征,去发现用户不同时期的偏好究竟是由哪些元素组成。如何去发现偏好?这里考虑style discovery的方法。\n\n ​\n\n3. **POI的temporal dynamics(可选的思路,暂不考虑): POI的temporal dynamics不仅体现在用户上,还体现在景点上,如何去发现景点的特征变化?**\n\n 景点在不同时期表现出不同的特征,例如新建的设施、举办的活动、推出的新品等。\n\n\n\n\n\n## 模型\n\n### 要达到效果\n\n#### 发现用户偏好变动\n\n可以知道用户在某个时期有哪几种偏好,每种偏好:(以下3种方式都测试一下,看哪个效果好)\n\n1. 由哪些图片组成,每张图片的权重是多少,以及这些图片的不同区域的权重是多少\n\n\n2. 由哪些区域组成,每个区域的权重是多少\n\n\n3. 
由哪些图片组成,每张图片的权重是多少。\n\n#### 推荐\n\n具体的设想:输入用户最近的5张图片,用这些图片去更新用户所有时期的偏好分布,再把这个更新好的代表该时期的偏好分布输入到CF模型中,就可以预测在这个时期,用户拥有这样的偏好分布的情况下,应该给用户推荐怎样的商品。\n\n### Temporal dynamics(第一个问题)\n\n#### 思路一\n\n对于某个时期的用户的偏好分为三部分,1.总体来看的一个偏好分布 2.某个时期的偏好偏移 3.对于图片很少的用户可以考虑用群体的图片或所有的图片作为一个初始偏好分布\n\n新建一个和attention一样的层用来学习偏移\n\n#### 思路二\n\n![1526820920626](D:\\Project\\待上传\\Foursquare research\\Research thoughts\\1526820920626.png)\n\n\n\n### 可解释性(第二个问题)\n\n#### 如何提取偏好?\n\n考虑LDA和MF的方法。先考虑MF的方法,1维度是图片,1维度是图片特征向量,这样就可以得到k个偏好所对应得图片分布。\n\n\n\n\n\n" }, { "alpha_fraction": 0.6360917091369629, "alphanum_fraction": 0.6370887160301208, "avg_line_length": 19.489795684814453, "blob_id": "f14f95a16973868ce8e115b999b435524cdab822", "content_id": "68b1b5a8b75b6e2aed2e36bf2cb3bd4296487e0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1003, "license_type": "permissive", "max_line_length": 51, "num_lines": 49, "path": "/Foursquare research/Projects/Get venues/tutorial/items.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\nfrom scrapy import Field\n\n\nclass TutorialItem(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n pass\n\n\nclass Hotel_Item_Detail(scrapy.Item):\n item_type = Field()\n detail_id = Field()\n geo_id=Field()\n name = Field()\n locality = Field()\n region = Field()\n postal_code = Field()\n country = Field()\n rating = Field()\n review_count = Field()\n price_range = Field()\n url = Field()\n rank=Field()\n traveler_photo_nums = Field()\n offical_photo_nums = Field()\n\nclass Review_Detail(scrapy.Item):\n location=Field()\n username=Field()\n title=Field()\n review_content=Field()\n hotel_id=Field()\n\nclass photo_links(scrapy.Item):\n photo_link = Field()\n hotel_id = Field()\n 
photo_from=Field()\n\n\nclass venuesItem(scrapy.Item):\n venue=Field()" }, { "alpha_fraction": 0.5985342264175415, "alphanum_fraction": 0.6026058793067932, "avg_line_length": 29.700000762939453, "blob_id": "2ed087fd8156f255a27990eba867c1e716559ab2", "content_id": "6f07ac0746ab75cc0b8c81f32829d3362adbb170", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "permissive", "max_line_length": 68, "num_lines": 40, "path": "/Foursquare research/Projects/Get venues/tutorial/pipelines.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom tutorial.items import *\nimport json\n\nclass TutorialPipeline(object):\n def __init__(self):\n self.count = 0\n self.hotel_detail_all = []\n self.review_all=[]\n\n def open_spider(self, spider):\n self.venues = open('venues.json', 'w', encoding='utf-8')\n self.review_file=open('review.json','w',encoding='utf-8')\n self.photo_file = open('photo.json', 'w', encoding='utf-8')\n\n def close_spider(self, spider):\n self.venues.close()\n self.review_file.close()\n self.photo_file.close()\n\n def process_item(self, item, spider):\n if isinstance(item,venuesItem):\n line = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n self.venues.write(line)\n\n elif isinstance(item,photo_links):\n line = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n self.photo_file.write(line)\n\n else:\n line = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n self.review_file.write(line)\n\n\n return item\n" }, { "alpha_fraction": 0.5011777877807617, "alphanum_fraction": 0.5101842880249023, "avg_line_length": 35.26633071899414, "blob_id": "5d70c27c05ff2af6d24b6e4f6c118ab253d2d607", "content_id": "0d065755685f6dc6005bb0e61c0fdb74d2f67a38", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7847, "license_type": "no_license", "max_line_length": 175, "num_lines": 199, "path": "/Spider/XieCheng Hotel Data/xiecheng selenium/services/xiechengservices/DriveServices.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n__author__ = 'LiuYang'\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nfrom scrapy.http import HtmlResponse\nfrom datetime import datetime\nimport re\nimport time\nimport uuid\nimport random\nfrom DAO.xiecheng import xiechengDAO\n\nclass XiechengDriverService(object):\n\n def __init__(self):\n self.driver = webdriver.Chrome()\n self.xiechengDao = xiechengDAO()\n # 存放列表页数据\n self.listPageInfo = []\n # 存放酒店详情数据\n self.hotelItem = {}\n # 存放酒店评论数据\n self.commList = []\n\n # 打开携程首页\n def start(self):\n # self.driver.get(\"http://hotels.ctrip.com/hotel/nanjing12#ctm_ref=hod_hp_sb_lst\")\n self.driver.get(\"http://hotels.ctrip.com/hotel/nanjing12\")\n # 将界面最大化\n self.driver.maximize_window()\n self.driver.implicitly_wait(30)\n self.crawlxiecheng()\n\n\n def crawlxiecheng(self):\n # 单页循环次数\n loopNum = 0\n # 标识当前页面是否已经爬取:False为未处理,反之为已处理\n ifHandle = False\n # 获取总页面数\n pageNum = 163#280\n while(pageNum>=1):\n # 循环次数加1\n loopNum = loopNum + 1\n # 到达页面90%处\n js=\"var q=document.documentElement.scrollTop=9000\"\n self.driver.execute_script(js)\n # 当页面中出现“返前价”字样时,爬取页面并跳转到下一页\n if u\"收藏\" in self.driver.page_source:\n # 对未解析过的页面进行解析\n if ifHandle==False:\n self.crawllianjie(self.driver.page_source)\n ifHandle = True\n # 跳转到下一页\n if u\"下一页\" in self.driver.page_source:\n # self.driver.find_element_by_partial_link_text(u\"下一页\").click()\n pageNum = pageNum - 1\n # self.driver.find_element_by_xpath(\"//a[@class='c_down']\").click()\n element = self.driver.find_element_by_xpath(\"//a[@class='c_down']\")\n 
self.driver.execute_script(\"arguments[0].click();\", element)\n # 处理标识重新置为未处理\n ifHandle = False\n # 单页循环次数置为零\n loopNum = 0\n time.sleep(random.uniform(3, 6))\n print(\"页数:\" + str(pageNum))\n # try:\n # if u\"下一页\" in self.driver.page_source:\n # # self.driver.find_element_by_partial_link_text(u\"下一页\").click()\n # pageNum = pageNum - 1\n # self.driver.find_element_by_xpath(\"//a[@class='c_down']\").click()\n # # 处理标识重新置为未处理\n # ifHandle = False\n # # 单页循环次数置为零\n # loopNum = 0\n # time.sleep(random.uniform(3, 6))\n # print(\"页数:\" + str(pageNum))\n # except :\n # pageNum = pageNum + 1\n # 将当前的错误页保存下来\n # self.driver.save_screenshot('%s.png'%pageNum)\n # 如果单页循环次数不为零,说明没有跳转到下一页\n if loopNum != 0:\n # 循环次数较大的情况下(此处预定为15次)说明页面可能加载失败,跳出循环,否则继续循环获取\n if loopNum < 15:\n time.sleep(3)\n continue\n else:\n break\n return False if pageNum > 1 else True\n # 爬取酒店基本信息\n def pagecollect(self,response):\n items = []\n commentData = response.xpath(\"//div[@id='hotel_list']/div[@class='searchresult_list ']/ul[@class='searchresult_info']\")\n\n for itemData in commentData:\n itemDict = dict()\n\n # 唯一标识\n itemDict['guid'] = uuid.uuid1()\n\n # 城市名\n itemDict[\"city\"] = \"南京\"\n\n # 名称\n itemDict[\"title\"] = itemData.xpath(\"li/h2/a[@title]/text()\").extract()[0]\n\n # 价格\n price = itemData.xpath(\"li[@class='hotel_price_icon']/div/span/text()\").extract()[0]\n if price:\n itemDict[\"price\"] = price\n # print(price)\n else:\n itemDict[\"price\"] = \"\"\n\n # 评分\n score = itemData.xpath(\"li[@class='searchresult_info_judge ']/div/a/span[@class='hotel_value']/text()\").extract()[0]\n if score:\n itemDict[\"score\"] = score\n else:\n itemDict[\"score\"] = \" \"\n\n # 位置\n location = itemData.xpath(\"li[@class='searchresult_info_name']/p[@class='searchresult_htladdress']/text()\").extract()[0]\n Location = location.split(\" \")\n if(Location):\n itemDict[\"location\"] = Location[0]\n else:\n itemDict[\"location\"] = \" \"\n\n # 评论\n discussnum = 
itemData.xpath(\"li[@class='searchresult_info_judge ']/div/a/span[@class='hotel_judgement']/text()\").extract()[0]\n Discuss = re.sub('\\D','',discussnum)\n if Discuss:\n itemDict[\"discussnum\"] = Discuss\n else:\n itemDict[\"discussnum\"] = \"\"\n\n # 有无wifi\n havewifi1 = itemData.xpath(\"li[@class='searchresult_info_name']/div[@class='icon_list']/i[@class='icons-facility32']\")\n havawifi2 = itemData.xpath(\"li[@class='searchresult_info_name']/div[@class='icon_list']/i[@class='icons-facility01']\")\n if (havewifi1 or havawifi2):\n itemDict[\"havawifi\"] = \"yes\"\n else:\n itemDict[\"havawifi\"] = \"no\"\n\n # 用户推荐百分比\n recommend = itemData.xpath(\"li[@class='searchresult_info_judge ']/div[@class='searchresult_judge_box']/a/span[@class='total_judgement_score']/text()\").extract()[1]\n Recommend = re.sub('\\D','',recommend)\n Recommend += \"%\"\n if Recommend:\n itemDict[\"recommend\"] = Recommend\n else:\n itemDict[\"recommend\"] = \" \"\n\n items.append(itemDict)\n return items\n\n\n # 爬取页面链接\n def crawllianjie(self,page_sourse):\n response = HtmlResponse(url=\"my HTML string\",body=page_sourse,encoding=\"utf-8\")\n\n A = response.xpath(\"//div[@class='searchresult_list ']/ul\")\n # 获取每个酒店的链接\n for B in A:\n url = B.xpath(\"li[@class='searchresult_info_name']/h2/a/@href\").extract()\n # 评论\n commnum = B.xpath(\"li[@class='searchresult_info_judge ']/div/a/span[@class='hotel_judgement']/text()\").extract()\n if len(commnum):\n Discuss = re.sub('\\D','',commnum[0])\n if len(Discuss):\n pass\n else:\n Discuss = 0\n else:\n Discuss = 0\n self.listPageInfo.append({\"url\":url[0], \"comm_num\":Discuss, \"city\":\"南京\"})\n self.saveListPageInfo()\n if len(self.listPageInfo) == 25:\n pass\n else:\n print(len(self.listPageInfo))\n self.listPageInfo = []\n\n\n def saveListPageInfo(self):\n self.xiechengDao.savehotellink(self.listPageInfo)\n\n def depose(self):\n self.driver.close()\n\nif __name__==\"__main__\":\n xiechengService = XiechengDriverService()\n 
xiechengService.start()\n\n xiechengService.depose()\n" }, { "alpha_fraction": 0.5232515335083008, "alphanum_fraction": 0.538996696472168, "avg_line_length": 38.550724029541016, "blob_id": "214d9967cb9306551434c1d707c8ee5372a09d01", "content_id": "22b631b14f2aa4b254bdfa1643a6d329bbb1ff82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5462, "license_type": "no_license", "max_line_length": 167, "num_lines": 138, "path": "/tutorial/tutorial/spiders/scraper.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import scrapy\nimport json\nimport re\nfrom scrapy import Selector\nfrom tutorial.items import *\nfrom urllib.parse import urlencode\n\nheaders = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept - Encoding':'gzip, deflate, br',\n 'Accept-Language':'en-US,en;q=0.9',\n 'Connection':'Keep-Alive',\n\t 'Cache-Control': 'max-age=0',\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}\n\nclass scraper(scrapy.Spider):\n name = \"scraper\"\n base_url = \"http://www.tripadvisor.cn\"\n\n def __init__(self):\n\n self.hotel_photo_links=[]\n \n def start_requests(self):\n\t yield scrapy.Request(url=self.base_url+\"/Hotels-g297463-Chengdu_Sichuan-Hotels.html\", callback=self.parse)\n\n \n\n def parse(self, response):\n\n\n\n hotel_urls = response.xpath('//h2[@class=\"listing_title\"]//a[contains(@class, \"property_title\")]/@href').extract()\n\n if hotel_urls:\n for hotel_url in hotel_urls:\n hotel_completed_url = self.base_url + hotel_url\n yield scrapy.Request(url=hotel_completed_url,\n callback=self.parse_fetch_hotel)\n break\n\n\n\n def parse_fetch_hotel(self, response):\n hxs=Selector(response)\n\n hi = Hotel_Item_Detail()\n\n hi['item_type'] = 'hotel'\n hi['detail_id'] = re.search('d[0-9]+', response.url).group(0).strip('d')\n 
hi['geo_id']=re.search('g[0-9]+',response.url).group(0).strip('g')\n hi['name'] = hxs.xpath( '//h1[contains(@id, \"HEADING\")]/text()').extract()[0]\n hi['url'] = response.url\n hi['rank']=hxs.xpath('//*[@id=\"taplc_location_detail_header_hotels_0\"]//div/span/b/text()').extract()[0]\n hi['traveler_photo_nums']=int(re.search('[0-9]+',\n hxs.xpath('//*[@id=\"taplc_hr_atf_north_star_nostalgic_0\"]//div[@class=\"albumInfo\"]/text()').extract()[0]).group(0))\n hi['offical_photo_nums']=int(re.search('[0-9]+',\n hxs.xpath('//*[@id=\"taplc_hr_atf_north_star_nostalgic_0\"]//span[@class=\"see_all_count\"]/text()').extract()[0]).group(0))\\\n -hi['traveler_photo_nums']\n yield hi\n\n for count,j in enumerate(['offical_photo_nums','traveler_photo_nums']):\n for i in range(int(hi[j]/50)+1):\n headers={\n 'geo':hi['geo_id'],\n 'detail':hi['detail_id'],\n 'albumViewMode':'images',\n 'aggregationId':101,\n 'albumid':101,\n 'cnt':50,\n 'offset': 50*i,\n 'filter':count+1,\n 'albumPartialsToUpdate':'partial',\n }\n photo_url='https://www.tripadvisor.cn/LocationPhotoAlbum?'+urlencode(headers)\n yield scrapy.Request(photo_url,self.parse_fetch_photo,\n meta={'id':hi['detail_id'],'count':count})\n break\n\n\n\n\n #get review\n pages=hxs.xpath('//*[@id=\"taplc_location_reviews_list_hotels_0\"]//div[@class=\"pageNumbers\"]/span[@class=\"pageNum last taLnk \"]/@data-page-number').extract()[0]\n pages=int(pages)\n for i in range(pages):\n split = response.url.split('Reviews')\n review_page_url = ('Reviews-or' + str(i * 5)).join(split)\n yield scrapy.Request(review_page_url,self.parse_review,meta={'id':hi['detail_id']})\n if i > 2:\n break\n\n\n\n def parse_fetch_photo(self,response):\n photos=photo_links()\n\n sel=Selector(response)\n photo_link=sel.xpath('//img[@onload]/@src').extract()\n if response.meta['count']==0:\n photos['photo_from']='official'\n else:\n photos['photo_from'] = 'traveler'\n photos['hotel_id'] =response.meta['id']\n\n temp=[]\n for i in photo_link:\n temp.append(i)\n 
photos['photo_link']=temp\n\n\n\n yield photos\n\n\n def parse_review(self,response):\n pass\n\n sel=Selector(response)\n\n\n username=sel.xpath('//*[@id=\"taplc_location_reviews_list_hotels_0\"]//div[@class=\"review-conta'\n 'iner\"]//span[@class=\"expand_inline scrname\"]/text()').extract()\n location=sel.xpath('//*[@id=\"taplc_location_reviews_list_hotels_0\"]//div[@class=\"review-conta'\n 'iner\"]//span[@class=\"expand_inline userLocation\"]/text()').extract()\n title=sel.xpath('//*[@id=\"taplc_location_reviews_list_hotels_0\"]//div[@class=\"review-container\"]'\n '//span[@class=\"noQuotes\"]/text()').extract()\n review_content=sel.xpath('//*[@id=\"taplc_location_reviews_list_hotels_0\"]//div[@class=\"review-container\"]'\n '//div[@class=\"wrap\"]/div[@class=\"prw_rup prw_reviews_text_summary_hsx\"]'\n '/div/p[@class=\"partial_entry\"]/text()').extract()\n\n for i in range(len(review_content)):\n Reviews = Review_Detail()\n Reviews['location']=location[i]\n Reviews['username'] = username[i]\n Reviews['title'] = title[i]\n Reviews['review_content'] = review_content[i]\n Reviews['hotel_id']=response.meta['id']\n yield Reviews\n\n\n\n\n" }, { "alpha_fraction": 0.7445783019065857, "alphanum_fraction": 0.7566264867782593, "avg_line_length": 49.10344696044922, "blob_id": "d3615c8af8acf4e1355f55e37b3f981501589506", "content_id": "d1295244e1d8c4e007cb7d03c571d705d8069cca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2929, "license_type": "permissive", "max_line_length": 337, "num_lines": 58, "path": "/Foursquare research/Projects/Get venues/README.md", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "# Get venues\n\nGet venues in a rectangle. I improve the method used in [foursquare-venue-scraper](https://github.com/minimaxir/foursquare-venue-scraper), which is more effecient and complete. 
\n\n\n\n\n\n# foursquare-venue-scraper\n\n![](sf_map_avg_rating.png)\n\nA [Foursquare](https://foursquare.com) data scraper that gathers all venues within a specified geographic area. The data retrieved can then be used for statistical analysis and fun data visualizations.\n\n## Overview\n\nThe script queries the official [Foursquare API](https://developer.foursquare.com/docs) (with a user-provided `client_id` and `client_secret` from a Foursquare application) to search for venues, and then query additional metadata for each unique venue.\n\nThe specified area is defined by a geographic bounding box (by latitude and longitude). Since the API can only return up to 50 venues per search, the bounding box is broken into sub-bounding boxes as a form of grid search (by default, the `grid_size` is 100, so 100x100=10000 searches total).\n\nThe output file is a CSV with the following fields:\n\n* `id` — The Foursquare ID for the venue.\n* `name` — The name of the venue.\n* `categories` — The list of categories of the venue.\n* `lat` — Latitude\n* `long` — Longitude\n* `num_checkins` — Number of Foursquare Checkins\n* `num_likes` — Number of Foursquare Likes\n* `price` — Price Tier between 1-4 (i.e. $-$$$$)\n* `rating` — Rating for the venue.\n* `num_ratings` — Number of ratings for the venue.\n* `url_venue` — URL of the venue.\n* `url_foursquare` — URL for the Foursquare venue.\n\nA demo CSV of the Top 100 San Francisco venues (by number of checkins) is available in `foursquare_sf_sample.csv`.\n\n## Usage\n\nSet the parameters in `config.yaml` as appropriate, then run `foursquare_venues.py`.\n\nThe Foursquare API has a **daily [rate limit](https://developer.foursquare.com/docs/api/troubleshooting/rate-limits) of 1,000 requests/day** for free accounts, but with verification, the rate limit upgrades to **100,000/day**. 
You can test the script with a small bounding box/low grid size to see if it fits your needs before upgrading.\n\nBoth of the API endpoints used in this script have a **rate limit of 5,000 requests/hour**. Once the script hits that limit, it'll sleep for an hour. Plan scraping accordingly.\n\n## Foursquare Terms of Use\n\nThis script follows the specifications of the Foursquare API, and does not circumvent the API and its rate limits. If you're using the data for projects other than data analysis, make sure to follow the [Foursquare Terms of Use](https://developer.foursquare.com/docs/terms-of-use/overview).\n\n## Maintainer\n\nMax Woolf ([@minimaxir](http://minimaxir.com))\n\n*Max's open-source projects are supported by his [Patreon](https://www.patreon.com/minimaxir). If you found this project helpful, any monetary contributions to the Patreon are appreciated and will be put to good creative use.*\n\n## License\n\nMIT" }, { "alpha_fraction": 0.5251753330230713, "alphanum_fraction": 0.6241232752799988, "avg_line_length": 52.22666549682617, "blob_id": "000731c85fb40f442bb8e2bdcbde36ef13465b22", "content_id": "e6b5f9c0455e8c829ef068f9cc383cd8a480d2f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8994, "license_type": "no_license", "max_line_length": 692, "num_lines": 150, "path": "/Spider/fangtianxia data with threading/fang_data_threading.py", "repo_name": "Zheng392/Python-Repository", "src_encoding": "UTF-8", "text": "import requests,json,random\nimport re,threading\nfrom lxml import etree\nimport time\n\nlock=threading.Lock()\n\nuser_agent_list = [\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\" ,\\\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\", \\\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\", \\\n 
\"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6\", \\\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1\", \\\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5\", \\\n \"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5\", \\\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\", \\\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\", \\\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\", \\\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\", \\\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\", \\\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\", \\\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\", \\\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\", \\\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3\", \\\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\", \\\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\"\n ]\ncount=0\n\ndef fang_com(page_url,page): ##列表页\n print('page is ',i)\n header={}\n\n header['User-Agent']=random.choice(user_agent_list)\n header.update({\n \"Host\":\"esf.sz.fang.com\",\n #\"Cookie\":\"global_cookie=fb1g6d0w64d2cmu86sv4g9n3va0j137sk48; 
vh_newhouse=3_1491312022_2816%5B%3A%7C%40%7C%3A%5D833300ee3177d88529c7aa418942ece9; newhouse_user_guid=2F163DE7-8201-7FA9-2FB6-E507FE6F03B1; SoufunSessionID_Esf=3_1495389730_232; sf_source=; s=; showAdsh=1; hlist_xfadhq_SZ=0%7c2017%2f5%2f25+1%3a21%3a47%7c; city=sz; __utmt_t0=1; __utmt_t1=1; __utmt_t2=1; logGuid=a768dd46-b85b-47f4-a7a0-0a6596cab4cd; __utma=147393320.1111837171.1491290389.1495646208.1495650134.9; __utmb=147393320.12.10.1495650134; __utmc=147393320; __utmz=147393320.1495650134.9.4.utmcsr=esf.sz.fang.com|utmccn=(referral)|utmcmd=referral|utmcct=/; unique_cookie=U_cqyov4ut5vv1al8e2858qhzgt17j2z06mph*14\"\n })\n while(1): ###这个主要是,fang.com会随机返回几个10054或者10053,如果连页面都没读取到,提取就是后话了,这网站没有封杀,即使使用单ip只会很少时候随机来几个10054 ,('Connection aborted.', error(10054, ''))\n text=''\n try:\n text=requests.get(page_url,headers=header,timeout=10).text\n #print text\n except Exception as e:\n print(e)\n if text!='':\n break\n\n se = etree.HTML(text) ###########为了利于大家学习,这段演示xpath提取信息\n all_dl=se.xpath('//dl[@class=\"list rel\"]')\n print(len(all_dl))\n for dl in all_dl:\n title=dl.xpath('.//dd[@class=\"info rel floatr\"]/p[@class=\"title\"]/a/text()')[0]\n url=dl.xpath('.//dd[@class=\"info rel floatr\"]/p[@class=\"title\"]/a/@href')[0]\n url='http://esf.sz.fang.com'+url\n\n info_list=dl.xpath('.//dd[@class=\"info rel floatr\"]/p[@class=\"mt12\"]/text()')\n #print json.dumps(info,ensure_ascii=False) #py2显示汉字,py3可以直接print mt12\n info=''\n for l in info_list:\n l2= re.findall('\\S*',l)[0] ###消除空白和换行\n #print m_str\n info+=l2+'|'\n\n time.sleep(1)\n # total_price,price_squere,huxin,cankao_shoufu,shiyong_mianji,jianzhu_mianji,years,discription=get_detail(url)\n\n\n lock.acquire() ###这里叫锁,一是保证count计数准确,而是不会导致多个线程乱print,导致看不清楚。加锁的目的是别的线程不能运行这段代码了。但我之前看到有的人乱加锁,把消耗时间很长的代码加锁,那样导致多线程就基本个废物\n global count\n count+=1\n print(time.strftime('%H:%M:%S', time.localtime(time.time())), ' ', count)\n print('列表页:')\n print(' title: %s\\n url: %s\\n info: %s\\n' % (title, url, info))\n\n 
print('详情页:')\n # print(\n # ' total_price: %s\\n price_squere: %s\\n huxin: %s\\n cankao_shoufu: %s\\n shiyong_mianji: %s\\n jianzhu_mianji: %s\\n years: %s \\n' % (\n # total_price, price_squere, huxin, cankao_shoufu, shiyong_mianji, jianzhu_mianji, years))\n print('**************************************************************')\n lock.release()\n\n\n\ndef get_detail(url): ###详情页\n\n header={'User-Agent':random.choice(user_agent_list)}\n header.update({\"Host\":\"esf.sz.fang.com\"})\n\n while(1):\n content=''\n try:\n content=requests.get(url,headers=header,timeout=10).content\n except Exception as e:\n print(e)\n pass\n if content!='':\n break\n\n content=content.decode('gbk').encode('utf8') ##查看网页源代码可看到是gbk编码,直接print的话,如果你在pycharm设置控制台是utf8编码,那么控制台的中文则会乱码,cmd是gbk的恰好可以显示。如果你在pycharm设置控制台是utf8编码,需要这样做\n #print content\n\n inforTxt=getlist0(re.findall('(<div class=\"inforTxt\">[\\s\\S]*?)<ul class=\"tool\">',content)) ###########为了利于大家学习,这段演示正则表达式提取信息,某些信息可能在有的房子界面没有,要做好判断\n #print inforTxt\n\n total_price=getlist0(re.findall('</span>价:<span class=\"red20b\">(.*?)</span>',inforTxt))\n\n price_squere=getlist0(re.findall('class=\"black\">万</span>\\((\\d+?)元[\\s\\S]*?<a id=\"agantesfxq_B02_03\"',inforTxt))\n huxin=getlist0(re.findall('<dd class=\"gray6\"><span class=\"gray6\">户<span class=\"padl27\"></span>型:</span>(.*?)</dd>',inforTxt))\n cankao_shoufu=getlist0(re.findall('参考首付:</span><span class=\"black floatl\">(.*?万)</span> </dd>',inforTxt))\n shiyong_mianji=getlist0(re.findall('>使用面积:<span class=\"black \">(.*?)</span></dd>',inforTxt))\n shiyong_mianji=getlist0(re.findall('\\d+',shiyong_mianji))\n jianzhu_mianji=getlist0(re.findall('建筑面积:<span class=\"black \">(.*?)</span></dd>',inforTxt))\n jianzhu_mianji=getlist0(re.findall('\\d+',jianzhu_mianji))\n years=getlist0(re.findall('<span class=\"gray6\">年<span class=\"padl27\"></span>代:</span>(.*?)</dd>',inforTxt))\n\n discription=getlist0(re.findall('style=\"-moz-user-select: none;\">([\\s\\S]*?)<div 
class=\"leftBox\"',content))\n #print discription\n #print total_price,price_squere,huxin,cankao_shoufu,shiyong_mianji,jianzhu_mianji,years\n\n return total_price,price_squere,huxin,cankao_shoufu,shiyong_mianji,jianzhu_mianji,years,discription\n\n\n\n\n#get_detail('http://esf.sz.fang.com/chushou/3_193928457.htm')\ndef getlist0(list):\n if list:\n return list[0]\n else:\n return '空'\n\nif __name__=='__main__':\n ''' ##这个是单线程,单线程爬很慢,3000个房子信息,一个5秒,那也得15000秒了,很耽误时间\n for i in range(1,101):\n page_url='http://esf.sz.fang.com/house/i3%s'%i\n fang_com(page_url)\n '''\n threads=[] ###这个是演示多线程爬取\n for i in range(1,101): #开了100线程,这样开100线程去爬100页面的详情页面,因为fang.com只能看100页\n t=threading.Thread(target=fang_com,args=('http://esf.sz.fang.com/house/i3%s'%i,i)) ###这样做没问题,但如果你是爬取1000页面,也这样做就不合适了,python开多了线程会导致线程创建失败,100线程已经很快了,网速是瓶颈了这时候,我开100线程时候网速是800KB左右的网速,我宽带才4M,运营商还算比较良心了,4M宽带400k\n\n threads.append(t)\n\n t.start()\n\n for t in threads:\n t.join()\n\n print('over')\n" } ]
44
samueltenka/LearnToHack-Compiler
https://github.com/samueltenka/LearnToHack-Compiler
3eea440019562e5b0ccdd6621f3ae0376d7bf84d
301dacbe2d0da5e55c3fc9c6bda07351a1ea6c8e
4e45f7c0664a5dfae76112a6272443853c6d89af
refs/heads/master
2020-12-31T04:28:52.911099
2016-01-10T03:02:33
2016-01-10T03:02:33
46,603,141
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5817132592201233, "alphanum_fraction": 0.5941676497459412, "avg_line_length": 34.021278381347656, "blob_id": "5dc60cae03c2a86726100eb63d6a901849a23bbe", "content_id": "838929dc2ca106258c48645f7ebdedc6a035b789", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3292, "license_type": "no_license", "max_line_length": 82, "num_lines": 94, "path": "/Parser.py", "repo_name": "samueltenka/LearnToHack-Compiler", "src_encoding": "UTF-8", "text": "import EnsureVersion3\n\ndef is_number(string):\n return string and (string[0] in '0123456789.')\ndef is_identifier(string):\n return string and (string[0] in 'abcdefghijklmnopqrstuvwxyz')\n\nclass Parser:\n def __init__(self, program_text):\n self.tokenized = program_text.split()\n self.index = 0\n self.variable_addresses = {'input':0, 'output':1}\n self.number_addresses = dict([]); self.numbers = []\n self.next_free_address = 3\n self.machine_code = []\n def peek(self):\n return self.tokenized[self.index]\n def match(self, token):\n assert(self.peek() == token)\n self.index += 1\n def at_end(self):\n return self.index >= len(self.tokenized)\n def gen_code(self,instr,a,r):\n self.machine_code.append(instr+' '+str(a)+' '+str(r))\n def use_next_free_address(self):\n nfa = self.next_free_address\n self.next_free_address += 1\n return nfa\n\n def write_constants_table(self):\n l = len(self.machine_code)\n for l in self.machine_code:\n i, n, r = l.split(' ')\n if i=='loadconst':\n l[:] = 'load %s %s' % (int(self.number_addresses[n]) + l, r)\n for n in self.numbers:\n self.machine_code.append(n)\n def match_number(self):\n num=float(self.peek())\n if num not in self.number_addresses:\n self.number_addresses[num] = self.use_next_free_address()\n self.numbers.append(num)\n self.gen_code('loadconst',num,0)\n self.match(self.peek())\n def match_variable(self):\n var=self.peek()\n if var not in self.variable_addresses:\n self.variable_addresses[var] = 
self.use_next_free_address()\n self.next_free_address += 1\n self.gen_code('load',self.variable_addresses[var],0)\n self.match(self.peek())\n def match_factor(self):\n if is_number(self.peek()): self.match_number()\n elif is_identifier(self.peek()): self.match_identifier()\n else:\n temp1 = self.use_next_free_address()\n temp2 = self.use_next_free_address()\n self.gen_code('store',temp1,1)\n self.gen_code('store',temp2,2)\n self.match('(')\n self.match_expression()\n self.match(')')\n self.gen_code('load',temp1,1)\n self.gen_code('load',temp2,2)\n def match_term(self):\n self.match_factor()\n while not self.at_end() and self.peek() in ['*']:\n self.match('*')\n self.gen_code('swap',0,1)\n self.match_factor()\n self.gen_code('multiply',0,1)\n def match_expression(self):\n self.match_term()\n while not self.at_end() and self.peek() in ['+']:\n self.match('+')\n self.gen_code('swap',0,2)\n self.match_term()\n self.gen_code('add',0,2)\n def match_statement(self):\n pass\n def match_assignment(self):\n #self.match_variable() #generates unnecessary load statement\n var=self.peek(); assert(is_variable(var)); self.match(var)\n self.match('=')\n self.match_expression()\n self.gen_code('store',self.variable_addresses[var],0)\n #NOTE: notation easier to parse: expr->varname (assignment written backward)\n def match_if(self):\n self.match('if')\n self.match('(')\n self.match(')')\n self.match_statement()\n def match_while(self):\n pass\n" }, { "alpha_fraction": 0.5965909361839294, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 21, "blob_id": "d58151f96a1480199b32f524b7b9cb84646423ff", "content_id": "1fe93baac27ee0f3d64173ce37e02e42ceabf389", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 41, "num_lines": 8, "path": "/ParserTest.py", "repo_name": "samueltenka/LearnToHack-Compiler", "src_encoding": "UTF-8", "text": "from Parser import Parser\n\nprogram = 
'1 * ( 2 + 3 * 4 + 5 ) + 6 * 7'\nP = Parser(program)\nP.match_expression()\nP.write_constants_table()\nfor mc in P.machine_code:\n print(mc)\n" }, { "alpha_fraction": 0.5738466382026672, "alphanum_fraction": 0.5974112153053284, "avg_line_length": 39.716217041015625, "blob_id": "c761d43d31ed9ab2d008c16d2a898f62c46c3bf9", "content_id": "07afd1e5c097c69d860a43c888db0c5ab6fb1b28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3013, "license_type": "no_license", "max_line_length": 91, "num_lines": 74, "path": "/Machine.py", "repo_name": "samueltenka/LearnToHack-Compiler", "src_encoding": "UTF-8", "text": "#Michigan Hackers Presentation on Compilers\nimport EnsureVersion3\n\n'''\ninstructions:\nload\t A B\t R[B] <-- M[R[A]]\nstore\t A B\t R[B] --> M[R[A]]\ncopy\t\tA B\t R[B] <-- R[A]\nset\t\t A B\t B --> R[A]\nbranchif0\tA B\t PC <-- R[A] if R[B]==0\nbranchifneg\tA B\t PC <-- R[A] if R[B] < 0\njump\t\tA B\t PC <-- R[A] (so B is dummy var.)\nadd\t\t A B\t R[B] <-- R[B] + R[A]\nsub\t\t A B\t R[B] <-- R[B] - R[A]\nmul\t\t A B\t R[B] <-- R[B] * R[A]\ndiv\t\t A B\t R[B] <-- R[B] / R[A]\nmod\t\t A B\t R[B] <-- R[B] % R[A]\nNote: program might also contain literal numbers in addition to instructions.\nMachine halts when program counter reaches -1.\n\nMachine Specifics:\nEach memory address contains a float or program instruction.\nFloats are rounded to integers when interpreted as addresses.\nThe program counter starts at 4.\nThe first 4 memory addresses are IO devices:\n0 [Input, e.g. temperature sensor]\n1 [Input, e.g. joystick]\n2 [Output, e.g. LED]\n3 [Output, e.g. 
motor]\n4&beyond [Program then data]\n'''\n\nclass Machine:\n PRECISION = 0.0001\n def __init__(self, num_addresses, num_registers):\n self.memory = [0.0 for i in range(num_addresses)]\n self.registers = [0.0 for i in range(num_registers)]\n self.program_counter = None\n def load_program(self, lines, inputs=(0.0,0.0)):\n self.memory[:2] = inputs\n for i in range(len(lines)):\n self.memory[4+i] = lines[i] if ' ' in lines[i] else eval(lines[i])\n self.program_counter = 4\n def print_mem(self, l=8):\n print('memory', ' '.join(str(s).replace(' ','_') for s in self.memory[:l])+'. . .')\n print('registers', self.registers)\n def step(self):\n instr = self.memory[self.program_counter]\n print(\"instr \", self.program_counter, instr)\n command, arg0, arg1 = instr.split(' ')\n getattr(self,command)(eval(arg0),eval(arg1))\n self.program_counter += 1\n def at_end(self):\n return self.program_counter == -1\n\n def load(self, r, r_): self.registers[r_] = self.memory[int(self.registers[r])]\n def store(self, r, r_): self.memory[int(self.registers[r])] = self.registers[r_]\n def copy(self, r, r_): self.registers[r_] = self.registers[r]\n def set(self, r, f): self.registers[r] = f\n\n def branchif0(self, r, r_):\n if self.registers[r_]==0.0: self.jump(r)\n def branchifneg(self, r, r_):\n if self.registers[r_] < 0.0: self.jump(r)\n def jump(self, r, dummy):\n #subtract 1 to counter end-of-cycle PC increment:\n self.program_counter = int(self.registers[r])-1\n\n def add (self, r, r_): self.registers[r_] += self.registers[r]\n def sub (self, r, r_): self.registers[r_] -= self.registers[r]\n def mul (self, r, r_): self.registers[r_] *= self.registers[r]\n def div (self, r, r_): self.registers[r_] /= self.registers[r]\n def mod (self, r, r_): self.registers[r_] = self.registers[r_] % self.registers[r]\n '''Beware of floating point modulo: 0.0 != 3.50 % 0.10 == 0.09999999999999992 != 0.10'''\n" }, { "alpha_fraction": 0.6748878955841064, "alphanum_fraction": 0.6860986351966858, 
"avg_line_length": 30.85714340209961, "blob_id": "6bef20996c03ef619c8c011d09c4573b93bbb201", "content_id": "d3e8d38dac5e1705c621f6eb8d5101cd5261b635", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 446, "license_type": "no_license", "max_line_length": 87, "num_lines": 14, "path": "/MachineTest.py", "repo_name": "samueltenka/LearnToHack-Compiler", "src_encoding": "UTF-8", "text": "import Machine\n\ndef readfile(filename):\n with open(filename,'r') as f:\n return f.read()\n\nlines = readfile('MachineCode01.test').strip().split('\\n')\nlines = [l.split('#')[0].strip() for l in lines] #remove line-comments such as this one\nprint(lines)\nnum_registers, num_addresses = lines[0].split()\nM = Machine.Machine(eval(num_registers), eval(num_addresses), debug=False)\nM.print_mem()\nM.load_program(lines[2:], float(input()))\nM.run()\n" }, { "alpha_fraction": 0.5475285053253174, "alphanum_fraction": 0.5779467821121216, "avg_line_length": 42.83333206176758, "blob_id": "e3958a2552af2d5dee45b4e199ab4518935b0320", "content_id": "1b532d87a13fb0a52ea2e116eaf15bbeaf452dd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 65, "num_lines": 6, "path": "/PrettyPrint.py", "repo_name": "samueltenka/LearnToHack-Compiler", "src_encoding": "UTF-8", "text": "def pretty_print(string, minlen=10):\n if type(string) in [type(0), type(0.0)] or ' ' not in string:\n string = str(round(float(string),4))\n else:\n string = string[:3] + ';'.join(string.split(' ')[1:])\n return string+' '*(minlen-len(string))\n" }, { "alpha_fraction": 0.6497461795806885, "alphanum_fraction": 0.6565144062042236, "avg_line_length": 24.69565200805664, "blob_id": "3a550a270c80f3d4773035fee84b7843d100cbb7", "content_id": "ba500bc2cbaa3c1561e0ec7fa930ff62e297c3bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 591, "license_type": "no_license", "max_line_length": 72, "num_lines": 23, "path": "/NumberedTextboxTest.py", "repo_name": "samueltenka/LearnToHack-Compiler", "src_encoding": "UTF-8", "text": "'''\nThanks to Robert@pytrash (see link below)\nhttp://tk.unpythonic.net/wiki/A_Text_Widget_with_Line_Numbers\n'''\n\nimport tkinter as tk\nimport NumberedTextbox\n\nroot = tk.Tk()\n\ndef demo(noOfLines):\n pane = tk.PanedWindow(root, orient=tk.HORIZONTAL, opaqueresize=True)\n ed = NumberedTextbox.EditorClass(root)\n pane.add(ed.frame)\n s = 'line %s'\n s = '\\n'.join( s%i for i in range(3, noOfLines+3) )\n ed.text.insert(tk.END, s)\n pane.pack(fill='both', expand=1)\n root.title(\"Example - Line Numbers For Text Widgets\")\n\nif __name__ == '__main__':\n demo(9)\n tk.mainloop()\n" }, { "alpha_fraction": 0.8260869383811951, "alphanum_fraction": 0.8260869383811951, "avg_line_length": 22, "blob_id": "3a2f83fc51cf2649be0a8bdbf1bbe71b2e7cbedd", "content_id": "3c3382a92ddbab51348cac4c32750d7e0d2665b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23, "license_type": "no_license", "max_line_length": 22, "num_lines": 1, "path": "/README.md", "repo_name": "samueltenka/LearnToHack-Compiler", "src_encoding": "UTF-8", "text": "# LearnToHack-Compiler\n" } ]
7
alexseitsinger/django-rest-framework-expandable
https://github.com/alexseitsinger/django-rest-framework-expandable
1cf7c4737f33a0d050e8944dc9f1cfe34586ad2e
aacff4781387c19d319a778c569d36d57ab4b079
488635ce385c5235c5df4224d20a8be0f35a25f1
refs/heads/master
2022-05-04T04:45:48.314469
2019-12-15T00:05:32
2019-12-15T00:05:32
215,830,180
0
1
BSD-2-Clause
2019-10-17T15:49:41
2019-12-15T04:01:18
2022-04-22T22:34:18
Python
[ { "alpha_fraction": 0.8670212626457214, "alphanum_fraction": 0.8670212626457214, "avg_line_length": 30.33333396911621, "blob_id": "3bd0647f9aa87d6a787af50215cdae38cd5a740c", "content_id": "1c1a3b981a7050b70b52f14396c54b13361e444a", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 376, "license_type": "permissive", "max_line_length": 82, "num_lines": 12, "path": "/src/rest_framework_expandable/serializers.py", "repo_name": "alexseitsinger/django-rest-framework-expandable", "src_encoding": "UTF-8", "text": "from rest_framework.serializers import ModelSerializer, HyperlinkedModelSerializer\nfrom .mixins.expandable_model_serializer import ExpandableModelSerializerMixin\n\n\nclass ExpandableHyperlinkedModelSerializer(\n ExpandableModelSerializerMixin, HyperlinkedModelSerializer\n):\n pass\n\n\nclass ExpandableModelSerializer(ExpandableModelSerializerMixin, ModelSerializer):\n pass\n" }, { "alpha_fraction": 0.556477963924408, "alphanum_fraction": 0.5625157356262207, "avg_line_length": 26.040817260742188, "blob_id": "a91062de4c9652cafeeceb6e9b08db8fa6c999fa", "content_id": "638e5f9f22c621e78f0b0076af9eb3833b77356f", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3975, "license_type": "permissive", "max_line_length": 88, "num_lines": 147, "path": "/src/rest_framework_expandable/utils.py", "repo_name": "alexseitsinger/django-rest-framework-expandable", "src_encoding": "UTF-8", "text": "import re\nfrom django.db.models import Manager\nfrom django.db.models.query import QuerySet\n\n\ndef get_class_name(obj=None):\n # Get name of parent object.\n if obj is None:\n return \"Unnamed\"\n else:\n return obj.__class__.__name__\n\n\nclass HashableList(list):\n def __hash__(self):\n return id(self)\n\n\nclass HashableDict(dict):\n \"\"\"\n Hashable Dictionary\n\n Hashables should be immutable -- not enforcing this but TRUSTING you not 
to mutate a\n dict after its first use as a key.\n\n https://stackoverflow.com/questions/1151658/python-hashable-dicts\n \"\"\"\n\n def __hash__(self):\n vals = ()\n for v in self.values():\n try:\n hash(v)\n vals += (str(v),)\n except TypeError:\n if isinstance(v, list):\n for x in v:\n vals += (str(x),)\n else:\n vals += (str(v),)\n return hash((frozenset(self), frozenset(vals)))\n\n\ndef normalize_path(path):\n if path.startswith(\".\"):\n path = path[1:]\n if path.endswith(\".\"):\n path = path[:-1]\n return path\n\n\ndef get_path_parts(obj, path, base_name=None):\n pattern = re.compile(r\"(\\w+\\.\\w+)\")\n parts = [normalize_path(x) for x in pattern.split(path, 1) if len(x)]\n parts_final = []\n for part in parts:\n try:\n part_field = part.split(\".\")[1]\n except IndexError:\n part_field = part\n parts_final.append([part_field, part])\n\n ret = (parts_final[0][0], parts_final[0][1])\n if len(parts_final) > 1:\n ret += (parts_final[1][0], parts_final[1][1])\n else:\n ret += (\"\", \"\")\n return ret\n\n\ndef get_object(obj):\n if isinstance(obj, Manager):\n obj = obj.all()\n if isinstance(obj, QuerySet):\n obj = obj.first()\n return obj\n\n\nclass DictDiffer(object):\n \"\"\"\n Calculate the difference between two dictionaries as:\n (1) items added\n (2) items removed\n (3) keys same in both but changed values\n (4) keys same in both and unchanged values\n \"\"\"\n\n def __init__(self, current_dict, past_dict):\n self.current_dict, self.past_dict = current_dict, past_dict\n self.current_keys, self.past_keys = (\n set(current_dict.keys()),\n set(past_dict.keys()),\n )\n self.intersect = self.current_keys.intersection(self.past_keys)\n\n def added(self):\n \"\"\" Find keys that have been added \"\"\"\n return self.current_keys - self.intersect\n\n def removed(self):\n \"\"\" Find keys that have been removed \"\"\"\n return self.past_keys - self.intersect\n\n def changed(self):\n \"\"\" Find keys that have been changed \"\"\"\n return set(\n o for o in 
self.intersect if self.past_dict[o] != self.current_dict[o]\n )\n\n def unchanged(self):\n \"\"\" Find keys that are unchanged \"\"\"\n return set(\n o for o in self.intersect if self.past_dict[o] == self.current_dict[o]\n )\n\n def new_or_changed(self):\n \"\"\" Find keys that are new or changed \"\"\"\n # return set(k for k, v in self.current_dict.items()\n # if k not in self.past_keys or v != self.past_dict[k])\n return self.added().union(self.changed())\n\n\ndef remove_redundant_paths(paths):\n \"\"\"\n Returns a list of unique paths.\n \"\"\"\n results = []\n for path in paths:\n redundant = False\n paths_copy = paths[:]\n paths_copy.pop(paths.index(path))\n for p in paths_copy:\n if p.startswith(path) and len(p) > len(path):\n redundant = True\n if redundant is False:\n results.append(path)\n return results\n\n\ndef sort_field_paths(field_paths):\n \"\"\"\n Clean up a list of field paths by removing duplicates, etc.\n \"\"\"\n result = list(set(field_paths))\n result = remove_redundant_paths(result)\n result = [x for x in result if len(x)]\n return result\n" }, { "alpha_fraction": 0.5601385831832886, "alphanum_fraction": 0.5625120401382446, "avg_line_length": 35.25348663330078, "blob_id": "d6fd0414adba0594e4e089861a1f8784206a5876", "content_id": "edef52e16ee682d6afae91c8bda58d432dfe6acd", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15589, "license_type": "permissive", "max_line_length": 90, "num_lines": 430, "path": "/src/rest_framework_expandable/mixins/expandable_related_field.py", "repo_name": "alexseitsinger/django-rest-framework-expandable", "src_encoding": "UTF-8", "text": "from rest_framework.relations import ManyRelatedField\nfrom django.db.models import Manager\nfrom django.db.models.query import QuerySet\nfrom django.utils.module_loading import import_string\n\nfrom .expandable import ExpandableMixin\nfrom ..utils import (\n get_object,\n get_class_name,\n 
get_path_parts,\n DictDiffer,\n HashableList,\n HashableDict,\n)\n\n\n# TODO: Add an assertion for field names existing on the model.\n# TODO: Detect and fallback to default representation for circular references instead of\n# just removing the field completely on the parent.\n\n\nclass ExpandableRelatedFieldMixin(ExpandableMixin):\n settings_attr = \"expand_settings\"\n initialized_attrs = [\"allowed\", \"ignored\"]\n comparison_field_name = \"uuid\"\n\n def __init__(self, *args, **kwargs):\n # When we set read_only on the related field instance, the queryset attribute\n # will raise an exception. So, to avoid this, reset the queryset attribute to\n # None to allow these instances to be read_only when specified.\n read_only = kwargs.get(\"read_only\", False)\n if read_only is True:\n setattr(self, \"queryset\", None)\n\n for name in self.initialized_attrs:\n kwarg = kwargs.pop(name, None)\n if kwarg is not None:\n setattr(self, name, kwarg)\n\n super().__init__(*args, **kwargs)\n\n @property\n def settings(self):\n \"\"\"\n Returns the settings used for this related field instance.\n \"\"\"\n return getattr(self, self.settings_attr, {})\n\n @property\n def ignored_paths(self):\n \"\"\"\n Returns a list of field paths to ignore when generating the representation of\n this field instance.\n \"\"\"\n ignored_paths = []\n ignored = getattr(self, \"ignored\", None)\n\n if ignored is not None:\n for path in ignored:\n ignored_paths.append(self.get_field_path(path))\n\n return ignored_paths\n\n def is_ignored(self, path):\n \"\"\"\n Returns True/False if the specified path is one of the ignored field paths. 
Used\n by to_representation_for_field to determine if the field is the one to expand.\n \"\"\"\n if path in self.ignored_paths:\n return True\n\n return False\n\n def to_non_circular_path(self, path):\n if self.is_circular(path):\n try:\n prefix, field_name = path.rsplit(\".\", 1)\n return prefix\n except ValueError:\n return path\n return path\n\n def is_circular(self, path):\n try:\n prefix, field_name = path.rsplit(\".\", 1)\n except ValueError:\n field_name = path\n\n if field_name in self.circular_field_names:\n return True\n return False\n\n @property\n def circular_field_names(self):\n circular_field_names = []\n\n # Remove circular references to the parent model.\n parent_model_name = self.model_serializer.get_model_name()\n parent_set_name = \"{}_set\".format(parent_model_name)\n parent_names = (parent_model_name, parent_set_name)\n for parent_name in parent_names:\n circular_field_names.append(parent_name)\n\n return circular_field_names\n\n def get_skipped_fields(self, skipped=None):\n \"\"\"\n Returns a list of field paths (ignored and skipped) to pass to the serializer\n class so it doensn't return them in the representation.\n \"\"\"\n skipped_fields = self.ignored_paths\n\n for field_name in self.circular_field_names:\n skipped_fields.append(field_name)\n\n if skipped is not None:\n skipped_fields.extend(skipped)\n\n return list(set(skipped_fields))\n\n @property\n def allowed_paths(self):\n \"\"\"\n Returns a list of field paths that are permitted to be expanded from this\n expandable class instance.\n \"\"\"\n allowed = getattr(self, \"allowed\", [])\n allowed_paths = [self.get_field_path(x) for x in allowed]\n return allowed_paths\n\n def is_allowed(self, path):\n \"\"\"\n Returns True/False if the specified path is one of the allowed field paths. 
Used\n by to_representation_for_field to determine if the field is to be expanded.\n \"\"\"\n if path.startswith(self.allowed_prefix):\n return True\n if path in self.allowed_paths:\n return True\n return False\n\n def assert_is_allowed(self, path):\n \"\"\"\n Raises an AssertionError if the field path specified is not in the list of\n allowed field paths.\n \"\"\"\n model_serializer_name = get_class_name(self.model_serializer)\n model_serializer_field_name = self.model_serializer_field_name\n related_field_class_name = get_class_name(self)\n if self.is_allowed(path) is False:\n\n path = \".\".join(path.split(\".\")[1:])\n\n raise AssertionError(\n \"The path '{}' is not listed as an allowed field path on {}'s {} \"\n \"field. Please add the path to 'allowed' kwarg on {}'s '{}' field \"\n \"to allow its expansion.\".format(\n path,\n model_serializer_name,\n model_serializer_field_name,\n model_serializer_name,\n model_serializer_field_name,\n )\n )\n\n def assert_is_specified(self, path):\n \"\"\"\n Raises an AssertionError if the field path specified is not in the list of\n entries in the 'expands' attribute on the related field class instance.\n \"\"\"\n if self.is_specified(path) is False:\n # if field_path.startswith(self.model_name):\n # field_path.replace(\"{}.\".format(self.model_name), \"\")\n msg = []\n indent = \"\\n\"\n for d in self.settings.get(\"serializers\", []):\n msg.append(\n \"{}{}{}\".format(d[\"serializer\"], indent, indent.join(d[\"paths\"]))\n )\n\n raise AssertionError(\n \"The field path '{field_path}' is not specified in '{attr_name}' on \"\n \"{related_field_class_name}.\\n\\nCurrently Specified:\\n{specified}\".format(\n field_path=path,\n attr_name=self.settings_attr,\n related_field_class_name=get_class_name(self),\n specified=\"\\n\".join(msg),\n )\n )\n\n def is_specified(self, path):\n \"\"\"\n Returns True/False if the specified path is in any of the listed paths on the\n class isntance's 'expands' attribute.\n \"\"\"\n for d in 
self.settings.get(\"serializers\", []):\n if path in d.get(\"paths\", []):\n return True\n return False\n\n def is_matching(self, requested_path):\n \"\"\"\n Returns True/False if the requested path starts with the current\n 'model_serializer_field_name'.\n \"\"\"\n base_path = self.get_field_path(self.model_serializer_field_name)\n if requested_path == base_path:\n return True\n\n prefix = \"{}.\".format(base_path)\n if requested_path.startswith(prefix):\n return True\n\n return False\n\n def to_default_representation(self, obj):\n \"\"\"\n Returns the default representation of the object.\n \"\"\"\n return super().to_representation(obj)\n\n def expand_object(self, obj, path):\n \"\"\"\n Method for expanding a model instance object. If a target field name is\n specified, the serializer will use that nested object to generate a\n representation.\n \"\"\"\n # If the field exists, but its an empty object (no entry saved), obj will be\n # None. So, if we get None as obj, return None instead of trying to serializer\n # its representation.\n if obj is None:\n return None\n\n serializer = self.get_serializer(obj, path)\n representation = serializer.to_representation(obj)\n\n return representation\n\n def get_alias(self, prefix_field, prefix_path, suffix_field, suffix_path):\n for d in self.settings.get(\"aliases\", []):\n if prefix_path in d.get(\"paths\", []):\n alias = d.get(\"alias\", {})\n prefix_field = alias.get(\"prefix_field\", prefix_field)\n prefix_path = alias.get(\"prefix_path\", prefix_path)\n suffix_field = alias.get(\"suffix_field\", suffix_field)\n suffix_path = alias.get(\"suffix_path\", suffix_path)\n return (prefix_field, prefix_path, suffix_field, suffix_path)\n\n def expand(self, obj, prefix_field, prefix_path, suffix_field, suffix_path):\n if isinstance(obj, Manager):\n obj = obj.all()\n\n target = obj\n target_name = get_class_name(get_object(target)).lower()\n names = (target_name, \"{}_set\".format(target_name))\n\n if len(prefix_field) and 
prefix_field not in names:\n target = getattr(target, prefix_field, target)\n\n expanded = self.expand_object(target, prefix_path)\n\n if len(suffix_field):\n # If our prefix path is a manytomanyfield, then use the first string in the\n # suffix path as the field name.\n if prefix_path.endswith(\"_set\"):\n try:\n suffix_field, _ = suffix_path.split(\".\", 1)\n except ValueError:\n suffix_field = suffix_path\n expanded[suffix_field] = self.get_expanded(target, suffix_path)\n\n return expanded\n\n def get_expanded(self, obj, path):\n \"\"\"\n Fascade method for expanding objects or querysets into expanded (nested)\n representations.\n \"\"\"\n prefix_field, prefix_path, suffix_field, suffix_path = get_path_parts(obj, path)\n prefix_field, prefix_path, suffix_field, suffix_path = self.get_alias(\n prefix_field, prefix_path, suffix_field, suffix_path\n )\n if isinstance(obj, QuerySet):\n return [self.get_expanded(o, path) for o in obj]\n\n return self.expand(obj, prefix_field, prefix_path, suffix_field, suffix_path)\n\n def has_comparison_field(self, d1, d2):\n \"\"\"\n Returns True/False if both 'd1' and 'd2' have the 'comparison_field' key,\n regardless of their respective values.\n \"\"\"\n result = False\n for name in self.settings.get(\"comparison_fields\", []):\n if result is True:\n break\n result = all([name in x for x in [d1, d2]])\n return result\n\n def compare_objects(self, d1, d2):\n for name in self.settings.get(\"comparison_fields\", []):\n if all([name in x for x in [d1, d2]]):\n return d1[name] == d2[name]\n return False\n\n def get_changed_field_names(self, d1, d2):\n return DictDiffer(d1, d2).changed()\n\n def get_target_field_names(self, paths):\n result = []\n for path in paths:\n bits = path.split(\".\")\n field_name = bits[-1]\n try:\n i = bits.index(field_name)\n if bits[i - 2].endswith(\"_set\"):\n field_name = bits[i - 1]\n except IndexError:\n pass\n result.append(field_name)\n return result\n\n def to_expanded_representation(self, obj, 
paths):\n \"\"\"\n Entry method for converting an model object instance into a representation by\n expanding the paths specified (if they are allowed and specified).\n \"\"\"\n if isinstance(obj, Manager):\n obj = obj.all()\n\n expanded = None\n target_fields = self.get_target_field_names(paths)\n\n if len(paths) > 1:\n # expand multiple fields\n for path in paths:\n current = self.get_expanded(obj, path)\n\n if expanded is None:\n expanded = current\n elif isinstance(expanded, list):\n for d1 in expanded:\n for d2 in current:\n if self.has_comparison_field(d1, d2):\n if self.compare_objects(d1, d2):\n changed_fields = self.get_changed_field_names(\n d1, d2\n )\n for field_name in changed_fields:\n # The dict with the updated (from a url) will\n # have a smaller length.\n if len(d2[field_name]) < len(d1[field_name]):\n d1[field_name] = d2[field_name]\n else:\n # expand single field\n expanded = self.get_expanded(obj, paths[0])\n\n if isinstance(expanded, list):\n return HashableList(expanded)\n return HashableDict(expanded)\n\n def get_serializer_context(self):\n return self.context\n\n def get_serializer(self, source, path=None, context=None):\n \"\"\"\n Finds and returns the serializer class instance to use. 
Either imports the class\n specified in the entry on the 'expands' attribute of the ExpandableRelatedField\n instance, or re-uses the serializer class that was already imported and saved to\n the settings previously.\n \"\"\"\n serializer_class = None\n\n if context is None:\n context = self.context\n\n ret = {\"skipped_fields\": [], \"many\": False, \"context\": context}\n\n if isinstance(source, Manager):\n source = source.all()\n\n if isinstance(source, (ManyRelatedField, QuerySet)):\n ret[\"many\"] = True\n\n for d in self.settings.get(\"serializers\", []):\n if path in d.get(\"paths\", []):\n serializer_class = self.get_serializer_class(d[\"serializer\"])\n ret[\"skipped_fields\"] = self.get_skipped_fields(d.get(\"skipped\", []))\n ret[\"many\"] = d.get(\"many\", ret[\"many\"])\n\n if not isinstance(source, QuerySet):\n ret[\"many\"] = False\n # if ret[\"many\"] is True:\n # if not isinstance(source, (QuerySet)):\n # source = QuerySet(source)\n\n if serializer_class is None:\n raise RuntimeError(\n \"There is no specification for '{path}' in {class_name}.\\n\\n\"\n \"Add a dictionary to the 'expandable' list with:\\n\"\n \" 'paths': ['{path}']\".format(\n path=path, class_name=get_class_name(self)\n )\n )\n\n # print(\"---------- get_serializer_class -----------\")\n # print(\"path: \", path)\n # print(\"serializer_class: \", serializer_class.__name__)\n return serializer_class(**ret)\n\n def get_serializer_class(self, serializer_path):\n \"\"\"\n Returns the serializer class to use for serializing the object instances.\n \"\"\"\n target = None\n\n for d in self.settings.get(\"serializers\", []):\n if serializer_path == d.get(\"serializer\", \"\"):\n target = d\n\n if target is None:\n raise AttributeError(\n \"Failed to find an entry for serializer '{}'.\".format(serializer_path)\n )\n\n klass = target.get(\"serializer_class\", None)\n if klass is None:\n klass = target[\"serializer_class\"] = import_string(serializer_path)\n\n return klass\n" }, { 
"alpha_fraction": 0.5984615087509155, "alphanum_fraction": 0.5984615087509155, "avg_line_length": 30.325302124023438, "blob_id": "d68e28e4237b4629d81195011b4f487aa20f08c0", "content_id": "e0408a2b54ce4b284875222427f4cd9f1f3d3da5", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2600, "license_type": "permissive", "max_line_length": 88, "num_lines": 83, "path": "/src/rest_framework_expandable/mixins/expandable.py", "repo_name": "alexseitsinger/django-rest-framework-expandable", "src_encoding": "UTF-8", "text": "from ..utils import sort_field_paths\n\n\nclass ExpandableMixin(object):\n model_name = None\n query_param = \"expand\"\n expanded_fields = None\n\n @property\n def request(self):\n \"\"\"\n Returns the current request context passed from DRF.\n \"\"\"\n context = getattr(self, \"context\", None)\n if context is None:\n raise AttributeError(\"Context not found.\")\n\n request = context.get(\"request\", None)\n if request is None:\n raise AttributeError(\"Request not found in context.\")\n\n return request\n\n @property\n def all_query_params(self):\n return getattr(self.request, \"query_params\", getattr(self.request, \"GET\", {}))\n\n @property\n def params(self):\n \"\"\"\n Returns a list of unique relative field paths that should be used for expanding.\n \"\"\"\n field_paths = []\n\n target_param = getattr(self, \"query_param\", None)\n if target_param is not None:\n values = self.all_query_params.get(target_param, \"\").split(\",\")\n for param in values:\n field_paths.append(param)\n\n return sort_field_paths(field_paths)\n\n def get_model_name(self):\n \"\"\"\n Returns the model name from the ModelSerializer Meta class model specified, or\n from the previously saved model name on the class.\n \"\"\"\n model_name = getattr(self, \"model_name\", None)\n\n if model_name is None:\n model = self.Meta.model\n model_name = model.__name__.lower()\n self.model_name = model_name\n\n return 
model_name\n\n def get_field_path(self, path):\n \"\"\"\n Returns a list of possible field paths that are prefixed with the current\n serializers model name, plus one suffixed with _set for django's default\n reverse relationship names.\n \"\"\"\n model_name = self.get_model_name()\n prefix = \"{}.\".format(model_name)\n if not path.startswith(prefix):\n return \"{}{}\".format(prefix, path)\n return path\n\n @property\n def requested_fields(self):\n \"\"\"\n Returns a list of field paths to expand.\n Can be specified via class instance or via query params.\n \"\"\"\n requested_fields = self.params\n\n # Add our target fields that we specified on the class.\n if isinstance(self.expanded_fields, list):\n for field_path in self.expanded_fields:\n requested_fields.append(field_path)\n\n requested_fields = sort_field_paths(requested_fields)\n return requested_fields\n" }, { "alpha_fraction": 0.6968048810958862, "alphanum_fraction": 0.704282820224762, "avg_line_length": 21.630769729614258, "blob_id": "c21107e4e49c11124f4b831d4975123c8eafd024", "content_id": "ba85ce52cb5228247386bcb62fa159a715aa3357", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1471, "license_type": "permissive", "max_line_length": 76, "num_lines": 65, "path": "/README.md", "repo_name": "alexseitsinger/django-rest-framework-expandable", "src_encoding": "UTF-8", "text": "# Django Rest Framework Expandable\n\n## Description\n\nExpandable serializers for Django REST Framework. 
Allow for selective object\nexpansion through query parameters or serializer class kwargs.\n\n## Installation\n\n```\npip install django-rest-framework-expandable\n```\n\n## Usage\n\n```python\n# apps/users/api.serializers.py\n# (User serializer)\nclass UserSerializer(ExpandableHyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = (\"username\", \"id\", ...)\n\n# apps/users/api/fields.py\n# (User expandable serializer field)\nclass UserRelatedField(ExpandableHyperlinkedRelatedField):\n queryset = User.objects.all()\n lookup_field = \"id\"\n view_name = \"api:user-detail\"\n expand_settings = {\n \"comparison_fields\": [\"id\"],\n \"serializers\": [\n {\n \"paths\": [\"example.user\"],\n \"serializer\": \"apps.users.api.serializers.UserSerializer\",\n \"skipped\": [],\n }\n ]\n }\n\n# apps/example/api/serializers.py\n# Example serializer (using nested expandable serializer fields)\nfrom apps.users.api.fields import UserRelatedField\nfrom apps.another.api.fields import AnotherRelatedField\n\nclass ExampleSerializer(ExpandableHyperlinkedModelSerializer):\n another_related_field = AnotherRelatedField()\n user = UserRelatedField()\n ...\n```\n\nReturns...\n\n```\nGET http://localhost:8000/api/examples/?expand=example.user\n{\n id: 1,\n another_related_field: \"http://localhost:8000/api/another/1\",\n user: {\n id: 1,\n username: \"Alex\",\n ...\n }\n}\n```\n" }, { "alpha_fraction": 0.8484848737716675, "alphanum_fraction": 0.8484848737716675, "avg_line_length": 26.923076629638672, "blob_id": "1d17affc78aeb22f17a1076b522c13fd7c4083b0", "content_id": "abee381392056880b28cca41a369c15e7f91ca08", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "permissive", "max_line_length": 80, "num_lines": 13, "path": "/src/rest_framework_expandable/fields.py", "repo_name": "alexseitsinger/django-rest-framework-expandable", "src_encoding": "UTF-8", "text": "from 
rest_framework.serializers import SlugRelatedField, HyperlinkedRelatedField\nfrom .mixins.expandable_related_field import ExpandableRelatedFieldMixin\n\n\nclass ExpandableHyperlinkedRelatedField(\n ExpandableRelatedFieldMixin,\n HyperlinkedRelatedField,\n):\n pass\n\n\nclass ExpandableSlugRelatedField(ExpandableRelatedFieldMixin, SlugRelatedField):\n pass\n" }, { "alpha_fraction": 0.5850965976715088, "alphanum_fraction": 0.7424103021621704, "avg_line_length": 44.25, "blob_id": "c2c378c4fa93a1e85f2e165f1420c57c83cdf12f", "content_id": "423e99d7d459be91cae435884fd87fc549d10e2c", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2174, "license_type": "permissive", "max_line_length": 158, "num_lines": 48, "path": "/CHANGELOG.md", "repo_name": "alexseitsinger/django-rest-framework-expandable", "src_encoding": "UTF-8", "text": "<a name=\"v0.5.0\"></a>\n## [v0.5.0](https://github.com/alexseitsinger/django-rest-framework-expandable/compare/v0.4.1...v0.5.0) (2019-12-15)\n\n### Bug Fixes\n- Allows for read_only fields. ([e681d1e](https://github.com/alexseitsinger/django-rest-framework-expandable/commit/e681d1e1c6b126a6b979b509002606fda0a54e54))\n\n### Code Refactoring\n- Fixes query params. 
([d086d4c](https://github.com/alexseitsinger/django-rest-framework-expandable/commit/d086d4c5bc60a69340a0ceb2aba2488728bfeef8))\n\n\n<a name=\"v0.4.1\"></a>\n## [v0.4.1](https://github.com/alexseitsinger/django-rest-framework-expandable/compare/v0.4.0...v0.4.1) (2019-10-17)\n\n\n<a name=\"v0.4.0\"></a>\n## [v0.4.0](https://github.com/alexseitsinger/django-rest-framework-expandable/compare/v0.3.0...v0.4.0) (2019-10-17)\n\n\n<a name=\"v0.3.0\"></a>\n## [v0.3.0](https://github.com/alexseitsinger/django-rest-framework-expandable/compare/v0.2.0...v0.3.0) (2019-10-17)\n\n\n<a name=\"v0.2.0\"></a>\n## [v0.2.0](https://github.com/alexseitsinger/django-rest-framework-expandable/compare/v0.1.2...v0.2.0) (2019-10-17)\n\n### Code Refactoring\n- Renamed class property. ([05cb5a1](https://github.com/alexseitsinger/django-rest-framework-expandable/commit/05cb5a16c09013a29c8097cfebaefd182a888626))\n\n\n<a name=\"v0.1.2\"></a>\n## [v0.1.2](https://github.com/alexseitsinger/django-rest-framework-expandable/compare/v0.1.1...v0.1.2) (2019-10-17)\n\n### Bug Fixes\n- Fixes import. ([b92de0c](https://github.com/alexseitsinger/django-rest-framework-expandable/commit/b92de0c7dbe718acb084b7153a48365eca2f734f))\n\n\n<a name=\"v0.1.1\"></a>\n## [v0.1.1](https://github.com/alexseitsinger/django-rest-framework-expandable/compare/v0.1.0...v0.1.1) (2019-10-17)\n\n### Features\n- Adds missing util. ([19f910a](https://github.com/alexseitsinger/django-rest-framework-expandable/commit/19f910a0ab91019e400d035e5fb7abe58ab967dc))\n\n\n<a name=\"v0.1.0\"></a>\n## [v0.1.0](https://github.com/alexseitsinger/django-rest-framework-expandable/compare/cdaeffc293fd0bf333d7048e2cb587d977b90a05...v0.1.0) (2019-10-17)\n\n### Features\n- Initial commit, master. 
([cdaeffc](https://github.com/alexseitsinger/django-rest-framework-expandable/commit/cdaeffc293fd0bf333d7048e2cb587d977b90a05))\n\n\n" }, { "alpha_fraction": 0.6382263898849487, "alphanum_fraction": 0.6382263898849487, "avg_line_length": 34.867469787597656, "blob_id": "980253617f3d915c1cbacfae9ecbf36a8adecb3f", "content_id": "bfb265e86dd36fe59a57e07214facf7510ee3cf3", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2977, "license_type": "permissive", "max_line_length": 87, "num_lines": 83, "path": "/src/rest_framework_expandable/mixins/expandable_model_serializer.py", "repo_name": "alexseitsinger/django-rest-framework-expandable", "src_encoding": "UTF-8", "text": "from django.db.models import Manager\nfrom rest_framework.relations import ManyRelatedField\n\nfrom .expandable import ExpandableMixin\nfrom .expandable_related_field import ExpandableRelatedFieldMixin\nfrom rest_framework_helpers.mixins import RepresentationMixin\n\n\nclass ExpandableModelSerializerMixin(RepresentationMixin, ExpandableMixin):\n def __init__(self, *args, **kwargs):\n self.expanded_fields = kwargs.pop(\"expanded_fields\", None)\n super().__init__(*args, **kwargs)\n self.initialize_expandable_fields()\n\n def initialize_expandable_fields(self):\n model_name = self.get_model_name()\n\n for field_name, field in self.expandable_fields:\n field.model_name = model_name\n field.model_serializer = self\n field.model_serializer_field_name = field_name\n field.allowed_prefix = \"{}.{}.\".format(model_name, field_name)\n field.allowed = list(set([field_name] + getattr(field, \"allowed\", [])))\n\n @property\n def expandable_fields(self):\n \"\"\"\n Returns a list of all the fields that subclass ExpandableRelatedFieldMixin\n \"\"\"\n fields = []\n\n for field_name, field in self.fields.items():\n target = (\n field.child_relation if isinstance(field, ManyRelatedField) else field\n )\n\n if isinstance(target, 
ExpandableRelatedFieldMixin):\n fields.append([field_name, target])\n\n return fields\n\n def is_expandable(self, field):\n \"\"\"\n Returns True if the field is a subclass of the ExpandableRelatedFieldMixin\n \"\"\"\n target = field.child_relation if isinstance(field, ManyRelatedField) else field\n\n for field_name, field in self.expandable_fields:\n if field == target:\n return True\n\n return False\n\n def get_matched_paths(self, expandable_field):\n matched = []\n\n for requested_path in self.requested_fields:\n if expandable_field.is_matching(requested_path):\n expandable_field.assert_is_allowed(requested_path)\n expandable_field.assert_is_specified(requested_path)\n matched.append(requested_path)\n\n return matched\n\n def to_representation_for_field(self, field, obj):\n \"\"\"\n A function to customize what each field representation produces. Can be\n overwritten in sublclasses to add custom behavoir on a per-field basis.\n\n By default, if the field is an expandable field, it will check if it should be\n expanded, and do so if checks pass.\n \"\"\"\n if isinstance(obj, Manager):\n obj = obj.all()\n\n if self.is_expandable(field):\n target = getattr(field, \"child_relation\", field)\n\n matched = self.get_matched_paths(target)\n if len(matched):\n return target.to_expanded_representation(obj, matched)\n\n return field.to_representation(obj)\n" } ]
8
TruongKhang/topic_models
https://github.com/TruongKhang/topic_models
44505e89d3609c511aa2f29b349abf438c1796a2
ed912425532dad46792986b768aa8e37bd647b2a
d1bb25cf90a0499cfb351d787f304db537560840
refs/heads/master
2019-08-23T11:25:50.637917
2017-03-22T19:13:53
2017-03-22T19:13:53
67,353,561
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6532507538795471, "alphanum_fraction": 0.6656346917152405, "avg_line_length": 23.846153259277344, "blob_id": "99b5494de905cdebe776ee9a4a85048a31a1b015", "content_id": "71d7a738ef132f37d0ec62b280565aaf4f355167", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 71, "num_lines": 13, "path": "/ap/read_line_specific.py", "repo_name": "TruongKhang/topic_models", "src_encoding": "UTF-8", "text": "from time import time\ndef readline_number_x(file,x):\n for index,line in enumerate(iter(file)):\n if index+1 == x: return line\n\n return None\n\nf = open('grolier_train.txt')\nx = 1\nt0 = time()\nline_number_x = readline_number_x(f,x) #This will return the third line\nprint \"Time: %f\" %(time()-t0)\nprint line_number_x\n" }, { "alpha_fraction": 0.5002655386924744, "alphanum_fraction": 0.5087625980377197, "avg_line_length": 36.90604019165039, "blob_id": "3fbc3be3b46f12507dae158c3db1b72b069a20e1", "content_id": "b8dbc9dd62b23582084773c480f109b150558ef4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5649, "license_type": "no_license", "max_line_length": 184, "num_lines": 149, "path": "/run_batch.py", "repo_name": "TruongKhang/topic_models", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 26 15:52:29 2015\n\n@author: dhbk\n\"\"\"\nimport sys, os, shutil\nimport numpy as np\nmypath = ['./lda/Batch_VB', './lda/OPE', './lda/Online-VB']\n\n# Name of current path directory which contains this file\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nfor temp in mypath:\n sys.path.insert(0, temp)\nfrom datasets import base\n#import run_Batch_VB\n#import run_OPE\n\ndef main():\n # Check input\n if len(sys.argv)!=10 and len(sys.argv)!=9:\n print(\"usage: python run_batch.py [method name] [data file] [setting file] [model folder] [test_folder] [method 
noise] [alpha] [only_doc] [[vocab:vocabulary file (optional)]]\")\n print(\"or: python run_batch.py [method name] [data file] [setting file] [model folder] [test_folder] [a] [method noise] [only_doc] [[beta:beta file (optional)]]\")\n exit()\n else:\n # Get environment variables\n method_name = sys.argv[1]\n data_file = sys.argv[2]\n setting_file = sys.argv[3]\n model_folder = sys.argv[4]\n test_folder = sys.argv[5]\n method_noise = sys.argv[6]\n alpha = sys.argv[7]\n only_doc = sys.argv[8]\n beta_file = ''\n vocab_file = ''\n if len(sys.argv) == 10:\n name = sys.argv[9].split('[')\n if name[0] == 'vocab':\n vocab_file = name[1]\n elif name[0] == 'beta':\n beta_file = name[1]\n\n tops = 10 # int(sys.argv[5])\n # Check format data: raw text or not\n data = base.Dataset(data_file)\n formatted_data = data.load_dataset()\n if len(sys.argv) == 9:\n if not data.is_raw_text:\n print(\"File %s is formatted file. Please add argument [vocabulary file] into command\" % data_file)\n exit()\n else:\n vocab_file = data.vocab_path\n elif len(sys.argv) == 10:\n if data.is_raw_text:\n if beta_file:\n print(\"Input is raw text. Library will preprocessing it.\\\n Vocaburaly can change. So, beta file can't be used in this case. 
Please remove this argument\")\n exit()\n vocab_file = data.vocab_path\n\n # Read settings\n print('reading setting ...')\n settings = base.read_setting(setting_file)\n num_docs = formatted_data.num_doc\n if beta_file:\n file = beta_file\n else:\n file = vocab_file\n if os.path.isfile(file):\n f = open(file, 'r')\n lines = f.readlines()\n if vocab_file:\n num_terms = len(lines)\n if beta_file:\n num_terms = len(lines[0].strip().split())\n shutil.copyfile(beta_file, dir_path+'/beta_copy.txt')\n beta_file = dir_path+'/beta_copy.txt'\n del lines\n f.close()\n settings['num_docs'] = num_docs\n settings['num_terms'] = num_terms\n else:\n print(\"Can't find vocabulary file!\")\n exit()\n\n # Create model folder if it doesn't exist\n if os.path.exists(model_folder):\n shutil.rmtree(model_folder)\n os.makedirs(model_folder)\n a = None\n if beta_file:\n namebeta = name[1]\n lname = namebeta.split('/')\n b = lname[len(lname)-2]\n if 'True' in b:\n value = b[4:]\n elif 'False' in b:\n value = b[5:]\n elif 'batchvb' in b:\n value = b[7:]\n settings['iter_train'] = int(value)\n if method_noise == 'add_ope' or method_noise == 'mult_ope':\n direct = namebeta[:-(len('beta_final.dat'))]\n filea = direct + '/a_' + value + '.txt'\n f = open(filea)\n\n print('Read a...')\n if only_doc == 'True':\n lines = f.readlines()\n a = np.zeros(len(lines), dtype=np.int32)\n for d in range(len(lines)):\n a[d] = int(lines[d].strip())\n else:\n lines = f.readlines()\n a = list()\n for d in range(len(lines)):\n words = lines[d].strip().split()\n w = np.zeros(len(words), dtype=np.int32)\n for n in range(len(words)):\n w[n] = int(words[n])\n a.append(w)\n f.close()\n # Read data for computing perplexities\n print('read data for computing perplexities ...')\n test_data = base.read_data_for_perpl(test_folder)\n # Check method and run algorithm\n alpha = float(alpha)\n if only_doc == 'True':\n only_doc = True\n else:\n only_doc = False\n\n if method_name == 'batch-vb':\n import run_Batch_VB\n runbatchvb 
= run_Batch_VB.runBatchVB(formatted_data, settings, model_folder, tops, test_data, beta_file)\n runbatchvb.run(method_noise,alpha,only_doc,a)\n elif method_name == 'batch-ope':\n import run_OPE\n runbatchope = run_OPE.runOPE(formatted_data, settings, model_folder, tops, test_data, beta_file)\n runbatchope.run(method_noise, alpha, only_doc, a)\n elif method_name == 'online-vb':\n import run_VB\n runbatchvb = run_VB.runOnlineVB(formatted_data, settings, model_folder, tops, test_data, beta_file)\n runbatchvb.run(method_noise,alpha,only_doc,a)\n\nif __name__ == '__main__':\n main()\n\n" }, { "alpha_fraction": 0.45909690856933594, "alphanum_fraction": 0.4643869400024414, "avg_line_length": 34.2933349609375, "blob_id": "7cf6a5f1cf23ce76398b34373a52de2f9aa67a99", "content_id": "17d3281195ef03b4e4eb1f3604f7d48cd16140fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5293, "license_type": "no_license", "max_line_length": 82, "num_lines": 150, "path": "/preprocessing/preprocessing.py", "repo_name": "TruongKhang/topic_models", "src_encoding": "UTF-8", "text": "import os, os.path\nfrom os.path import isfile, join, isdir\nimport numpy as np\nfrom nltk.tokenize import TreebankWordTokenizer\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.stem.porter import PorterStemmer\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\ntokenizer = RegexpTokenizer(r'\\w+')\np_stemmer = PorterStemmer()\n\nclass PreProcessing:\n def __init__(self, stemmed=False, remove_rare_word=True):\n self.list_doc = list()\n self.vocab = list()\n self.list_doc_freq = list()\n self.stemmed = stemmed\n self.remove_rare_word = remove_rare_word\n self.id_doc = 0\n self.df = list()\n\n def pro_per_doc(self, doc, stop_list):\n words = tokenizer.tokenize(doc.lower())\n list_word = list()\n for word in words:\n if word not in stop_list:\n if word.isalpha() and len(word) > 2:\n if self.stemmed:\n word = p_stemmer.stem(word)\n if word in 
self.vocab:\n index = self.vocab.index(word)\n if self.id_doc not in self.df[index]:\n self.df[index].append(self.id_doc)\n #list_word.append(word)\n else:\n self.vocab.append(word)\n self.df.append([self.id_doc])\n list_word.append(word)\n self.id_doc += 1\n return list_word\n\n def filter(self):\n if self.vocab:\n V = len(self.vocab)\n remove_list = []\n i = 0\n while i < V:\n #print(V)\n freq = len(self.df[i])\n #print(freq)\n if freq <= 5 or freq > int(0.5*self.id_doc):\n #remove_list.append(i)\n docs = self.df[i]\n word = self.vocab[i]\n for j in docs:\n while word in self.list_doc[j]:\n self.list_doc[j].remove(word)\n del self.vocab[i]\n del self.df[i]\n V = len(self.vocab)\n i = i - 1\n i += 1\n else:\n print(\"Vocabulary is empty! Please run process first!\")\n\n def process(self, path):\n name_file = path.split(\"\\\\\")\n name_file = name_file[-1].split(\"/\")\n name = name_file[-1].split(\".\")\n self.filename = name[0]\n #print(self.filename)\n fin = open(dir_path+\"/stop_word_list.txt\")\n stop_list = list()\n line = fin.readline()\n while line:\n line = line.strip()\n stop_list.append(line)\n line = fin.readline()\n fin.close()\n stop_list = stop_list + ['_',]\n print(\"Waiting...\")\n if isfile(path):\n fread = open(path)\n line = fread.readline()\n num = 1\n while line:\n line = line.strip()\n if line == \"<TEXT>\":\n doc = fread.readline().strip()\n #print(num)\n list_word = self.pro_per_doc(doc,stop_list)\n self.list_doc.append(list_word)\n num += 1\n line = fread.readline()\n if self.remove_rare_word:\n self.filter()\n\n numDocs = len(self.list_doc)\n for d in range(0,numDocs):\n list_word = []\n numWords = len(self.list_doc[d])\n for w in range(0,numWords):\n word = self.list_doc[d][w]\n self.list_doc[d][w] = self.vocab.index(word)\n inlist = False\n for elem in list_word:\n if self.list_doc[d][w] == elem[0]:\n elem[1] += 1\n inlist = True\n break\n if not inlist:\n list_word.append([self.list_doc[d][w],1])\n 
self.list_doc_freq.append(list_word)\n\n def extract_vocab(self):\n if self.vocab:\n self.dir_path_data = dir_path[:-13] + \"datasets/data/\" + self.filename\n if not os.path.exists(self.dir_path_data):\n os.makedirs(self.dir_path_data)\n fout = open(join(self.dir_path_data,\"vocab.txt\"),\"w\")\n for word in self.vocab:\n fout.write(\"%s\\n\" %word)\n fout.close()\n\n def format_seq(self):\n if self.list_doc:\n fout = open(join(self.dir_path_data,\"term_sequence.txt\"), \"w\")\n for doc in self.list_doc:\n fout.write(\"%d \" %len(doc))\n for word in doc:\n fout.write(\"%d \" %word)\n fout.write(\"\\n\")\n fout.close()\n\n def format_freq(self):\n if self.list_doc:\n fout = open(join(self.dir_path_data,\"term_frequency.txt\"), \"w\")\n for doc in self.list_doc_freq:\n fout.write(\"%d \" %len(doc))\n for elem in doc:\n fout.write(\"%d:%d \" %(elem[0],elem[1]))\n fout.write(\"\\n\")\n fout.close()\n\nif __name__ == '__main__':\n p = PreProcessing()\n p.process(\"D:\\\\UCR_TS_Archive_2015\\\\ap/ap.txt\")\n p.extract_vocab()\n p.format_freq()\n p.format_seq()" }, { "alpha_fraction": 0.5302867293357849, "alphanum_fraction": 0.5345979928970337, "avg_line_length": 38.99137878417969, "blob_id": "9e19973236fdadce9914de69d2ae441dd8e15ad4", "content_id": "17d89ecc721c9af1a5647fc6d1a32b39ef7a8201", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4639, "license_type": "no_license", "max_line_length": 116, "num_lines": 116, "path": "/lda/Batch_VB/run_Batch_VB.py", "repo_name": "TruongKhang/topic_models", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys, os\nimport Batch_VB\nimport numpy as np\n\nfrom datasets import base, noise_data\nfrom evaluation import perplexity\n\nclass runBatchVB:\n\n def __init__(self, formatted_data, settings, model_folder, tops, test_data, beta_file=None):\n self.formatted_data = formatted_data\n self.settings = settings\n self.model_folder = 
model_folder\n self.beta_file = beta_file\n self.test_data = test_data\n self.tops = tops\n\n def run(self,method,alpha,only_doc=True, a=None):\n # Initialize the algorithm\n if self.beta_file:\n print('Load beta to continue learn...')\n beta = base.read_beta(self.beta_file)\n os.remove(self.beta_file)\n\t i = self.settings['iter_train']\n\t self.settings['iter_train'] += 50\n else:\n print('initialize the algorithm ...')\n beta = None\n\t i = 0\n batch_vb = Batch_VB.BatchVB(self.settings['num_terms'], self.settings['num_topics'], self.settings['alpha'],\n self.settings['eta'], self.settings['iter_infer'],\n self.settings['conv_infer'], beta)\n # Start\n print('start!!!')\n datafp = open(self.formatted_data.data_path, 'r')\n (wordids, wordcts) = self.formatted_data.load_batch(datafp)\n\tif a is None:\n if only_doc:\n a = np.zeros(len(wordids), dtype=np.int32)\n else:\n\t a = list()\n\t for d in range(0, len(wordids)):\n\t\t N = len(wordids[d])\n\t\t w = np.zeros(N, dtype=np.int32)\n\t\t a.append(w)\n del wordids\n del wordcts\n if method != 'none':\n if only_doc:\n print('Method: %s, alpha: %.1f, only_doc=true' %(method,alpha))\n else:\n print('Method: %s, alpha: %.1f, only_doc=false' % (method, alpha))\n else:\n print('Method: none noise data')\n while i < self.settings['iter_train']:\n i += 1\n print('***num_iter_train***: %d' %i)\n iter_train = i\n train_file = self.formatted_data.data_path\n datafp = open(train_file, 'r')\n (wordids, wordcts) = self.formatted_data.load_batch(datafp)\n data = noise_data.Noise_Data(wordids,wordcts,iter_train,alpha,a,self.settings['p'],only_doc)\n if method == 'add_uniform':\n (wordids,wordcts) = data.add_uniform()\n elif method == 'mult_uniform':\n (wordids,wordcts) = data.mult_uniform()\n elif method == 'add_ope':\n (wordids,wordcts,a) = data.add_ope()\n elif method == 'mult_ope':\n (wordids,wordcts,a) = data.mult_ope()\n (time_e, time_m, theta) = batch_vb.static_online(wordids, wordcts)\n # Compute sparsity\n #sparsity = 
base.compute_sparsity(theta, theta.shape[0], theta.shape[1], 't')\n # Compute perplexities\n LD2 = perplexity.average_predict(self.test_data, self.settings['alpha'], self.settings['eta'],\n batch_vb._lambda, self.settings['iter_infer'])\n # Search top words of each topics\n #list_tops = base.list_top(batch_vb._lambda, self.tops)\n # Write files\n base.write_file(i, 0, batch_vb._lambda, time_e, time_m, theta, [], LD2, [], self.tops,\n self.model_folder)\n datafp.close()\n # Write settings\n print('write setting ...')\n file_name = '%s/setting.txt'%(self.model_folder)\n base.write_setting(self.settings, file_name)\n # Write final model to file\n print('write final model ...')\n file_name = '%s/beta_final.dat'%(self.model_folder)\n base.write_topics(batch_vb._lambda, file_name)\n\t# Write a\n\tfile_name = '%s/a_%d.txt'%(self.model_folder, i)\n\tf = open(file_name, 'w')\n\tD = len(a)\n\tif only_doc:\n\t for d in range(0, D):\n\t\tf.write('%d\\n' %a[d])\n\telse:\n\t for d in range(0,D):\n\t\tN = len(a[d])\n\t\tfor n in range(0,N):\n\t\t f.write('%d '%a[d][n])\n\t\tf.write('\\n')\n\tf.close()\n # caculate avarage predict probability\n \"\"\"file_name = '%s/perplexity.txt'%(self.model_folder)\n value = perplexity.average_predict(self.test_folder, self.settings['alpha'], self.settings['eta'],\n batch_vb._lambda, self.settings['iter_infer'])\n base.write_perplexities(value,file_name)\"\"\"\n\n # Finish\n print('done!!!')\n print('-----------------------------------------\\n')\n" }, { "alpha_fraction": 0.5662844181060791, "alphanum_fraction": 0.6080275177955627, "avg_line_length": 33.60317611694336, "blob_id": "b642126ca3844bdadaefaff8b0354cd11fc47778", "content_id": "125b06905636338ddf2ea3cb07144065635b2876", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4360, "license_type": "no_license", "max_line_length": 117, "num_lines": 126, "path": "/plot_graph.py", "repo_name": "TruongKhang/topic_models", "src_encoding": 
"UTF-8", "text": "import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\ncopus = ['grolier']\n#method_learing = ['OPE_linear_function', 'OPE_linear_function']\nnoise = ['add_uniform', 'mult_uniform', 'add_ope', 'mult_ope']\nalpha = [0.3, 0.5, 0.7, 1.0]\nonly_doc = ['True', 'False']\nnum_train = [100] #[50, 100, 150, 200]\nlist_alpha1 = list()\nfor name in copus:\n for value in alpha:\n\tnew_value = int(value*10)\n\tlist_method = list()\n\tfor method in noise:\n\t list_only = list()\n\t for boolean in only_doc:\n\t\tlist_per = list()\n\t\tfor index in num_train:\n\t\t for i in range(1,index+1):\n\t\t\taverage = 0.\n\t\t\tfor j in range(4,5):\n\t\t\t file_name = '%s_model_ver2/VB%d/%s/alpha%d/%s200/perplexities_%d.csv'%(name, j, method, new_value, boolean, i)\n\t\t\t f = open(file_name, 'rb')\n\t\t\t reader = csv.reader(f)\n\t\t\t for row in reader:\n\t\t\t perplex = float(row[0])\n\t\t\t average += perplex\n\t\t\taverage = float(average/1)\n\t\t\tlist_per.append(average)\n\t\tlist_only.append(np.array(list_per))\n\t list_method.append(list_only)\n list_alpha1.append(list_method)\nprint(len(list_alpha1[0]))\n\nper_batch = list()\nnum_train1 = [100]\nfor index in num_train1:\n for i in range(1,index+1):\n\taverage = 0.\n\tfor j in range(4,5):\n\t filename = 'grolier_model_ver2/VB%d/batch_vb/batchvb200/perplexities_%d.csv'%(j,i)\n\t f = open(filename, 'rb')\n\t reader = csv.reader(f)\n\t for row in reader:\n\t perplex = float(row[0])\n\t average += perplex\n\taverage = float(average/1)\n\tper_batch.append(average)\n#print(len(list_alpha))\n\nt = np.arange(1,101,1)\ncolor = ['red', 'blue', 'green', 'orange', 'black']\nname_method = ['add1', 'mult1', 'add2', 'mult2']\nfont = {#'family' : 'normal',\n #'weight' : 'bold',\n 'size' : 8}\nmatplotlib.rc('font', **font)\nfor j in range(0,len(list_alpha1)):\n fig = plt.figure()\n plt.xlabel('number of iteration training')\n plt.ylabel('Log Predict Probability')\n plt.title('Method: VB. 
Grolier dataset, c=%.1f, function: 1/t^3' %alpha[j])\n ax = plt.subplot(111)\n for i in range(0,len(list_alpha1[j])):\n ax.plot(t, list_alpha1[j][i][1], color=color[i], linewidth=1.0, label=name_method[i])\n ax.plot(t, list_alpha1[j][i][0], color=color[i], linewidth=1.0, linestyle='--')\n ax.plot(t, per_batch, color=color[4], linewidth=1.0, label='Original VB')\n #plt.axis([0, 200, -8.30, -7.90])\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n #plt.axis([0,200,-10,0])\n plt.show()\n\n\"\"\"list_alpha2 = list()\nnoise = ['add_ope', 'mult_ope']\nalpha = [1.0]\nfor name in copus:\n for value in alpha:\n\tnew_value = int(value*10)\n\tlist_method = list()\n\tfor method in noise:\n\t list_only = list()\n\t for boolean in only_doc:\n\t\tlist_per = list()\n\t\tfor index in num_train:\n\t\t for i in range(index-199,index+1):\n\t\t\taverage = 0.\n\t\t\tfor j in range(1,11):\n\t\t\t file_name = '%s_model/OPE%d/%s/alpha%d/%s200/perplexities_%d.csv'%(name, j, method, new_value, boolean, i)\n\t\t\t f = open(file_name, 'rb')\n\t\t\t reader = csv.reader(f)\n\t\t\t for row in reader:\n\t\t\t perplex = float(row[0])\n\t\t\t average += perplex\n\t\t\taverage = float(average/10)\n\t\t\tlist_per.append(average)\n\t\tlist_only.append(np.array(list_per))\n\t list_method.append(list_only)\n list_alpha2.append(list_method)\n\nt = np.arange(1,201,1)\ncolor = ['green', 'orange', 'black']\nname_method = ['add2', 'mult2']\nfont = {#'family' : 'normal',\n #'weight' : 'bold',\n 'size' : 8}\nmatplotlib.rc('font', **font)\nfor j in range(0,len(list_alpha2)):\n fig = plt.figure()\n plt.xlabel('number of iteration training')\n plt.ylabel('Log Predict Probability')\n plt.title('Method: OPE. 
AP dataset, c=%.1f, function: 1/t' %alpha[j])\n ax = plt.subplot(111)\n for i in range(0,len(list_alpha2[j])):\n ax.plot(t, list_alpha2[j][i][1], color=color[i], linewidth=1.0, label=name_method[i])\n ax.plot(t, list_alpha2[j][i][0], color=color[i], linewidth=1.0, linestyle='--')\n ax.plot(t, per_batch, color=color[2], linewidth=1.0, label='Original OPE')\n plt.axis([0, 200, -8.30, -7.90])\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n #plt.axis([0,200,-10,0])\n plt.show()\"\"\"\n" }, { "alpha_fraction": 0.4723404347896576, "alphanum_fraction": 0.4959810972213745, "avg_line_length": 42.597938537597656, "blob_id": "7e1287757533b37b92f10e900a8fd3468548492e", "content_id": "0faa277e3e2ea54591ed97833ca509ca09ac64e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4230, "license_type": "no_license", "max_line_length": 142, "num_lines": 97, "path": "/datasets/noise_data.py", "repo_name": "TruongKhang/topic_models", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\nimport random\n\nclass Noise_Data:\n def __init__(self, wordids, wordcts, iter_train, alpha, a, p, only_doc=True):\n self.wordids = wordids\n self.wordcts = wordcts\n self.iter_train = iter_train\n self.alpha = alpha # hyperparameter\n self.only_doc = only_doc\n self.a = a #number of sample which received value -1 in set {-1, 1}\n self.p = p\n\n def add_uniform(self):\n D = len(self.wordids)\n new_wordcts = list()\n for d in range(0,D):\n if self.only_doc:\n c = random.choice([-1,0,1])#np.random.random_integers(-1,1)\n N = len(self.wordids[d])\n new_wordct = np.zeros(N, dtype=np.float64)\n for n in range(0,N):\n if not self.only_doc:\n c = random.choice([-1,0,1])#np.random.random_integers(-1,1)\n new_wordct[n] = self.wordcts[d][n] + c*self.alpha/math.pow(self.iter_train,self.p)\n new_wordcts.append(new_wordct)\n return (self.wordids, 
new_wordcts)\n\n def mult_uniform(self):\n D = len(self.wordids)\n new_wordcts = list()\n for d in range(0,D):\n if self.only_doc:\n c = random.choice([-1,0,1])#np.random.random_integers(-1,1)\n N = len(self.wordids[d])\n new_wordct = np.zeros(N, dtype=np.float64)\n for n in range(0,N):\n if not self.only_doc:\n c = random.choice([-1,0,1])#np.random.random_integers(-1,1)\n new_wordct[n] = self.wordcts[d][n] * (1 + float(c*self.alpha/math.pow(self.iter_train,self.p)))\n new_wordcts.append(new_wordct)\n return (self.wordids, new_wordcts)\n\n def add_ope(self):\n D = len(self.wordids)\n new_wordcts = list()\n for d in range(0,D):\n N = len(self.wordids[d])\n new_wordct = np.zeros(N, dtype=np.float64)\n if self.only_doc:\n rand = random.choice([0,1])#np.random.random_integers(0,1)\n if rand == 0:\n self.a[d] = self.a[d] + 1\n #if d == 0:\n # print(self.a[d])\n for n in range(0,N):\n new_wordct[n] = self.wordcts[d][n] + self.alpha*(self.iter_train - 2*self.a[d])/math.pow(self.iter_train, self.p)\n else:\n for n in range(0,N):\n rand = random.choice([0,1])#np.random.random_integers(0,1)\n if rand == 0:\n self.a[d][n] = self.a[d][n] + 1\n new_wordct[n] = self.wordcts[d][n] + self.alpha*(self.iter_train - 2*self.a[d][n])/math.pow(self.iter_train, self.p)\n new_wordcts.append(new_wordct)\n return (self.wordids, new_wordcts, self.a)\n\n def mult_ope(self):\n D = len(self.wordids)\n new_wordcts = list()\n for d in range(0,D):\n N = len(self.wordids[d])\n new_wordct = np.zeros(N, dtype=np.float64)\n if self.only_doc:\n rand = random.choice([0,1])#np.random.random_integers(0,1)\n if rand == 0:\n self.a[d] = self.a[d] + 1\n for n in range(0,N):\n new_wordct[n] = self.wordcts[d][n] * (1 - self.alpha*(self.iter_train - 2*self.a[d])/math.pow(self.iter_train, self.p))\n else:\n for n in range(0,N):\n rand = random.choice([0,1])#np.random.random_integers(0,1)\n if rand == 0:\n self.a[d][n] = self.a[d][n] + 1\n new_wordct[n] = self.wordcts[d][n] * (1 - self.alpha*(self.iter_train - 
2*self.a[d][n])/math.pow(self.iter_train, self.p))\n new_wordcts.append(new_wordct)\n return (self.wordids, new_wordcts, self.a)\n\nif __name__ == '__main__':\n for i in range(0,1):\n print('setting %d: '%i)\n a = np.zeros(3, dtype=np.int32)\n for t in range(1,50):\n wordids = [[1, 2, 3], [4, 6, 7], [1, 4, 7]]\n wordcts = [[1., 1., 2.], [1., 5., 2.], [1., 4., 1.]]\n noise = Noise_Data(wordids,wordcts,t,1,a,only_doc=True)\n (wordids,wordcts,a) = noise.add_ope()\n\n" }, { "alpha_fraction": 0.4721164405345917, "alphanum_fraction": 0.48028498888015747, "avg_line_length": 35.022125244140625, "blob_id": "ca01e1a98457f47052eddd9c260db763ba16b922", "content_id": "8e0408ba2653298d92b61c10405df1030db12681", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16282, "license_type": "no_license", "max_line_length": 120, "num_lines": 452, "path": "/datasets/base.py", "repo_name": "TruongKhang/topic_models", "src_encoding": "UTF-8", "text": "import os, os.path\nimport sys\nimport shutil\nfrom os.path import isdir, isfile, join\nfrom preprocessing import preprocessing\nimport numpy as np\nfrom time import time\n\n# Name of current path directory which contains this file\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n# check format of input file(text formatted or raw text)\ndef check_format(line, c):\n result = True\n l = len(line)\n for i in range(0,l):\n if line[i].isalpha():\n result = False\n break\n elif not line[i].isalnum() and line[i] != ' ' and line[i] != c:\n result = False\n break\n return result\n\nclass Dataset:\n def __init__(self,path):\n self.vocab_path = None\n self.is_raw_text = False\n if isfile(path):\n print(\"Path %s is a file\" %path)\n self.path_isfile = path\n self.path_isdir = None\n name_file = self.path_isfile.split(\"\\\\\")\n name_file = name_file[-1].split(\"/\")\n main_name = name_file[-1]\n self.main_name_file = main_name[:-4]\n elif isdir(path):\n self.path_isdir = path\n 
self.path_isfile = None\n print(\"Path %s is a directory\" %path)\n else:\n self.path_isdir = None\n self.path_isfile = None\n print(\"Unknown path %s!\" %path)\n exit()\n\n \"\"\"\n read file input, check format is raw input or term frequency or term sequence\n \"\"\"\n def load_dataset(self, term_seq=False, term_freq=True):\n if self.path_isfile:\n f = open(self.path_isfile)\n line = f.readline().strip()\n while len(line) == 0:\n line = f.readline().strip()\n if line == \"<DOC>\":\n self.is_raw_text = True\n print(\"Pre-processing:\")\n p = preprocessing.PreProcessing()\n p.process(self.path_isfile)\n p.extract_vocab()\n p.format_freq()\n p.format_seq()\n self.vocab_path = p.dir_path_data + \"/vocab.txt\"\n if term_freq:\n data_path = p.dir_path_data + \"/term_frequency.txt\"\n term_seq = False\n else:\n data_path = p.dir_path_data + \"/term_sequence.txt\"\n term_seq = True\n elif check_format(line, ' '):\n data_path = self.path_isfile\n \"\"\"if 'wikipedia' in self.path_isfile:\n data_path = self.path_isfile\n else:\n dir_folder = dir_path + \"/data/\" + self.main_name_file\n # Create model folder if it doesn't exist\n if os.path.exists(dir_folder):\n shutil.rmtree(dir_folder)\n os.makedirs(dir_folder)\n data_path = dir_folder + \"/term_sequence.txt\"\n print(\"Copy file %s => %s\" %(self.path_isfile,data_path))\n shutil.copyfile(self.path_isfile,data_path)\"\"\"\n term_freq = False\n term_seq = True\n elif check_format(line, ':'):\n data_path = self.path_isfile\n \"\"\"if 'wikipedia' in self.path_isfile:\n data_path = self.path_isfile\n else:\n dir_folder = dir_path + \"/data/\" + self.main_name_file\n # Create model folder if it doesn't exist\n if os.path.exists(dir_folder):\n shutil.rmtree(dir_folder)\n os.makedirs(dir_folder)\n data_path = dir_folder + \"/term_frequency.txt\"\n print(\"Copy file %s => %s\" % (self.path_isfile, data_path))\n shutil.copyfile(self.path_isfile, data_path)\"\"\"\n term_seq = False\n term_freq = True\n else:\n print(\"File %s is 
not true format!\" %self.path_isfile)\n sys.exit()\n f.close()\n bunch = self.Bunch(data_path,term_seq,term_freq)\n return bunch\n\n \"\"\"\n inner class with methods load data from formatted file\n \"\"\"\n class Bunch:\n def __init__(self, data_path, term_seq, term_freq):\n self.data_path = data_path\n self.term_seq = term_seq\n self.term_freq = term_freq\n self.copus = []\n # load number of documents\n f = open(data_path,'r')\n lines = f.readlines()\n self.num_doc = len(lines)\n del lines\n f.close()\n\n \"\"\"\n shuffle input and write into file file_shuffled.txt, return path of this file\n \"\"\"\n def shuffle(self):\n f = open(self.data_path)\n lines = f.readlines()\n self.num_doc = len(lines)\n np.random.shuffle(lines)\n f.close()\n if self.term_freq:\n d = self.data_path[:-19]\n elif self.term_seq:\n d = self.data_path[:-18]\n fout = open(join(d,\"file_shuffled.txt\"), \"w\")\n for line in lines:\n fout.write(\"%s\" %line)\n fout.close()\n del lines\n return join(d,\"file_shuffled.txt\")\n\n def load_batch(self, fp):\n docs = fp.readlines();\n D = len(docs)\n wordids = list()\n wordcts = list()\n for d in range(0,D):\n list_word = docs[d].strip().split()\n N = int(list_word[0])\n if N+1 != len(list_word):\n print(\"Line %d in file %s is error!\" % (d + 1, self.data_path))\n exit()\n wordid = np.zeros(N, dtype=np.int32)\n wordct = np.zeros(N, dtype=np.float64)\n for n in range(1, N+1):\n id_ct = list_word[n].split(':')\n wordid[n-1] = int(id_ct[0])\n wordct[n-1] = float(id_ct[1])\n wordids.append(wordid)\n wordcts.append(wordct)\n return (wordids,wordcts)\n\n \"\"\"\n read mini-batch data and store with format term sequence\n fp is file pointer after shuffled\n \"\"\"\n def load_minibatch_term_seq(self, fp, size_batch):\n doc_terms = []\n doc_lens = []\n for i in range(0, size_batch):\n doc = fp.readline()\n #check end file\n if len(doc) < 5:\n break\n list_word = doc.strip().split()\n N = int(list_word[0])\n if N + 1 != len(list_word):\n print(\"Line %d in 
file %s is error!\" % (i + 1, self.data_path))\n sys.exit()\n if self.term_freq:\n tokens = list()\n for j in range(1, N + 1):\n tf = list_word[j].split(\":\")\n for k in range(0,int(tf[1])):\n tokens.append(int(tf[0]))\n doc_terms.append(np.array(tokens))\n doc_lens.append(len(tokens))\n elif self.term_seq:\n doc_t = np.zeros(N, dtype=np.int32)\n for j in range(1, N + 1):\n doc_t[j - 1] = int(list_word[j])\n doc_l = N\n doc_terms.append(doc_t)\n doc_lens.append(doc_l)\n del list_word\n return (doc_terms,doc_lens)\n\n \"\"\"\n read mini-batch data and store with format term frequency\n fp is file pointer after shuffled\n \"\"\"\n def load_minibatch_term_freq(self,fp,size_batch):\n doc_terms = []\n doc_freqs = []\n for i in range(0,size_batch):\n doc = fp.readline()\n if len(doc) < 5:\n break\n list_word = doc.strip().split()\n N = int(list_word[0])\n if N + 1 != len(list_word):\n print(\"Line %d in file %s is error!\" % (i + 1, self.data_path))\n sys.exit()\n if self.term_freq:\n doc_t = np.zeros(N,dtype=np.int32)\n doc_f = np.zeros(N,dtype=np.int32)\n for j in range(1,N+1):\n tf = list_word[j].split(\":\")\n doc_t[j-1] = int(tf[0])\n doc_f[j-1] = int(tf[1])\n doc_terms.append(doc_t)\n doc_freqs.append(doc_f)\n elif self.term_seq:\n terms = []\n freqs = []\n k = 0\n for j in range(1,N+1):\n if int(list_word[j]) not in terms:\n terms.append(int(list_word[j]))\n freqs.append(1)\n else:\n index = terms.index(int(list_word[j]))\n freqs[index] += 1\n doc_terms.append(np.array(terms))\n doc_freqs.append(np.array(freqs))\n del list_word\n return (doc_terms,doc_freqs)\n\n\"\"\"------------------------------------------------------------------------------------------------------------------\"\"\"\n\ndef read_setting(file_name):\n if isfile(file_name):\n f = open(file_name, 'r')\n settings = f.readlines()\n f.close()\n sets = list()\n vals = list()\n for i in range(len(settings)):\n # print'%s\\n'%(settings[i])\n line = settings[i].strip()\n if len(line) != 0:\n if line[0] 
== '#':\n continue\n set_val = settings[i].strip().split(':')\n sets.append(set_val[0])\n vals.append(float(set_val[1]))\n ddict = dict(zip(sets, vals))\n #ddict['num_terms'] = int(ddict['num_terms'])\n ddict['num_topics'] = int(ddict['num_topics'])\n ddict['iter_train'] = int(ddict['iter_train'])\n ddict['iter_infer'] = int(ddict['iter_infer'])\n ddict['batch_size'] = int(ddict['batch_size'])\n ddict['num_crawling'] = int(ddict['num_crawling'])\n #ddict['p'] = int(ddict['p'])\n return (ddict)\n else:\n print(\"Can't find file!\")\n sys.exit()\n\n\"\"\"\n Read all documents in the file and stores terms and counts in lists.\n\"\"\"\ndef read_data(filename):\n wordids = list()\n wordcts = list()\n fp = open(filename, 'r')\n while True:\n line = fp.readline()\n # check end of file\n if len(line) < 1:\n break\n terms = line.split()\n doc_length = int(terms[0])\n ids = np.zeros(doc_length, dtype = np.int32)\n cts = np.zeros(doc_length, dtype = np.int32)\n for j in range(1, doc_length + 1):\n term_count = terms[j].split(':')\n ids[j - 1] = int(term_count[0])\n cts[j - 1] = int(term_count[1])\n wordids.append(ids)\n wordcts.append(cts)\n fp.close()\n return(wordids, wordcts)\n\n\"\"\"\n Read data for computing perplexities.\n\"\"\"\ndef read_data_for_perpl(test_data_folder):\n onlyfiles = [f for f in os.listdir(test_data_folder) if isfile(os.path.join(test_data_folder, f))]\n k_fold_cross = int(len(onlyfiles) / 2)\n list_data_test = list()\n for i in range(0,k_fold_cross):\n filename_part1 = '%s/data_test_%d_part_1.txt'%(test_data_folder, i+1)\n filename_part2 = '%s/data_test_%d_part_2.txt'%(test_data_folder, i+1)\n (wordids_1, wordcts_1) = read_data(filename_part1)\n (wordids_2, wordcts_2) = read_data(filename_part2)\n data_test = list()\n data_test.append(wordids_1)\n data_test.append(wordcts_1)\n data_test.append(wordids_2)\n data_test.append(wordcts_2)\n list_data_test.append(data_test)\n return(list_data_test)\n\ndef read_beta(filename):\n if isfile(filename):\n f = 
open(filename)\n lines = f.readlines()\n words = lines[0].strip().split()\n K = len(lines)\n W = len(words)\n beta = np.zeros((K,W))\n for i in range(0,K):\n words = lines[i].strip().split()\n if len(words) != W:\n print('File %s is error' %filename)\n exit()\n for j in range(0,W):\n beta[i][j] = float(words[j])\n return beta\n else:\n print('Unknown file %s' %filename)\n exit()\n\n\"\"\"\n Compute document sparsity.\n\"\"\"\ndef compute_sparsity(doc_tp, batch_size, num_topics, _type):\n sparsity = np.zeros(batch_size, dtype = np.float)\n if _type == 'z':\n for d in range(batch_size):\n N_z = np.zeros(num_topics, dtype = np.int)\n N = len(doc_tp[d])\n for i in range(N):\n N_z[doc_tp[d][i]] += 1.\n sparsity[d] = len(np.where(N_z != 0)[0])\n else:\n for d in range(batch_size):\n sparsity[d] = len(np.where(doc_tp[d] > 1e-10)[0])\n sparsity /= num_topics\n return(np.mean(sparsity))\n\n\"\"\"\n Create list of top words of topics.\n\"\"\"\ndef list_top(beta, tops):\n min_float = -sys.float_info.max\n num_tops = beta.shape[0]\n list_tops = list()\n for k in range(num_tops):\n top = list()\n arr = np.array(beta[k,:], copy = True)\n for t in range(tops):\n index = arr.argmax()\n top.append(index)\n arr[index] = min_float\n list_tops.append(top)\n return(list_tops)\n\n\n\"\"\"------------------------------------------------------------------------------------------------------------------\"\"\"\ndef write_setting(ddict,file_name):\n keys = list(ddict.keys())\n vals = list(ddict.values())\n f = open(file_name, 'w')\n for i in range(len(keys)):\n f.write('%s: %f\\n' % (keys[i], vals[i]))\n f.close()\n\ndef write_topics(beta, file_name):\n num_terms = beta.shape[1]\n num_topics = beta.shape[0]\n f = open(file_name, 'w')\n for k in range(num_topics):\n for i in range(num_terms - 1):\n f.write('%.10f ' % (beta[k][i]))\n f.write('%.10f\\n' % (beta[k][num_terms - 1]))\n f.close()\n\ndef write_topic_mixtures(theta, file_name):\n batch_size = theta.shape[0]\n num_topics = 
theta.shape[1]\n f = open(file_name, 'a')\n for d in range(batch_size):\n for k in range(num_topics - 1):\n f.write('%.5f ' % (theta[d][k]))\n f.write('%.5f\\n' % (theta[d][num_topics - 1]))\n f.close()\n\ndef write_perplexities(LD2, file_name):\n f = open(file_name, 'a')\n f.writelines('%f,'%(LD2))\n f.close()\n\ndef write_time(i, j, time_e, time_m, file_name):\n f = open(file_name, 'a')\n f.write('tloop_%d_iloop_%d, %f, %f, %f,\\n' % (i, j, time_e, time_m, time_e + time_m))\n f.close()\n\ndef write_loop(i, j, file_name):\n f = open(file_name, 'w')\n f.write('%d, %d' % (i, j))\n f.close()\n\ndef write_file(i, j, beta, time_e, time_m, theta, sparsity, LD2, list_tops, tops, model_folder):\n beta_file_name = '%s/beta_%d_%d.dat'%(model_folder, i, j)\n theta_file_name = '%s/theta_%d.dat'%(model_folder, i)\n per_file_name = '%s/perplexities_%d.csv'%(model_folder, i)\n #top_file_name = '%s/top%d_%d_%d.dat'%(model_folder, tops, i, j)\n #spar_file_name = '%s/sparsity_%d.csv'%(model_folder, i)\n time_file_name = '%s/time_%d.csv'%(model_folder, i)\n loop_file_name = '%s/loops.csv'%(model_folder)\n\n # write beta\n #if i % 10 == 1:\n # write_topics(beta, beta_file_name)\n # write theta\n #write_topic_mixtures(theta, theta_file_name)\n\n # write perplexities\n write_perplexities(LD2, per_file_name)\n # write list top\n ##write_topic_top(list_tops, top_file_name)\n # write sparsity\n ##write_sparsity(sparsity, spar_file_name)\n # write time\n write_time(i, j, time_e, time_m, time_file_name)\n # write loop\n write_loop(i, j, loop_file_name)\n\nif __name__ == '__main__':\n t0 = time()\n data = Dataset(\"D:\\\\UCR_TS_Archive_2015\\\\ap/ap.txt\").load_dataset()\n (docs,freqs) = data.load_batch()\n print(len(docs))\n print(docs[0])\n (docs,lengs) = data.load_minibatch(2)\n print(docs)\n print(lengs)\n print(\"Done in %.3f\" %(time() - t0))\n" }, { "alpha_fraction": 0.5423541069030762, "alphanum_fraction": 0.5463746190071106, "avg_line_length": 34.70792007446289, "blob_id": 
"97a9c32c646dab1ae89a9660a9ce34aa14826a53", "content_id": "c98ccaa4e5737c6666e1f6aea27b6b113408cd0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7213, "license_type": "no_license", "max_line_length": 119, "num_lines": 202, "path": "/datasets/crawl_wiki_ariticles.py", "repo_name": "TruongKhang/topic_models", "src_encoding": "UTF-8", "text": "\"\"\"Reference: M.Hoffman - onlineldavb\"\"\"\n\nimport sys, urllib2, re, string, time, threading\nimport os\n\n# Name of current path directory which contains this file\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ndef get_random_wikipedia_article():\n \"\"\"\n Downloads a randomly selected Wikipedia article (via\n http://en.wikipedia.org/wiki/Special:Random) and strips out (most\n of) the formatting, links, etc.\n\n This function is a bit simpler and less robust than the code that\n was used for the experiments in \"Online VB for LDA.\"\n \"\"\"\n failed = True\n while failed:\n articletitle = None\n failed = False\n try:\n req = urllib2.Request('http://en.wikipedia.org/wiki/Special:Random',\n None, { 'User-Agent' : 'x'})\n f = urllib2.urlopen(req)\n while not articletitle:\n line = f.readline()\n result = re.search(r'title=\"Edit this page\" href=\"/w/index.php\\?title=(.*)\\&amp;action=edit\"\\/>', line)\n if (result):\n articletitle = result.group(1)\n break\n elif (len(line) < 1):\n sys.exit(1)\n\n req = urllib2.Request('http://en.wikipedia.org/w/index.php?title=Special:Export/%s&action=submit' \\\n % (articletitle),\n None, { 'User-Agent' : 'x'})\n f = urllib2.urlopen(req)\n all = f.read()\n except (urllib2.HTTPError, urllib2.URLError):\n print('oops. there was a failure downloading %s. retrying...' % articletitle)\n failed = True\n continue\n print('downloaded %s. parsing...' 
% articletitle)\n\n try:\n all = re.search(r'<text.*?>(.*)</text', all, flags=re.DOTALL).group(1)\n all = re.sub(r'\\n', ' ', all)\n all = re.sub(r'\\{\\{.*?\\}\\}', r'', all)\n all = re.sub(r'\\[\\[Category:.*', '', all)\n all = re.sub(r'==\\s*[Ss]ource\\s*==.*', '', all)\n all = re.sub(r'==\\s*[Rr]eferences\\s*==.*', '', all)\n all = re.sub(r'==\\s*[Ee]xternal [Ll]inks\\s*==.*', '', all)\n all = re.sub(r'==\\s*[Ee]xternal [Ll]inks and [Rr]eferences==\\s*', '', all)\n all = re.sub(r'==\\s*[Ss]ee [Aa]lso\\s*==.*', '', all)\n all = re.sub(r'http://[^\\s]*', '', all)\n all = re.sub(r'\\[\\[Image:.*?\\]\\]', '', all)\n all = re.sub(r'Image:.*?\\|', '', all)\n all = re.sub(r'\\[\\[.*?\\|*([^\\|]*?)\\]\\]', r'\\1', all)\n all = re.sub(r'\\&lt;.*?&gt;', '', all)\n except:\n # Something went wrong, try again. (This is bad coding practice.)\n print('oops. there was a failure parsing %s. retrying...' % articletitle)\n failed = True\n continue\n\n return(all, articletitle)\n\nclass WikiThread(threading.Thread):\n articles = list()\n articlenames = list()\n lock = threading.Lock()\n def __init__(self):\n\tthreading.Thread.__init__(self)\n\n def run(self):\n (article, articlename) = get_random_wikipedia_article()\n WikiThread.lock.acquire()\n WikiThread.articles.append(article)\n WikiThread.articlenames.append(articlename)\n WikiThread.lock.release()\n\ndef get_random_wikipedia_articles(n):\n \"\"\"\n Downloads n articles in parallel from Wikipedia and returns lists\n of their names and contents. Much faster than calling\n get_random_wikipedia_article() serially.\n \"\"\"\n maxthreads = 8\n WikiThread.articles = list()\n WikiThread.articlenames = list()\n wtlist = list()\n for i in range(0, n, maxthreads):\n print('downloaded %d/%d articles...' 
% (i, n))\n for j in range(i, min(i+maxthreads, n)):\n wtlist.append(WikiThread())\n wtlist[len(wtlist)-1].start()\n for j in range(i, min(i+maxthreads, n)):\n wtlist[j].join()\n\n return (WikiThread.articles, WikiThread.articlenames)\n\ndef parse_doc_list(fp, docs, vocab):\n \"\"\"\n Parse a document into a list of word ids and a list of counts,\n or parse a set of documents into two lists of lists of word ids\n and counts.\n\n Arguments:\n docs: List of D documents. Each document must be represented as\n a single string. (Word order is unimportant.) Any\n words not in the vocabulary will be ignored.\n vocab: Dictionary mapping from words to integer ids.\n\n Returns a pair of lists of lists.\n\n The first, wordids, says what vocabulary tokens are present in\n each document. wordids[i][j] gives the jth unique token present in\n document i. (Don't count on these tokens being in any particular\n order.)\n\n The second, wordcts, says how many times each vocabulary token is\n present. wordcts[i][j] is the number of times that the token given\n by wordids[i][j] appears in document i.\n \"\"\"\n if (type(docs).__name__ == 'str'):\n temp = list()\n temp.append(docs)\n docs = temp\n\n D = len(docs)\n\n wordids = list()\n wordcts = list()\n for d in range(0, D):\n docs[d] = docs[d].lower()\n docs[d] = re.sub(r'-', ' ', docs[d])\n docs[d] = re.sub(r'[^a-z ]', '', docs[d])\n docs[d] = re.sub(r' +', ' ', docs[d])\n words = string.split(docs[d])\n ddict = dict()\n for word in words:\n if (word in vocab):\n wordtoken = vocab[word]\n if (not wordtoken in ddict):\n ddict[wordtoken] = 0\n ddict[wordtoken] += 1\n wordids = ddict.keys()\n wordcts = ddict.values()\n fp.write('%d ' %len(wordids))\n for i in range(0,len(wordids)):\n fp.write('%d:%d ' %(wordids[i],wordcts[i]))\n\tif d < D-1:\n\t fp.write('\\n')\n\n del wordids\n del wordcts\n\ndef crawl(size_batch, num_crawling):\n path_vocab = dir_path+\"/data/wikipedia/vocab.txt\"\n if(os.path.isfile(path_vocab)):\n f = 
open(path_vocab,'r')\n l_vocab = f.readlines()\n f.close()\n else:\n print('Unknown file %s' %path_vocab)\n exit()\n d_vocab = dict()\n for word in l_vocab:\n word = word.lower()\n word = re.sub(r'[^a-z]', '', word)\n d_vocab[word] = len(d_vocab)\n del l_vocab\n\n fdata = open(os.path.join(dir_path+\"/data/wikipedia\",\"articles.txt\"),'w')\n fformat = open(os.path.join(dir_path+\"/data/wikipedia\",\"input.txt\"),'w')\n\n for i in range(0,num_crawling):\n (docset,articlenames) = get_random_wikipedia_articles(size_batch)\n N = len(docset)\n \tfor j in range(0,N):\n fdata.write(\"<DOC>\\n\")\n fdata.write(\"<TITLE> %s <\\\\TITLE>\\n\" %articlenames[j])\n fdata.write(\"<TEXT>\\n\")\n fdata.write(\"%s\\n\" %docset[j])\n fdata.write(\"<\\DOC>\\n\")\n fdata.write(\"<\\TEXT>\\n\")\n parse_doc_list(fformat,docset,d_vocab)\n\tif i < num_crawling-1:\n\t fformat.write('\\n')\n fdata.close()\n fformat.close()\n\nif __name__ == '__main__':\n t0 = time.time()\n\n (articles, articlenames) = get_random_wikipedia_articles(4)\n for i in range(0, len(articles)):\n print(articlenames[i])\n\n t1 = time.time()\n print('took %f' % (t1 - t0))\n" }, { "alpha_fraction": 0.5380309224128723, "alphanum_fraction": 0.5591043829917908, "avg_line_length": 29.676767349243164, "blob_id": "b4150171ac2937919f7579a3e937a4c998502c45", "content_id": "37e91a59a128b94ba707394d771ca3dc5d4383a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3037, "license_type": "no_license", "max_line_length": 84, "num_lines": 99, "path": "/lda/Batch_VB/Batch_VB.py", "repo_name": "TruongKhang/topic_models", "src_encoding": "UTF-8", "text": "import re, time\nimport sys\nfrom datetime import datetime\n\nimport numpy as n\nfrom scipy.special import gammaln, psi\n\nt = datetime.now()\ntime_cur = t.hour*3600 + t.minute*60 + t.second\nn.random.seed(time_cur+100000000)\n\"\"\"\nCreated : 03 /08 /2016\n@author : Ha Nhat Cuong\n\"\"\"\n\ndef dirichlet_expectation(alpha):\n 
if len(alpha.shape) == 1:\n return psi(alpha) - psi(n.sum(alpha))\n return psi(alpha) - psi(n.sum(alpha, 1))[:, n.newaxis]\n\n\nclass BatchVB:\n\n def __init__(self, num_terms, num_topics, alpha, eta,\n iter_infer, meanchangethresh, beta=None):\n #self._D = num_docs\n\n self._K = num_topics\n self._W = num_terms\n\n self._iter_infer = iter_infer\n self._meanchangethresh = meanchangethresh\n\n self._alpha = alpha\n self._eta = eta\n\n if beta is not None:\n self._lambda = beta\n else:\n self._lambda = 1*n.random.gamma(100.0, 1.0/100.0, (self._K, self._W))\n self._Elogbeta = dirichlet_expectation(self._lambda)\n self._expElogbeta = n.exp(self._Elogbeta)\n\n def do_e_step(self, wordids, wordcts):\n batchD = len(wordids)\n\n gamma = n.random.gamma(100.0, 1.0/100.0, (batchD, self._K))\n Elogtheta = dirichlet_expectation(gamma)\n expElogtheta = n.exp(Elogtheta)\n\n sstats = n.zeros(self._lambda.shape)\n\n for d in range(0, batchD) :\n ids = wordids[d]\n cts = wordcts[d] # 1 * size(ids)\n\n gammad = gamma[d, :] # gammad [1* K]\n Elogthetad = Elogtheta[d, :]\n expElogthetad = expElogtheta[d, :] #1*K\n expElogbetad = self._expElogbeta[:, ids] # K * size(ids)\n\n phinorm = n.dot(expElogthetad, expElogbetad) + 1e-100 # 1 * size(ids)\n\n for it in range(0, self._iter_infer):\n lastgamma = gammad\n\n gammad = self._alpha + expElogthetad*\\\n n.dot(cts/phinorm, expElogbetad.T)\n\n Elogthetad = dirichlet_expectation(gammad)\n expElogthetad = n.exp(Elogthetad)\n phinorm = n.dot(expElogthetad, expElogbetad) + 1e-100\n\n meanchange = n.mean(abs(lastgamma - gammad))\n if meanchange < self._meanchangethresh :\n break\n\n gamma[d, :] = gammad\n sstats[:, ids] += n.outer(expElogthetad.T, cts/phinorm) # K * size(ids)\n\n sstats = sstats * self._expElogbeta # K * W\n return gamma, sstats # batchD * K , K * W\n\n def update_lambda(self, sstats):\n\n self._lambda = self._eta + sstats # num_topics * W\n self._Elogbeta = dirichlet_expectation(self._lambda)\n self._expElogbeta = 
n.exp(self._Elogbeta)\n\n\n def static_online(self, wordids, wordcts):\n # E step\n start = time.time()\n (gamma, sstats) = self.do_e_step(wordids, wordcts)\n end1 = time.time()\n # M step\n self.update_lambda(sstats)\n end2 = time.time()\n return (end1 - start, end2 - start, gamma)\n" } ]
9
shannonserrao/Stack_Exchange
https://github.com/shannonserrao/Stack_Exchange
dfcc1ba8ad47296f8785d9c751cdcb85fd4567d2
502806325175c3c965cd5c571fb61c0338cb8d3b
c5767ab2af88e5eede79ae8495c003fe0b7026d0
refs/heads/master
2020-12-19T19:13:38.849408
2020-02-11T04:26:44
2020-02-11T04:26:44
235,825,185
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8042226433753967, "alphanum_fraction": 0.8157389760017395, "avg_line_length": 29.52941131591797, "blob_id": "b5690b391bf5a07d954ab373ac869a8d827b6320", "content_id": "50c6bd73cde672281dadd5166eb3688fa698e4cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 521, "license_type": "no_license", "max_line_length": 125, "num_lines": 17, "path": "/Project files final/README.md", "repo_name": "shannonserrao/Stack_Exchange", "src_encoding": "UTF-8", "text": "# Read me \n\n1. Python Processing file\nThis file does some of the preprocessing like removing special charachters from the nominal fields and converting to numeric.\ndatapreprocessing.py\n\n2. Filter file description\nSO_Weka_Data_PreProc_filters\n\n3. Classification/Regression configuration files\nSO_Weka_Classification_regression_config\n\n4. Model files : Present in 'model'folder in Classification/Regression folder\n\n5. Results Dump Files: Present in Classification/Regression folder\n\n6. 
Data Files: Present in Data folder\n\n\n" }, { "alpha_fraction": 0.7084047794342041, "alphanum_fraction": 0.7217838764190674, "avg_line_length": 25.990739822387695, "blob_id": "f6624a32951029e8747f75f7cdc98eb6676b9b1c", "content_id": "e1c66a06adb5a58f608a89da8c72f8190559991f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2915, "license_type": "no_license", "max_line_length": 114, "num_lines": 108, "path": "/datapreprocessing.py", "repo_name": "shannonserrao/Stack_Exchange", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 28 18:49:47 2018\n\n@author: shannon\n\"\"\"\nimport pandas as pd\n#data = pd.read_csv('/home/shannon/Pictures/Data analysis 5525/Project stuff/survey_results_public.csv')\nimport warnings\ndata= pd.read_csv('/home/shannon/Pictures/Data analysis 5525/Project stuff/survey_results_stackoverflow_only.csv')\ndata.describe\ndata.shape\ndata['StackOverflowDescribes'].replace(\n to_replace=\"I have a login for Stack Overflow, but haven\\'t created a CV or Developer Story\",\n value=\"2\",\n inplace=True\n)\ndata['StackOverflowDescribes'].replace(\n to_replace=\"I\\'ve visited Stack Overflow, but haven\\'t logged in/created an account\",\n value='1',\n inplace=True\n)\ndata['StackOverflowDescribes'].replace(\n to_replace='I have created a CV or Developer Story on Stack Overflow',\n value='3',\n inplace=True\n)\ndata['StackOverflowDescribes'].replace(\n to_replace='I\\'d never heard of Stack Overflow before today',\n value='0',\n inplace=True\n)\ndata['StackOverflowDescribes'].replace(\n to_replace='I\\'ve heard of Stack Overflow, but have never visited',\n value='0',\n inplace=True\n)\ndata.StackOverflowDescribes.unique()\ndata.StackOverflowFoundAnswer.unique()\ndata['StackOverflowFoundAnswer'].replace(\n to_replace='Haven\\'t done at all',\n value='0',\n 
inplace=True\n)\ndata.StackOverflowCopiedCode.unique()\ndata['StackOverflowCopiedCode'].replace(\n to_replace='Haven\\'t done at all',\n value='0',\n inplace=True\n)\n\n\ndata.StackOverflowJobListing.unique()\ndata['StackOverflowJobListing'].replace(\n to_replace='Haven\\'t done at all',\n value='0',\n inplace=True\n)\n\ndata['StackOverflowJobListing'].replace(\n to_replace='Haven\\'t done at all',\n value='0',\n inplace=True\n)\ndata.StackOverflowJobListing.unique()\n#StackOverflowCompanyPage\ndata['StackOverflowCompanyPage'].replace(\n to_replace='Haven\\'t done at all',\n value='0',\n inplace=True\n)\ndata.StackOverflowCompanyPage.unique()\n\n#StackOverflowJobSearch\ndata['StackOverflowJobSearch'].replace(\n to_replace='Haven\\'t done at all',\n value='0',\n inplace=True\n)\ndata.StackOverflowJobSearch.unique()\n#StackOverflowNewQuestion\ndata['StackOverflowNewQuestion'].replace(\n to_replace='Haven\\'t done at all',\n value='0',\n inplace=True\n)\ndata.StackOverflowNewQuestion.unique()\n#StackOverflowAnswer\n#StackOverflowMetaChat\ndata['StackOverflowAnswer'].replace(\n to_replace='Haven\\'t done at all',\n value='0',\n inplace=True\n)\ndata.StackOverflowMetaChat.unique()\ndata['StackOverflowMetaChat'].replace(\n to_replace='Haven\\'t done at all',\n value='0',\n inplace=True\n)\ndata.StackOverflowAnswer.unique()\ndata.to_csv('SOformat.csv', sep=',', encoding='utf-8')\ndata.to_csv('SOformat1.csv', sep=',')\ndata.to_csv('SOformattab.csv', sep='\\t', encoding='utf-8')\n\ndata.StackOverflowAdsDistracting.unique()\ndata.StackOverflowMakeMoney.unique()\n" } ]
2
AlbertoDelLop/NextEra-Energy---WSMS-project
https://github.com/AlbertoDelLop/NextEra-Energy---WSMS-project
fa8a54cd44d96fd19420ccce6ff2642bd2233a3b
24c29c7e2b32059335203a3711f3d452c7156d67
398cd3f9b97646c328a181044f4fe4fd37c5f52e
refs/heads/main
2023-04-19T01:20:18.400595
2021-05-09T15:10:53
2021-05-09T15:10:53
365,616,898
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7901430726051331, "alphanum_fraction": 0.7901430726051331, "avg_line_length": 26.34782600402832, "blob_id": "e56c73cdde9b9702863e73b250b0d5a9c4084d2f", "content_id": "6296a6d7a8d784198e1c87a67e26ce2094618610", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 629, "license_type": "no_license", "max_line_length": 245, "num_lines": 23, "path": "/README.md", "repo_name": "AlbertoDelLop/NextEra-Energy---WSMS-project", "src_encoding": "UTF-8", "text": "# NextEra-Energy---WSMS-project\nWeb Scraping and Social Media Scraping project for Data Science Master's Studies in WNE University of Warsaw. This scraping project intends to extract some basic stock information from the Investor Relations portal of the NextEra Energy website.\n\nhttp://www.investor.nexteraenergy.com/\n\n## How to run scrapers:\n\n- Beautiful Soup\n\n\n- Scrapy\n\nIn terminal run the following commands:\n\nscrapy crawl crawler -o overview.csv(or preferred name for saving the output)\n\nscrapy crawl stock -o stock.csv (or preferred name for saving the output)\n\n- Selenium\n\nIn terminal run the command:\n\npython Selenium.py\n" }, { "alpha_fraction": 0.6583476662635803, "alphanum_fraction": 0.6781411170959473, "avg_line_length": 43.11538314819336, "blob_id": "915172b4e3522d7c74384a380d64289437311049", "content_id": "66d3f46995338bef4f0d823a9dd8b01f6028e779", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4648, "license_type": "no_license", "max_line_length": 134, "num_lines": 104, "path": "/soup/soup.py", "repo_name": "AlbertoDelLop/NextEra-Energy---WSMS-project", "src_encoding": "UTF-8", "text": " #NextEra energy\n#Extracting Company Overview\n#First of all, upload necessary packages\nimport time\nfrom urllib import request as re\nfrom bs4 import BeautifulSoup as BS\nimport pandas as pd\nimport requests\n# Then, open the page below in a browser of your choice.\nurl 
= 'http://www.investor.nexteraenergy.com'\nhtml = re.urlopen(url)\nbs = BS(html.read(), 'html.parser')\n\n\n# Beautiful Soup 'find' method allows to create list of tags by class:\nintro = bs.find('div', {'class': 'home-intro'}).get_text()\nprint(intro)\n\nintro=intro.split(sep='\\n')\nprint(type(intro))\nprint(intro[3])\n\n# Instead of displaying it one by one we might use list comprehension to put them into new list and pandas data frame:\n# The data can be put into data frame, later into .csv file.\noverview = {'Title': [intro[1]], 'Text': [intro[3]]}\n\nd = pd.DataFrame(overview)\n\nprint(d)\n# ################################################################################\n# # This part saves data to csv.\n# ################################################################################\nd.to_csv('overview.csv')\n\n # Extraction of Stock Chart Table\n\n\n# We download the code as before:\n#Changing the link for getting stock chart information\nurl = 'http://ir.tools.investis.com/Clients/(S(i350sggxfguad3cfd2icvazs))/us/nextera_energy_inc/SM8/Default.aspx?culture=en-US'\nhtml = re.urlopen(url)\nbs = BS(html.read(), 'html.parser')\n# Variables created is a list to store quotes\nISIN = []\nSymbol = []\nNumber_of_shares_Tota=[]\nMarket_Cap_Mn=[]\nBest_Bid=[]\nBest_offer=[]\nDay_Volume=[]\nDividend=[]\nLast_Close=[]\nOpen=[]\nDay_High=[]\nDay_Low=[]\n_52_Week_High=[]\n_52_Week_Low=[]\nDividend_yield=[]\n\n\n# Agent is any software that retrieves and presents Web content for end users.\n# it can be web browsers, media players, and plug-ins that help in retrieving, rendering and interacting with web content.\nagent = {\"User-Agent\":'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}\n\nrqst_html= requests.get(url, headers=agent)\nhtml=rqst_html.text\ndata=BS(html, 'html.parser')\n\n#Here with find_all method using span and its attributes via indexes [0] I am getting the information.\n#Span id's are unique 
for each information this is why it is the best way to extract information with those id's.\n#There are some classes that are the same and I can't use it for getting the information I needed.\n#And this information that I get is appending to the quotes that I created to store\nISIN.append(data.find_all('span', attrs={'id':'snapShotBox_Instrument4'})[0].text)\nSymbol.append(data.find_all('span', attrs={'id':'snapShotBox_inst1'})[0].text)\nNumber_of_shares_Tota.append(data.find_all('span', attrs={'id':'snapShotBox_Instrument9'})[0].text)\nMarket_Cap_Mn.append(data.find_all('span', attrs={'id':'snapShotBox_marketcap2'})[0].text)\nBest_Bid.append(data.find_all('span', attrs={'id':'snapShotBox_Snapshot1'})[0].text)\nBest_offer.append(data.find_all('span', attrs={'id':'snapShotBox_Snapshot16'})[0].text)\nDay_Volume.append(data.find_all('span', attrs={'id':'snapShotBox_Snapshot11'})[0].text)\nDividend.append(data.find_all('span', attrs={'id':'snapShotBox_Dividend1'})[0].text)\nLast_Close.append(data.find_all('span', attrs={'id':'snapShotBox_Snapshot10'})[0].text)\nOpen.append(data.find_all('span', attrs={'id':'snapShotBox_Snapshot9'})[0].text)\nDay_High.append(data.find_all('span', attrs={'id':'snapShotBox_Snapshot13'})[0].text)\nDay_Low.append(data.find_all('span', attrs={'id':'snapShotBox_Snapshot14'})[0].text)\n_52_Week_High.append(data.find_all('span', attrs={'id':'snapShotBox_Snapshot17'})[0].text)\n_52_Week_Low.append(data.find_all('span', attrs={'id':'snapShotBox_Snapshot18'})[0].text)\nDividend_yield.append(data.find_all('span', attrs={'id':'snapShotBox_Dividend2'})[0].text)\n\n\n\n# Dataframe\n# I am creating dataframe with the main names in the table and giving them titles to make it clearer.\n# These names can be found in\nDF = pd.DataFrame({\"ISIN\": ISIN, \"Symbol\": Symbol, \"Number of shares - Total\": Number_of_shares_Tota,\n\t\"Market Cap\": Market_Cap_Mn, \"Best Bid\": Best_Bid, \"Best Offer\": Best_offer, \"Day Volume\": Day_Volume,\n 
\t\t\t\"Dividend\": Dividend,\"Last Close\":Last_Close, \"Open\": Open,\n \t\t\t\"Day High\": Day_High, \"Day Low\": Day_Low,\n \t\t\t\"52 Week High\": _52_Week_High, \"52 Week Low\": _52_Week_Low, \"Dividend Yield\": Dividend_yield})\n\n# save to StockChart\nDF.to_csv('StockChart.csv', header = True)\n\nstart = time.perf_counter()\nprint('\\nExecution time:',str(round(time.perf_counter() - start , 4))+'s') #execution time is 0 second\n" }, { "alpha_fraction": 0.5442337989807129, "alphanum_fraction": 0.5570695400238037, "avg_line_length": 38.17460250854492, "blob_id": "2297d7a6a8ce12167a0db6d822c1dd89d0582d46", "content_id": "63467c94929207abb6420f50bb3b35499c51edf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5066, "license_type": "no_license", "max_line_length": 144, "num_lines": 126, "path": "/scrapy/scrapy_group_2/scrapy_group_2/spiders/scrapy_pr.py", "repo_name": "AlbertoDelLop/NextEra-Energy---WSMS-project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nimport scrapy\r\nimport time\r\n\r\n\r\nstart = time.perf_counter()\r\n\r\n\r\n\r\n\r\n\r\nclass Overview(scrapy.Item):\r\n CompanyOverview = scrapy.Field()\r\n\r\n#creating spider named crawler and telling it to go to the following webpage\r\nclass OverviewSpider(scrapy.Spider):\r\n name = 'crawler'\r\n start_urls = ['http://www.investor.nexteraenergy.com/']\r\n\r\n#scraping the text from the assigned xpath\r\n def parse(self, response):\r\n p = Overview()\r\n p['CompanyOverview'] = response.xpath('//*[@id=\"contentwrapperinner\"]/div/div[1]/p/span/text()').getall()\r\n\r\n yield p\r\n\r\n\r\nclass StockTable(scrapy.Item):\r\n Stock_parameter = scrapy.Field()\r\n Value = scrapy.Field()\r\n\r\n#creating spider named stock and telling it to go to the following url\r\nclass StockSpider(scrapy.Spider):\r\n name = 'stock'\r\n start_urls = 
['http://ir.tools.investis.com/Clients/(S(1odes4dttvfhicymklu3tcp3))/us/nextera_energy_inc/SM8/Default.aspx?culture=en-US']\r\n \r\n def parse(self, response):\r\n \r\n#the spider will extract information from the following xpath\r\n col1_xpath = '//div[re:test(@class, \"box share-information\")]//div[re:test(@class, \"col-one\")]//text()'\r\n selection = response.xpath(col1_xpath)\r\n col1=[] #creating an empty vector\r\n \r\n for s in selection: #removing all empty spaces from the scraped text\r\n if ('\\n' or '\\r') in s.getall()[0]:\r\n continue\r\n col1.append(s.getall()[0]) #appends to the empty vector every extracted element from the given xpath\r\n\r\n #the vector contains every text element separately, however we need for some elements to be together such as money symbol and value\r\n #let's fix the first vector\r\n\r\n s=0\r\n stockp=[]\r\n while (s<len(col1)):\r\n \r\n if (col1[s+1]==' / '): #appending the elements if there's '/'\r\n stockp.append([col1[s]+col1[s+1]+col1[s+2]])\r\n s = s+3\r\n else:\r\n stockp.append([col1[s]]) #if no '/' keep iterating\r\n s = s+1\r\n \r\n if s+1 == len(col1): #stop the while loop once the correct length is reached\r\n stockp.append([col1[s]])\r\n break\r\n\r\n# the spider will extract information from the following xpath\r\n \r\n col2_xpath = '//div[re:test(@class, \"box share-information\")]//div[re:test(@class, \"col-two\")]//text()'\r\n selection = response.xpath(col2_xpath)\r\n col2=[] #creating an empty vector\r\n \r\n for s in selection: #appending the scraped text without empty spaces\r\n if ('\\n' or '\\r') in s.getall()[0]:\r\n continue\r\n col2.append(s.getall()[0])\r\n \r\n#fixing the second vector\r\n\r\n s=0\r\n value=[] #empty vector that will contain the fixed data\r\n while (s<len(col2)):\r\n \r\n if (col2[s+1]==' / '):\r\n value.append([col2[s]+col2[s+1]+col2[s+2]]) #appending the elements if there's '/' and then moving to the next index\r\n s = s+3\r\n elif (col2[s]=='$'): #appending the elements if 
there's '$' and then moving to the next index\r\n value.append([col2[s]+col2[s+1]])\r\n s = s+2\r\n else: #keep iterating if none of the symbols are found\r\n value.append([col2[s]])\r\n s = s+1\r\n \r\n if s+1 >= len(col2): #stop while loop at reached length\r\n value.append([col2[s]])\r\n break\r\n \r\n\r\n #creating a table with two columns from the data\r\n d = StockTable()\r\n \r\n for data in range(14):\r\n d['Stock_parameter']=stockp[data]\r\n d['Value']=value[data]\r\n yield d\r\n\r\n\r\nprint('\\nExecution time:',str(round(time.perf_counter() - start , 4))+'s')\r\n\r\n#after running the first spider, i saved the output with overview.csv\r\n\r\n#the code below serves to read the saved file in order to check the most common words\r\n#uncomment it if this needs to be checked and eneter the path to the saved output\r\n\r\n\r\n#text = open('C:/Users/Dell/Desktop/pythonProject/scrapy_group_2/scrapy_group_2/overview.csv').read()\r\n#words = text.split() #spliting every word as separate elements\r\n#word_count = {}\r\n#for word in words:\r\n# count = word_count.get(word, 0) #counting the number of times the word appears\r\n# # the number is either the number of times we encountered the word, or 0 if we haven’t seen it yet\r\n# count += 1\r\n# word_count[word] = count #store the updated count into the dictionary\r\n# word_count_list = sorted(word_count, key=word_count.get, reverse=True) #sort the input and return a list\r\n# for word in word_count_list[:20]:\r\n# print(word, word_count[word]0)\r\n\r\n" }, { "alpha_fraction": 0.6723589897155762, "alphanum_fraction": 0.6795075535774231, "avg_line_length": 28.611764907836914, "blob_id": "7a99844d35af15c17416cd451deca29c4210c684", "content_id": "1293e515dc9edea2686841963f6466a9424088f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2518, "license_type": "no_license", "max_line_length": 127, "num_lines": 85, "path": "/selenium/Selenium.py", "repo_name": 
"AlbertoDelLop/NextEra-Energy---WSMS-project", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nimport time\nimport pandas as pd\n\nstart = time.perf_counter()\n\ngecko_path = '/usr/local/bin/geckodriver'\nurl = 'http://www.investor.nexteraenergy.com/'\n\noptions = webdriver.firefox.options.Options()\noptions.headless = False\n\ndriver = webdriver.Firefox(options = options, executable_path = gecko_path)\n\n# Actual program:\ndriver.get(url)\ntime.sleep(2)\n\n\n#Extract the information from overview site\nintro = driver.find_element_by_xpath('/html/body/form/div[3]/div[5]/div/div/div/div[1]').text\nintro = intro.split(sep='\\n') #divides the elements in a vector\n\nprint(\"************************************\")\n\nd = pd.DataFrame({'Title':[intro[0]],'Text':[intro[2]]}) #Adds the title and the main text in a dataframe from the vector intro\nprint(d)\nd.to_csv('overview.csv') #Saves it in a .csv file\n\nprint(\"************************************\")\ntime.sleep(2)\n\n\n#click and change to the Stock Information section\nstock = driver.find_element_by_xpath('//*[@id=\"secondnav-nav-stock-information-level1\"]')\nstock.click()\n\ntime.sleep(2)\n\n\n# It's necessary to switch to the iframe because the next information we want to extract is in there\ndriver.switch_to.frame(driver.find_element_by_id(\"ExternalWebContentExternalIFrame\"))\n\n#The information is hidden so it is necessary to click the button 'Show more' in order to extract it\nshowmore = driver.find_element_by_xpath('/html/body/form/div[3]/div[2]/div/a')\nshowmore.click()\n\ntime.sleep(2)\n\n\n#Etract the data from the two columns of the data\nstockp = driver.find_elements_by_xpath('//*[@id=\"upSnapshot\"]//*[@class=\"col-one\" or @class=\"col-one share_in_issue\"]')\nvalues = driver.find_elements_by_xpath('//*[@id=\"upSnapshot\"]//*[@class=\"col-two\"]')\n\nprint(\"************************************\")\n\nprint('Stock parameters\\n')\nprint(len(stockp))\n#print(stockp)\nStockp=[]\nfor 
data in stockp:\n print(data.text)\n Stockp.append(data.text) #Creates a vector with the names of the stock parameters from column 1\n\nprint('Values\\n')\nprint(len(values))\n#print(values)\nValues=[]\nfor data in values:\n print(data.text)\n Values.append(data.text) #Creates a vector with the values of the stock parameters from column 2\n\nd = pd.DataFrame({'Stock parameter':Stockp,'Value':Values}) #Creates a dataframe with the table\nprint('\\n',d)\nd.to_csv('Stock output.csv') #Saves it in a .csv file\n\nprint(\"************************************\")\n\n\ntime.sleep(5)\n\n# Close browser:\ndriver.quit()\n\nprint('\\nExecution time:',str(round(time.perf_counter() - start - 13, 4))+'s')\n\n" } ]
4
KiboNaku/dice_simulator
https://github.com/KiboNaku/dice_simulator
23f9e74df051cb2bb2b018eb131156b97d9c23b9
6af753822816ad5d0f4e226f16a8c21a7c9b51d5
4611f362e5dfce18d662a27c47500085ac22fdfa
refs/heads/master
2020-06-23T01:56:09.820875
2019-07-30T01:13:44
2019-07-30T01:13:44
198,467,367
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5991674065589905, "alphanum_fraction": 0.6037020683288574, "avg_line_length": 29.09395980834961, "blob_id": "f0a66d8a18f1371b1336e8b51a8699c92e34cd6f", "content_id": "22d2204fb61deda59080742453b3e183d6e2ef37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13452, "license_type": "no_license", "max_line_length": 120, "num_lines": 447, "path": "/dice_roller.py", "repo_name": "KiboNaku/dice_simulator", "src_encoding": "UTF-8", "text": "import pickle\nimport sys\nfrom os import path, makedirs\nfrom random import randrange as rand\n\nimport math\n\n\nclass Die:\n\n \"\"\"\"\n Class used to simulate rolling of a die.\n\n :param values: values on the die (each value represents a \"face\").\n :type values: sequence of any\n :param name: value used to identify the die. If no value is specified, name is defaulted to \"generic die\",\n meaning all unnamed dice will have the same name (NOT recommended).\n :type name: str\n \"\"\"\n\n def __init__(self, values, name=\"generic die\"):\n self._values = values\n self.name = name\n\n def roll(self):\n \"\"\"\n Simulates a single die roll.\n\n :returns: the value rolled.\n :rtype: type contained in self._values\n \"\"\"\n return self._values[rand(len(self._values))]\n\n def roll_multiple(self, times):\n \"\"\"\n Simulates a specified number of die rolls.\n\n :param times: number of times to roll the die.\n :type times: int\n\n :return: a list of the the roll results.\n :rtype: list of type contained in self._values\n \"\"\"\n rolls = []\n for _ in range(times):\n rolls.append(self.roll())\n return rolls\n\n def get_values(self):\n \"\"\"\n :return: a separate copy of the self._values.\n :rtype: list\n \"\"\"\n return list(self._values)\n\n\nclass NumericalDie (Die):\n\n \"\"\"\"\n Class used to simulate numerical die. 
Allows for mathematical operations.\n\n :param num_values: numerical values on the die.\n :type num_values: sequence of int\n :param name: defaults to \"numerical die\"\n :type name: str\n \"\"\"\n\n def __init__(self, num_values, name=\"numerical die\"):\n \"\"\"\n :raises: \"ValueError\": a value in the sequence passed is not of int type.\n \"\"\"\n for num in num_values:\n if type(num) is not int:\n raise ValueError(\"Each of the value passed to the NumericalDie must be an integer value.\")\n super().__init__(num_values, name)\n\n def roll_multiple(self, times):\n \"\"\"\n Simulates a specified number of die rolls. Overrides the super().roll_multiple(self, times) method by\n also returning additional values (mathematical calculations).\n Additional Values: sum, average, max, min.\n\n :param times: number of times to roll the die.\n :type times: int\n\n :return: a list of the the roll results, additional numerical calculations.\n :rtype: tuple of (list, tuple of (float, float, float, float))\n \"\"\"\n rolls = super().roll_multiple(times)\n return rolls, NumericalDie._additional_values(rolls)\n\n @staticmethod\n def _additional_values(rolls):\n \"\"\"\n Returns additional mathematical calculations for the rolls passed: sum, average, max, min.\n\n :param rolls: a list of the rolls.\n :type rolls: list\n\n :return: additional mathematical calculations: (sum, average, max, min).\n :rtype: tuple\n \"\"\"\n return NumericalDie._sum(rolls), NumericalDie._average(rolls), \\\n NumericalDie._max(rolls), NumericalDie._min(rolls)\n\n @staticmethod\n def _sum(rolls):\n \"\"\"\n :param rolls: list of the dice rolls.\n :type rolls: list\n\n :return: sum of the values.\n :rtype: int\n \"\"\"\n return sum(rolls)\n\n @staticmethod\n def _average(rolls):\n \"\"\"\n :param rolls: list of the dice rolls.\n :type rolls: list\n\n :return: average of the values.\n :rtype: double\n \"\"\"\n return NumericalDie._sum(rolls) / len(rolls)\n\n @staticmethod\n def _max(rolls):\n \"\"\"\n 
:param rolls: list of the dice rolls.\n :type rolls: list\n\n :return: max of the values.\n :rtype: int\n \"\"\"\n return max(rolls)\n\n @staticmethod\n def _min(rolls):\n \"\"\"\n :param rolls: list of the dice rolls.\n :type rolls: list\n\n :return: min of the values.\n :rtype: int\n \"\"\"\n return min(rolls)\n\n\nclass ClassicDie (NumericalDie):\n \"\"\"\"\n Simplified version of NumericalDie with values ranging from 1 to the specified max.\n\n :param max_val: the max values on the die. Example: enter 6 to create a die with values from 1 to 6.\n :type max_val: int\n :param name: defaults to \"classical die\"\n :type name: str\n \"\"\"\n\n def __init__(self, max_val, name=\"classical die\"):\n super().__init__(range(0, max_val + 1), name)\n\n\n# constants\n_back = \"back\"\n_user_dir = \"userdata\"\n_dice_file = \"custom_dice.pickle\"\n_file_path = path.join(_user_dir, _dice_file)\n\n# holds all of the dice\n_all_dice = [Die(ClassicDie(spots, \"D{}\".format(spots))) for spots in [4, 6, 8, 10, 12, 20]]\n\n\ndef _print_die_values(die_obj):\n \"\"\"Prints out all of the potential values of the die in a table format with \"Potential Value\" heading.\"\"\"\n print(\"Potential values:\")\n _print_table(die_obj.get_values())\n\n\ndef _print_die_summary(die_obj):\n \"\"\"\n Prints out simple summary of the die.\n Name: (name)\n Potential Values:\n (values in table format)\n \"\"\"\n print(\"Name:\\t{}\".format(die_obj.name))\n _print_die_values(die_obj)\n\n\ndef _print_table(values, columns=5, padding=4):\n \"\"\"\n Prints a list of values in table format.\n\n :param values: values to print.\n :type values: list\n :param columns: number of columns. 
Defaults to 5.\n :type columns: int\n :param padding: padding for each column (padded on the left)\n :type padding: int\n \"\"\"\n val_len = len(values)\n\n # width is padding + length of the longest value.\n i_width = padding + len(str(val_len))\n v_width = padding + max([len(str(val)) for val in values])\n\n # printing the values in rows\n for row in range(int(math.ceil(val_len/columns))):\n num_index = row + columns\n num_rolls = values[row:num_index]\n print(' '.join(\"{:{i_width}}. {:{v_width}}\\t\"\n .format(columns * row + roll_index + 1, num_rolls[roll_index], i_width=i_width, v_width=v_width)\n for roll_index in range(len(num_rolls))))\n\n\ndef _print_rolls(die_obj, rolls_list, other_info=None):\n \"\"\"\n Prints the dice rolls with additional information.\n\n :param die_obj: the die object that was rolled.\n :type die_obj: Die\n :param rolls_list: list of the rolls.\n :type rolls_list: list\n :param other_info: additional info to display (eg the mathematical calculations returned by NumericalDie).\n if not specified, nothing is printed for this.\n :type other_info: list or tuple\n \"\"\"\n\n # the main info\n print()\n _print_die_values(die_obj)\n _print_table(rolls_list)\n\n # the mathematical info\n other_labels = [\"sum\", \"avg\", \"max\", \"min\"]\n if other_info is not None:\n print(\"\\nsimple results:\\n\", \"\\n\".join(\"\\t{}:\\t{}\"\n .format(other_labels[i], other_info[i]) for i in range(len(other_info))))\n\n # the frequency at which each value was rolled\n print(\"\\nfrequency of each value:\")\n print(\"\\n\".join(\"\\t{}:\\t{}\".format(val, rolls_list.count(val)) for val in die_obj.get_values()))\n\n _print_menu_die()\n\n\n# below are the \"menu methods\" used to help users navigate and perform what they want.\n\ndef _menu_input(input_min, input_max):\n \"\"\"\n Simple method used to request a numerical input or the back command which takes them back to the previous menu.\n\n :param input_min: the minimum integer that can be accepted.\n 
:type input_min: int\n :param input_max: the maximum integer that can be accepted.\n :type input_max: int\n\n :return: the numerical input or \"back\"\n :rtype: int or str\n \"\"\"\n input_error = \"Integer Input Error:\"\n input_val = input()\n try:\n input_int = int(input_val)\n except ValueError:\n if input_val.lower() == _back:\n return _back\n else:\n if input_min <= input_int <= input_max:\n return input_int\n print(input_error, \"invalid input.\")\n return _menu_input(input_min, input_max)\n\n\ndef _print_menu(*options, back_string=\"previous menu\"):\n \"\"\"\n Simple function used to print a basic menu.\n\n :param options: tuple arguments that contain the menu options.\n :type options: tuple of (str, any) where str is what will be displayed to the user as the option and any is what\n will be returned if the option is chosen.\n :param back_string: option printed for the \"back command\". If nothing is specified, \"previous menu\" will be used.\n :type back_string: str\n\n :return: the option chosen.\n :rtype: any (what is contained in the option tuple)\n \"\"\"\n option_format = \"{:<8}{}\"\n options_size = len(options)\n\n # prints heading: \"INPUT OPTION\"\n print(option_format.format(\"INPUT\", \"OPTION\") + \"\\n\")\n\n # prints the options for the menu\n print(\"\\n\".join(option_format.format(i + 1, options[i][0]) for i in range(options_size)))\n print(option_format.format(_back, back_string))\n print(\"\\n\")\n\n menu_option = _menu_input(1, options_size)\n return None if menu_option == _back else options[menu_option - 1][1]\n\n\ndef _print_menu_main():\n \"\"\"\"\n The first menu: choose die or exit.\n \"\"\"\n print(\"\\nDice Roll Simulator\")\n main_menu_input = _print_menu((\"choose die\", _print_menu_die), back_string=\"exit app\")\n\n if main_menu_input is None:\n return\n\n return main_menu_input()\n\n\ndef _print_menu_die():\n \"\"\"\n Second menu: choose die or create custom die.\n \"\"\"\n print(\"\\n\")\n print(\"Please choose of the 
following dice:\")\n selected_die = _print_menu(*[(dice.name, dice) for dice in _all_dice], (\"create custom die\", create_custom_die))\n if selected_die is None:\n return _print_menu_main()\n elif isinstance(selected_die, Die):\n return _print_menu_roll(selected_die)\n else:\n create_custom_die()\n return _print_menu_die()\n\n\ndef create_custom_die():\n \"\"\"simple menu used to create a custom die and write to file.\"\"\"\n custom_die = custom_numerical_die()\n _print_die_summary(custom_die)\n while True:\n verify_die = input(\"\\n\\ncontinue with these values? y or n\\n\").lower()\n if verify_die == \"n\":\n return create_custom_die()\n elif verify_die == \"y\":\n break\n print(\"Please enter y or n\")\n with open(_file_path, \"ab\") as dice_file:\n pickle.dump(custom_die, dice_file)\n _all_dice.append(custom_die)\n\n\ndef custom_numerical_die():\n \"\"\"\n ask user to input values and create the custom die.\n\n :return: the custom die the user specified.\n :rtype: NumericalDie\n \"\"\"\n print(\"Please enter a list of numerical values separated by commas (spaces ok).\")\n print(\"Example 1: '1,2,3,4,5,6,7' --> [1,2,3,4,5,6,7]\")\n print(\"Example 2: '1, 2 , 3, 4, 5, 6, 7' --> [1,2,3,4,5,6,7]\\n\")\n values_input = input().replace(\" \", \"\").split(',')\n num_values = []\n invalid_values = []\n for val in values_input:\n try:\n num_values.append(int(val))\n except ValueError:\n invalid_values.append(val)\n if len(num_values) > 0:\n print(\"Values added to die: \")\n _print_table(num_values)\n else:\n print(\"No values was added to die.\\nTry again\")\n return custom_numerical_die()\n if len(invalid_values) > 0:\n print(\"Invalid values that failed to add: \")\n _print_table(invalid_values)\n else:\n print(\"All values was added properly\")\n name = input(\"Please enter a name for the die:\\n\")\n return NumericalDie(num_values, name)\n\n\ndef _print_menu_roll(current_die):\n \"\"\"\n User specifies the number of rolls to roll. 
If successful, the rolls are printed and they are brought back to\n the _print_menu_die().\n\n :param current_die: the die used to roll.\n :type current_die: Die\n \"\"\"\n _print_die_values(current_die)\n\n while True:\n print(\"how many times? ({} to {} times)\\n\"\n \"enter {} to go back\".format(1, sys.maxsize, _back))\n\n num_input = _menu_input(1, sys.maxsize)\n try:\n num_rolls = int(num_input)\n break\n except TypeError:\n if num_input == _back:\n return _print_menu_die()\n print(\"Invalid value.\")\n\n rolls = current_die.roll_multiple(num_rolls)\n if rolls is None:\n print(\"sorry something unexpected happened with the rolls.\\n\"\n \"please try again\\n\")\n return _print_menu_roll(current_die)\n return _print_rolls(current_die, rolls[0], rolls[1])\n\n\n# initialization methods\n\ndef run():\n \"\"\"used to run the program from start to finish\"\"\"\n _create_user_dir_file()\n _load_saved_dice()\n _print_menu_main()\n print(\"closing app...\")\n\n\ndef _create_user_dir_file():\n \"\"\"creates the directory for storing the custom dice if doesn't already exist.\"\"\"\n if not path.isfile(_file_path):\n try:\n open(_file_path, 'r')\n except IOError:\n open(_file_path, 'w')\n\n if not path.exists(_user_dir):\n makedirs(_user_dir)\n\n\ndef _load_saved_dice():\n \"\"\"loads the custom die that have been saved and appends them to all_dice\"\"\"\n with open(_file_path, \"rb\") as dice_file:\n while True:\n try:\n _all_dice.append(pickle.load(dice_file))\n except EOFError:\n break\n\n\nif __name__ == \"__main__\":\n run()\n" } ]
1
JohnWinkeler/LaxLinkProto
https://github.com/JohnWinkeler/LaxLinkProto
3fee40af08614cf7457ccd1bea9bc3088cef71db
3a34c0e6c907b6b6bfb9a253216640f0ac89b987
47568272f6d4c0d7420ddd8a5631484c01de7c21
refs/heads/master
2020-03-31T06:06:20.853260
2018-10-07T17:39:53
2018-10-07T17:39:53
151,968,432
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 10.333333015441895, "blob_id": "55a2ce8db54fad8119b1391dc0e5486b0346bed9", "content_id": "6d817b12d236a8cb06ee300f69855e6a78494e7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/LaxLinkProto/LaxLinkProto/__init__.py", "repo_name": "JohnWinkeler/LaxLinkProto", "src_encoding": "UTF-8", "text": "\"\"\"\nPackage for LaxLinkProto.\n\"\"\"\n" } ]
1
eekhait/1008_Project
https://github.com/eekhait/1008_Project
3a4cc3a3dcec866cbb088d493d7e71cb8a235ced
55b161d33e2ddd41889a31666dffcf427ac421f6
d336d75e3e4443ddce3d909c31dc0f40b2415bf6
refs/heads/master
2022-04-18T01:39:53.902233
2020-04-03T12:45:31
2020-04-03T12:45:31
244,609,796
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5688451528549194, "alphanum_fraction": 0.5842322111129761, "avg_line_length": 42.77777862548828, "blob_id": "6448e485c4fcdbbaf2d1225e1d358defac085294", "content_id": "2621f67eeb107180d66ed8257df35c109589baf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6304, "license_type": "no_license", "max_line_length": 137, "num_lines": 144, "path": "/lrt_adj.py", "repo_name": "eekhait/1008_Project", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport csv\nimport math\n\nimport main_graph as m_graph\n\nlrtData = pd.read_csv('Punggol_LRT_Routing.csv', sep=',', header=None)\n\n\ndef round_up(n, decimals=0):\n # this is to round up even is 0.1 above\n multiplier = 10 ** decimals\n return math.ceil(n * multiplier) / multiplier\n\n\ndef bfs_route(graph, start, end):\n # maintain a queue of paths\n queue = []\n # push the first path into the queue\n queue.append([start])\n while queue:\n # get the first path from the queue\n path = queue.pop(0)\n # get the last node from the path\n node = path[-1]\n # path found\n if node == end:\n return path\n # enumerate all adjacent nodes, construct a new path and push it into the queue\n for adjacent in graph.get(node, []):\n new_path = list(path)\n new_path.append(adjacent)\n queue.append(new_path)\n\n\ndef cal_distance(adj_list_val, result):\n distance = 0\n # e.g. PE1, PE2, PE3\n for i in range(len(result) - 1): # number of time to run\n for y in range(len(adj_list_val[result[i]])): # e.g. adj_list_val['PE1'] return number of list value\n if result[i + 1] in adj_list_val[result[i]][\n y]: # e.g. check if PE2 is in adj_list_val['PE1'][0] or adj_list_val['PE1'][1] LISTING\n distance += int(adj_list_val[result[i]][y][result[\n i + 1]]) # e.g. 
adj_list_val['PE1'][0][['PE2'] will return the distance weightage\n return distance\n\n\ndef take_lrt(start_node, end_node):\n start_node = str(start_node) # Store the start name\n end_node = str(end_node) # Store the end name\n walk_start_node = [] # Store the array from the TAKE_WALK Function FROM START POINT TO LRT\n walk_end_node = [] # Store the array from the TAKE_WALK Function FROM LRT TO END POINT\n lrt_name = [] # Store the LRT NAME\n lrt_code = [] # Store the LRT CODE\n adj_list = {} # Store the Adj list\n adj_list_val = {} # Store the Adj list with value\n with open('Punggol_LRT_Routing.csv', 'r') as csv_file:\n reader = csv.reader(csv_file)\n first = True\n for row in reader:\n if (first == True):\n for i in range(len(row)):\n first = False\n else:\n # for i in range(0, len(row)):\n # # key_value = {row[0]: row[2].split()} # This is to create the Adj\n lrt_name.append(row[1]) # Append the LRT NAME into the lrt_name\n lrt_code.append(row[0]) # Append the LRT CODE into the lrt_code\n keys = row[2].split(\", \")\n values = row[3].split(\", \")\n add_value = []\n for i in range(len(keys)):\n add_value.append({keys[i]: values[i]}) # Create a list of dict e.g. 
'PE1' : 1010\n adj_list_val[row[0]] = add_value # Append the linked code into the list\n adj_list[row[0]] = row[2].split(\", \") # Append the linked code into the list\n\n # Check if start node is mrt or blocks\n if start_node in lrt_name:\n # Convert the LRT NAME INTO LRT CODE\n for i in range(len(adj_list)):\n if lrt_name[i] == start_node:\n start_node = lrt_code[i] # Convert start_node Into LRT CODE\n break\n else:\n temp_string_start_node = start_node # Store the postal code\n start_node = m_graph.get_nearest_lrt(start_node) # To Store the nearest LRT station with the postal code\n walk_start_node = m_graph.take_walk(temp_string_start_node, start_node) # Store the walking node from Start of Postal Code to LRT\n\n if end_node in lrt_name:\n for i in range(len(adj_list)):\n if lrt_name[i] == end_node:\n end_node = lrt_code[i] # Convert end_noce Into LRT CODE\n break\n else:\n temp_string_end_node = end_node # Store the postal code\n end_node = m_graph.get_nearest_lrt(end_node) # To Store the nearest LRT station with the postal code\n walk_end_node = m_graph.take_walk(end_node, temp_string_end_node) # Store the walking node from LRT To the End of Postal code\n\n\n\n # if start and end are connected\n if m_graph.is_adjacent_lrt(adj_list, start_node, end_node):\n result = [start_node, end_node]\n\n # average SG MRT 45km/h == 12.5m/s\n # Calculate the timing Second in minutes,\n distance = cal_distance(adj_list_val, result)\n timing = round_up((distance / 12.5) / 60)\n\n # Check if there any array\n if len(walk_start_node) != 0:\n del result[0] # To delete the first array as is duplicated\n result = walk_start_node[1] + result # Combine the Walking array with result (LRT)\n timing = walk_start_node[0] + timing # Combine the Time required\n if len(walk_end_node) != 0:\n del result[-1] # To delete the last array as is duplicated\n result = result + walk_end_node[1] # Combine the result (LRT) with Walking array\n return [int(timing), result]\n else:\n result = 
(bfs_route(adj_list, start_node, end_node))\n\n # average SG MRT 45km/h == 12.5m/s\n # Calculate the timing Second in minutes,\n distance = cal_distance(adj_list_val, result)\n timing = round_up((distance / 12.5) / 60)\n # average timing stop at each mrt is 30second == 0.5\n mrt_stopping = 0.5 * int(len(result) - 1)\n # Calculate the timing Second in minutes,\n timing = round_up((distance / 12.5) / 60) + mrt_stopping\n # Add another 5 min flat waiting for the train to arrival\n timing = timing + 5\n\n if len(walk_start_node) != 0:\n del result[0] # To delete the first array as is duplicated\n result = walk_start_node[1] + result # Combine the Walking array with result (LRT)\n timing = walk_start_node[0] + timing # Combine the Time required\n if len(walk_end_node) != 0:\n del result[-1] # To delete the last array as is duplicated\n result = result + walk_end_node[1] # Combine the result (LRT) with Walking array\n\n # print([int(timing), result])\n return [int(timing), result]\n\n# print(\"LRT ROUTE: \", take_lrt(\"828858\",\"65009\"))\n" }, { "alpha_fraction": 0.535467803478241, "alphanum_fraction": 0.5473470091819763, "avg_line_length": 33.5234375, "blob_id": "734a621e48078d64bb4301cd28ecb71513e7e2ce", "content_id": "d96a7c5f2e19546ca3844f59f17e0587283b975e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8839, "license_type": "no_license", "max_line_length": 107, "num_lines": 256, "path": "/main_graph.py", "repo_name": "eekhait/1008_Project", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n\n# THIS PART CONCERNS WITH:\n# THE CSV FILES AND EXTRACTING DATA FROM THEM\n# --------------------------------- #\n# Indexes of the Complete_Punggol_Graph.csv file:\n# Columns: 0-Code, 1-Name, 2-Type, 3-Latitude, 4-Longitude, 5-Buses, 6-ConnectedWalks, 7-ConnectedDistances\n# Columns: 8-1197 to refer to nodes (+7 difference from corresponding node in rows)\n# Rows: 1-1190 to refer to nodes\n# 
--------------------------------- #\n# How to use pandas dataframe (treat it as a 2D array/excel file):\n# mainGraph.at[row,column]\n# --------------------------------- #\nmainGraph = pd.read_csv('Complete_Punggol_Graph.csv', sep=',', header=None)\nmainGraph[0] = mainGraph[0].apply(str) # converts column to be string-only (rather than int+str)\nstartIndex = 1\nendIndex = len(mainGraph.index)\n# --------------------------------- #\n\n\ndef get_distance_to_from(point_a, point_b):\n index_a = get_node_index(point_a)+7\n index_b = get_node_index(point_b)\n return int(mainGraph.at[index_b, index_a])\n\n\ndef get_long_lat(target):\n index = get_node_index(target)\n return [round(float(mainGraph.at[index, 3]), 4), round(float(mainGraph.at[index, 4]), 4)]\n\n\ndef get_lat_long(target):\n index = get_node_index(target)\n return [round(float(mainGraph.at[index, 4]), 4), round(float(mainGraph.at[index, 3]), 4)]\n\n\ndef get_node_index(target):\n # Start location codes are from index 1 to 1190\n # print(type(target),target)\n target=str(target)\n low = startIndex\n high = endIndex\n mid = (startIndex+endIndex)//2\n while target != str(mainGraph.at[mid, 0]):\n if target < str(mainGraph.at[mid, 0]): # if target is in smaller half\n high = mid\n if mid == (low+high)//2:\n return -1\n mid = (low+high)//2\n elif target > str(mainGraph.at[mid, 0]): # if target is in larger half\n low = mid\n if mid == (low+high)//2:\n return -1\n mid = (low+high)//2\n return mid\n\n\ndef get_nearest_bus_stops(target, distance):\n pass\n\n\ndef get_nearest_lrt(target):\n if len(target) == 3:\n return target\n else:\n index = get_node_index(target)\n node = \"\"\n distance = 3000\n for i in range(endIndex+7-14, endIndex+7): # start and end of LRT columns in csv\n if int(mainGraph.at[index, i]) < distance:\n node = mainGraph.at[0, i]\n distance = int(mainGraph.at[index, i])\n return str(node)\n\n\ndef get_adjacent_walks(start_node):\n start_index = get_node_index(start_node)\n connected_nodes = 
mainGraph.at[start_index, 6].split(', ')\n return connected_nodes\n\n\ndef is_adjacent_walk(start_node, end_node):\n start_index = get_node_index(start_node)\n connected_nodes = mainGraph.at[start_index, 6].split(', ')\n if end_node in connected_nodes:\n return True\n else:\n return False\n\n\ndef is_adjacent_bus(start_node, end_node):\n pass\n\n\ndef is_adjacent_lrt(adj_list, start_node, end_node):\n # Check If Are Both LRT are directly connected!\n for i in adj_list:\n if start_node == i: # To check if able to found the KEY\n if end_node in adj_list[i]: # To check if both Start_Node & End_Node are directly connected\n return 1 # If Yes, return 1\n else:\n return 0 # If No, return 0\n\n\n# ----------------------------------\n# THIS PART CONCERNS WITH ALGORITHMS:\n# ----------------------------------\nclass AStarStack:\n def __init__(self):\n self.top = -1\n self.data = []\n self.total_distance = 0\n self.distance_to_end = 0\n\n def show_stack(self):\n print(\"start\")\n for i in self.data:\n print(i)\n print(\"end\")\n\n def push(self, node, val):\n self.top += 1\n self.data.append(node)\n if self.top > 0: # if there is at least two elements...\n self.total_distance += get_distance_to_from(self.data[self.top], self.data[self.top-1])\n self.distance_to_end = val\n\n # def pop(self):\n # if self.top > -1:\n # node = self.data[self.top]\n # if self.top > 0:\n # self.total_distance -= get_distance_to_from(self.data[self.top], self.data[self.top-1])\n # del self.data[self.top]\n # self.top -= 1\n # return node\n\n def is_empty(self):\n if self.top < 0:\n return True\n else:\n return False\n\n def peek(self):\n if not self.is_empty():\n return self.data[self.top]\n\n def peek_distance(self):\n if not self.is_empty():\n return self.total_distance\n\n def copy_from(self, a_stack):\n for x in a_stack.data:\n self.push(x, a_stack.distance_to_end)\n\n\nclass AStarQueue:\n def __init__(self):\n self.top = -1\n self.data = []\n self.distances_to_target = []\n\n def 
enqueue(self, node):\n temp = node.distance_to_end\n front = 1\n back = self.top\n mid = (front+back)//2\n if self.top > -1:\n # print(str(temp) + \" \" + str(self.distances_to_target[0]))\n if temp < self.distances_to_target[0]: # add to the front\n self.data.insert(0, node)\n self.distances_to_target.insert(0, temp)\n elif temp > self.distances_to_target[self.top]: # add to the back\n self.data.append(node)\n self.distances_to_target.append(temp)\n else:\n while temp != self.distances_to_target[mid] and front != mid:\n if temp < self.distances_to_target[mid]:\n back = mid\n mid = (front + back) // 2\n elif temp > self.distances_to_target[mid]:\n front = mid\n mid = (front + back) // 2\n # if temp == self.distances_to_target[mid]\n self.data.insert(mid, node)\n self.distances_to_target.insert(mid, temp)\n elif self.top < 0:\n self.data.append(node)\n self.distances_to_target.append(temp)\n self.top += 1\n # print(\"[\", end='')\n # for i in self.data:\n # print(str(i.distance_to_end) + \", \", end='')\n # print(\"]\")\n # print(str(self.distances_to_target))\n\n def dequeue(self):\n if self.top > -1:\n temp = self.data[0]\n del self.data[0]\n del self.distances_to_target[0]\n self.top -= 1\n return temp\n\n def is_empty(self):\n if self.top < 0:\n return True\n else:\n return False\n\n def peek(self):\n if not self.is_empty():\n return self.data[0]\n\n\ndef take_walk(start_node, end_node):\n start_node = str(start_node)\n end_node = str(end_node)\n # if start and end are connected\n if is_adjacent_walk(start_node, end_node):\n return [1, [start_node, end_node]]\n else: # this part begins like the word ladder\n # initialization of queue and first stack (of just start node)\n # also initialization of visited nodes\n star_queue = AStarQueue()\n star_stack = AStarStack()\n star_stack.push(start_node, get_distance_to_from(start_node, end_node))\n star_queue.enqueue(star_stack)\n visited_nodes = {}\n counter = 0\n # while end node is not reached\n while 
star_queue.data[0].peek() != end_node:\n # dequeue the first stack\n temp_stack = star_queue.dequeue()\n mid_node = temp_stack.peek()\n # add all adjacent nodes to mid_node in separate stacks\n # move stacks to queue\n for i in get_adjacent_walks(mid_node):\n # create new stack with each adjacent node\n temper_stack = AStarStack()\n temper_stack.copy_from(temp_stack)\n temper_stack.push(str(i), get_distance_to_from(str(i), end_node))\n # temper_stack.show_stack()\n # if node is visited before\n if i in visited_nodes:\n # only enqueue if new path/stack is shorter than old path\n if temper_stack.total_distance < visited_nodes[i]:\n star_queue.enqueue(temper_stack)\n visited_nodes[i] = temper_stack.total_distance\n # if node is new, enqueue normally\n elif i not in visited_nodes:\n # enqueue the stack\n star_queue.enqueue(temper_stack)\n visited_nodes[i] = temper_stack.total_distance\n # return assumes a walking speed of 5km/h. first element is time taken in minutes\n return [round(star_queue.data[0].total_distance/5000*60), star_queue.data[0].data]\n\n" }, { "alpha_fraction": 0.7825203537940979, "alphanum_fraction": 0.7876016497612, "avg_line_length": 32.89655303955078, "blob_id": "a9279f2eab0fb5749fedddfab8ee9bbd465939c5", "content_id": "465f2746e9c27b36e44542024b69717a2b977f55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 984, "license_type": "no_license", "max_line_length": 170, "num_lines": 29, "path": "/README.md", "repo_name": "eekhait/1008_Project", "src_encoding": "UTF-8", "text": "# 1008_Project\nPunggol Pathfinder\n\n\n*Introduction*\nAn application that lets users find a path (from a start and end node input) within Punggol. They will be able to select the main method of travel (Bus, LRT, or Walking).\n\nThe output includes strings printed by the console as well as a graph that shows the path the user has to take. 
\n\n\n*Installation*\nThe application has been developed with the following:\nPyCharm - https://www.jetbrains.com/pycharm/download/#section=windows\nGit - https://git-scm.com/download \nAnaconda - https://www.anaconda.com/distribution/#download-section\n\nThe following line of pip install is also required:\npip install folium PyQt5 PyQtWebEngine\n\n\n*How to Use*\nRun map.py and follow the instructions displayed. \nIn order, the console will ask for:\nStart location\nEnd location\nLocation confirmation\nMethod of travel\n\nThereafter, an estimated time of travel is shown together with a list of nodes in the path, and the method of connection between the nodes. \n" }, { "alpha_fraction": 0.5936199426651001, "alphanum_fraction": 0.6393897533416748, "avg_line_length": 40.20000076293945, "blob_id": "5ed6b9f0026b640978f9f27e464ff7dee2149f2a", "content_id": "000a43277cfd3b899256979bb65da46950dd452c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1442, "license_type": "no_license", "max_line_length": 107, "num_lines": 35, "path": "/oldfiles/DistanceTabulator.py", "repo_name": "eekhait/1008_Project", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom geopy.distance import geodesic\n\n# Take note:\n# Latitude is vertical (How 'north-south' a place is)\n# Longitude is horizontal (How 'east-west' a place is)\ndistance_table = pd.read_csv(\"Punggol Coordinates.csv\", header=None)\n\n# to mimic cell selection in pandas dataframe, for iteration\n# iloc[horizontal, vertical]\n# iloc[1123, 1119] <--- most bottom right value\n# 0,0 0,1 0,2 0,3 0,4...\n# postal_code Street Latitude Longitude 820136...\n\n# FOR EVERY ROW... (Skips header row)\nfor i in range(1, 1191): # 1191\n # FOR EVERY COLUMN... 
(Skips first 4 already-populated columns)\n nearbyNodes = []\n nearbyDistances = []\n for j in range(7, 1197):\n # Assign distance between nodes in meters\n distance = 1000 * round(geodesic((distance_table.iloc[i, 2], distance_table.iloc[i, 3]),\n (distance_table.iloc[j-6, 2], distance_table.iloc[j-6, 3])).km, 3)\n distance_table.iloc[i, j] = distance\n if 0 < distance < 180:\n nearbyNodes.append(str(distance_table.iloc[j-6, 0]))\n nearbyDistances.append(int(distance))\n distance_table.iloc[i, 5] = str(nearbyNodes)\n distance_table.iloc[i, 6] = str(nearbyDistances)\n\n # Prints progress of population per row\n print(round(i / 1191 * 100, 2))\n\n# Create new csv\ndistance_table.to_csv('Complete_Punggol_Graph.csv', header=False, index=False)\n" }, { "alpha_fraction": 0.501806378364563, "alphanum_fraction": 0.5308887362480164, "avg_line_length": 36.4054069519043, "blob_id": "a93d902b55395015fdedbf4179e38abe46e389aa", "content_id": "f4e62748caa9e7020e1db37b271a7b89aaa78b5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5536, "license_type": "no_license", "max_line_length": 223, "num_lines": 148, "path": "/oldfiles/LRT_Algorithm.py", "repo_name": "eekhait/1008_Project", "src_encoding": "UTF-8", "text": "from collections import deque\nimport csv\n\n\nclass Graph:\n def __init__(self, lists):\n self.lists = lists\n\n def get_neighbours(self, i):\n return self.lists[i]\n\n import csv\n cove_lrt, meridian_lrt, coraledge_lrt, riviera_lrt, kadaloor_lrt, oasis_lrt, damai_lrt, punggol_lrt, samkee_lrt, tecklee_lrt, punggolpoint_lrt, samudera_lrt, nibong_lrt, sumang_lrt, sooteck_lrt = ([] for i in range(15))\n\n with open('LRT.csv') as file:\n lrt = list(csv.reader(file))\n cove_lrt.append(lrt[1])\n meridian_lrt.append(lrt[2])\n coraledge_lrt.append(lrt[3])\n riviera_lrt.append(lrt[4])\n kadaloor_lrt.append(lrt[5])\n oasis_lrt.append(lrt[6])\n damai_lrt.append(lrt[7])\n punggol_lrt.append(lrt[8])\n 
samkee_lrt.append(lrt[9])\n tecklee_lrt.append(lrt[10])\n punggolpoint_lrt.append(lrt[11])\n samudera_lrt.append(lrt[12])\n nibong_lrt.append(lrt[13])\n sumang_lrt.append(lrt[14])\n sooteck_lrt.append(lrt[15])\n\n #heuristic function for all nodes\n def heuristic(self, n):\n Heuristic = {\n 'Punggol_MRT': 1,\n 'SamKee_LRT': 1,\n 'SooTeck_LRT': 1,\n 'PunggolPoint_LRT': 1,\n 'Samudera_LRT': 1,\n 'Sumang_LRT': 1,\n 'Nibong_LRT': 1,\n 'Damai_LRT': 1,\n 'Kadaloor_LRT': 1,\n 'Riviera_LRT': 1,\n 'CoralEdge_LRT': 1,\n 'Meridian_LRT': 1,\n 'Oasis_LRT': 1,\n 'Cove_LRT': 1,\n }\n return Heuristic[n]\n\n def astar_algorithm(self, start_node, stop_node):\n # open_list is a list of nodes which have been visited, but who's neighbors\n # haven't all been inspected, starts off with the start node\n # closed_list is a list of nodes which have been visited\n # and who's neighbors have been inspected\n open_list = set([start_node])\n closed_list = set([])\n\n # cdist contains current distances from start_node to all other nodes\n # the default value (if it's not found in the map) is +infinity\n cdist = {}\n\n cdist[start_node] = 0\n\n # parents contains an adjacency map of all nodes\n parents = {}\n parents[start_node] = start_node\n\n while len(open_list) > 0:\n n = None\n\n # find a node with the lowest value of f() - evaluation function\n for i in open_list:\n if n == None or cdist[i] + self.heuristic(i) < cdist[n] + self.heuristic(n):\n n = i;\n\n if n == None:\n print('Path does not exist!')\n return None\n\n # if the current node is the stop_node\n # then we begin reconstructing the path from it to the start_node\n if n == stop_node:\n reconst_path = []\n\n while parents[n] != n:\n reconst_path.append(n)\n n = parents[n]\n\n reconst_path.append(start_node)\n\n reconst_path.reverse()\n print('Path found: {}'.format(reconst_path))\n return reconst_path\n\n # for all neighbors of the current node do\n for (m, weight) in self.get_neighbours(n):\n # if the current node isn't in both 
open_list and closed_list\n # add it to open_list and note n as it's parent\n if m not in open_list and m not in closed_list:\n open_list.add(m)\n parents[m] = n\n cdist[m] = cdist[n] + weight\n\n # otherwise, check if it's quicker to first visit n, then m\n # and if it is, update parent data and g data\n # and if the node was in the closed_list, move it to open_list\n else:\n if cdist[m] > cdist[n] + weight:\n cdist[m] = cdist[n] + weight\n parents[m] = n\n\n if m in closed_list:\n closed_list.remove(m)\n open_list.add(m)\n\n # remove n from the open_list, and add it to closed_list\n # because all of his neighbors were inspected\n open_list.remove(n)\n closed_list.add(n)\n\n print('Path does not exist!')\n return None\n\nlists = {\n # LRT on Punggol East\n 'Punggol_MRT': [('SamKee_LRT', 0.589), ('SooTeck_LRT', 0.605), ('Damai_LRT', 0.690), ('Cove_LRT', 0.763)],\n 'SamKee_LRT': [('Punggol_MRT', 0.589), ('PunggolPoint_LRT', 0.815)],\n 'PunggolPoint_LRT': [('SamKee_LRT', 0.815), ('Samudera_LRT', 0.513)],\n 'Samudera_LRT': [('PunggolPoint_LRT', 0.513), ('Nibong_LRT', 0.493)],\n 'Nibong_LRT': [('Samudera_LRT', 0.493), ('Sumang_LRT', 0.429)],\n 'Sumang_LRT': [('Nibong_LRT', 0.429), ('SooTeck_LRT', 0.478)],\n 'SooTeck_LRT': [('Sumang_LRT', 0.478), ('Punggol_MRT', 0.605)],\n\n # LRT on Punggol West\n 'Damai_LRT': [('Punggol_MRT', 0.690), ('Oasis_LRT', 0.563)],\n 'Oasis_LRT': [('Damai_LRT', 0.563), ('Kadaloor_LRT', 0.515)],\n 'Kadaloor_LRT': [('Oasis_LRT', 0.515), ('Riviera_LRT', 0.558)],\n 'Riviera_LRT': [('Kadaloor_LRT', 0.558), ('CoralEdge_LRT', 0.386)],\n 'CoralEdge_LRT': [('Riviera_LRT', 0.386), ('Meridian_LRT', 0.530)],\n 'Meridian_LRT': [('CoralEdge_LRT', 0.530), ('Cove_LRT', 0.443)],\n 'Cove_LRT': [('Meridian_LRT', 0.443), ('Punggol_MRT', 0.763)],\n}\n\ngraph1 = Graph(lists)\ngraph1.astar_algorithm('Samudera_LRT', 'Riviera_LRT')\n" }, { "alpha_fraction": 0.5070756077766418, "alphanum_fraction": 0.5182813405990601, "avg_line_length": 46.324241638183594, "blob_id": 
"7f79b81551c8f3b2dd70426d2d508f5d62150620", "content_id": "e556747388939d07b81a3c4d3f4b171f78cf81d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15617, "license_type": "no_license", "max_line_length": 227, "num_lines": 330, "path": "/map.py", "repo_name": "eekhait/1008_Project", "src_encoding": "UTF-8", "text": "import folium\nimport io\nimport sys\nimport main_graph as m_graph\nimport bus_adj as bus_graph\nimport lrt_adj as lrt_graph\nimport pandas as pd\nfrom PyQt5 import QtWidgets, QtWebEngineWidgets\nimport csv\n\n# ----------------------------------\n# THIS PART CONCERNS WITH UI\n# EVERYTHING COMES TOGETHER HERE\n# ----------------------------------\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n # Create map object, set default location, map theme & zoom\n m = folium.Map(location=[1.4046357, 103.9090000], zoom_start=14.5, prefer_canvas=True)\n # Global tooltip, hover info\n tooltip = 'Click For More Info'\n\n # ASKS FOR INPUT/OUTPUT HERE, EVERYTHING TAKEN IN AS STRING (Irvyn)\n def check(start):\n with open('Complete_Punggol_Graph.csv', 'rt') as f:\n reader = csv.reader(f, delimiter=',')\n for row in reader:\n if start == row[0] or start == row[1]:\n location.append(row[0])\n name.append(row[1])\n return location, name\n\n def confirmation(msg):\n while True:\n answer = input(msg).upper()\n if answer in ('Y', 'N'):\n return answer\n else:\n print('Not a valid input, please try again')\n\n def transportation(tp):\n while True:\n mode = input(tp).upper()\n if mode in ('L', 'B', 'W', 'M'):\n return mode\n else:\n print('Not a valid input, please try again')\n\n def show_walks():\n # Used to create a file to show graph of connectivity\n # This one is just nodes that are of walkable distance\n marked = []\n for i in range(1, len(m_graph.mainGraph[0])):\n for j in list(m_graph.mainGraph.at[i, 6].split(\", \")):\n # print(m_graph.mainGraph[0][i], j)\n if [m_graph.mainGraph[0][i], 
j] not in marked and [j, m_graph.mainGraph[0][i]] not in marked:\n coords_to_add = [m_graph.get_long_lat(m_graph.mainGraph[0][i]), m_graph.get_long_lat(j)]\n # print(coords_to_add)\n marked.append(coords_to_add)\n folium.PolyLine(coords_to_add, color=\"grey\", opacity=0.5, weight=0.5).add_to(m)\n m.save(\"walks.html\")\n\n def show_lrts():\n # Used to create a file to show graph of connectivity\n # This one is just the LRTs, and what blocks are 'connected' to them\n marked = []\n # Buildings and their closest LRTs\n for i in range(1, len(m_graph.mainGraph[0])-14):\n closest_lrt = m_graph.get_nearest_lrt(m_graph.mainGraph[0][i])\n if [m_graph.mainGraph[0][i], closest_lrt] not in marked and [closest_lrt, m_graph.mainGraph[0][i]] not in marked:\n coords_to_add = [m_graph.get_long_lat(m_graph.mainGraph[0][i]), m_graph.get_long_lat(closest_lrt)]\n marked.append(coords_to_add)\n folium.PolyLine(coords_to_add, color=\"grey\", opacity=1, weight=1).add_to(m)\n # Markers for LRTs\n marked = []\n marked2 = []\n with open('Punggol_LRT_Routing.csv', 'rt') as lrt:\n reader = csv.reader(lrt, delimiter=',')\n next(reader)\n for row in reader:\n for i in row[2].split(\", \"): # connected nodes\n if [row, i] not in marked and [i, row] not in marked:\n # print(row[0], i)\n coords_to_add = [m_graph.get_long_lat(row[0]), m_graph.get_long_lat(i)]\n marked.append(coords_to_add)\n folium.PolyLine(coords_to_add, color=\"purple\").add_to(m)\n if coords_to_add not in marked2:\n folium.Marker(coords_to_add[0],\n icon=folium.Icon(color=\"purple\", icon=\"train\", prefix='fa'),\n popup=i, tooltip=row[0]).add_to(m)\n marked2.append(coords_to_add[0])\n # Edges between LRTs\n marked = []\n m.save(\"lrts.html\")\n\n def show_buses():\n b_graph = pd.read_csv('Punggol_Bus_Routing_Type2.csv', sep=',')\n b_graph[\"ServiceNo\"] = b_graph[\"ServiceNo\"].apply(str) # converts column to be string-only (rather than int+str)\n b_graph[\"NextStop\"] = b_graph[\"NextStop\"].apply(str) # converts column to be 
string-only (rather than int+str)\n marked = []\n for i in range(0,len(b_graph[\"ServiceNo\"])):\n longlats = [m_graph.get_long_lat(b_graph.at[i, \"BusStopCode\"]), m_graph.get_long_lat(b_graph.at[i, \"NextStop\"])]\n # add marker (latlong)\n if b_graph.at[i, \"BusStopCode\"] not in marked:\n folium.Marker(m_graph.get_long_lat(b_graph.at[i, \"BusStopCode\"]),\n icon=folium.Icon(color=\"green\", icon=\"bus\", prefix='fa'),popup=\"\",tooltip=\"\").add_to(m)\n marked.append(i)\n # add edge (longlat)\n folium.PolyLine(longlats, color=\"green\", weight=2, opacity=0.75).add_to(m)\n m.save(\"buses.html\")\n\n\n # show_walks()\n # show_lrts()\n # show_buses()\n print(\"\\nWelcome to Punggol Pathfinder\")\n print(\"Valid inputs are: \\033[1m Postal codes, bus stop numbers, train station names, train station codes. \\033[0m\")\n\n while True:\n name = []\n # User start and end code will be stored in here\n location = []\n # User choosen mode will stored in here\n mode = []\n result_path = []\n # Prompt user for start and destination point\n start = input(\"\\nWhere are you coming from?\\n\")\n end = input(\"Where is your destination?\\n\")\n check(start)\n check(end)\n\n # Calls function to check if input is valid by comparing with CSV\n if len(location) != 2:\n print(\"Location not valid, please try again\\n\")\n continue\n else:\n sp = name[0]\n ep = name[1]\n\n if sp:\n print(\"Start location: \", sp)\n else:\n print(\"Start location: \", start)\n if ep:\n print(\"Destination: \", ep)\n else:\n print(\"Destination: \", end)\n\n answer = confirmation(\"\\nConfirm start location and destination? 
[Y/N] \\n\")\n if answer == 'N':\n print(\"Let\\'s try again\")\n elif answer == 'Y':\n mode = transportation(\"Select mode of transport: LRT (L), Bus (B), Walk (W), or Mixed (M)\\n\")\n if mode == 'L':\n # Call Lrt algorithm here\n result_path = lrt_graph.take_lrt(location[0], location[1])\n\n print(\"Time taken:\", result_path[0], \"mins\")\n print(\"Take LRT from\")\n for i in range(0, len(result_path[1])):\n print(result_path[1][i])\n if len(result_path[1]) - 1 != i:\n print(\"to\")\n elif mode == 'B':\n # Call Bus algorithm here\n result_path = bus_graph.route_finder(location[0], location[1])\n print(\"Time taken:\", result_path[0], \"mins\")\n print(\"From\")\n for i in range(0, len(result_path[1])):\n print(result_path[1][i])\n if (result_path[2][i]) == True:\n print(\"Take bus\", result_path[3], \"to \")\n else:\n if len(result_path[1]) - 1 != i:\n print(\"Walk to\")\n elif mode == 'W':\n # Call Walk algorithm here\n result_path = m_graph.take_walk(location[0], location[1])\n\n print(\"Time taken:\", result_path[0], \"mins\")\n print(\"Walk from\")\n for i in range(0, len(result_path[1])):\n print(result_path[1][i])\n if len(result_path[1]) - 1 != i:\n print(\"to\")\n elif mode == 'M':\n # Call Mixed algorithm here\n print(\"Option not implemented. 
Please try again with a different options\")\n sys.exit()\n break\n\n\n # (khai)\n # THIS PART IS WHERE THE MAP GETS POPULATED WITH NODES AND EDGES ---------------------------------------------\n\n # Adding of markers and edges for Single Transport Routes\n def singleTransportPlot(paths, markerColor, lineColor, markerIcon):\n marker_coords = []\n edge_coords = []\n for i in paths:\n # this loop creates a list of coordinates to add markers/nodes with\n marker_coords.append(m_graph.get_lat_long(i))\n edge_coords.append(m_graph.get_long_lat(i))\n\n for i in range(0, len(marker_coords)):\n folium.Marker([marker_coords[i][1], marker_coords[i][0]],\n icon=folium.Icon(color=markerColor, icon=markerIcon, prefix='fa'), popup=i,\n tooltip=result_path[1][i]).add_to(m)\n folium.PolyLine(edge_coords, color=lineColor).add_to(m)\n\n # Set icon for different transportation types\n def iconMaker(length):\n if length == 3:\n return \"train\"\n elif (length == 5):\n return \"bus\"\n elif length == 6:\n return \"building\"\n\n # Set color based on transportation type\n def setColor(length):\n if length == 3:\n return \"purple\"\n elif (length == 5):\n return \"green\"\n elif length == 6:\n return \"gray\" \n\n\n # Set route based on different transport\n def routePlotting(MOT, paths):\n changes_Indicator = 0\n if (MOT == \"L\"):\n marker_coords = []\n edge_coords = []\n\n for i in range(0, len(paths[1])):\n marker_coords.append(m_graph.get_lat_long(paths[1][i]))\n\n current_node = paths[1][i]\n if i+1 < len(paths[1]):\n next_node = paths[1][i+1]\n \n edge_coords.append(m_graph.get_long_lat(current_node))\n\n if len(current_node) == 3 and len(next_node) == 3:\n folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=setColor(len(current_node)),icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)\n edge_coords.append(m_graph.get_long_lat(next_node))\n folium.PolyLine(edge_coords, color=\"purple\").add_to(m)\n edge_coords 
= []\n else:\n folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=setColor(len(current_node)),icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)\n edge_coords.append(m_graph.get_long_lat(next_node))\n folium.PolyLine(edge_coords, color=\"grey\").add_to(m)\n edge_coords = [] \n elif (MOT == \"B\"):\n marker_coords = []\n edge_coords = []\n\n for i in range(0, len(paths[1])):\n marker_coords.append(m_graph.get_lat_long(paths[1][i]))\n\n current_node = paths[1][i]\n if i+1 < len(paths[1]):\n next_node = paths[1][i+1]\n \n edge_coords.append(m_graph.get_long_lat(current_node))\n\n if len(current_node) == 5 and len(next_node) == 5:\n if paths[2][i] == True:\n folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=setColor(len(current_node)),icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)\n edge_coords.append(m_graph.get_long_lat(next_node))\n folium.PolyLine(edge_coords, color=\"green\").add_to(m)\n edge_coords = []\n else:\n folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=setColor(len(current_node)),icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)\n edge_coords.append(m_graph.get_long_lat(next_node))\n folium.PolyLine(edge_coords, color=\"grey\").add_to(m)\n edge_coords = []\n else:\n folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=setColor(len(current_node)),icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)\n edge_coords.append(m_graph.get_long_lat(next_node))\n folium.PolyLine(edge_coords, color=\"grey\").add_to(m)\n edge_coords = [] \n elif (MOT == \"W\"):\n singleTransportPlot(paths[1], \"gray\", \"grey\", \"building\")\n elif (MOT == \"M\"):\n marker_coords = []\n edge_coords = []\n changes_Indicator = 0\n\n for i in range(0, len(paths[1])):\n 
marker_coords.append(m_graph.get_lat_long(paths[1][i]))\n\n current_node = paths[1][i]\n if i+1 < len(paths[1]):\n next_node = paths[1][i+1]\n \n edge_coords.append(m_graph.get_long_lat(current_node))\n\n if len(current_node) == len(next_node):\n folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=\"darkred\",icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)\n edge_coords.append(m_graph.get_long_lat(next_node))\n folium.PolyLine(edge_coords, color=setColor(len(current_node))).add_to(m)\n edge_coords = []\n elif len(current_node) != len(next_node):\n if changes_Indicator == 1:\n folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=\"darkred\",icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)\n edge_coords.append(m_graph.get_long_lat(next_node))\n folium.PolyLine(edge_coords, color=setColor(len(next_node))).add_to(m)\n edge_coords = []\n changes_Indicator -= 1\n else:\n folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=\"darkred\",icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)\n edge_coords.append(m_graph.get_long_lat(next_node))\n folium.PolyLine(edge_coords, color=setColor(len(current_node))).add_to(m)\n edge_coords = []\n changes_Indicator +=1\n\n # Call Set routes and pass in mode of transport and routes\n # Sample Input: [Coming From: PE1], [Coming From: ]\n routePlotting(mode, result_path)\n\n # Initialization of the map\n data = io.BytesIO() # creates a temporary 'container' for html code\n m.save(data, close_file=False) # folium html code is saved inside data variable\n w = QtWebEngineWidgets.QWebEngineView() # then the rest of the code is the map running\n w.setHtml(data.getvalue().decode())\n w.resize(840, 680)\n w.show()\n sys.exit(app.exec_())\n" }, { "alpha_fraction": 0.5475651025772095, "alphanum_fraction": 0.5736126899719238, 
"avg_line_length": 28.797468185424805, "blob_id": "1c9d9bbdec453ad02f5394d547e6cc2088b5334d", "content_id": "993bb8dcfa5015924c599b63ce1dc89a5179756f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7064, "license_type": "no_license", "max_line_length": 96, "num_lines": 237, "path": "/bus_adj.py", "repo_name": "eekhait/1008_Project", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport main_graph as m_graph\nimport csv\nimport sys\nimport math\nimport numpy as np\n\n\n# Columns: 0-Code, ....... pending\nbusData = pd.read_csv('Punggol_Bus_Routing.csv', sep=',', header=None)\n# Columns: 0-Code, ....... pending\nbusData2 = pd.read_csv('Punggol_Bus_Routing_Type2.csv', sep=',',header=None)\npunggol = pd.read_csv('Complete_Punggol_Graph.csv',sep=',',header=None)\npunggol1 = pd.read_csv('Punggol_complete_graph2.csv',sep=',',header=None)\n\nbus_speed = 50000/60\nbus_waiting_time = 5\n'''\nTest Cases\nstart = \"65141\"\nend = \"65339\"\nnew_start = \"828858\"\nnew_end = \"821266\"\n'''\ndef busStopCode1(data):\n start = (punggol[0] == data)\n return start\n\ndef busStopCode2(data):\n start = punggol1[0] == data\n return start\n\ndef connected(data):\n connected1 = punggol[busStopCode1(data)]\n if connected1.empty is True:\n connected1 = punggol1[busStopCode2(data)]\n hg = []\n test = pd.DataFrame(connected1[6].str.split(',').tolist())\n test1 = pd.DataFrame((connected1[7].str.split(',').tolist()))\n if test.empty == True:\n print(\"no such route For Buses\")\n sys.exit()\n if test1.empty == True:\n print (\"no such route For buses\")\n sys.exit()\n ht =[]\n if len(data) == 5:\n ht.append(int(data))\n # print(int(test1[0].values))\n try:\n niii = max(test1.columns.values)\n except ValueError:\n niii = (test1.columns.values)\n for i in test.iterrows():\n for k in range (0, niii):\n if int(test1[k].values) <=200:\n #for connected nodes and distance\n hg.append(((int(test[k].values)),(int(test1[k].values))))\n 
#just for connected nodes\n ht.append(int((test[k].values)))\n\n return ht\n\n# For finding starting bus Stop( See csv for column 1 and compare to check for bus stop code)\ndef busStopCode(data):\n startStop = busData2[1] == data\n return startStop\n\n# For finding starting bus Stop( See csv for column 2 and compare to check for bus stop code)\ndef endStopCode(data):\n endStop = busData2[2] == data\n return endStop\n\ndef busNoInserter(data):\n busNo = busData2[0] == data\n return busNo\n\n#For finding the starting point of the bus\ndef busStopCode_startfinder(data):\n length = len(data)\n new_array =[]\n isa =0\n for i in range(0,length):\n test_test = busStopCode(str(data[i]))\n test_test1 = busData2[test_test]\n if test_test1.empty == False:\n new_array.append(test_test1)\n return new_array\n\n#For findin the ending point of the bus\ndef busStopCode_endfinder(data):\n length = len(data)\n new_array =[]\n isa =0\n for i in range(0,length):\n test_test = endStopCode(str(data[i]))\n test_test1 = busData2[test_test]\n if test_test1.empty == False:\n new_array.append(test_test1)\n return new_array\n\n# Checking the routes taken by the buses to see if there is a route to the ending bus stop.\ndef take_bus(start_node, end_node,data):\n bus_route = (busNoInserter(data)) & ((busStopCode(start_node) | endStopCode(end_node)))\n asd =[]\n asd.append(start_node)\n bus_distance = 0\n lol = np.int64(0)\n lol1 = np.int64(0)\n #bus_route = (bus_route[0]) >= 1 & (bus_route[0] <=3)\n route = busData2[bus_route]\n if len(route) < 2:\n pass\n else:\n if route.empty == True:\n pass\n else:\n lol = route.index.values[0]\n try:\n lol1= route.index.values[1]\n except IndexError:\n lol1 = lol\n for i in range (lol,lol1+1):\n if busData2.at[lol,6] != busData2.at[lol1,6]:\n pass\n else:\n bus_distance += int(busData2.at[i,3])\n asd.append(busData2.at[i,2])\n\n if len(asd) < 2:\n asd = []\n return None\n\n\n return (data,asd, math.ceil(bus_distance/bus_speed + bus_waiting_time + 
(lol1-lol)))\n\n\n#For appending all the routes that could be taken and return the one with the least time\ndef route_finder(new_start, new_end):\n starting = busStopCode_startfinder(connected(new_start))\n ending = busStopCode_endfinder(connected(new_end))\n str1 = ' '\n str2 = ' '\n k = []\n n = []\n for i in range (0,len(starting)):\n bus_to_take = starting[i][0].values\n asd = (starting[i][1].values)\n #bus_to_take , indices = np.unique(asd,return_counts=True)\n for l in bus_to_take:\n try:\n a ,indices= np.unique((starting[i][1].values),return_counts=True)\n b, indices = np.unique((ending[i][2].values),return_counts= True)\n str1 = str1.join(a)\n str2 = str2.join(b)\n if take_bus(str1,str2,l) is None:\n pass\n else:\n p = list(take_bus(str1,str2,l))\n n.append((take_bus(str1,str2,l))[2])\n k.append(p)\n\n except IndexError:\n \"Do Nothing\"\n df = pd.DataFrame(k)\n\n if df.empty == True:\n print(\"No common bus nearby start and end points. Please restart with another option. \")\n sys.exit()\n\n route = df[2] == min(n)\n optimised_route = df[route]\n optimised_route[0], optimised_route[2] = optimised_route[2], optimised_route[0]\n pop = optimised_route.head(1)\n\n first_route = []\n lol = pd.DataFrame(pop[1].tolist())\n\n starting_walk = m_graph.take_walk(new_start,lol[0].values[0])\n lemon =[]\n if ((starting_walk[0]) == 0):\n pass\n else:\n first_route=starting_walk[1]\n first_route.pop(len(first_route)-1)\n\n for i in range(1,len(starting_walk)):\n lemon.append(False)\n\n\n for i in range (0,len(lol)):\n for l in lol:\n first_route.append((lol[l][i]))\n if l == 0:\n pass\n else:\n lemon.append(True)\n\n length = max(lol)\n Last_Point = lol[length].values[0]\n ending_walk = m_graph.take_walk(Last_Point, new_end)\n\n\n if len(ending_walk) <= 2:\n\n end_route = ending_walk[1]\n # print(end_route)\n first_route.append(end_route[0])\n end_route.pop(0)\n first_route.append(end_route[0])\n lemon.append(False)\n else:\n new = np.array(ending_walk[1])\n counter = 
1\n for i in range(1, len(new)):\n first_route.append(new[counter])\n lemon.append(False)\n counter = counter + 1\n lemon.append(False)\n k = []\n # all route here\n for i, l in optimised_route.iterrows():\n k.append((l[0], l[1], l[2]))\n route = []\n test1 = pop\n m = test1.index.values[0]\n route.append(test1[0][m]) # time taken is fine\n route[0] += starting_walk[0]\n route[0] += ending_walk[0]\n # print(\"first_route:\", first_route)\n route.append(first_route)\n route.append(lemon) # lemon is fine\n # print(\"\")\n route.append(test1[2][m]) # bus number is fine\n return (route)\n\n# print(\"BUS ROUTE: \", route_finder(\"828858\",\"65009\"))\n\n\n" } ]
7
vishvadesai9/Breast_Cancer_Classification
https://github.com/vishvadesai9/Breast_Cancer_Classification
5e40d95d30bb420060851bfb248024acc4d5bba6
c9e60fd852f1944a1b569534aaefeb432e49c3e5
3925cdd37c2ec0429b71e128f751aaf84b25860c
refs/heads/master
2022-12-21T23:20:21.763345
2020-10-01T18:11:06
2020-10-01T18:11:06
298,875,594
4
3
null
null
null
null
null
[ { "alpha_fraction": 0.8223602771759033, "alphanum_fraction": 0.8223602771759033, "avg_line_length": 400.5, "blob_id": "0e5293f1e7c8438a9cdba53164441692a92c7bf8", "content_id": "c3238fb4171f59141a6f945ff720e8e3c88219ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 806, "license_type": "no_license", "max_line_length": 771, "num_lines": 2, "path": "/README.md", "repo_name": "vishvadesai9/Breast_Cancer_Classification", "src_encoding": "UTF-8", "text": "# Breast_Cancer_Classification\nThe aim of this project is to classify breast cancer as either malignant or benign using the dataset from sckit-learn. It is a web application built using streamlit and deployed with heroku(link: https://breast-cancer-detection-heroku.herokuapp.com/). The app analyzes the given data and provides prediction accuracy for the various classification algorithms used i.e Support Vector Machine, Logistic Regression, K-Nearest Neighbor, Random Forest, Decision Trees and Naïve Bayes. The application allows for interactivity with the parameters of these classification algorithms. In addition, I have plot Confusion Matrix, Precision-Recall Curve and ROC curve for each of the classifications. For data visualization I have used Pandas, Matplotlib, Seaborn, Plotly and Numpy. 
\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 14.285714149475098, "blob_id": "6b79ff5dcd452a7374229ad437692ba17b604f1b", "content_id": "fe40c97ced8b8896e797d3776537d3893ced14ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 108, "license_type": "no_license", "max_line_length": 20, "num_lines": 7, "path": "/requirements.txt", "repo_name": "vishvadesai9/Breast_Cancer_Classification", "src_encoding": "UTF-8", "text": "numpy>=1.16.4\npandas>=0.24.2\nseaborn>=0.9.0\nmatplotlib>=3.1.0\nplotly>=4.4.1\nscikit-learn==0.23.2\nstreamlit\n\n" }, { "alpha_fraction": 0.5270372629165649, "alphanum_fraction": 0.5391121506690979, "avg_line_length": 44.967124938964844, "blob_id": "2ae3abd3cdb82fb216ae8244a04cfe04e9802eb6", "content_id": "a96efe48019e548deda2c7039cd7967c08ec74a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17143, "license_type": "no_license", "max_line_length": 177, "num_lines": 365, "path": "/app_bcd.py", "repo_name": "vishvadesai9/Breast_Cancer_Classification", "src_encoding": "UTF-8", "text": "import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport plotly.express as px\r\nimport time\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import precision_score, recall_score\r\nfrom sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve, auc\r\nfrom sklearn.datasets import load_breast_cancer\r\n\r\n\r\n\r\n\r\ndef main():\r\n 
st.beta_set_page_config(page_title='Breast Cancer Detection', page_icon='tumor_icon.png', layout='centered', initial_sidebar_state='auto')\r\n \r\n st.title('Breast Cancer Detection')\r\n st.image('tumor_icon.png',width=100)\r\n st.sidebar.title('Breast Cancer Detection')\r\n st.markdown('Cancer is Malignant or Benign? ')\r\n navigation=st.sidebar.radio('VIEW', ('Data Analysis','Prediction'))\r\n \r\n \r\n @st.cache(persist=True)\r\n def load_data():\r\n cancer = load_breast_cancer()\r\n df = pd.DataFrame(cancer.data,columns=cancer.feature_names)\r\n df['target'] = cancer.target \r\n labelencoder=LabelEncoder()\r\n for col in df.columns:\r\n df[col] = labelencoder.fit_transform(df[col])\r\n return df\r\n \r\n @st.cache(persist=True)\r\n def split(df):\r\n y = df['target']\r\n x = df.drop(columns=['target'])\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=3)\r\n return x_train, x_test, y_train, y_test\r\n \r\n \r\n \r\n def plot_metrics(metrics_list):\r\n if 'Confusion Matrix' in metrics_list:\r\n st.subheader(\"Confusion Matrix\")\r\n cm = confusion_matrix(y_test, y_pred)\r\n fig,ax = plt.subplots()\r\n sns.heatmap(cm, annot = True)\r\n # plot_confusion_matrix(model, x_test, y_test, display_labels=class_names)\r\n ax.figure.savefig('file.png')\r\n st.pyplot(fig)\r\n if 'Precision-Recall Curve' in metrics_list:\r\n st.subheader('Precision-Recall Curve')\r\n \r\n precision, recall, thresholds = precision_recall_curve(y_test, y_pred)\r\n\r\n fig = px.area(\r\n x=recall, y=precision,\r\n title=f'Precision-Recall Curve (AUC={auc(precision, recall):.4f})',\r\n labels=dict(x='Recall', y='Precision'),\r\n width=700, height=500\r\n )\r\n fig.add_shape(\r\n type='line', line=dict(dash='dash'),\r\n x0=0, x1=1, y0=1, y1=0\r\n )\r\n fig.update_yaxes(scaleanchor=\"x\", scaleratio=1)\r\n fig.update_xaxes(constrain='domain')\r\n st.write(fig)\r\n \r\n \r\n if 'ROC Curve' in metrics_list:\r\n fpr, tpr, thresholds = roc_curve(y_test, 
y_pred)\r\n\r\n fig = px.area(\r\n x=fpr, y=tpr,\r\n title=f'ROC Curve (AUC={auc(fpr, tpr):.4f})',\r\n labels=dict(x='False Positive Rate', y='True Positive Rate'),\r\n width=700, height=500\r\n ) \r\n fig.add_shape(\r\n type='line', line=dict(dash='dash'),\r\n x0=0, x1=1, y0=0, y1=1\r\n )\r\n\r\n fig.update_yaxes(scaleanchor=\"x\", scaleratio=1)\r\n fig.update_xaxes(constrain='domain')\r\n st.write(fig)\r\n \r\n if 'Training and Test accuracies' in metrics_list:\r\n mal_train_X = x_train[y_train==0]\r\n mal_train_y = y_train[y_train==0]\r\n ben_train_X = x_train[y_train==1]\r\n ben_train_y = y_train[y_train==1]\r\n \r\n mal_test_X = x_test[y_test==0]\r\n mal_test_y = y_test[y_test==0]\r\n ben_test_X = x_test[y_test==1]\r\n ben_test_y = y_test[y_test==1]\r\n \r\n scores = [model.score(mal_train_X, mal_train_y), model.score(ben_train_X, ben_train_y), model.score(mal_test_X, mal_test_y), model.score(ben_test_X, ben_test_y)]\r\n\r\n fig,ax = plt.subplots()\r\n \r\n # Plot the scores as a bar chart\r\n bars = plt.bar(np.arange(4), scores, color=['#4c72b0','#4c72b0','#55a868','#55a868'])\r\n\r\n # directly label the score onto the bars\r\n for bar in bars:\r\n height = bar.get_height()\r\n plt.gca().text(bar.get_x() + bar.get_width()/2, height*.90, '{0:.{1}f}'.format(height, 2), ha='center', color='w', fontsize=11)\r\n\r\n # remove all the ticks (both axes), and tick labels on the Y axis\r\n plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='on')\r\n\r\n # remove the frame of the chart\r\n for spine in plt.gca().spines.values():\r\n spine.set_visible(False)\r\n\r\n plt.xticks([0,1,2,3], ['Malignant\\nTraining', 'Benign\\nTraining', 'Malignant\\nTest', 'Benign\\nTest'], alpha=0.8);\r\n plt.title('Training and Test Accuracies for Malignant and Benign Cells', alpha=0.8)\r\n ax.xaxis.set_tick_params(length=0)\r\n ax.yaxis.set_tick_params(length=0)\r\n ax.figure.savefig('file1.png')\r\n st.pyplot(fig)\r\n \r\n df = load_data()\r\n 
class_names = ['malignant', 'benign']\r\n \r\n \r\n###------------------DATA ANAlYSIS-------------------------- \r\n if navigation == 'Data Analysis':\r\n \r\n \r\n \r\n if st.sidebar.checkbox(\"Show Raw Data\", False):\r\n st.subheader('Breast Cancer Dataset')\r\n \r\n st.dataframe(df)\r\n \r\n if st.sidebar.checkbox(\"Show Features\", False):\r\n cancer = load_breast_cancer()\r\n st.subheader('Features')\r\n features = pd.DataFrame(cancer.feature_names)\r\n features.columns = ['Features']\r\n st.dataframe(features)\r\n \r\n \r\n plots = st.sidebar.multiselect(\"Plots\", ('Scatter Matrix', 'Number of Malignant and Benign','Heatmap','Mean radius vs Mean area','Mean smoothness vs Mean area'))\r\n \r\n \r\n if st.sidebar.button(\"Plot\", key='plotss'):\r\n with st.spinner('Wait for it...'):\r\n time.sleep(5)\r\n \r\n if 'Number of Malignant and Benign' in plots:\r\n st.subheader(\"Malignant and Benign Count\")\r\n fig,ax = plt.subplots()\r\n \r\n ma = len(df[df['target']==1])\r\n be = len(df[df['target']==0])\r\n count=[ma,be]\r\n bars = plt.bar(np.arange(2), count, color=['#000099','#ffff00'])\r\n ##show value in bars\r\n for bar in bars:\r\n height = bar.get_height()\r\n plt.gca().text(bar.get_x() + bar.get_width()/2, height*.90, '{0:.{1}f}'.format(height, 2), ha='center', color='black', fontsize=11)\r\n plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='on', labelbottom='on')\r\n for spine in plt.gca().spines.values():\r\n spine.set_visible(False)\r\n plt.xticks(ticks=[0,1])\r\n ax.set_ylabel('Count')\r\n ax.set_xlabel('Target')\r\n ##remove dashes from frame\r\n ax.xaxis.set_tick_params(length=0)\r\n ax.yaxis.set_tick_params(length=0)\r\n st.pyplot(fig)\r\n \r\n \r\n if 'Scatter Matrix' in plots:\r\n st.subheader(\"Scatter Matrix\")\r\n fig = px.scatter_matrix(df,dimensions=['mean radius','mean texture','mean perimeter','mean area','mean smoothness'],color=\"target\",width = 800,height = 700)\r\n st.write(fig)\r\n \r\n if 'Heatmap' in 
plots:\r\n st.subheader(\"Heatmap\")\r\n fig=plt.figure(figsize = (30,20))\r\n hmap=sns.heatmap(df.drop(columns=['target']).corr(), annot = True,cmap= 'Blues',annot_kws={\"size\": 18})\r\n hmap.set_xticklabels(hmap.get_xmajorticklabels(), fontsize = 25)\r\n hmap.set_yticklabels(hmap.get_ymajorticklabels(), fontsize = 25)\r\n st.pyplot(fig)\r\n if 'Mean radius vs Mean area' in plots:\r\n st.subheader('Cancer Radius and Area')\r\n fig = plt.figure()\r\n sns.scatterplot(x=df['mean radius'],y = df['mean area'],hue = df['target'],palette=['#000099','#ffff00'])\r\n st.pyplot(fig)\r\n if 'Mean smoothness vs Mean area' in plots:\r\n st.subheader('Cancer Smoothness and Area')\r\n fig = plt.figure()\r\n sns.scatterplot(x=df['mean smoothness'],y = df['mean area'],hue = df['target'],palette=['#000099','#ffff00'])\r\n st.pyplot(fig)\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n###---------------classification----------------------\r\n if navigation == 'Prediction':\r\n \r\n x_train, x_test, y_train, y_test = split(df)\r\n if st.sidebar.checkbox(\"Show X_train/Y_train\", False):\r\n st.subheader('X_train')\r\n st.dataframe(x_train)\r\n st.subheader('Y_train')\r\n st.dataframe(y_train)\r\n \r\n st.sidebar.subheader(\"Choose Classifier\")\r\n classifier = st.sidebar.selectbox(\"Classifier\", (\"Support Vector Machine (SVM)\", \"Logistic Regression\", \"Random Forest\", 'KNN', 'Decision Tree', 'Gaussian Naive Bayes'))\r\n\r\n if classifier == 'Support Vector Machine (SVM)':\r\n st.sidebar.subheader(\"Model Hyperparameters\")\r\n #choose parameters\r\n C = st.sidebar.number_input(\"C (Regularization parameter)\", 0.01, 10.0, step=0.01, key='C_SVM')\r\n kernel = st.sidebar.radio(\"Kernel\", (\"rbf\", \"linear\"), key='kernel')\r\n gamma = st.sidebar.radio(\"Gamma (Kernel Coefficient)\", (\"scale\", \"auto\"), key='gamma')\r\n\r\n metrics = st.sidebar.multiselect(\"Metrics to Plot\", ('Confusion Matrix', 'ROC Curve', 
'Precision-Recall Curve','Training and Test accuracies'))\r\n \r\n if st.sidebar.button(\"Classify\", key='classify'):\r\n st.subheader(\"Support Vector Machine (SVM) Results\")\r\n model = SVC(C=C, kernel=kernel, gamma=gamma)\r\n model.fit(x_train, y_train)\r\n accuracy = model.score(x_test, y_test)\r\n y_pred = model.predict(x_test)\r\n st.write(\"Accuracy: \", accuracy.round(2))\r\n st.write(\"Precision: \", precision_score(y_test, y_pred, labels=class_names).round(2))\r\n st.write(\"Recall: \", recall_score(y_test, y_pred, labels=class_names).round(2))\r\n plot_metrics(metrics)\r\n \r\n if classifier == 'Logistic Regression':\r\n st.sidebar.subheader(\"Model Hyperparameters\")\r\n C = st.sidebar.number_input(\"C (Regularization parameter)\", 0.01, 10.0, step=0.01, key='C_LR')\r\n max_iter = st.sidebar.slider(\"Maximum number of iterations\", 100, 500, key='max_iter')\r\n \r\n metrics = st.sidebar.multiselect(\"Metrics to Plot\", ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve','Training and Test accuracies'))\r\n \r\n if st.sidebar.button(\"Classify\", key='classify'):\r\n st.subheader(\"Logistic Regression Results\")\r\n model = LogisticRegression(C=C, penalty='l2', max_iter=max_iter)\r\n model.fit(x_train, y_train)\r\n accuracy = model.score(x_test, y_test)\r\n y_pred = model.predict(x_test)\r\n st.write(\"Accuracy: \", accuracy.round(2))\r\n st.write(\"Precision: \", precision_score(y_test, y_pred, labels=class_names).round(2))\r\n st.write(\"Recall: \", recall_score(y_test, y_pred, labels=class_names).round(2))\r\n plot_metrics(metrics)\r\n \r\n if classifier == 'Random Forest':\r\n st.sidebar.subheader(\"Model Hyperparameters\")\r\n n_estimators = st.sidebar.number_input(\"The number of trees in the forest\", 100, 5000, step=10, key='n_estimators')\r\n max_depth = st.sidebar.number_input(\"The maximum depth of the tree\", 1, 20, step=1, key='max_depth')\r\n bootstrap = st.sidebar.radio(\"Bootstrap samples when building trees\", ('True', 'False'), 
key='bootstrap')\r\n metrics = st.sidebar.multiselect(\"Metrics to Plot\", ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve','Training and Test accuracies'))\r\n \r\n if st.sidebar.button(\"Classify\", key='classify'):\r\n st.subheader(\"Random Forest Results\")\r\n model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, bootstrap=bootstrap, n_jobs=-1)\r\n model.fit(x_train, y_train)\r\n accuracy = model.score(x_test, y_test)\r\n y_pred = model.predict(x_test)\r\n st.write(\"Accuracy: \", accuracy.round(2))\r\n st.write(\"Precision: \", precision_score(y_test, y_pred, labels=class_names).round(2))\r\n st.write(\"Recall: \", recall_score(y_test, y_pred, labels=class_names).round(2))\r\n plot_metrics(metrics)\r\n \r\n if classifier == 'KNN':\r\n st.sidebar.subheader(\"Model Hyperparameters\")\r\n n_neighbors = st.sidebar.number_input(\"Number of neighbors\", 1, 100, step=1, key='n_neighbors')\r\n \r\n metrics = st.sidebar.multiselect(\"Metrics to Plot\", ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve','Training and Test accuracies'))\r\n\r\n if st.sidebar.button(\"Classify\", key='classify'):\r\n st.subheader(\"KNN Results\")\r\n model = KNeighborsClassifier(n_neighbors = n_neighbors )\r\n model.fit(x_train, y_train)\r\n accuracy = model.score(x_test, y_test)\r\n y_pred = model.predict(x_test)\r\n st.write(\"Accuracy: \", accuracy.round(2))\r\n st.write(\"Precision: \", precision_score(y_test, y_pred, labels=class_names).round(2))\r\n st.write(\"Recall: \", recall_score(y_test, y_pred, labels=class_names).round(2))\r\n plot_metrics(metrics)\r\n \r\n if classifier == 'Decision Tree':\r\n st.sidebar.subheader(\"Model Hyperparameters\")\r\n \r\n max_depth = st.sidebar.number_input(\"The maximum depth of the tree\", 1, 20, step=1, key='max_depth')\r\n criterion = st.sidebar.radio(\"Criterion\", (\"gini\", \"entropy\"), key='criterion')\r\n splitter = st.sidebar.radio(\"Splitter\", (\"best\", \"random\"), key='splitter')\r\n 
metrics = st.sidebar.multiselect(\"Metrics to Plot\", ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve','Training and Test accuracies'))\r\n \r\n if st.sidebar.button(\"Classify\", key='classify'):\r\n st.subheader(\"Decision Tree Results\")\r\n model = DecisionTreeClassifier(max_depth= max_depth, criterion= criterion, splitter= splitter )\r\n model.fit(x_train, y_train)\r\n accuracy = model.score(x_test, y_test)\r\n y_pred = model.predict(x_test)\r\n st.write(\"Accuracy: \", accuracy.round(2))\r\n st.write(\"Precision: \", precision_score(y_test, y_pred, labels=class_names).round(2))\r\n st.write(\"Recall: \", recall_score(y_test, y_pred, labels=class_names).round(2))\r\n plot_metrics(metrics)\r\n \r\n if classifier == 'Gaussian Naive Bayes':\r\n st.sidebar.subheader(\"Model Hyperparameters\")\r\n \r\n \r\n metrics = st.sidebar.multiselect(\"Metrics to Plot\", ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve','Training and Test accuracies'))\r\n\r\n if st.sidebar.button(\"Classify\", key='classify'):\r\n st.subheader(\"Gaussian Naive Bayes Results\")\r\n model = GaussianNB()\r\n model.fit(x_train, y_train)\r\n accuracy = model.score(x_test, y_test)\r\n y_pred = model.predict(x_test)\r\n st.write(\"Accuracy: \", accuracy.round(2))\r\n st.write(\"Precision: \", precision_score(y_test, y_pred, labels=class_names).round(2))\r\n st.write(\"Recall: \", recall_score(y_test, y_pred, labels=class_names).round(2))\r\n plot_metrics(metrics) \r\n \r\n st.sidebar.subheader(\"ABOUT\")\r\n st.sidebar.write('By: Vishva Desai')\r\n st.sidebar.write(\"Github: https://github.com/vishvadesai9\") \r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" } ]
3
Bolteren/avtoDoc
https://github.com/Bolteren/avtoDoc
38eec4deaabfada6c16ad54bb2e4d6e1f445dc43
d3c8ea91491186213cbd49ccb3a317533b3019be
65bd5cafb96948dbd82ce5489fc6bb1f375a64cb
refs/heads/master
2023-06-19T11:46:25.123755
2021-07-13T11:47:08
2021-07-13T11:47:08
382,865,640
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4722435474395752, "alphanum_fraction": 0.4840112626552582, "avg_line_length": 30.27199935913086, "blob_id": "aa83eaa344ea60444cc7da8cc7988ece286d7b1f", "content_id": "de1197babe3e866f9d236f3c71d74b0458fbfa8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4005, "license_type": "no_license", "max_line_length": 138, "num_lines": 125, "path": "/parrss.py", "repo_name": "Bolteren/avtoDoc", "src_encoding": "UTF-8", "text": "import copy\nimport codecs\nimport re\n\ndef clear_file(name):\n input_file = codecs.open(name, 'r')\n output_file = open('temp.htm', 'w')\n for i in range(1, 119):\n next(input_file)\n while True:\n try:\n words = input_file.readline()\n except Exception:\n next(input_file)\n words = input_file.readline()\n output_file.write(words)\n if not words:\n break\n input_file.close()\n output_file.close()\n\n\ndef parse_programm():\n f = open('temp.htm', 'r')\n html = f.read()\n lst = html.split('\\n')\n out_text = []\n checked_element = \"\"\"Установленные программы\"\"\"\n checked_end_element = \"\"\"</TABLE><BR><BR>\"\"\"\n checked_header = \"\"\"<TR><TD><TD><TD><B>Программа</B>&nbsp;&nbsp;<TD CLASS=cr><B>Версия</B>&nbsp;&nbsp;<TD \n CLASS=cr><B>Размер</B>&nbsp;&nbsp;<TD><B>GUID</B>&nbsp;&nbsp;<TD><B>Издатель</B>&nbsp;&nbsp;<TD \n CLASS=cr><B>Дата</B> \"\"\"\n t = -1\n for element in lst:\n if element.find(checked_header) != -1:\n continue\n if t == -1:\n t = element.find(checked_element)\n continue\n if t > 0 and element.find(checked_end_element) != -1:\n t = -1\n if t > 0:\n out = element.replace('<TR><TD><TD><TD>', '')\n out1 = out.replace('&nbsp;&nbsp;<TD CLASS=cr>', '$$').split('$$')[0]\n out_text.append(out1)\n return out_text\n\n\ndef parse_licence():\n f = open('temp.htm', 'r')\n html = f.read()\n lst = html.split('\\n')\n out_text = {}\n checked_element = \"\"\"<A NAME=\"licenses\">Лицензии</A>\"\"\"\n checked_end_element = \"\"\"</TABLE><BR><BR>\"\"\"\n 
checked_header = \"\"\"<TR><TD><TD><TD><B>Программы</B>&nbsp;&nbsp;<TD><B>Ключ продукта</B>\"\"\"\n t = -1\n for element in lst:\n if element.find(checked_header) != -1:\n continue\n if t == -1:\n t = element.find(checked_element)\n continue\n if t > 0 and element.find(checked_end_element) != -1:\n t = -1\n if t > 0:\n out = element.replace('<TR><TD><TD><TD>', '')\n out1 = out.replace('&nbsp;&nbsp;<TD>', '$$').split('$$')[0]\n out_l = out.replace('&nbsp;&nbsp;<TD>', '$$').split('$$')[1]\n out_text[out1] = out_l\n return out_text\n\n\ndef parse_fix():\n f = open('Report_New_0.html', 'r')\n file_element = []\n file_params = []\n check = -1\n while True:\n str = f.readline()\n if not str:\n break\n if re.match(\n r\"\"\"<td class=\"ktlg\" COLSPAN=6 ALIGN=CENTER>Каталог C:\\\\Program\\sFiles(\\s\\(x86\\))?\\\\Secret\\sNet\\sStudio\\\\Client\\\\</td>\"\"\",\n str):\n check = 0\n continue\n if check == 0:\n if str.find('<td class=\"ks\" >') != -1:\n name_file = str.replace(\"<\", \">\").split(\">\")[2]\n file_params.append(name_file)\n check = 1\n continue\n if check == 1:\n data_file = str.replace(\"<\", \">\").split(\">\")[2]\n file_params.append(data_file)\n check = 2\n continue\n if check == 2:\n len_file = str.replace(\"<\", \">\").split(\">\")[2]\n file_params.append(len_file)\n check = 3\n continue\n if check == 3:\n if str.find('<td class=\"ks\" ALIGN=CENTER>') != -1:\n ks = str.replace(\"<\", \">\").split(\">\")[2]\n file_params.append(ks)\n check = 0\n else:\n continue\n if str.find(\"итого:\") != -1:\n break\n if not file_params:\n continue\n file_element.append(copy.deepcopy(file_params))\n file_params.clear()\n return file_element\n\n\nif __name__ == '__main__':\n clear_file('Report.htm')\n # print(parse_programm())\n # print(parse_licence())\n #parse_fix()\n print(len(parse_fix()))\n" } ]
1
aviator19941/DATA-301
https://github.com/aviator19941/DATA-301
ecb7627b792141e7cba800c0e6eb46a1deecfd82
c891fce5c230c1de11a93326d5fdc818232dad34
12810b7df2b0a34459423cce4913df9575ccaa38
refs/heads/master
2021-01-19T13:56:11.181789
2017-02-19T05:46:41
2017-02-19T05:46:41
82,435,281
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6182026267051697, "alphanum_fraction": 0.633085310459137, "avg_line_length": 29.05172348022461, "blob_id": "a9f9156dc9fab0432c78ce18511b7e9665df5a13", "content_id": "514cb0fe922f0de82fe61e90c2148e5bc9fbf30d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1747, "license_type": "no_license", "max_line_length": 90, "num_lines": 58, "path": "/lab1q1.py", "repo_name": "aviator19941/DATA-301", "src_encoding": "UTF-8", "text": "# James Ly\n# Avinash Sharma\n# Purpose : reports the amount of unique names over the years to a file\n# named \"question1.txt\"\n# input file is names1000.csv\n\n#object for baby names\nclass BabyName:\n def __init__(self, in_name, in_year, in_gender, in_count) :\n self.name = in_name\n self.year = in_year\n self.gender = in_gender\n self.count = in_count\n\n\n#blank list\nnamesList = []\n\n#read through whole file\ninputFile = open(\"names1000.csv\", 'r')\nwhile True:\n line = inputFile.readline()\n if len(line) == 0:\n break\n data = line.split(',')\n #add data to list\n namesList.append(BabyName(data[1],data[2],data[3],data[4]))\n\n#open file to be written to\noutputFile = open(\"question1.txt\", 'w')\noutputFile.write(\"Question 1: Which year has the most unique names?\\n\\n\")\noutputFile.write(\"Results for the amount of unique names over the years\\n\\n\")\n\nyearList = []\n#check for unique names\nfor i in range(0, len(namesList)):\n #separate by year\n if namesList[i].year not in yearList:\n yearNum = namesList[i].year\n yearList.append(yearNum)\n \n #create a list of unique names\n uniqueAmount = []\n\n #add all unique names that are in the same year\n for j in range(0, len(namesList)):\n if yearNum == namesList[j].year:\n uniqueAmount.append(namesList[j].name)\n outputFile.write(str(yearNum) + \": \" + str(len(uniqueAmount)) + \" unique names\\n\")\n \n#write total # of names and years in file\noutputFile.write(\"\\n\") \noutputFile.write(\"total names 
in names1000: \" + str(len(namesList)) + \"\\n\")\noutputFile.write(\"# of years in names1000: \" + str(len(yearList)) + \"\\n\")\n\n#close files\ninputFile.close()\noutputFile.close()\n\n\n\n\n" }, { "alpha_fraction": 0.5517393350601196, "alphanum_fraction": 0.5623073577880859, "avg_line_length": 28.88157844543457, "blob_id": "5ee607829022450c11131bb2a81707c82557ac95", "content_id": "e562aa15ead9336de50b07556038307a4c3e6c2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2271, "license_type": "no_license", "max_line_length": 109, "num_lines": 76, "path": "/lab1q2.py", "repo_name": "aviator19941/DATA-301", "src_encoding": "UTF-8", "text": "# James Ly\n# Avinash Sharma\n# Purpose : reports number of male and female births in each year\n# to a file named \"question2.txt\"\n# input file is names1000.csv\n\n#object for baby names\nclass BabyName:\n def __init__(self, in_name, in_year, in_gender, in_count) :\n self.name = in_name\n self.year = in_year\n self.gender = in_gender\n self.count = in_count\n\n#blank list\nnamesList = []\n\n#read through whole file\ninputFile = open(\"names1000.csv\", 'r')\nwhile True:\n line = inputFile.readline()\n if len(line) == 0:\n break\n data = line.split(',')\n #add data to list\n namesList.append(BabyName(data[1],data[2],data[3],data[4]))\n\n\n#open file to be written to\noutputFile = open(\"question2.txt\", 'w')\noutputFile.write(\"Question 2: How did the number of births change over the years for males and females?\\n\\n\")\noutputFile.write(\"Results for number of male and female births over the years\\n\\n\")\n\nyearList = []\n#sum up number of births in each year\nfor i in range(0, len(namesList)):\n\n #separate by year\n if namesList[i].year not in yearList:\n yearNum = namesList[i].year\n yearList.append(yearNum)\n maleList = []\n femaleList = []\n\n #separate by male or female\n #into male or female lists\n for j in range(0, len(namesList)):\n if yearNum == 
namesList[j].year:\n if namesList[j].gender == 'M':\n maleList.append(namesList[j])\n else:\n femaleList.append(namesList[j])\n\n #add up all the male counts \n if len(maleList) > 0:\n maleSum = 0\n for x in range(1, len(maleList)): \n item = maleList[x].count\n item.replace(\"\\n\", \"\")\n maleSum += int(item)\n\n outputFile.write(str(yearNum) + \" m: \" + str(maleSum))\n\n #add up all the female counts \n if len(femaleList) > 0:\n femaleSum = 0\n for x in range(1, len(femaleList)):\n item = femaleList[x].count\n item.replace(\"\\n\",\"\")\n femaleSum += int(item)\n \n outputFile.write(\" f: \" + str(femaleSum) + \"\\n\")\n \n#close files\ninputFile.close()\noutputFile.close()\n" }, { "alpha_fraction": 0.6206425428390503, "alphanum_fraction": 0.6329460144042969, "avg_line_length": 28.40816307067871, "blob_id": "e0ccefdf54850404f0319445d84d19cc50724776", "content_id": "6709df8fa1267333e6a531e2b0dc85f15e53ab0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1463, "license_type": "no_license", "max_line_length": 90, "num_lines": 49, "path": "/lab1q3.py", "repo_name": "aviator19941/DATA-301", "src_encoding": "UTF-8", "text": "# James Ly\n# Avinash Sharma\n# Purpose : reports the count of baby names with \"Emma\" throughout the years\n# to a text file named \"question3.txt\"\n# input file is names1000.csv\n\n#object for baby names\nclass BabyName:\n def __init__(self, in_name, in_year, in_gender, in_count) :\n self.name = in_name\n self.year = in_year\n self.gender = in_gender\n self.count = in_count\n\n#blank list\nnamesList = []\n\n#read through whole file\ninputFile = open(\"names1000.csv\", 'r')\nwhile True:\n line = inputFile.readline()\n if len(line) == 0:\n break\n data = line.split(',')\n #add data to list\n namesList.append(BabyName(data[1],data[2],data[3],data[4]))\n\n\n#open file to be written to\noutputFile = open(\"question3.txt\", 'w')\noutputFile.write(\"Question 3: How does the popularity of 
Emma change over the years?\\n\\n\")\noutputFile.write(\"Results for popularity of Emma over the years\\n\\n\")\n\nyearList = []\n#check for instances of \"Emma\" in each year\nfor i in range(0, len(namesList)):\n #separate by year\n if namesList[i].year not in yearList:\n yearNum = namesList[i].year\n yearList.append(yearNum)\n\n #check if baby name is \"Emma\"\n for j in range(0, len(namesList)):\n if (namesList[j].name == 'Emma') and (namesList[j].year == yearNum):\n outputFile.write(str(yearNum) + \" count: \" + str(namesList[j].count))\n\n#close files\ninputFile.close()\noutputFile.close()\n\n \n" } ]
3
vaillant/SataRDM
https://github.com/vaillant/SataRDM
b2455569099ffe30e1cc95bba758cade394895bb
8adf587f1ccf5769dc6885e803c07acb0d1b36d6
ba4a78ae9d3e9e381e75128e78e11f0601ad6597
refs/heads/master
2021-01-22T21:13:24.185566
2012-12-30T19:22:40
2012-12-30T19:22:40
2,212,496
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.5822513103485107, "alphanum_fraction": 0.5899940729141235, "avg_line_length": 42.19587707519531, "blob_id": "e80564d7d3761958fd24f61a865a3bfa7ea1e098", "content_id": "26cfa755eec34b15c604ad9189687073c007a8f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8395, "license_type": "no_license", "max_line_length": 146, "num_lines": 194, "path": "/SataRdm/src/vmksatardm.py", "repo_name": "vaillant/SataRDM", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n'''\nCreated on Jul 30, 2011\n\n@author: Stefan Vaillant\nCopyright Stefan Vaillant, 2011-2012\n\n'''\n\nfrom xml.etree.ElementTree import parse\nfrom optparse import OptionParser\nfrom subprocess import Popen, PIPE\nimport exceptions\nimport os\nimport StringIO\n\nRDM_DIR = \"RDMs\" # in which folder to create the RDMs\nFILE_EXT = \".vmdk\"\nTYPE_MAP = { \"physical\": \"-z\", \"virtual\": \"-r\"} # maps options to vmkfstools option\nGIBIBYTES = 1024 * 1024 * 1024\nVMLPATH = \"/vmfs/devices/disks/\"\n\ndef getValueFor(element, s):\n ''' return v, where <value name=x>v</value> and x==s'''\n result = [ v.text for v in element.getchildren() if v.tag == \"value\" and v.attrib['name'] == s]\n assert len(result) == 1, \"expecting element with <value name='%s'> under %s element\" % (s, element.tag)\n return result[0]\n\ndef stringToIntList(str,maximum):\n # return (error, [int])\n # i.e. 
(None, [1,2]) = stringToIntList(\"1,2\", 2)\n error = None\n result = []\n if str==\"all\":\n result = range(1,maximum+1)\n elif str==\"none\" or str==\"\":\n result = []\n else:\n try:\n result = [int(s) for s in str.split(\",\") ]\n for r in result:\n if not r in range(1,maximum+1):\n error = \"value '%d' out of range (1..%d)\" % (r,maximum)\n except exceptions.ValueError, e:\n error = \"%s\" % (e)\n return (error,result)\n\ndef executeCmd(strs, noprint, noexec):\n result = \"\"\n if not noprint:\n print \" \".join(strs)\n if not noexec:\n if strs[0] == \"cd\":\n os.chdir(strs[1])\n elif strs[0] == \"mkdir\":\n os.mkdir(strs[1])\n else:\n result = Popen(strs)\n return result \n\nclass DiskInfo(object):\n def __init__(self,root,dict):\n self.__dict__.update(dict)\n self.root = root\n def getpreferredpathuid(self):\n e = self.root.find(\"lun/nmp-device-configuration/fixed-path-selection-policy\")\n# result = [ v.text for v in e.iter(\"value\") if v.attrib['name'] == \"preferred-path-uid\" ]\n# assert len(result) == 1, \"expecting element with <value name='preferred-path-uid'> under <lun> element\"\n return getValueFor(e, \"preferred-path-uid\")\n def getpartitiontypes(self):\n parts = self.root.findall(\"partitions/disk-lun-partition\")\n result = [ getValueFor(p, \"partition-type\") for p in parts ]\n return result\n def getDeviceUID(self):\n uids = self.root.findall(\"lun/device-uids/device-uid\")\n result = [ getValueFor(p, \"uid\") for p in uids if getValueFor(p, \"uid\").startswith(\"vml.\")]\n assert len(result) > 0, \"The device must have one UID starting with 'vml.'\"\n return result[0]\n def getRDMFile(self):\n def convert(str):\n return str.replace(\" \", \"\")\n def convert2(str):\n return str.replace(\" \", \"-\",1).replace(\" \",\"\")\n def serialnumber():\n return self.devfspath.split(\"_\")[-1]\n basename = \"%s-%s-%dGB\" % (convert2(self.model), serialnumber(), long(self.size) / GIBIBYTES)\n if os.path.exists(basename + FILE_EXT):\n i = 0\n while 
os.path.exists(basename + \"-\" + str(i) + FILE_EXT): i=i+1\n basename = basename + \"-\" + str(i)\n return basename + FILE_EXT\n def getCmd(self, options):\n #return command to create RDM as string[]\n vmlfile = VMLPATH + self.getDeviceUID()\n # removed, fails always in MacOS test env: assert os.path.exists(vmlfile)\n return [\"vmkfstools\",TYPE_MAP[options.dtype], vmlfile, self.getRDMFile(), \"-a\", options.adapter]\n\ndef getDiskInfo(filename):\n tree = parse(filename)\n luns = tree.findall(\"all-luns/disk-lun\")\n disks = []\n for lun in luns:\n d = dict()\n for v in lun.find(\"lun\").getchildren():\n if v.tag == \"value\":\n d[v.attrib['name'].replace(\"-\",\"\")] = v.text\n disks.append(DiskInfo(lun,d))\n \n chdir = None\n allvmfs = tree.findall(\"vmfs-filesystems/vm-filesystem\")\n if len(allvmfs) > 0:\n chdir = getValueFor(allvmfs[0],\"console-path\")\n return (chdir,disks)\n\ndef main():\n usage = \"usage: %prog [options]\\n Create raw device mapping for (S)ATA disks. \\nExample:\\n cd /vmfs/volumes/datastore1/\\n vmksatardm.py\"\n parser = OptionParser(usage)\n parser.add_option(\"-f\", \"--file\", dest=\"filename\",\n help=\"read storage config from FILENAME instead of using 'esxcfg-info -s -F xml', used mainly for testing\")\n parser.add_option(\"-q\", \"--quiet\", action=\"store_true\", dest=\"quiet\", default=False,\n help=\"Run quiet, do not show information. 
Implies -all\")\n parser.add_option(\"-n\", \"--noexec\", dest=\"noexec\", action=\"store_true\",\n help=\"show only commands, do not execute\")\n parser.add_option(\"-l\", \"--all\", dest=\"all\", action=\"store_true\",\n help=\"create mapping for all disks without asking\")\n parser.add_option(\"-a\", \"--adapter\", dest=\"adapter\", default=\"lsilogic\",\n help=\"adapter type: buslogic|lsilogic|ide [default: %default]\")\n parser.add_option(\"-t\", \"--type\", dest=\"dtype\", default=\"physical\", choices=['physical', 'virtual'], type=\"choice\",\n help=\"type of RDM: virtual|physical [default: %default] \")\n parser.add_option(\"-c\", \"--nochdir\", dest=\"nochdir\", action=\"store_true\",\n help=\"do not change directory to <first vmfs>/RDMs. If you specify this, execute command in correct VMFS folder. \")\n (options, args) = parser.parse_args()\n if len(args) != 0:\n parser.error(\"no argument allowed\")\n assert options.dtype in TYPE_MAP\n if options.quiet:\n options.all = True\n\n ''' Reading configuration'''\n filename = \"\"\n if options.filename:\n if not options.quiet: print \"reading from file %s...\" % options.filename\n filename = options.filename\n else:\n if not options.quiet: print \"reading from file 'esxcfg-info -s'...\"\n if not os.path.exists(\"/sbin/esxcfg-info\"):\n parser.error(\"Cannot find executable '/sbin/esxcfg-info', do you run on ESXi?\")\n content = Popen([\"esxcfg-info\", \"-s\", \"-F\", \"xml\"], stdout=PIPE).communicate()[0]\n filename = StringIO.StringIO(content)\n (chdir,disks) = getDiskInfo(filename)\n if not options.nochdir and chdir == None:\n parser.error(\"Cannot find a VMFS file system for RDM files\")\n if not options.nochdir and not options.quiet:\n print \"RDM folder: \" + chdir + \"/\" + RDM_DIR\n\n ''' Show possible disks (ataDisks)'''\n allAtaDisks = [d for d in disks if d.getpreferredpathuid().startswith(\"ide\") or d.getpreferredpathuid().startswith(\"sata\")]\n ataDisks = [d for d in allAtaDisks if 
d.getpartitiontypes().count(\"251\") == 0]\n if not options.quiet:\n print \"Found %d relevant ATA disks, not relevant:\\n %d other disks,\\n %d ATA disks with VMFS partition\" % (\n len(ataDisks), len(disks)-len(allAtaDisks), len(allAtaDisks)-len(ataDisks))\n for i in range(0,len(ataDisks)):\n disk = ataDisks[i]\n print \"%d) %s -> \" % (i+1, disk.getRDMFile())\n print \" \" + disk.getDeviceUID()\n\n ''' Ask for to-be mapped disks (rdmDisks)''' \n rdmDisks = []\n if options.all:\n rdmDisks = ataDisks \n elif ataDisks != []:\n inputList = [] # list of int's\n error = \"dummy\"\n while error:\n input = raw_input(\"Create Raw Device Mapping for which disks? [1..%d,all,none]\" % len(ataDisks))\n (error,inputList) = stringToIntList(input, len(ataDisks))\n if error: print \"Wrong input, please repeat (%s)\" % error\n rdmDisks = [ataDisks[i-1] for i in inputList]\n \n '''execute commands for disks'''\n if rdmDisks != []:\n if not options.nochdir:\n # optionally create and cd to $chdir/RDMs\n assert os.path.exists(chdir) # the filesystem discovered must exists\n rdmpath = chdir + \"/\" + RDM_DIR\n if not os.path.exists(rdmpath):\n executeCmd([\"mkdir\", rdmpath], options.quiet, options.noexec)\n executeCmd([\"cd\", rdmpath], options.quiet, options.noexec)\n for d in rdmDisks:\n executeCmd( d.getCmd(options), options.quiet, options.noexec)\n exit(0)\n\nif __name__ == \"__main__\":\n main()\n \n\n \n" }, { "alpha_fraction": 0.545488715171814, "alphanum_fraction": 0.6323308348655701, "avg_line_length": 37.565216064453125, "blob_id": "4f17d163af8c8c4a8b3d16cdf6a6a0d44ba54d21", "content_id": "4334bb729f958bf3fa2b74fdf47027ea6e510e1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2660, "license_type": "no_license", "max_line_length": 151, "num_lines": 69, "path": "/SataRdm/test/testvmksatardm.py", "repo_name": "vaillant/SataRDM", "src_encoding": "UTF-8", "text": "'''\nCreated on Jul 30, 2011\n\n@author: 
vaillant\n'''\nimport unittest\nimport vmksatardm\n\nclass Test(unittest.TestCase):\n\n def testGetDiskInfo(self):\n (chdir, disks) = vmksatardm.getDiskInfo(\"storage-1ide.xml\")\n self.assertEquals(chdir, \"/vmfs/volumes/4e3144a9-545ea006-7f1c-000c292e315e\")\n self.assertEqual(len(disks), 5)\n self.assertEqual(disks[0].name,\"mpx.vmhba1:C0:T2:L0\")\n self.assertEqual(disks[0].vendor,\"VMware, \")\n self.assertEqual(disks[0].size,\"4294967296\")\n self.assertEqual(disks[0].getpreferredpathuid(), \"pscsi.vmhba1-pscsi.0:2-mpx.vmhba1:C0:T2:L0\")\n self.assertEqual(disks[0].getpartitiontypes(), [\"0\"])\n \n self.assertEqual(disks[2].getpartitiontypes(), [\"0\", \"5\", \"6\", \"251\", \"4\", \"6\", \"6\", \"252\", \"6\"])\n \n self.assertEqual(disks[3].name, \"t10.ATA_____VMware_Virtual_IDE_Hard_Drive___________00000000000000000001\")\n self.assertEqual(disks[3].size, \"6442450944\")\n self.assertEqual(disks[3].getpreferredpathuid(), \"ide.vmhba0-ide.0:0-t10.ATA_____VMware_Virtual_IDE_Hard_Drive___________00000000000000000001\")\n self.assertEqual(disks[3].getpartitiontypes(), [\"0\"])\n self.assertEqual(disks[3].getpartitiontypes().count(\"251\"), 0)\n self.assertEqual(disks[3].getDeviceUID(), \"vml.01000000003030303030303030303030303030303030303031564d77617265\")\n \n def testStringToIntList(self):\n\n\n (error, list) = vmksatardm.stringToIntList(\"a\",1)\n self.assertNotEqual(error, None)\n \n (error, list) = vmksatardm.stringToIntList(\"0\",1)\n self.assertNotEqual(error, None)\n\n (error, list) = vmksatardm.stringToIntList(\"1,2,3\",2)\n self.assertNotEqual(error, None)\n\n (error, list) = vmksatardm.stringToIntList(\"\",1)\n self.assertEqual(error, None)\n self.assertEqual(list, [])\n \n (error, list) = vmksatardm.stringToIntList(\"none\",1)\n self.assertEqual(error, None)\n self.assertEqual(list, [])\n \n (error, list) = vmksatardm.stringToIntList(\"all\",1)\n self.assertEqual(error, None)\n self.assertEqual(list, [1])\n\n (error, list) = 
vmksatardm.stringToIntList(\"all\",4)\n self.assertEqual(error, None)\n self.assertEqual(list, [1,2,3,4])\n\n (error, list) = vmksatardm.stringToIntList(\"1,2,3,4\",4)\n self.assertEqual(error, None)\n self.assertEqual(list, [1,2,3,4])\n\n (error, list) = vmksatardm.stringToIntList(\"1,4\",4)\n self.assertEqual(error, None)\n self.assertEqual(list, [1,4])\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testGetDiskInfo']\n unittest.main()" } ]
2
djkemmet/Central-Inventory-Agent
https://github.com/djkemmet/Central-Inventory-Agent
58bc52cf47fa0b1eb0ecbbfacaf51993c40a53df
9a35fa31dd92ffd316bad79ea798346ecad59beb
b53b0550d980b3d852406d43b6c19d694e125294
refs/heads/master
2022-07-31T12:56:23.617835
2020-05-23T20:01:16
2020-05-23T20:01:16
266,409,208
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7461773753166199, "alphanum_fraction": 0.7635066509246826, "avg_line_length": 48.099998474121094, "blob_id": "61253f5cf0371e46b9f7bac4b7ab8947b4097002", "content_id": "657a707a9d40839053292ab0bb8ca0d77dd293b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 981, "license_type": "no_license", "max_line_length": 119, "num_lines": 20, "path": "/readme.md", "repo_name": "djkemmet/Central-Inventory-Agent", "src_encoding": "UTF-8", "text": "# Custom Inventory Agent\nThis custom inventory agent was built to collect the necessary information to migrate a device from airwave and other \nmanagment tools into aruba central. Given the complexity of and lack of documentation for SNMP in aruba products, this \ntool was written to collect only the necessary information. However, future iterations of this tool will likely accept \na list of OIDs and process them into a CSV simliar to what this tool currently puts out.\n\n## What does this tool collect?\nThis tool collects the following information and stores them as a string to \na CSV file for further consumption\n1. Hostname\n * If a system did not have it's host name defined, This ool acknowledges the\n default hostname and makes the user aware of this issue in the inventory\n2. Controller Model\n3. Software Version\n * Supported Platforms include:\n * ArubaOS 7.4.0.1\n * ArubaOS 7.4.1.1\n * ArubaOS 8.1.0.1\n4. System Serial\n5. 
System MAC address" }, { "alpha_fraction": 0.5347582101821899, "alphanum_fraction": 0.557498574256897, "avg_line_length": 40.48358154296875, "blob_id": "515d886ea309411cee046a1cf1dd178d500e7ed3", "content_id": "b77499751c406654c01dc3680cbcf29d1544a53d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13896, "license_type": "no_license", "max_line_length": 118, "num_lines": 335, "path": "/main.py", "repo_name": "djkemmet/Central-Inventory-Agent", "src_encoding": "UTF-8", "text": "#\n# AUTHOR: DJ Kemmet, [email protected]\n# DATE: 8/14/17\n# REQUIREMENTS: Python 3.4 or greater, Python Standard Library.\n# PURPOSE: The Purpose of this script is to collect the necessary data-points required to import a site into Central.\n# CURRENT VERSION: Milestone 0.5\n#\n\nimport csv\nimport logging\nimport os\n\n\n# Set up global variables\nlogging. basicConfig(filename='error.log', level=logging.WARNING)\nerrors_to_csv = ['Site Marked for retry', 'Site marked for retry', 'Site marked for retry']\n\n\n# Make sure the inventory file where the results go exists, if not, create it and verify it.\ndef verify_supporting_files():\n\n # Check for and create inventory.csv if necessary\n if os.path.lexists('inventory.csv'):\n print('Inventory Manifest Found. ')\n else:\n with open('inventory.csv', 'a+') as results_file:\n results_file.close()\n print('Inventory Manifest created.')\n\n # Check for and create error.log if necessary.\n if os.path.lexists('error.log'):\n print('Error Log Found.')\n else:\n with open('error.log', 'a+') as log_file:\n log_file.close()\n print('Error Log Created')\n\n # Check for and create retries.csv if necessary.\n if os.path.lexists('retries.csv'):\n print(\"Previous retry queue was discovered. 
Something should be done about that.\")\n else:\n with open('retries.csv', 'a+') as retry_file:\n retry_file.close()\n print('Retry log created.')\n\n # Check for and create Completed directory if necessary.\n if os.path.lexists('Completed/'):\n pass\n else:\n os.system('mkdir Completed')\n\n\n# Inventories the Controller at a given store.\ndef inventory_controller(site_ip):\n\n print(\"inventorying site: \" + site_ip)\n try:\n # Facilitates Gathering the Hostname, Software Version, and Controller Model.\n simfile_system = os.popen('snmpwalk -v 2c -c nhB7S@wX ' + site_ip + ' system')\n simfile_system_lines = simfile_system.readlines()\n\n # For each line of output from the host device, Check for the system string\n for line in simfile_system_lines:\n # Gather Model and Software Version.\n if \"sysDescr\" in line:\n system_string = line\n pass1 = str.split(system_string, '=')\n pass2 = str.split(pass1[1], ',')\n pass3 = str.split(pass2[0], ':')\n controller = ((pass3[2]).replace(\")\", \"\")).strip()\n software_root_version = (pass2[1]).strip()\n\n # Gather Hostname.\n if \"sysName\" in line:\n if 'Aruba' in line:\n print('This host was not named.')\n hostname = 'System Not Named: ' + site_ip\n break\n else:\n dirty_sys_line = line\n pass1 = str.split(dirty_sys_line, '=')\n pass2 = str.split(pass1[1], ':')\n hostname = ((pass2[1]).replace(\"\\n\", '')).strip()\n break\n\n # Facilitates Gathering the System Serial Number.\n software_root_version_split = str.split(software_root_version, \" \")\n software_string = software_root_version_split[1].strip()\n if \"7.4.0.1\" in software_string:\n serial_request = os.popen('snmpwalk -v 2c -c nhB7S@wX ' + site_ip + ' .1.3.6.1.2.1.47.1.1.1.1.11.1001')\n for line in serial_request.readlines():\n if \"STRING\" in line:\n dirty_serial_line = line\n pass1 = str.split(dirty_serial_line, ' ')\n pass2 = pass1[3]\n serial = str.replace(pass2, '\"', '').strip()\n\n if \"7.4.0.4\" in software_string:\n serial_request = os.popen('snmpwalk 
-v 2c -c nhB7S@wX ' + site_ip + ' .1.3.6.1.2.1.47.1.1.1.1.11.1001')\n for line in serial_request.readlines():\n if \"STRING\" in line:\n dirty_serial_line = line\n pass1 = str.split(dirty_serial_line, ' ')\n pass2 = pass1[3]\n serial = str.replace(pass2, '\"', '').strip()\n\n elif \"7.4.1.1\" in software_string:\n serial_request = os.popen('snmpwalk -v 2c -c nhB7S@wX ' + site_ip + ' .1.3.6.1.2.1.47.1.1.1.1.11.1001')\n for line in serial_request.readlines():\n if \"STRING\" in line:\n dirty_serial_line = line\n pass1 = str.split(dirty_serial_line, ' ')\n pass2 = pass1[3]\n serial = str.replace(pass2, '\"', '').strip()\n\n elif \"8.1.0.1\" in software_string:\n serial_request = os.popen('snmpwalk -v 2c -c nhB7S@wX ' + site_ip + ' .1.3.6.1.4.1.14823.2.2.1.1.1.12')\n for line in serial_request.readlines():\n if \"STRING\" in line:\n dirty_serial_line = line\n pass1 = str.split(dirty_serial_line, ' ')\n pass2 = pass1[3]\n pass3 = str.replace(pass2, '\"', '').strip()\n serial = pass3\n\n MAC_request = os.popen('snmpwalk -v 2c -c nhB7S@wX ' + site_ip + ' .1.3.6.1.4.1.14823.2.2.1.2.1.7')\n for line in MAC_request.readlines():\n if \"STRING\" in line:\n dirty_mac_line = line\n pass1 = str.split(dirty_mac_line, '=')\n pass2 = pass1[1]\n pass2 = str.split(pass2, ':')\n pass3 = str.strip(pass2[1], ' ')\n MAC = str.strip(str.strip(pass3, '\\n'), ' ')\n\n data_gathered = [hostname, controller, software_root_version, serial, MAC]\n with open('inventory.csv', \"a\", newline=\"\") as inventory_file:\n file_writer = csv.writer(inventory_file, quoting=csv.QUOTE_ALL)\n file_writer.writerow(data_gathered)\n inventory_file.close()\n\n print(hostname + \" has been inventoried.\")\n\n # Define Exceptions for the code above.\n except IndexError:\n logging.info(hostname + \" Could not collect requested SNMP Objects.\")\n with open('inventory.csv', \"a\", newline=\"\") as inventory_file:\n file_writer = csv.writer(inventory_file, quoting=csv.QUOTE_ALL)\n file_writer.write(errors_to_csv)\n 
inventory_file.close()\n logging.debug(site_ip + ' could not be reliably inventoried')\n inventory_file.close()\n exit()\n\n with open('retries.csv', \"a\", newline=\"\") as retries_file:\n retry_writer = csv.writer(retries_file, quoting=csv.QUOTE_ALL)\n retry_site = [site_ip]\n retry_writer.writerow(retry_site)\n retries_file.close()\n\n except TimeoutError:\n logging.info(hostname + \"is either unavailable or removed from service.\")\n with open('inventory.csv', \"a\", newline=\"\") as inventory_file:\n file_writer = csv.writer(inventory_file, quoting=csv.QUOTE_ALL)\n file_writer.write(errors_to_csv)\n inventory_file.close()\n logging.debug(site_ip + ' site did not respond to snmp query')\n with open('retries.csv', \"a\", newline=\"\") as retries_file:\n retry_writer = csv.writer(retries_file, quoting=csv.QUOTE_ALL)\n retry_site = [site_ip]\n retry_writer.writerow(retry_site)\n retries_file.close()\n\n except KeyboardInterrupt:\n logging.debug('Inventory interrupted by Keystroke. Removing Inventory file.')\n # os.system('rm -rf inventory.csv')\n exit()\n\n except UnboundLocalError:\n print(\"General Exception has occurred.\")\n with open('inventory.csv', \"a\", newline=\"\") as inventory_file:\n print(\"Inventory Stopped By User Key Stroke. Deleting Inventory Manifest.\")\n file_writer = csv.writer(inventory_file, quoting=csv.QUOTE_ALL)\n file_writer.writerow(errors_to_csv)\n inventory_file.close()\n logging.debug(site_ip + ' encountered a general exception occurred.')\n with open('retries.csv', \"a\", newline=\"\") as retries_file:\n retry_writer = csv.writer(retries_file, quoting=csv.QUOTE_ALL)\n retry_site =[site_ip]\n retry_writer.writerow(retry_site)\n retries_file.close()\n\n\n# Inventories the AP Cluster at a given store.\ndef inventory_APs(site_ip):\n #\n # CLUSTER SOFTWARE VERSION. 
Results in a single string\n #\n cluster_software_version = os.popen('snmpwalk -v 2c -c nhB7S@wX ' + site_ip + ' .1.3.6.1.4.1.14823.2.3.3.1.1.4.0')\n results = cluster_software_version.readlines()\n for line in results:\n pass1 = line.split(\"STRING:\")\n pass2 = pass1[1].strip()\n final_cluster_software_version = str.replace(pass2, '\"', \"\")\n\n\n #\n # CLUSTER NAME. Results in a single string\n #\n cluster_name = os.popen('snmpwalk -v 2c -c nhB7S@wX ' + site_ip + ' .1.3.6.1.4.1.14823.2.3.3.1.1.2.0')\n for line in cluster_name.readlines():\n pass1 = line.split(\"STRING:\")\n pass2 = pass1[1].strip()\n final_cluster_name = pass2.replace('\"', '')\n\n #\n # MEMBER AP MAC ADDRESSES. returns a list of mac addresses, one of each AP in the cluster.\n #\n membermac_list = []\n member_mac_addresses = os.popen('snmpwalk -v 2c -c nhB7S@wX ' + site_ip + ' .1.3.6.1.4.1.14823.2.3.3.1.2.1.1.1')\n for line in member_mac_addresses.readlines():\n pass1 = line.split(\"STRING:\")\n pass2 = pass1[1].strip()\n final_member_mac_address = pass2.replace(\" \", \"\")\n membermac_list.append(final_member_mac_address)\n\n #\n # MEMBER AP SERIAL NUMBERS. returns a list of serials, one of each AP in the cluster.\n #\n memberserial_list = []\n member_serial_numbers = os.popen('snmpwalk -v 2c -c nhB7S@wX ' + site_ip + ' .1.3.6.1.4.1.14823.2.3.3.1.2.1.1.4')\n for line in member_serial_numbers.readlines():\n pass1 = line.split(\"STRING: \")\n pass2 = pass1[1].replace('\"', '')\n final_member_serial_number = pass2.strip()\n memberserial_list.append(final_member_serial_number)\n\n #\n # MEMBER AP MODEL NUMBERS. 
returns a list of model numbers, one of each AP in the cluster.\n #\n membermodel_list = []\n member_model_numbers = os.popen('snmpwalk -v 2c -c nhB7S@wX ' + site_ip + ' .1.3.6.1.4.1.14823.2.3.3.1.2.1.1.6')\n for line in member_model_numbers.readlines():\n pass1 = line.split(\"STRING:\")\n pass2 = pass1[1].replace('\"', '')\n final_member_model_number = pass2.strip()\n membermodel_list.append(final_member_model_number)\n\n\n # Get the length of each list:\n if len(membermac_list) == len(memberserial_list):\n if len(membermac_list) == len(membermodel_list):\n for entry in range(0, len(membermac_list)):\n name = final_cluster_name\n software = final_cluster_software_version\n model = membermac_list[entry]\n mac = membermac_list[entry]\n serial = memberserial_list[entry]\n data_gathered = [name, software, model, mac, serial]\n with open('inventory.csv', \"a\", newline=\"\") as inventory_file:\n file_writer = csv.writer(inventory_file, quoting=csv.QUOTE_ALL)\n file_writer.writerow(data_gathered)\n inventory_file.close()\n print(\"wrote \"+ name + \" \" + software + \" \" + model + \" \" + mac + \" \" + serial + \" \" + serial)\n inventory_file.close()\n else:\n print(\"Cannot reliably inventory \" + cluster_name + \". 
It appears that one or more APs did not respond to \"\n \"an SNMP query \")\n\n\n# Converts a Site Address to a Controller address and returns it to the calling function\ndef check_ip(site_ip):\n site_ip = (str.replace(site_ip, '\"', ''))\n new_ip = ''\n if \"\\n\" in site_ip:\n site_ip = site_ip.replace('\\n', \"\")\n site_octets = str(site_ip).split(\".\")\n if site_octets[3] is not \"1\":\n site_octets[3] = \"1\"\n for octet in site_octets:\n if octet is not \"1\":\n new_ip += octet + \".\"\n else:\n new_ip += octet\n site_ip = new_ip\n else:\n site_ip = site_ip\n return site_ip\n\n\n# Converts a Site Address to a VC address and returns it to the calling function\ndef make_ap_ip(site_ip):\n site_ip = (str.replace(site_ip, '\"', ''))\n new_ip = ''\n if \"\\n\" in site_ip:\n site_ip = str.replace(site_ip, \"\\n\", '')\n octets = str(site_ip).split(\".\")\n if octets[3] is not \"2\":\n octets[3] = \"2\"\n for octet in octets:\n if octet is not \"2\":\n new_ip += octet + \".\"\n else:\n new_ip += octet\n else:\n new_ip = site_ip\n return new_ip\n\n\n# The Sauce of this tool.\ndef main():\n verify_supporting_files()\n for file in os.listdir('.'):\n if '.csv' in file:\n verify_supporting_files() with open(file, 'r') as list_file:\n reader = list_file.readlines()\n for site in reader:\n inventory_controller(check_ip(site))\n inventory_APs(make_ap_ip(site))\n\n if os.path.lexists('retries.csv') is True:\n with open('retries.csv', 'r') as retries:\n reader = retries.readlines()\n for site in reader:\n inventory_controller(check_ip(site))\n inventory_APs(make_ap_ip(site))\n\n job_name = str(file.split(\".\")[0])\n os.system('mkdir Completed/' + job_name)\n\n # Move inventory.csv, error.log, retries.csv the CSV itself to this directory\n os.system('mv inventory.csv error.log retries.csv ' + job_name + '.csv' + ' Completed/' + job_name)\n\n\nmain()" } ]
2
fanout/headline
https://github.com/fanout/headline
db7562bc4e44c6d77e92080e3418af984c220420
c8d52ff36180d46a6e103fef219ae653c89e93d6
50018ca594d790d633e0d50c49e7bbc6613e504c
refs/heads/master
2023-05-24T18:37:00.210681
2019-12-07T19:45:32
2019-12-07T19:45:32
25,950,623
6
2
null
2014-10-30T02:09:58
2019-12-07T19:45:35
2023-05-22T22:27:53
Python
[ { "alpha_fraction": 0.7731958627700806, "alphanum_fraction": 0.7731958627700806, "avg_line_length": 18.399999618530273, "blob_id": "43bf75b3368ca2f0e5a75a1bcf6c2c7d074f83f3", "content_id": "d27ae38790e03d96ac53c199c43c540eb9206787", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/headlineapp/apps.py", "repo_name": "fanout/headline", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass HeadlineappConfig(AppConfig):\n name = 'headlineapp'\n" }, { "alpha_fraction": 0.7035040259361267, "alphanum_fraction": 0.7169811129570007, "avg_line_length": 13.84000015258789, "blob_id": "1ee8c07155b22cb415e04171ecf6708f635e79f0", "content_id": "d3e6f096722ae85542a702fd4287a5517c479bb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 371, "license_type": "no_license", "max_line_length": 71, "num_lines": 25, "path": "/README.md", "repo_name": "fanout/headline", "src_encoding": "UTF-8", "text": "# Headline\n\nHeadline API example for Python/Django.\n\n## Setup\n\nCreate virtualenv and install dependencies:\n\n```sh\nvirtualenv --python=python3 venv\n. 
venv/bin/activate\npip install -r requirements.txt\n```\n\nCreate a `.env` file in the base directory containing `GRIP_URL`, e.g.:\n\n```\nGRIP_URL=http://localhost:5561\n```\n\nRun the server:\n\n```sh\npython manage.py runserver\n```\n" }, { "alpha_fraction": 0.6402438879013062, "alphanum_fraction": 0.6402438879013062, "avg_line_length": 22.428571701049805, "blob_id": "048f445f77d1f6ebe92a5e424738c6f3cce54a11", "content_id": "765cb6a2501fe6e38a989953ae47fce51d5fd81c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 56, "num_lines": 7, "path": "/headlineapp/urls.py", "repo_name": "fanout/headline", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.base, name='base'),\n path('<int:headline_id>/', views.item, name='item'),\n]\n" }, { "alpha_fraction": 0.5530035495758057, "alphanum_fraction": 0.5671378374099731, "avg_line_length": 27.299999237060547, "blob_id": "d7c2c291c279bee27b2afe899eb81fdbe0db27bf", "content_id": "124fbccf60f926572b7b18bb89dc614dbc3b6dca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "no_license", "max_line_length": 54, "num_lines": 20, "path": "/headlineapp/models.py", "repo_name": "fanout/headline", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass Headline(models.Model):\n type = models.CharField(max_length=64)\n title = models.CharField(max_length=200)\n text = models.TextField()\n date = models.DateTimeField(auto_now=True)\n\n def to_data(self):\n out = {}\n out['id'] = str(self.id)\n out['type'] = self.type\n if self.title:\n out['title'] = self.title\n out['date'] = self.date.isoformat()\n out['text'] = self.text\n return out\n\n def __str__(self):\n return '%s: %s' % (self.type, self.text[:100])\n" }, { "alpha_fraction": 0.567531168460846, 
"alphanum_fraction": 0.5725833773612976, "avg_line_length": 33.12643814086914, "blob_id": "9493b15dc818e58f7d447437c93b75c796f5bf14", "content_id": "63ee1f8e79a0d925cbba4f0901273cb2a9304fab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2969, "license_type": "no_license", "max_line_length": 79, "num_lines": 87, "path": "/headlineapp/views.py", "repo_name": "fanout/headline", "src_encoding": "UTF-8", "text": "import json\nimport calendar\nfrom django.http import HttpResponse, HttpResponseRedirect, \\\n HttpResponseNotModified, HttpResponseNotAllowed\nfrom django.shortcuts import get_object_or_404\nfrom gripcontrol import HttpResponseFormat, HttpStreamFormat, \\\n WebSocketMessageFormat\nfrom django_grip import set_hold_longpoll, set_hold_stream, publish\nfrom headlineapp.models import Headline\n\ndef _json_response(data):\n body = json.dumps(data, indent=4) + '\\n' # pretty print\n return HttpResponse(body, content_type='application/json')\n\ndef base(request):\n if request.method == 'POST':\n h = Headline(type='none', title='', text='')\n h.save()\n return _json_response(h.to_data())\n else:\n return HttpResponseNotAllowed(['POST'])\n\ndef item(request, headline_id):\n h = get_object_or_404(Headline, pk=headline_id)\n\n hchannel = str(headline_id)\n\n if request.wscontext:\n ws = request.wscontext\n if ws.is_opening():\n ws.accept()\n ws.subscribe(hchannel)\n while ws.can_recv():\n message = ws.recv()\n if message is None:\n ws.close()\n break\n return HttpResponse()\n elif request.method == 'GET':\n if request.META.get('HTTP_ACCEPT') == 'text/event-stream':\n resp = HttpResponse(content_type='text/event-stream')\n set_hold_stream(request, hchannel)\n return resp\n else:\n wait = request.META.get('HTTP_WAIT')\n if wait:\n wait = int(wait)\n if wait < 1:\n wait = None\n if wait > 300:\n wait = 300\n inm = request.META.get('HTTP_IF_NONE_MATCH')\n etag = '\"%s\"' % calendar.timegm(h.date.utctimetuple())\n if 
inm == etag:\n resp = HttpResponseNotModified()\n if wait:\n set_hold_longpoll(request, hchannel, timeout=wait)\n else:\n resp = _json_response(h.to_data())\n resp['ETag'] = etag\n return resp\n elif request.method == 'PUT':\n hdata = json.loads(request.read())\n\n h.type = hdata['type']\n h.title = hdata.get('title', '')\n h.text = hdata.get('text', '')\n h.save()\n hdata = h.to_data()\n\n hjson = json.dumps(hdata)\n etag = '\"%s\"' % calendar.timegm(h.date.utctimetuple())\n rheaders = {'Content-Type': 'application/json', 'ETag': etag}\n hpretty = json.dumps(hdata, indent=4) + '\\n'\n\n formats = []\n formats.append(HttpResponseFormat(body=hpretty, headers=rheaders))\n formats.append(HttpStreamFormat('event: update\\ndata: %s\\n\\n' % hjson))\n formats.append(WebSocketMessageFormat(hjson))\n\n publish(hchannel, formats)\n\n resp = _json_response(hdata)\n resp['ETag'] = etag\n return resp\n else:\n return HttpResponseNotAllowed(['GET', 'PUT'])\n" }, { "alpha_fraction": 0.4612903296947479, "alphanum_fraction": 0.6838709712028503, "avg_line_length": 15.315789222717285, "blob_id": "a0f0c55c63a6a9f63ccab81dc56f2fc68f60ba24", "content_id": "a455ee41694683ddaf530e2c7b8490b817733746", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 310, "license_type": "no_license", "max_line_length": 22, "num_lines": 19, "path": "/requirements.txt", "repo_name": "fanout/headline", "src_encoding": "UTF-8", "text": "certifi==2018.4.16\nchardet==3.0.4\ndj-database-url==0.5.0\ndj-static==0.0.6\nDjango==2.2.8\ndjango-dotenv==1.4.2\ndjango-grip==1.8.0\ngripcontrol==3.2.1\ngunicorn==19.9.0\nidna==2.7\npsycopg2==2.8.2\npubcontrol==2.4.2\nPyJWT==1.6.4\npytz==2018.5\nrequests==2.22.0\nsix==1.11.0\nsqlparse==0.3.0\nstatic3==0.7.0\nurllib3==1.24.2\n" } ]
6
ufeindt/simsurvey-paper-scripts
https://github.com/ufeindt/simsurvey-paper-scripts
1fa7e04c3d3d3731b73ed58457083c5150fe58e0
d1f09d60172d9f30b2659d3874e697320175fc6b
dc56c3afd5461f730a9b759ee7873f8dadf4b8ff
refs/heads/master
2020-04-20T01:24:46.618008
2020-03-28T21:15:36
2020-03-28T21:15:36
168,544,721
3
1
null
null
null
null
null
[ { "alpha_fraction": 0.5059621334075928, "alphanum_fraction": 0.5251344442367554, "avg_line_length": 36.19130325317383, "blob_id": "da6d05579999b20eb426b3438d68234e7684e9ef", "content_id": "ee6981ad3678a454bdb531dea30df57b86b1e8c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4277, "license_type": "no_license", "max_line_length": 103, "num_lines": 115, "path": "/run_sim.py", "repo_name": "ufeindt/simsurvey-paper-scripts", "src_encoding": "UTF-8", "text": "import os\nimport sys\nfrom argparse import ArgumentParser\n\nimport numpy as np\n#from astropy.cosmology import Planck15\nfrom astropy.table import Table\nimport simsurvey\nimport sncosmo\n\n_RATES = {'Ia': 3e-5, 'Ibc': 2.25e-5, 'IIn': 7.5e-6, 'IIP': 1.2e-4}\n\ndef load_fields_ccd(fields_file='ztf_fields.txt', ccd_file='ztf_ccd_corners.txt'):\n fields_raw = np.genfromtxt(fields_file, comments='%')\n\n fields = {'field_id': np.array(fields_raw[:,0], dtype=int),\n 'ra': fields_raw[:,1],\n 'dec': fields_raw[:,2]}\n\n ccd_corners = np.genfromtxt(ccd_file, skip_header=1)\n ccds = [ccd_corners[np.array([0,1,3,2])+4*k, :2] for k in range(16)]\n\n return fields, ccds\n \ndef load_ztf_bands(bandpass_dir=''):\n bands = {\n 'ztfg' : 'ztfg_eff.txt',\n 'ztfr' : 'ztfr_eff.txt',\n 'ztfi' : 'ztfi_eff.txt',\n }\n\n for bandname in bands.keys() :\n fname = bands[bandname]\n b = np.loadtxt(os.path.join(bandpass_dir,fname))\n band = sncosmo.Bandpass(b[:,0], b[:,1], name=bandname)\n sncosmo.registry.register(band, force=True)\n\ndef main():\n if 'SFD_DIR' not in os.environ.keys():\n raise ValueError('Please set $SFD_DIR to where you downloaded the SFD dust maps.')\n \n parser = ArgumentParser(description='Run lightcurve simulations for ZTF')\n parser.add_argument('plan', type=str,\n help='CSV file containing survey plan')\n parser.add_argument('transient', type=str,\n help='Transient type (e.g. 
\"Ia\" for SNe Ia)')\n parser.add_argument('template', type=str,\n help='Transient template (e.g. \"salt2\" or \"nugent\")')\n parser.add_argument('-z', '--redshift', default=None, nargs=2,\n help='redshift boundaries', type=float)\n parser.add_argument('--no-weather', action='store_true',\n help='do not apply weather loss')\n \n \n args = parser.parse_args()\n\n if args.redshift is None:\n args.redshift = (0, 0.2)\n\n obs = Table.read(args.plan, format='ascii.csv')\n fields, ccds = load_fields_ccd()\n load_ztf_bands()\n \n plan = simsurvey.SurveyPlan(time=obs['time'], band=obs['band'], obs_field=obs['field'],\n skynoise=obs['skynoise'], comment=obs['comment'],\n fields={k: v for k, v in fields.items()\n if k in ['ra', 'dec', 'field_id',\n 'width', 'height']},\n ccds=ccds)\n\n mjd_range = (plan.pointings['time'].min(), plan.pointings['time'].max())\n ra_range = (0, 360)\n dec_range = (-40, 90)\n\n tr = simsurvey.get_transient_generator((args.redshift[0], args.redshift[1]),\n ratefunc=(lambda z: _RATES[args.transient]),\n transient=args.transient, \n template=args.template,\n ra_range=ra_range,\n dec_range=dec_range,\n mjd_range=(mjd_range[0],\n mjd_range[1]))\n\n survey = simsurvey.SimulSurvey(generator=tr, plan=plan)\n\n lcs = survey.get_lightcurves(progress_bar=True)\n\n if not os.path.exists('lcs'):\n os.makedirs('lcs')\n\n prev_files = [fn for fn in os.listdir('lcs')\n if fn.startswith('lcs_%s_%s'%(args.transient, args.template))]\n if len(prev_files) > 0:\n k = max([int(fn.split('.')[0].split('_')[-1]) for fn in prev_files]) + 1\n else:\n k = 0\n \n outfile = 'lcs/lcs_%s_%s_%06i.pkl'%(args.transient, args.template, k)\n\n if not args.no_weather:\n real_nights = np.genfromtxt('hours_per_obsnight.dat')\n idx = np.concatenate((range(31,365), range(31)))\n rn_2016 = np.where(real_nights[idx, -1] > 3.5)[0] + 2458151\n\n def filterfunc(lc):\n mask = np.array([(int(t_) in rn_2016) and (t_ < lc.meta['t0'] + 100) for t_ in lc['time']])\n \n return lc[mask]\n\n lcs = 
lcs.filter(filterfunc)\n \n lcs.save(outfile)\n \nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7394191026687622, "alphanum_fraction": 0.7551867365837097, "avg_line_length": 43.62963104248047, "blob_id": "d06c96f8b2772a974168a311fba0555d1d1c3bd9", "content_id": "d137c98b7f9dc26bdc8ff7856c423dc25117515f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1205, "license_type": "no_license", "max_line_length": 265, "num_lines": 27, "path": "/README.md", "repo_name": "ufeindt/simsurvey-paper-scripts", "src_encoding": "UTF-8", "text": "# simsurvey-paper-scripts\nScripts and files required to reproduce the ZTF lightcurve simulations presented in [Feindt et al. (2019)](https://arxiv.org/pdf/1902.03923.pdf)\n\nRequirements\n------------\n\n- Install `simsurvey` and its required packages. The paper used `simsurvey v0.4.4`. Default settings in other versions may differ.\n- Download the data for `sfdmap` from https://github.com/kbarbary/sfddata\n- Set `$SFD_DIR` to the `sfddata` download path\n\nUsage Example\n-------------\n\n```\npython run_sim.py plan_sim_paper.csv Ia salt2\n```\n\nThe last two arguments can be replaced by the other supernova types simulated in the paper and further options are available, see `python run_sim_paper.py -h`.\n\nThe script will save the output in the directory `lcs` and give it a file name that contains SN type and template. New runs with the same type and template will be save with an incremented number at the end. (Note that each file will be several hundred MB in size.)\n\nTo load the output, use the following lines:\n```\nfrom simsurvey import LightcurveCollection\nlcs = LightcurveCollection(load='/path/to/file')\n```\nNote that output files generated using Python 3 cannot be loaded with Python 2 and vice versa.\n" } ]
2
Carter4502/hangman.py
https://github.com/Carter4502/hangman.py
ab1d4c75ea0994951ac4072e8dbfe53cee07c8d1
ecab8fcfdfaacc9fe63a0e230f1aff9dd60c2191
820b12e04cc9a1b6b88b155d1a88ce987150f5d5
refs/heads/master
2020-04-04T18:19:45.797804
2018-11-05T04:08:51
2018-11-05T04:08:51
156,159,212
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7878788113594055, "alphanum_fraction": 0.7878788113594055, "avg_line_length": 32, "blob_id": "1dcdea5cd8decf32fd60fa6c2be6c9cdd1b5fc82", "content_id": "43f4510a70e0a7c239bfee76473ed6466cf03baa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 66, "license_type": "no_license", "max_line_length": 52, "num_lines": 2, "path": "/README.md", "repo_name": "Carter4502/hangman.py", "src_encoding": "UTF-8", "text": "# hangman.py\nThis is a very simple hangman game created in Python\n" }, { "alpha_fraction": 0.6274768710136414, "alphanum_fraction": 0.649933934211731, "avg_line_length": 31.223403930664062, "blob_id": "e5f77a5b0623d4d94340bc86846a917bc1a12de0", "content_id": "60b774328381b8d00ed47f548a06f4271be8ba7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3028, "license_type": "no_license", "max_line_length": 99, "num_lines": 94, "path": "/hangman.py", "repo_name": "Carter4502/hangman.py", "src_encoding": "UTF-8", "text": "import time\nimport math\nstart = time.time()\nprint(\"WELCOME TO HANGMAN!!\")\nprint(\"What would you like your word to be?\")\nogword = input(\"input your word here: \")\nword = ogword.lower()\nprint(\"Ok now give it to the player tryna guess ur word.\")\ntime.sleep(5)\nlength = str(len(word))\nprint(\"Before we get started... the word given is \" + length + \" letters long. (including spaces)\")\ntime.sleep(2)\nprint(\"You will get 12 guesses before you must guess the full word.\")\ntime.sleep(1)\nprint(\"Now.. 
take a guess and i will tell you if the letter is in the word.\")\ng1 = input(\"first guess: \")\nif g1 in word:\n\tposition = str(word.index(g1) + 1)\n\n\tprint(\"That is letter number \" + position + \" in the \" + length + \" letter long word.\")\nelse:\n\tprint(\"That is not part of the word...\")\ng2 = input(\"second guess: \")\nif g2 in word:\n\tposition2 = str(word.index(g2) + 1)\n\n\tprint(\"That is letter number \" + position2 + \" in the \" + length + \" letter long word.\")\nelse:\n\tprint(\"That is not part of the word...\")\ng3 = input(\"third guess: \")\nif g3 in word:\n\tposition3 = str(word.index(g3) + 1)\n\n\tprint(\"That is letter number \" + position3 + \" in the \" + length + \" letter long word.\")\nelse:\n\tprint(\"That is not part of the word...\")\ng4 = input(\"fourth guess: \")\nif g4 in word:\n\tposition4 = str(word.index(g4) + 1)\n\n\tprint(\"That is letter number \" + position4 + \" in the \" + length + \" letter long word.\")\nelse:\n\tprint(\"That is not part of the word...\")\ng5 = input(\"fifth guess: \")\nif g5 in word:\n\tposition5 = str(word.index(g5) + 1)\n\n\tprint(\"That is letter number \" + position5 + \" in the \" + length + \" letter long word.\")\nelse:\n\tprint(\"That is not part of the word...\")\ng6 = input(\"sixth guess: \")\nif g6 in word:\n\tposition6 = str(word.index(g6) + 1)\n\n\tprint(\"That is letter number \" + position6 + \" in the \" + length + \" letter long word.\")\nelse:\n\tprint(\"That is not part of the word...\")\ng7 = input(\"seventh guess: \")\nif g7 in word:\n\tposition7 = str(word.index(g7) + 1)\n\n\tprint(\"That is letter number \" + position7 + \" in the \" + length + \" letter long word.\")\nelse:\n\tprint(\"That is not part of the word...\")\ng8 = input(\"eighth guess: \")\nif g8 in word:\n\tposition8 = str(word.index(g8) + 1)\n\n\tprint(\"That is letter number \" + position8 + \" in the \" + length + \" letter long word.\")\nelse:\n\tprint(\"That is not part of the word...\")\ng9 = 
input(\"nineth guess: \")\nif g9 in word:\n\tposition9 = str(word.index(g9) + 1)\n\n\tprint(\"That is letter number \" + position9 + \" in the \" + length + \" letter long word.\")\nelse:\n\tprint(\"That is not part of the word...\")\ng10 = input(\"LAST guess: \")\nif g10 in word:\n\tposition10 = str(word.index(g10) + 1)\n\n\tprint(\"That is letter number \" + position10 + \" in the \" + length + \" letter long word.\")\nelse:\n\tprint(\"That is not part of the word...\")\nprint(\"Now, take your guess on what you think the word is.\")\nguess = input(\"Guess: \").lower()\nend = time.time()\nif guess == word:\n\ttime = end - start\n\tprint(\"YOU WIN!\")\n\tprint(\"It took \" + str(end - start) + \" seconds for you to guess the word.\")\nelse:\n\tprint(\"You lose :/\")" } ]
2
sorrow4468/BAEKJOON
https://github.com/sorrow4468/BAEKJOON
bebb4f0463ae5240b55f2a861c2e8723cd48ec8b
768f20bfc1490f87d601aedb5e4669a2d261fa08
2a6e92ad80730b517a98e1d96dbdbdee24593b7a
refs/heads/master
2023-07-26T12:39:13.188551
2023-06-14T13:08:00
2023-06-14T13:08:00
386,566,995
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49275362491607666, "alphanum_fraction": 0.5024154782295227, "avg_line_length": 19.799999237060547, "blob_id": "5637fe4795b9b5ff0544c8c4136315d28d5845ad", "content_id": "512d235b655e9976f3a0c808f9cb82754fc3c46f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 50, "num_lines": 10, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/16차시 2. 자료구조 – 리스트, 튜플 - 연습문제 18.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split(', '))\n\nresult = [[0 for _ in range(M)] for i in range(N)]\n\nfor i in range(N):\n for j in range(M):\n if result[i][j] == 0:\n result[i][j] = i*j\n\nprint(result)" }, { "alpha_fraction": 0.5903614163398743, "alphanum_fraction": 0.6204819083213806, "avg_line_length": 17.55555534362793, "blob_id": "d6d4cc0411656f9906e9032569ffaa1d908a80f4", "content_id": "f6b0b1f0a20ae62e74b919f2917df2b97c322db0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/06/0605/노솔브 방지문제야!!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nQ = int(input())\ndouble = [2**i for i in range(32)]\nfor q in range(Q):\n A = int(input())\n print(1 if A in double else 0)" }, { "alpha_fraction": 0.446958988904953, "alphanum_fraction": 0.4752475321292877, "avg_line_length": 34.400001525878906, "blob_id": "9d7f54b287fb9549e3c68451a4b400b2815ebfb5", "content_id": "28cafc0cfd6a2bf238ff3a302836ece6422db141", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1109, "license_type": "no_license", "max_line_length": 64, "num_lines": 20, "path": "/알고리즘/온라인저지/2021/08/0813/설탕 배달.py", "repo_name": "sorrow4468/BAEKJOON", 
"src_encoding": "UTF-8", "text": "\"\"\"\n3kg봉지, 5kg봉지\n정확하게 Nkg, 못만들면 -1 출력\n3과 5를 이중for문 돌리면서 \n순회중인 무게를 3과 5로 만들어서 \n그 때 사용한 각각의 봉지 개수를 리스트에 저장\n봉지 리스트 길이가 0이면 만들 수 없는 무게이므로 -1\n봉지 리스트에서 최소값을 출력\n\"\"\"\n\nN = int(input()) # 설탕 무게\ncnt = [] # 사용한 봉지개수를 더한 값을 담을 리스트\nfor three in range(N//3 + 1): # 3kg, 시간 단축을 위해 범위 조절\n for five in range(N//5 + 1): # 5kg, 마찬가지로 범위 조절\n if three*3 + five*5 == N: # 해당 무게가 설탕무게이면\n cnt.append(three + five)# 그때 사용된 봉지의 개수를 더한 값을 저장\nif len(cnt) == 0: # 저장된 봉지개수 값이 없으면 만들 수 없는 무게\n print(-1) # -1 출력\nelse: # 저장된 봉지 개수값이 있다면\n print(min(cnt)) # 봉지 개수중 최소값을 출력" }, { "alpha_fraction": 0.5506493449211121, "alphanum_fraction": 0.5558441281318665, "avg_line_length": 26.571428298950195, "blob_id": "1678315064784a81b3d30ee661cca5f9d17832f7", "content_id": "1af89b6d17d1b51d0da487cdd5d5dcb732779f53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "no_license", "max_line_length": 66, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/11/1101/임시 반장 정하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\narr = [list(map(int, input().rstrip().split())) for _ in range(N)]\ncheck = [set() for _ in range(N)]\nfor j in range(5):\n for i in range(N):\n for k in range(N):\n if arr[i][j] == arr[k][j] and i != k:\n check[i].add(k)\nresult = [len(c) for c in check]\nprint(result.index(max(result))+1)" }, { "alpha_fraction": 0.49193549156188965, "alphanum_fraction": 0.5134408473968506, "avg_line_length": 18.63157844543457, "blob_id": "cdbdc3cbd865d3832839cd0c08ec12aaf35980c4", "content_id": "af3bab9f1312b632973504645a46d84b033d4835", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 45, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/10/1003/피보나치 수의 개수.py", "repo_name": 
"sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef check(fib):\n if A<=fib<=B: return 1\n else: return 0\n\nwhile True:\n A, B = map(int, input().rstrip().split())\n if A == 0 and B == 0: break\n result = 0\n a, b = 1, 2\n result += check(a) + check(b)\n for _ in range(B-2): \n if b>B: break\n a, b = b, a+b\n result += check(b)\n print(result)" }, { "alpha_fraction": 0.4593908488750458, "alphanum_fraction": 0.4763113260269165, "avg_line_length": 22.65999984741211, "blob_id": "2cf714e1cb8d280ea6a53b35cb8d487fd820d645", "content_id": "3668bec89d0eddd9d1952219e929a45449fd5572", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1696, "license_type": "no_license", "max_line_length": 56, "num_lines": 50, "path": "/알고리즘/온라인저지/2022/01/0122/귀여운 라이언.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, K = map(int, input().split())\n\n# 라이언 혹은 어피치\nryans = list(map(int, input().split()))\n\ns = e = cnt = 0 # 시작 끝 라이언개수\n\nif ryans[s] == 1: # 라이언으로 시작하면\n cnt += 1 # cnt 1로 초기화\n\nresult = N # 가상의 최대값\n\nis_answer = False # 조합을 찾았는가??\n\nwhile True: # 무지성 while True\n # while 종료 조건문\n # 1. 라이언이 K개 미만인데 더 뒤로 못 감\n # 2. 
라이언이 K개 초과인데 더 앞에서 땡길 수 없음\n if (cnt < K and e == N-1) or (cnt > K and s == N-1):\n break\n\n if cnt == K: # K개 라이언인 구간 발견!\n if not is_answer: # 처음 발견한거면\n # K개인 라이언구간이 존재하니\n # 일단 답이 -1은 아님\n is_answer = True # 라이언구간 존재함 체크\n \n length = e-s+1 # 라이언구간의 길이\n if length < result: # 더 짧은 구간길이라면\n result = length # 최소 길이 갱신\n \n # 포인터를 옮길건데\n if ryans[s] == 1: # 구간 맨 앞이 라이언이면\n cnt -= 1 # 라이언 카운트 빼주고\n s += 1 # 한 칸 땡기기\n\n elif cnt < K: # K개 미만 라이언구간이면\n e += 1 # 한 칸 밀고\n if ryans[e] == 1: # 밀은 위치가 라이언이면\n cnt += 1 # 카운트\n\n elif cnt > K: # K개 초과 라이언구간이면\n if ryans[s] == 1: # 맨 앞이 라이언이면\n cnt -= 1 # 카운트 감소하고\n s += 1 # 한 칸 땡김\n\nif is_answer: # K개인 라이언구간을 찾았었으면\n print(result)\nelse: # 못찾았었으면\n print(-1)" }, { "alpha_fraction": 0.5270270109176636, "alphanum_fraction": 0.5405405163764954, "avg_line_length": 24, "blob_id": "d177e28e0d42b2ed6332431130b150e4aaf969d9", "content_id": "6c90f64e19d77e448c91bf86a3eaade1d6a693c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 35, "num_lines": 3, "path": "/알고리즘/온라인저지/2023/02/0228/Koszykarz.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K, W, M = map(int, input().split())\nA = W-K\nprint(A//M+1 if A%M else A//M)" }, { "alpha_fraction": 0.5267857313156128, "alphanum_fraction": 0.5803571343421936, "avg_line_length": 27.125, "blob_id": "ea41653ec7245ab7e7c0fee0b12b72e5494a7aad", "content_id": "e3526922e073cea4a99eccd99f9e19139d658a5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 224, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/04/0423/과자 사기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "cospas = []\nfor snack in ['S', 'N', 'U']:\n cost, weight = map(int, input().split())\n cost *= 10\n if cost>=5000: cost -= 500\n cospas.append((snack, 
weight/cost))\ncospas.sort(key=lambda x:-x[1])\nprint(cospas[0][0])" }, { "alpha_fraction": 0.5014326572418213, "alphanum_fraction": 0.5300859808921814, "avg_line_length": 18.44444465637207, "blob_id": "4e7b1537a1f6948107090d906f042b5f7c99299e", "content_id": "02afbda505628654af495f5726d4f4e3e5b39b58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "no_license", "max_line_length": 50, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/08/0808/트럭 주차.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "P = [0] * 101 # parking\nC = [0] + list(map(int, input().split())) # charge\nfor _ in range(3):\n I, O = map(int, input().split()) # in out\n for i in range(I, O): P[i] += 1\nresult = 0\nfor p in P:\n tmp = C[p] * p\n result += tmp\nprint(result)\n\n\"\"\"\n시간 : 13분\n풀이\n 트럭의 출입을 기록한 일지(P)를 만들고\n 그 시간에 주차장에 있었던 트럭대수와\n 요금을 곱해서 result++ 하여 출력\n\"\"\"" }, { "alpha_fraction": 0.5820895433425903, "alphanum_fraction": 0.5970149040222168, "avg_line_length": 33, "blob_id": "29a376fcc878ca6d3cf05074324c7fa9c4b5e86c", "content_id": "2cef6a4f32bf4436e6b00442633dce242f5b1d85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 67, "license_type": "no_license", "max_line_length": 54, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/08/0820/코딩은 체육과목 입니다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for i in range(int(input())//4):print('long', end=' ')\nprint('int')" }, { "alpha_fraction": 0.38566553592681885, "alphanum_fraction": 0.4095562994480133, "avg_line_length": 14.473684310913086, "blob_id": "5fc98f1ceae3529578c830b429a12d562d5175ee", "content_id": "d8e37e2b8ecb37c20b031bb5149b82b2a28efe84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 34, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/02/0205/만취한 
상범.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n N = int(input())\n\n dp = [True] * (N+1)\n dp[0] = False\n\n for i in range(2, N+1):\n for j in range(i, N+1, i):\n dp[j] = not dp[j]\n \n result = 0\n\n for d in dp:\n if d:\n result += 1\n \n print(result)" }, { "alpha_fraction": 0.482051283121109, "alphanum_fraction": 0.4871794879436493, "avg_line_length": 27, "blob_id": "aa11fbdb91205e00783f7bbacb571f841557ee56", "content_id": "342355176a4b4534b6d7d97cd5c97974ad7b44be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 195, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/12/1206/꿍의 우주여행.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n N, D = map(int, input().split())\n result = 0\n for n in range(N):\n v, f, s = map(int, input().split())\n result += v*f >= D*s\n print(result)" }, { "alpha_fraction": 0.44262295961380005, "alphanum_fraction": 0.5, "avg_line_length": 40, "blob_id": "b9e57499a44208b4f9a4ed29f7d63b4ebde09b9b", "content_id": "66280451043b2a4fc910e44a3f48789d5a9f7124", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 41, "num_lines": 3, "path": "/알고리즘/온라인저지/2021/08/0804/구구단.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input()) # 1 < N <= 9 인 N을 입력받아서\nfor i in range(1, 10): # 1부터 9까지 순회하면서 \n print(f'{N} * {i} = {N*i}') # 구구단을 출력" }, { "alpha_fraction": 0.446153849363327, "alphanum_fraction": 0.4615384638309479, "avg_line_length": 21, "blob_id": "213f14103d47db93e410049e180fa68738389d43", "content_id": "b6157dccbff71530d50da2c8e819e6b299881ce1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": 
"no_license", "max_line_length": 38, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/02/0210/스타후르츠.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, T, C, P = map(int, input().split())\n\nprint(((N-1)//T) * C * P)" }, { "alpha_fraction": 0.5798319578170776, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 12.333333015441895, "blob_id": "cf0e626c5cdf52e0edcb9758a39726da0543ee38", "content_id": "2fc784abf4f1b3049878bd35657236cb7b2a6ba0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 31, "num_lines": 9, "path": "/알고리즘/온라인저지/2021/09/0906/돌 게임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\nN = int(input())\nif N%2:\n print('SK')\nelse:\n print('CY')" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.5972222089767456, "avg_line_length": 47.33333206176758, "blob_id": "95ba1e5cf265fa9c1f74ec8759e42628ccaee2d2", "content_id": "ae361a4a4a315d1ee0ed3b155942b138aab6eb0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 62, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/09/0910/점수 집계.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n score = sorted(list(map(int, input().split())))\n print(sum(score[1:-1]) if score[-2]-score[1]<4 else 'KIN')" }, { "alpha_fraction": 0.47663551568984985, "alphanum_fraction": 0.4953271150588989, "avg_line_length": 12.5, "blob_id": "5305e99966fc00a221026d0ceb9381d622e1591e", "content_id": "7703d041c79a25ab3fa5aabc0ce4d54d85d3f3a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 19, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/06/0604/Gum Gum for Jay 
Jay.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nwhile True:\n try:\n input()\n result += 1\n except:\n break\nprint(result)" }, { "alpha_fraction": 0.5188323855400085, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 31.212121963500977, "blob_id": "0d47de68f1ac69ee6afb8d3b722b7e92a9b66850", "content_id": "2de90d8202deeadfe44b8d7561a76d1d79837cd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1334, "license_type": "no_license", "max_line_length": 145, "num_lines": 33, "path": "/알고리즘/[템플릿]/Floyd-Warshall/Small World Network.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "INF = int(1e9)\nN, K = map(int, input().split())\nworld = [[INF]*N for _ in range(N)] # 친구간 거리 배열 초기화\nfor k in range(K):\n A, B = map(int, input().split())\n A, B = A-1, B-1\n world[A][B], world[B][A] = 1, 1 # 친구인 관계는 서로 거리가 1\nfor k in range(N):\n for i in range(N):\n for j in range(N):\n if i != j: # 자기 자신과 친구는 카운트하지 않음\n world[i][j] = min(world[i][j], world[i][k]+world[k][j])\nresult = 'Small World!' 
# 초기값을 Small World로 놓고 거리가 6을 초과하는 관계를 찾을 것\nfor i in range(N):\n if result != 'Small World!': # Small World가 아닌 것이 증명되었다면 \n break\n for j in range(N):\n if i != j: # 자기 자신이 아니면서\n if world[i][j] > 6: # 친구 거리가 6을 초과한다?\n result = 'Big World!'\n break\nprint(result)\n\n\"\"\"\n플로이드 워셜(Floyd-Warshall) 알고리즘\nhttps://velog.io/@kimdukbae/%ED%94%8C%EB%A1%9C%EC%9D%B4%EB%93%9C-%EC%9B%8C%EC%85%9C-%EC%95%8C%EA%B3%A0%EB%A6%AC%EC%A6%98-Floyd-Warshall-Algorithm\ni에서 j로 가는 최소값은\n이미 구한 \"i에서 j로 가는 값\"과\n\"지점 k를 거쳐가는 i -> k -> j로 가는 값\"\n두 값중 최소값이다\n\"\"\"\n\n# https://www.acmicpc.net/problem/18243" }, { "alpha_fraction": 0.4516128897666931, "alphanum_fraction": 0.49395161867141724, "avg_line_length": 23.850000381469727, "blob_id": "512e820bfa19c7f67c9a7051ba9a524047ea89ff", "content_id": "0da8cd1034f9f8bebb2ce44e049a6f3f2ddb9e2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 57, "num_lines": 20, "path": "/알고리즘/온라인저지/2022/07/0727/5학년은 다니기 싫어요.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, A, B = map(int, input().split())\nL = 8-N # left : 남은 학기\nLA = 66-A # 남은 전공학점\nLB = 130-B # 남은 총학점\nresult = 'Nae ga wae'\nC = [tuple(map(int, input().split())) for _ in range(10)]\nfor l in range(L):\n # 한 학기 최대 이수 6과목 18학점\n LC = 6 # left credits : 남은 이번학기 수강가능 학점\n tmp_a = C[l][0]\n LA -= tmp_a * 3\n LC -= tmp_a\n tmp_b = C[l][1]\n if LC >= tmp_b:\n LB -= tmp_a*3 + tmp_b*3\n else:\n LB -= tmp_a*3 + LC*3\n if LA<=0 and LB<=0:\n result = 'Nice'\nprint(result)" }, { "alpha_fraction": 0.3652482330799103, "alphanum_fraction": 0.3758865296840668, "avg_line_length": 20.769229888916016, "blob_id": "70d431864efcf07f47758276e93c4e2f4538aa05", "content_id": "c5f07363f66a551115347f1386cfc212040d79fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 
43, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/06/0629/캥거루 세마리2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n try:\n A, B, C = map(int, input().split())\n result = 0\n al = abs(A-B)\n cl = abs(B-C)\n if al <= cl:\n result += C - (B+1)\n elif al > cl:\n result += (B-1) - A\n print(result)\n except:\n break" }, { "alpha_fraction": 0.4270462691783905, "alphanum_fraction": 0.47330960631370544, "avg_line_length": 30.33333396911621, "blob_id": "caae94943f193cca55d0d8a81d89342c03f35a8a", "content_id": "f7e7cafdf1284645bd7865ca50f432606ad60454", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/01/0117/신용카드 판별.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n card = list(map(int, list(input())))\n result = 0\n for i in range(0, 16, 2):\n card[i] = card[i]*2\n if card[i] > 9:\n card[i] = (card[i]%10) + 1\n result += card[i] + card[i+1]\n print('F' if result%10 else 'T')" }, { "alpha_fraction": 0.4673366844654083, "alphanum_fraction": 0.5376884341239929, "avg_line_length": 15.666666984558105, "blob_id": "d75be4920374727333676c5224b901aff6dd4b4d", "content_id": "c3bcf21ddebe55ef918454746aa839692a93d5b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 36, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/02/0220/카드 역배치.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "cards = list(range(21))\n\n# a, b = 5, 10\n# print(cards[a:b+1])\n# print(cards[b:a-1:-1])\n\nfor i in range(10):\n a, b = map(int, input().split())\n\n cards[a:b+1] = cards[b:a-1:-1]\n\nprint(*cards[1:])" }, { "alpha_fraction": 0.5909090638160706, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 22, "blob_id": 
"8df19a54d0fcb1c74c7f5a3d58b7ad274babc966", "content_id": "16266bf0be4d01829f08fe4eacc1eced2cf6690b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "no_license", "max_line_length": 22, "num_lines": 1, "path": "/알고리즘/온라인저지/2023/04/0410/時間 (Hour).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print(int(input())*24)" }, { "alpha_fraction": 0.516094446182251, "alphanum_fraction": 0.5364806652069092, "avg_line_length": 29.09677505493164, "blob_id": "76a5eab002625165c0f76b5f666d8389f73defd1", "content_id": "d9491a9cf4a0ef349cd448793e2e2b49cf351b85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1266, "license_type": "no_license", "max_line_length": 106, "num_lines": 31, "path": "/알고리즘/온라인저지/2022/09/0927/백양로 브레이크.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\nINF = int(1e9) # 가상의, 변경해야 할 길의 수 최대값\n\nN, M = map(int, input().rstrip().split())\narr = [[INF]*N for _ in range(N)]\nfor m in range(M):\n u, v, b = map(int, input().rstrip().split())\n u, v = u-1, v-1 # 인덱스 맞추기\n if b == 0: # 일방통행이면\n arr[u][v] = 0 # 한 쪽은 괜찮지만\n arr[v][u] = 1 # 다른 한 쪽은 길을 바꿔줘야 함\n else: arr[u][v], arr[v][u] = 0, 0 # 양방향 통행이면 서로 오갈 수 있음\nfor k in range(N):\n for a in range(N):\n for b in range(N):\n arr[a][b] = min(arr[a][b], arr[a][k]+arr[k][b]) # 최소값(기존에 구한 변경해야 하는 길의 수, 새로 구한 변경해야 하는 길의 수)\nK = int(input().rstrip())\nfor k in range(K):\n s, e = map(int, input().rstrip().split())\n s, e = s-1, e-1 # 인덱스 맞추기\n if s == e: print(0) # 1->1 같은 경우는 갔다 돌아오는 것이 아닌, 출발 자체를 하지 않는 것으로 함\n else: print(arr[s][e])\n\n# https://www.acmicpc.net/problem/11562\n\"\"\" \n플로이드-워셜 알고리즘\na->b 로 가기 위해 바꿔야하는 일방통행 길의 수는\na->k->b 로 가기 위해 바꿔야 하는 길의 수와 같다\n\"\"\"" }, { "alpha_fraction": 0.48927614092826843, "alphanum_fraction": 0.5080428719520569, "avg_line_length": 23.899999618530273, 
"blob_id": "1db74d81d6b785893a91c892e6bfee6eced7cf1b", "content_id": "96579c68bc200f50ec733cfcbc2befe1f0f2d89a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 816, "license_type": "no_license", "max_line_length": 48, "num_lines": 30, "path": "/알고리즘/온라인저지/2021/09/0908/카드2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\ndef is_odd_or_even(N):\n if N%2:\n return True\n else:\n return False\n\nN = int(input())\ndeck = list(range(1, N+1))\npop_or_keep = True # 버리기, False=살리기, 처음엔 버리면서 시작\nwhile len(deck) > 1:\n odd_or_even = is_odd_or_even(len(deck))\n if odd_or_even:\n if pop_or_keep: # 홀수, 버리기\n deck = deck[1:len(deck)+1:2]\n pop_or_keep = False\n else: # 홀수, 살리기\n deck = deck[0:len(deck):2]\n pop_or_keep = True\n else:\n if pop_or_keep: # 짝수, 버리기\n deck = deck[1:len(deck)+1:2]\n pop_or_keep = True\n else: # 짝수, 살리기\n deck = deck[0:len(deck):2]\n pop_or_keep = False\nprint(*deck)" }, { "alpha_fraction": 0.429198682308197, "alphanum_fraction": 0.4588364362716675, "avg_line_length": 21.799999237060547, "blob_id": "d508adfa8a62345374d6b7f670788332367213d9", "content_id": "7cf59cafb587050f9156da0fb6e9e7ef70049f99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 961, "license_type": "no_license", "max_line_length": 35, "num_lines": 40, "path": "/알고리즘/온라인저지/2022/07/0706/가위 바위 보.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def RSP(player1, player2):\n result = 0\n if player1 == 'R':\n if player2 == 'R':\n result += 1 \n elif player2 == 'S':\n result += 2\n elif player1 == 'S':\n if player2 == 'S':\n result += 1\n elif player2 == 'P':\n result += 2\n elif player1 == 'P':\n if player2 == 'R':\n result += 2\n elif player2 == 'P':\n result += 1\n return result\n\n\nR = int(input())\nSG = list(input()) # 상근\nN = int(input())\nfriend = [] # 친구들\nfor n 
in range(N): \n friend.append(list(input()))\nresult1 = result2 = 0\nfor i in range(R):\n for f in friend: # 상근이의 점수\n result1 += RSP(SG[i], f[i])\n maxx = 0 # 최대점수 초기화\n for j in 'RSP': # 최대점수 구하기\n tmp = 0\n for f in friend:\n tmp += RSP(j, f[i])\n if tmp > maxx:\n maxx = tmp\n result2 += maxx\nprint(result1)\nprint(result2)" }, { "alpha_fraction": 0.4267912805080414, "alphanum_fraction": 0.5327102541923523, "avg_line_length": 20.46666717529297, "blob_id": "7bf847a3d0f98097b641cd3f908b7de47cd883b7", "content_id": "a874c41343d9e4bad382e835e1cf3c3adf7844f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, "license_type": "no_license", "max_line_length": 52, "num_lines": 15, "path": "/알고리즘/온라인저지/2023/03/0317/추첨을 통해 커피를 받자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "scores = list(map(int, input().split()))\nmaxx = [100, 100, 200, 200, 300, 300, 400, 400, 500]\ncoffee, hacker = 0, 0\nfor i in range(9):\n S, M = scores[i], maxx[i]\n if S>M:\n hacker = 1\n coffee += S\nif hacker:\n print('hacker')\nelse:\n if coffee<100:\n print('none')\n else:\n print('draw')" }, { "alpha_fraction": 0.3705308735370636, "alphanum_fraction": 0.3911159336566925, "avg_line_length": 20.488372802734375, "blob_id": "8404d6428c10092faa10089a1819a6452d229fca", "content_id": "8c35509f58ecdd42e704b478c2f519e76fbc417d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 923, "license_type": "no_license", "max_line_length": 67, "num_lines": 43, "path": "/알고리즘/온라인저지/2021/12/1216/단지번호붙이기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\ndx = (-1, 1, 0, 0)\ndy = (0, 0, -1, 1)\n\nN = int(input())\n\nmap = [list(map(int, input())) for _ in range(N)]\n\ncomplex_cnt = 0\n\ncomplex_counts = []\n\nbfs = deque()\n\nfor i in range(N):\n for j in range(N):\n if map[i][j]:\n bfs.append((i, j))\n complex_cnt 
+= 1\n cnt = 0\n while bfs:\n tmp = bfs.popleft()\n y = tmp[0]\n x = tmp[1]\n\n if map[y][x] == 0:\n continue\n \n map[y][x] = 0\n cnt += 1\n\n for k in range(4):\n ny = y + dy[k]\n nx = x + dx[k]\n\n if 0 <= ny < N and 0 <= nx < N and map[ny][nx]:\n bfs.append((ny, nx))\n complex_counts.append(cnt)\nprint(complex_cnt)\ncomplex_counts.sort()\nfor c in complex_counts:\n print(c)" }, { "alpha_fraction": 0.47999998927116394, "alphanum_fraction": 0.4950000047683716, "avg_line_length": 21.33333396911621, "blob_id": "93d2c3b89ce62186b3e54f3351eb66cfc59223fa", "content_id": "d65d3aed872b96f32cc29453b4a2b3a3a80f152a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 31, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/07/0728/서로 다른 부분 문자열의 개수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = input()\nresult = set()\nfor i in range(1, len(S)+1):\n # print(i)\n for j in range(len(S)+1-i):\n # print(j, j+i)\n # print(S[j:j+i])\n result.add(S[j:j+i])\nprint(len(result))" }, { "alpha_fraction": 0.5102040767669678, "alphanum_fraction": 0.5306122303009033, "avg_line_length": 11.375, "blob_id": "bb6cec09058d814a352f6b81809e5bf48de06c9b", "content_id": "6588444cd13d3e54d4edbcde6867882caba78616", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/03/0309/帰省 (Homecoming).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C = map(int, input().split())\n\nresult = 1\n\nif C < A or C >= B:\n result = 0\n\nprint(result)" }, { "alpha_fraction": 0.5327731370925903, "alphanum_fraction": 0.5596638917922974, "avg_line_length": 21.074073791503906, "blob_id": "f6511fe9d1495b309a9c5f6e55c0982f23c8485d", "content_id": "1072422906c9c0070b26901873e6452da4177d34", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 761, "license_type": "no_license", "max_line_length": 51, "num_lines": 27, "path": "/알고리즘/온라인저지/2022/10/1012/골드바흐의 추측 실버1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nn = int(1e6)\na = [False,False] + [True]*(n-1)\nfor i in range(2, 1001):\n if a[i]:\n for j in range(2*i, n+1, i):\n a[j] = False\nwhile True:\n N = int(input().rstrip())\n if N == 0: break\n for i in range(3, n):\n if a[N-i] and a[i]:\n print('{} = {} + {}'.format(N, i, N-i))\n break\n else: print(\"Goldbach's conjecture is wrong.\")\n\n\"\"\"\n에라토스테네스의 체를 만드는 과정에서 \nprimes를 만들기 위해 append 하는 부분을 없앴다\n소수판정 테이블인 a만 활용하여 시간초과를 회피하였다\n결론적으로 골드바흐의 추측은 백만 이하 모든 짝수에 적용이 가능하다\n\"\"\"\n\n# https://www.acmicpc.net/problem/6588" }, { "alpha_fraction": 0.4693877696990967, "alphanum_fraction": 0.4920634925365448, "avg_line_length": 19.045454025268555, "blob_id": "8e45bcd77a990f25c7762fd852964fdc734a075e", "content_id": "81e9075ee4027c9f0222cceb39ddd076dc5e77c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 43, "num_lines": 22, "path": "/알고리즘/온라인저지/2021/08/0818/p3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nsys.stdin = open('input.txt')\n\nV, E = map(int, input().split()) # 정점수, 간선수\n# 인접 행렬\nG = [[0]*(V+1) for _ in range(V+1)]\nfor _ in range(E):\n u, v = map(int, input().split())\n G[u][v] = G[v][u] = 1\n\nfor lst in G[1:]:\n print(*lst[1:])\n\n# 인접 리스트\nG = [[]*(V+1) for _ in range(V+1)]\nfor _ in range(E):\n u, v = map(int, input().split())\n G[u].append(v)\n G[v].append(u)\n\nfor i in range(1, V+1):\n print(i, '-->', G[i])\n" }, { "alpha_fraction": 0.3415493071079254, "alphanum_fraction": 0.35680750012397766, "avg_line_length": 21.421052932739258, "blob_id": "004f9b2ae1f57ef4a903e171242bfab9f48556f7", 
"content_id": "e2b52e6bad1ecd04aa04dc8b35f87542efc5c560", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 852, "license_type": "no_license", "max_line_length": 43, "num_lines": 38, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/4. 파이썬 SW문제해결 기본 Stack1/7차시 4일차 - 괄호검사.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(1, T+1):\n code = input()\n \n stack = []\n bracket = ['(', ')', '{', '}']\n\n for c in code:\n if c == bracket[0]:\n stack.append(c)\n elif c == bracket[1]:\n if stack:\n if stack[-1] == bracket[0]:\n stack.pop()\n else:\n stack.append(c)\n else:\n stack.append(c)\n elif c == bracket[2]:\n stack.append(c)\n elif c == bracket[3]:\n if stack:\n if stack[-1] == bracket[2]:\n stack.pop()\n else:\n stack.append(c)\n else:\n stack.append(c)\n \n result = 0\n if stack:\n result = 0\n else:\n result = 1\n \n \n print('#{} {}'.format(t, result))\n" }, { "alpha_fraction": 0.4421199560165405, "alphanum_fraction": 0.5202231407165527, "avg_line_length": 23.758621215820312, "blob_id": "71dda0cb0c14e8369a8c7e1ac732d1a49ae65951", "content_id": "7b4577af0f6f1a4f77e75c801701626e41d20300", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": "no_license", "max_line_length": 69, "num_lines": 29, "path": "/알고리즘/[템플릿]/진법 변환/싱기한 네자리 숫자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import string\n\ntmp = string.digits + string.ascii_uppercase # 자릿수를 담은 문자열\ndigits_dict = {\n '0': 0, '1': 1, '2': 2, '3': 3,\n '4': 4, '5': 5, '6': 6, '7': 7,\n '8': 8, '9': 9, 'A': 10, 'B': 11,\n 'C': 12, 'D': 13, 'E': 14, 'F': 15,\n}\n\ndef change(num, base):\n result = ''\n while num:\n result = tmp[num%base] + result\n num //= base\n return result\n\ndef digits_sum(num, base):\n result = 0\n for n in num:\n result += digits_dict[n]\n return result\n\nfor num in 
range(1000, 10000):\n A, B, C = str(num), change(num, 12), change(num, 16)\n A, B, C = digits_sum(A, 10), digits_sum(B, 12), digits_sum(C, 16)\n if A == B == C: print(num)\n\n# https://www.acmicpc.net/problem/6679" }, { "alpha_fraction": 0.47441861033439636, "alphanum_fraction": 0.4883720874786377, "avg_line_length": 22.88888931274414, "blob_id": "0d5d793f03074caac7d1ccd253b907ad8e5029a4", "content_id": "4803deb43723a7786bb2d226799349067d071b19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 45, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/09/0927/팰린드롬.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom itertools import combinations as comb\n\ninput = sys.stdin.readline\n\nfor _ in [0]*int(input().rstrip()):\n K = int(input().rstrip())\n words = [input().rstrip() for _ in [0]*K]\n word_comb = list(comb(words, 2))\n for a, b in word_comb:\n A, B = a+b, b+a\n if A == A[::-1]:\n print(A)\n break\n if B == B[::-1]:\n print(B)\n break\n else: print(0)\n" }, { "alpha_fraction": 0.49841269850730896, "alphanum_fraction": 0.565079391002655, "avg_line_length": 14.800000190734863, "blob_id": "6c32fefe76219981377494ecd95d51f729f28103", "content_id": "16d8e9027e414e818cafca4d84b79e9a58b132fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 439, "license_type": "no_license", "max_line_length": 34, "num_lines": 20, "path": "/알고리즘/온라인저지/2022/09/0904/애너그램 만들기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = input(), input()\narr1, arr2 = [0]*26, [0]*26\nfor a in A:\n i = ord(a)-97\n arr1[i] += 1\nfor b in B:\n i = ord(b)-97\n arr2[i] += 1\nresult = 0\nfor i in range(26):\n result += abs(arr1[i]-arr2[i])\nprint(result)\n\n\"\"\"\n같은 알파벳이 같은 개수만큼 있어야 함\n문제에서는 알파벳을 새로 추가하지는 않고\n불필요할 경우 제거하면서\n애너그램을 만듬\n몇개를 제거해야 하는가\n\"\"\"" }, { 
"alpha_fraction": 0.5340909361839294, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 28.5, "blob_id": "a7cdbeb11674ae8d0d57c18a9057da971dd7f7f3", "content_id": "5e6094fc851320c16ad1588ebb85d0c73b1911a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 39, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/09/0910/공 바꾸기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nball = [i for i in range(N+1)]\nfor m in range(M):\n i, j = map(int, input().split())\n ball[i], ball[j] = ball[j], ball[i]\nprint(*ball[1:])" }, { "alpha_fraction": 0.4644128084182739, "alphanum_fraction": 0.4750889539718628, "avg_line_length": 27.149999618530273, "blob_id": "40ebdb5d59e66505db4f48252e24b4b7fcc191ec", "content_id": "1bc7491ce79e3fc1b445c413c85885089badae2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 562, "license_type": "no_license", "max_line_length": 61, "num_lines": 20, "path": "/알고리즘/온라인저지/2023/02/0205/운동.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "INF = int(1e9)\nN, M = map(int, input().split())\narr = [[INF]*N for _ in range(N)]\nfor m in range(M):\n a, b, c = map(int, input().split())\n a, b = a-1, b-1\n if c < arr[a][b]:\n arr[a][b] = c\nfor k in range(N):\n for a in range(N):\n for b in range(N):\n arr[a][b] = min(arr[a][b], arr[a][k] + arr[k][b])\nfor i in range(N):\n for j in range(N):\n if arr[i][j] == INF:\n if i != j:\n arr[i][j] = 0\nresult = INF\nfor i in range(N): result = min(result, arr[i][i])\nprint(result if result != INF else -1)" }, { "alpha_fraction": 0.5643564462661743, "alphanum_fraction": 0.594059407711029, "avg_line_length": 19.399999618530273, "blob_id": "42777dea1921398b57e879dcf9cb87dc95cf1ea9", "content_id": "d41d4a801154684d5fea2ddf2005e2e126d547de", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/01/0125/반지.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = input()\nresult = 0\nfor _ in range(int(input())):\n if S in input()*2: result += 1\nprint(result)" }, { "alpha_fraction": 0.4342857003211975, "alphanum_fraction": 0.477142870426178, "avg_line_length": 19.647058486938477, "blob_id": "016f357231ee172f2a6dcc7ee5a38830981a0975", "content_id": "bac9c4768c4f4e81310bcaf4398bb68b7dedbf91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 35, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/08/0822/분수찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nprev, next, line, move = 0, 0, 0, 0\nD = 1 # 우상향: 1, 좌하향: 0\nfor cnt in range(1, int(1e9)):\n next += cnt\n if prev <= N <= next:\n line, move = cnt, N-prev-1\n D = line%2\n break\n prev += cnt\n# print(line, move, D)\nA, B = 1, line\nfor i in range(move):\n A += 1\n B -= 1\nif D: A, B = B, A\nprint(f'{A}/{B}')" }, { "alpha_fraction": 0.540229856967926, "alphanum_fraction": 0.5747126340866089, "avg_line_length": 13.666666984558105, "blob_id": "a294395c9bb265de35aac2207275ad0452881131", "content_id": "724b46bc776a635c7e5fbf9d6e26af4328050078", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 22, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/05/0511/택시 기하학.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import math\n\npi = math.pi\nR = int(input())\nprint(f'{pi*R*R:.6f}')\nprint(f'{2*R*R:.6f}')" }, { "alpha_fraction": 0.5221238732337952, "alphanum_fraction": 0.5398229956626892, "avg_line_length": 21.799999237060547, "blob_id": 
"62e10a003fc3d3e44b13928626243ef68046055b", "content_id": "1f1043882482b8847d269423898de6b17f14d4c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 41, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/05/0502/Intercepting Information.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 'S'\nfor i in list(map(int, input().split())):\n if i not in (0, 1):\n result = 'F'\nprint(result)" }, { "alpha_fraction": 0.53125, "alphanum_fraction": 0.53125, "avg_line_length": 31.200000762939453, "blob_id": "eafb7e2e1d27ffa87fa5b4dd9a34abb0fce4b3d6", "content_id": "115e532b9127217c6b23f6215be0d7048cd25e31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 66, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/05/0527/할로윈의 사탕.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor t in range(T):\n C, V = map(int, input().split())\n A, B = C//V, C%V\n print(f'You get {A} piece(s) and your dad gets {B} piece(s).')" }, { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.524193525314331, "avg_line_length": 14.625, "blob_id": "d889ff6505f2e7edd42dc18ae8ca83319f090a50", "content_id": "0452676e22d32d0d7b28be9a89c1c82b73d8dba0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "no_license", "max_line_length": 38, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/02/0223/Darius님 한타 안 함.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K, D, A = map(int, input().split('/'))\n\nis_Darius = 'gosu'\n\nif K + A < D or D == 0:\n is_Darius = 'hasu'\n\nprint(is_Darius)" }, { "alpha_fraction": 0.50310879945755, "alphanum_fraction": 0.5207253694534302, "avg_line_length": 30.129032135009766, "blob_id": 
"8b1def6b78499f45901cb2d021376703bdb59cfd", "content_id": "417a7f974598cdd41dfaf4bdc852f6ab73c97ccd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2104, "license_type": "no_license", "max_line_length": 72, "num_lines": 62, "path": "/알고리즘/온라인저지/2022/11/1113/연구소 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom itertools import combinations as comb\nfrom collections import deque\n\ninput = sys.stdin.readline\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\ndef spread_virus(viruses):\n new_arr = [[-1]*N for _ in range(N)] # 문제의 예시처럼, 바이러스가 퍼진 시간을 기록할 배열\n # deactivate other viruses\n for i in range(N):\n for j in range(N):\n if new_arr[i][j] == 2 and (i, j) not in viruses:\n new_arr[i][j] = 3\n Q = deque()\n visited = [[0]*N for _ in range(N)]\n for y, x in viruses: \n Q.append((y, x))\n new_arr[y][x] = 0\n visited[y][x] = 1\n while Q:\n y, x = Q.popleft()\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<N and 0<=nx<N and not visited[ny][nx]:\n if arr[ny][nx] != 1:\n Q.append((ny, nx))\n new_arr[ny][nx] = new_arr[y][x]+1\n visited[ny][nx] = 1\n all_spreaded_check(new_arr, visited)\n\ndef all_spreaded_check(arr, visited):\n global result\n new_wall = 0\n max_time = 0\n for i in range(N):\n for j in range(N):\n if (i, j) not in virus_coordinates: # 바이러스의 시간은 카운트하지 않는다\n max_time = max(max_time, arr[i][j])\n if visited[i][j] == 0:\n new_wall += 1\n if wall == new_wall: # \n result = min(max_time, result)\n\nN, M = map(int, input().rstrip().split())\narr = [list(map(int, input().rstrip().split())) for _ in range(N)]\nvirus_coordinates = []\nwall = 0\nfor i in range(N):\n for j in range(N):\n if arr[i][j] == 2:\n virus_coordinates.append((i, j))\n if arr[i][j] == 1:\n wall += 1\nvirus_combinations = list(comb(virus_coordinates, M))\nresult = int(1e9) # 가상의 최소값\nfor viruses in virus_combinations: # 매 바이러스 조합에 대해\n spread_virus(viruses) # 바이러스 퍼뜨리기\nif result == 
int(1e9): result = -1 # 한 번도 바이러스가 다 퍼지지 못했다면 -1\nprint(result)\n\n# 문제가 좀 이상합니다....;;\n" }, { "alpha_fraction": 0.48785871267318726, "alphanum_fraction": 0.50772625207901, "avg_line_length": 16.461538314819336, "blob_id": "45636644c2553054f49a5fee7f48dadca7be41a8", "content_id": "95a6912609e8c8008c85f750fb8636b4600baaf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 453, "license_type": "no_license", "max_line_length": 43, "num_lines": 26, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/39차시 4. 문자열 - 연습문제 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "url = 'http://www.example.com/test?p=1&q=2'\n\nprotocol = ''\nhost = ''\nothers = ''\n\nhost_s = 0\nhost_e = 0\n\nfor i in range(len(url)):\n if url[i] == ':':\n protocol = url[:i]\n \n if url[i:i+3] == 'www':\n host_s = i\n \n if url[i-2:i+1] == 'com':\n host_e = i+1\n\n others = url[host_e+1:]\n\nhost = url[host_s:host_e]\n\nprint('protocol: {}'.format(protocol))\nprint('host: {}'.format(host))\nprint('others: {}'.format(others))" }, { "alpha_fraction": 0.5047393441200256, "alphanum_fraction": 0.521327018737793, "avg_line_length": 31.538461685180664, "blob_id": "9feaef80329af9a8f18552245f1c508b4f984506", "content_id": "e1eaffeebcbf31c3da2b5eb2ca5b9d6fdbf24216", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 612, "license_type": "no_license", "max_line_length": 45, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/04/0410/짐 챙기는 숌.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nif N > 0: # 담아야 할 책이 있으면\n books = list(map(int, input().split()))\n result = 1 # 상자를 하나 가져다 놓고\n box = M # 상자에 담을 수 있는 무게는 M\n for book in books: # 책들을 순회하면서\n box -= book # 상자에 책을 하나씩 담는다\n if box < 0: # 상자에 책을 담을 수 없으면\n box = M - book # 다음 상자를 가져와 책을 담고\n result += 1 # 상자 개수를 +1\nelse: # 담아야 할 책이 없으면\n result = 0 # 
필요한 상자의 개수는 0\nprint(result)" }, { "alpha_fraction": 0.5694444179534912, "alphanum_fraction": 0.5972222089767456, "avg_line_length": 35.5, "blob_id": "5e2fe2a5437e66924459c3b5c46e2c545f46e2f7", "content_id": "fb0fd3cfe6cc4fc499a9e1582326dfb327a5ddb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/02/0207/Hello Judge.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for i in range(1, int(input())+1):\n print(f'Hello World, Judge {i}!')" }, { "alpha_fraction": 0.4193548262119293, "alphanum_fraction": 0.43478259444236755, "avg_line_length": 24.285715103149414, "blob_id": "631f204724be1a670b34bdbe17d146ee2ff5cafd", "content_id": "c5477022311522dde65d7794260ce7e175e41b4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 713, "license_type": "no_license", "max_line_length": 45, "num_lines": 28, "path": "/알고리즘/온라인저지/2021/12/1216/약수들의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n n = int(input())\n\n if n == -1:\n break\n\n measure_counts = [0 for _ in range(n+1)]\n measures = []\n\n for i in range(1, n+1):\n if not measure_counts[i]:\n if n % i == 0:\n measure_counts[i] = 1\n measure_counts[n//i] = 1\n measures.append(i)\n measures.append(n//i)\n\n measures.sort()\n\n if sum(measures[:len(measures)-1]) == n:\n print('{} = '.format(n), end='')\n for i in range(len(measures)-1):\n print(measures[i], end='')\n if i != len(measures)-2:\n print(' + ', end='')\n print() \n else:\n print('{} is NOT perfect.'.format(n))\n \n" }, { "alpha_fraction": 0.4487309753894806, "alphanum_fraction": 0.46700507402420044, "avg_line_length": 21.930233001708984, "blob_id": "e82d6603afa5da2de0463db4bbf39e0621a53c6c", "content_id": "ee8b88e44c21ba4fabe914d3505608aa63897781", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 985, "license_type": "no_license", "max_line_length": 82, "num_lines": 43, "path": "/알고리즘/온라인저지/2022/06/0629/적록색약.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\nimport sys\n\n\ndef input():\n return sys.stdin.readline()\n\ndef bfs(i, j):\n q.append((i, j))\n now = arr[i][j]\n while q:\n y, x = q.popleft()\n visited[y][x] = 1\n for k in range(4):\n ny, nx = y+dy[k], x+dx[k]\n if 0<=ny<N and 0<=nx<N and not visited[ny][nx] and arr[ny][nx] == now:\n if (ny, nx) not in q:\n q.append((ny, nx))\n\n\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\nN = int(input())\narr = [list(input()) for _ in range(N)]\nNCB = 0 # Non Color Blindness\nCB = 0 # Color Blindness\nvisited = [[0]*N for _ in range(N)]\nq = deque()\nfor i in range(N):\n for j in range(N):\n if not visited[i][j]:\n bfs(i, j)\n NCB += 1\n if arr[i][j] in 'RG':\n arr[i][j] = 'Y'\nvisited = [[0]*N for _ in range(N)]\nq = deque()\nfor i in range(N):\n for j in range(N):\n if not visited[i][j]:\n bfs(i, j)\n CB += 1\nprint(NCB, CB)" }, { "alpha_fraction": 0.27398988604545593, "alphanum_fraction": 0.3156565725803375, "avg_line_length": 26.34482765197754, "blob_id": "13511a4d459a9c8cc73c79a5a9b4e690180c7dab", "content_id": "dcd20864ad5c1b406836a139cab7c502c350d4a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "no_license", "max_line_length": 45, "num_lines": 29, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/2. 
파이썬 SW문제해결 기본 List2/5차시 2일차 - 색칠하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(1, T+1):\n arr = [[0]*10 for _ in range(10)]\n N = int(input())\n for n in range(N):\n tmp = list(map(int, input().split()))\n r1 = tmp[0]\n c1 = tmp[1]\n r2 = tmp[2]\n c2 = tmp[3]\n \n for i in range(c1, c2+1):\n for j in range(r1, r2+1):\n if tmp[4] == 1:\n if arr[i][j] == 0:\n arr[i][j] = 1\n elif arr[i][j] == 2:\n arr[i][j] = 3\n else:\n if arr[i][j] == 0:\n arr[i][j] = 2\n elif arr[i][j] == 1:\n arr[i][j] = 3\n \n cnt = 0\n for a in arr:\n cnt += a.count(3)\n print('#{} {}'.format(t, cnt))" }, { "alpha_fraction": 0.3867403268814087, "alphanum_fraction": 0.4806629717350006, "avg_line_length": 19.22222137451172, "blob_id": "6b2ab27b4a2525c8359ebad206593023a920ccdb", "content_id": "f434eccb490458f6dba43731400fe2eee5f7abed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 41, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/06/0613/Vending Machine.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 5000\nfor i in list(map(int, input().split())):\n if i == 1:\n result -= 500\n if i == 2:\n result -= 800\n if i == 3:\n result -= 1000\nprint(result)" }, { "alpha_fraction": 0.4347222149372101, "alphanum_fraction": 0.4763889014720917, "avg_line_length": 23.03333282470703, "blob_id": "7c84ee759fb606740b73fba316acad914bba8fe6", "content_id": "7703df2598d60ad15636caea02e6a1900d898ae7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 826, "license_type": "no_license", "max_line_length": 63, "num_lines": 30, "path": "/알고리즘/온라인저지/2021/07/0731/바둑알 십자 뒤집기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "num = 19\nboard = [[int(x) for x in input().split()] for y in range(num)]\n\nn = int(input())\n\npoints = []\nfor i 
in range(n): # 뒤집을 점들의 개수만큼\n a, b = map(int, input().split())\n points.append([a, b])\n # 입력 받아서 좌표들을 리스트로 저장\n \ndef cross(x, y): # 십자뒤집기 로직\n for i in range(19):\n if board[x-1][i] == 0: # 가로줄 0 -> 1, 1 -> 0\n board[x-1][i] = 1\n else:\n board[x-1][i] = 0\n if board[i][y-1] == 0: # 세로줄 0 -> 1, 1 -> 0\n board[i][y-1] = 1\n else:\n board[i][y-1] = 0\n return board\n\nfor point in points:\n cross(point[0], point[1]) # 좌표들 각각 십자뒤집기 실행\n\nfor i in range(19) :\n for j in range(19) : \n print(board[i][j], end=' ')\n print() # 출력문" }, { "alpha_fraction": 0.5121951103210449, "alphanum_fraction": 0.5528455376625061, "avg_line_length": 17.5, "blob_id": "e031ae14019c67b34994abd1c79038cac7fdf513", "content_id": "d105034d93d06424410618a16d4d78cbc26cd513", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 40, "num_lines": 20, "path": "/알고리즘/온라인저지/2021/12/1216/네 번째 점.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "xdots = [0 for _ in range(1001)]\nydots = [0 for _ in range(1001)]\n\nfor _ in range(3):\n x, y = map(int, input().split())\n xdots[x] += 1\n ydots[y] += 1\n\nresult = []\n\nfor xdot in xdots:\n if xdot == 1:\n result.append(xdots.index(xdot))\n\nfor ydot in ydots:\n if ydot == 1:\n result.append(ydots.index(ydot))\n\nfor r in result:\n print(r, end=' ')" }, { "alpha_fraction": 0.44262295961380005, "alphanum_fraction": 0.5081967115402222, "avg_line_length": 30, "blob_id": "010fbfd4bbf22c0e592821180288e6ac9992445c", "content_id": "5e1aca49f575780f525c1526e381d54ab496a5e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61, "license_type": "no_license", "max_line_length": 36, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/08/0824/РАВЕНСТВО.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = [int(input()) for _ in range(3)]\nprint((N[1]-N[2])//N[0])" }, { 
"alpha_fraction": 0.6334232091903687, "alphanum_fraction": 0.6522911190986633, "avg_line_length": 25.571428298950195, "blob_id": "ef7c60a74127183882dbef9b2df582a0b8811e42", "content_id": "612047f27f063d514d94910c86fbad5c28ec2ac7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 473, "license_type": "no_license", "max_line_length": 75, "num_lines": 14, "path": "/알고리즘/[템플릿]/스택/옥상 정원 꾸미기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nstack = []\nresult = 0\nfor _ in [0]*int(input().rstrip()):\n height = int(input().rstrip())\n while stack and height>=stack[-1]: stack.pop() # 스택안의, 높이가 낮은 건물들 전부 삭제\n result += len(stack) # while문을 통과하고 스택에 남아있는 건물들은, 현재 건물의 옥상을 볼 수 있다\n stack.append(height) # 현재 건물 추가\nprint(result)\n\n# https://www.acmicpc.net/problem/6198" }, { "alpha_fraction": 0.4453781545162201, "alphanum_fraction": 0.462184876203537, "avg_line_length": 16, "blob_id": "d191d344d1772fa911791a9719ec5fbabd47ca89", "content_id": "905e9f8858de030ab07f2fe3d9c069e3dc41c593", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 22, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/04/0417/골뱅이 찍기 - 뒤집힌 ㄱ.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfor _ in range(N):\n print('@'*5*N)\n\nfor _ in range(4):\n for _ in range(N):\n print('@'*N)\n" }, { "alpha_fraction": 0.5378378629684448, "alphanum_fraction": 0.5837838053703308, "avg_line_length": 19.61111068725586, "blob_id": "cd4c0641d347168d0a81d56edce7c07bf640c3dd", "content_id": "500281a035c64fff5186ade3d3c1da9e9296f8c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 45, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/09/0917/게임.py", 
"repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nX, Y = map(int, input().rstrip().split())\nZ = int((Y*100/X))\nresult = -1\nstart, end = 1, int(1e9)\nwhile start<=end:\n mid = (start+end) // 2\n new_win_rate = int(((Y+mid)*100/(X+mid)))\n if new_win_rate>Z:\n result = mid\n end = mid-1\n else: start = mid+1\nprint(result)\n\n# https://www.acmicpc.net/problem/1072" }, { "alpha_fraction": 0.6126760840415955, "alphanum_fraction": 0.6126760840415955, "avg_line_length": 27.600000381469727, "blob_id": "7abf3a70da02db24fdd86d7ef1569e8f60001d0c", "content_id": "a2a0f6c5447cd40d2fa344b31437a12512306dfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 39, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/06/0610/다리 놓기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from math import factorial as fact\n\nfor t in range(int(input())):\n N, M = map(int, input().split())\n print(fact(M)//(fact(N)*fact(M-N)))" }, { "alpha_fraction": 0.4444444477558136, "alphanum_fraction": 0.4722222089767456, "avg_line_length": 17.16666603088379, "blob_id": "a9b8a8100175d62d41cb14a60df0a31fff677889", "content_id": "d7f600153bc46cc3837dd9c86a9242dd655c76dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 108, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/04/0405/AdivB - 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nC, D = A//B, A%B\nif A != 0 and D < 0:\n C, D = C+1, D-B\nprint(C)\nprint(D)" }, { "alpha_fraction": 0.5226939916610718, "alphanum_fraction": 0.5710102319717407, "avg_line_length": 25.30769157409668, "blob_id": "7ba113b810dcf22249633f575884d9ba159d2299", "content_id": "c6942e3e6259beba02df84ebcc88092269eece56", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 963, "license_type": "no_license", "max_line_length": 79, "num_lines": 26, "path": "/알고리즘/온라인저지/2022/09/0904/마라톤 1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ncheck = [tuple(map(int, input().split())) for _ in range(N)]\nresult = 0\nfor i in range(N-1):\n A, B = check[i], check[i+1]\n result += abs(A[0]-B[0]) + abs(A[1]-B[1])\ndist = 0\nfor i in range(1, N-1):\n A, B, C = check[i-1], check[i], check[i+1]\n jump = abs(A[0]-C[0]) + abs(A[1]-C[1])\n no_jump = abs(A[0]-B[0]) + abs(A[1]-B[1]) + abs(B[0]-C[0]) + abs(B[1]-C[1])\n tmp = no_jump - jump\n dist = max(dist, tmp)\nprint(result - dist)\n\n\"\"\"\n구간을 점프할 때 얻을 수 있는 이익을 모아 테이블을 만든다\n예제에서 B를 점프했을 때 얻는 이익값이 6\nC를 점프했을 때 얻는 이익값이 4이다\n그러면 B를 점프하는게 동선을 가장 단축시킬 수 있고\n이 이익값 6을 전체 경로 20에서 빼주면 14가 나온다\n0번과 N-1번을 제외한 점들에서\n해당 점을 빼고 뛰었을 때 얻을 수 있는 이익값 테이블의\n최대값을\n전체 경로값에서 빼준다\n\"\"\"" }, { "alpha_fraction": 0.4611872136592865, "alphanum_fraction": 0.465753436088562, "avg_line_length": 17.33333396911621, "blob_id": "967fd978ba580198d4f1630215bbcb42abf70020", "content_id": "3cd1e346ece2d616aed71c4d71add1478699d8ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 32, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/02/0220/오타맨 고창영.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n typo, word = input().split()\n typo = int(typo)\n\n for i in range(len(word)):\n if i == typo-1:\n continue\n \n print(word[i], end='')\n print()" }, { "alpha_fraction": 0.6790123581886292, "alphanum_fraction": 0.6790123581886292, "avg_line_length": 26.33333396911621, "blob_id": "95d0e611b79f837cae23b6f73e2cb7c6dff29da7", "content_id": "b7c51a149fbdc6c908443cc23a5cef1fef5a365f", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 52, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/02/0206/연세대학교.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "yonsei = ['YONSEI', 'Leading the Way to the Future']\n\nprint(yonsei[int(input())])" }, { "alpha_fraction": 0.4554140269756317, "alphanum_fraction": 0.4792993664741516, "avg_line_length": 22.296297073364258, "blob_id": "39b5423590f79a4e3321a43ab377de8dc30a8c26", "content_id": "905d88d290dbbb081a227e2a05baf31392afb0fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "no_license", "max_line_length": 39, "num_lines": 27, "path": "/알고리즘/온라인저지/2022/12/1221/별 찍기 - 19.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def fill(arr):\n for k in range(0, size//2+1, 2):\n for i in range(k, size-k):\n arr[i][k] = 1\n\ndef rotate(arr):\n new_arr = []\n for j in range(size):\n tmp = []\n for i in range(size-1, -1, -1):\n tmp.append(arr[i][j])\n new_arr.append(tmp)\n return new_arr\n\nN = int(input())\nsize = 1\nfor _ in range((N-1)*2): size += 2\narr = [[0]*size for _ in range(size)]\nfor _ in range(4):\n fill(arr)\n arr = rotate(arr)\nfor i in range(size):\n for j in range(size):\n if arr[i][j]:\n print('*', end='')\n else: print(' ', end='')\n if i != size-1: print()" }, { "alpha_fraction": 0.5810526609420776, "alphanum_fraction": 0.6084210276603699, "avg_line_length": 19.69565200805664, "blob_id": "c39366f266d2dcc4360ea6ae5155c4c3c7309c63", "content_id": "19dc3a718fec769ac295364e41313ae87ee6eeea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 41, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/10/1002/정수 a를 k로 만들기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import 
deque\n\ninput = sys.stdin.readline\n\ndef move(next, new_cnt):\n if 0<=next<mxl and not visited[next]:\n Q.append((next, new_cnt))\n visited[next] = 1\n\nA, K = map(int, input().rstrip().split())\nmxl = int(1e6)+1 # max_length\nvisited = [0]*mxl\nQ = deque()\nQ.append((A, 0))\nvisited[A] = 1\nresult = 0\nwhile Q:\n now, cnt = Q.popleft()\n if now == K: result = cnt; break\n move(now+1, cnt+1)\n move(now*2, cnt+1)\nprint(result)" }, { "alpha_fraction": 0.3992537260055542, "alphanum_fraction": 0.41791045665740967, "avg_line_length": 16.933332443237305, "blob_id": "04235cfffc577a348d6e139b2c5008be68e466ee", "content_id": "64c696378d5ea916b5f63b7120f057979f9e7301", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 25, "num_lines": 15, "path": "/알고리즘/온라인저지/2021/08/0819/제로.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K = int(input())\nstack = []\ntop = -1\nfor k in range(K):\n num = int(input())\n if num == 0:\n if top == -1:\n continue\n else:\n stack.pop()\n top -= 1\n else:\n stack.append(num)\n top += 1\nprint(sum(stack))" }, { "alpha_fraction": 0.4109589159488678, "alphanum_fraction": 0.4931506812572479, "avg_line_length": 36, "blob_id": "4f25ba821a074fd89a640514f11ce01121b63480", "content_id": "fb89847feee8c83fcf67b87caae0f2c3539dd923", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 73, "license_type": "no_license", "max_line_length": 36, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/09/0904/移動 (Moving).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "H = [int(input()) for _ in range(3)]\nprint(int(H[0] + H[1] < H[2] + 0.5))" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.5428571701049805, "avg_line_length": 35, "blob_id": "313ed2e7b33735e25b1d17d812d33d06be4bedc5", "content_id": "593b64b86972e231183efb09b400a508dd001149", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35, "license_type": "no_license", "max_line_length": 35, "num_lines": 1, "path": "/알고리즘/온라인저지/2023/02/0226/Atrium.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print(f'{int(input())**0.5*4:.8f}')" }, { "alpha_fraction": 0.4540654718875885, "alphanum_fraction": 0.47729673981666565, "avg_line_length": 26.882352828979492, "blob_id": "8eaa5f70aadd78318ed44b147304f96b8418c845", "content_id": "a7b5330e01fc9167e8980ca0e095d89e915b7ec6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1207, "license_type": "no_license", "max_line_length": 58, "num_lines": 34, "path": "/알고리즘/온라인저지/2022/07/0709/단지번호붙이기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\n\ndef bfs(y, x): # 단지 내 가구 수 탐색 BFS\n Q.append((y, x))\n cnt = 0\n while Q:\n y, x = Q.popleft()\n if arr[y][x] == 0: # Q에 넣어놓은 사이에 탐색을 마친 경우라면\n continue # 컨티뉴\n arr[y][x] = 0 # 해당 위치 탐색 완료\n cnt += 1 # 단지 내 가구 수 ++\n for i in range(4):\n ny = y+dy[i]\n nx = x+dx[i]\n if 0<=ny<N and 0<=nx<N and arr[ny][nx]: \n # 지도의 범위를 벗어나지 않으면서 이웃한 단지이면\n Q.append((ny, nx))\n return cnt # 단지 내 가구 수 반환\n\n\ndy = [-1, 1, 0, 0] # 상하좌우 4방향 델타이동\ndx = [0, 0, -1, 1]\nN = int(input())\narr = [list(map(int, list(input()))) for _ in range(N)]\nresult = [0] # 단지 수 0으로 초기화\nQ = deque()\nfor i in range(N):\n for j in range(N):\n if arr[i][j]: # 단지를 발견하면\n result.append(bfs(i, j)) # 탐색한 단지내 가구수 추가\n result[0] += 1 # 단지 수 ++\nprint(result[0]) # 단지 수 출력\nfor r in sorted(result[1:]): print(r) # 단지내 가구수를 오름차순으로 출력" }, { "alpha_fraction": 0.3693181872367859, "alphanum_fraction": 0.38825756311416626, "avg_line_length": 36.78571319580078, "blob_id": "8c9678063190b9f1e63ec12bdc97abfeb2263c11", "content_id": "1142953ef6ff9b7a540eae3582247ce7e8b25916", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 676, "license_type": "no_license", "max_line_length": 71, "num_lines": 14, "path": "/알고리즘/온라인저지/2021/12/1216/소인수분해.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "n = int(input()) # 소수를 찾을 범위\na = [False,False] + [True]*(n-1) # 0 ~ n 까지의 숫자 리스트\nprimes=[] # 소수 집합\nfor i in range(2,n+1): # 2 ~ n까지 반복\n if a[i]: # 2부터 시작, 해당 숫자가 지워지지 않고 남아있는 소수라면 \n primes.append(i) # 소수 리스트에 추가\n for j in range(2*i, n+1, i): # 해당 소수의 배수들을 \n a[j] = False # 리스트에서 전부 False로 만들기\n\nwhile n != 1:\n for p in primes:\n while n % p == 0:\n n //= p\n print(p)" }, { "alpha_fraction": 0.45864662528038025, "alphanum_fraction": 0.4736842215061188, "avg_line_length": 18.14285659790039, "blob_id": "10db02c0ab7a4822915bf76ca9f6399ab3b3ddfb", "content_id": "f18b068108ff0f4ebdbd33d92b9b10e48e5d27bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/03/0312/좋은 암호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K, L = map(int, input().split())\nfor i in range(2, L):\n if K%i == 0:\n print('BAD', i)\n break\nelse:\n print('GOOD')" }, { "alpha_fraction": 0.420634925365448, "alphanum_fraction": 0.43386244773864746, "avg_line_length": 15.47826099395752, "blob_id": "4d0b96b49d19e5b5eb9a79f8971434bfac227d8a", "content_id": "9e0c88b23a862a35ac6b310d1c68842b7e107d83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 50, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/10/1029/운동.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, m, M, T, R = map(int, input().rstrip().split())\nn = 0\nresult = 0\nnow = m\nwhile n < N:\n result += 1\n # print(result, now, T, R)\n if m+T > M:\n result = 
-1\n break\n if now+T <= M:\n n += 1\n now += T\n else:\n if now-R <= m:\n now = m\n else:\n now -= R\nprint(result)" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.637499988079071, "avg_line_length": 19.25, "blob_id": "6ab283b7dcc96735afa323d87ab2139368e0be4e", "content_id": "2d5077a69bf35f042ca70744cf5847acb44fd2cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 23, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/04/0415/자동완성.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 'Naver Whale'\nif input() in 'Nn':\n result = 'Naver D2'\nprint(result)" }, { "alpha_fraction": 0.5079872012138367, "alphanum_fraction": 0.5143769979476929, "avg_line_length": 17.47058868408203, "blob_id": "fee8332a08be4e0c364f804ca7b9dee182e1c746", "content_id": "4823b245763595b360c87c906a4adb37884eda5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 32, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/07/0722/라디오.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nN = int(input())\nF = [] # favorite\nR = A # radio\nfor n in range(N):\n F.append(int(input()))\nminn = abs(B-R)\nfor f in F:\n tmp = abs(B-f)\n if tmp < minn: \n minn = tmp\n R = f\nresult = 0\nif R != A: # 즐겨찾기를 한 번 누름\n result += 1\nresult += abs(B-R)\nprint(result)" }, { "alpha_fraction": 0.443708598613739, "alphanum_fraction": 0.4768211841583252, "avg_line_length": 20.64285659790039, "blob_id": "f786c31298689b338d27f9a92c05a6b04f981980", "content_id": "2456cab5309ea7429c0d841ea752d155bcc7204e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 45, "num_lines": 14, "path": "/알고리즘/온라인저지/2023/02/0216/수찬은 
마린보이야!!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ntry:\n records = list(map(int, input().split()))\n dp = [0]*101\n A = sum(records)/N\n for record in records:\n dp[record] += 1\n B = 0\n for i in range(101):\n if dp[i]:\n B += i * (dp[i]/N)\n print(f'{A/B:.2f}')\nexcept:\n print('divide by zero')" }, { "alpha_fraction": 0.41951218247413635, "alphanum_fraction": 0.4390243887901306, "avg_line_length": 24.75, "blob_id": "c0077997e9895fb7f22c3ef1413c0107aaa2753e", "content_id": "b0e9dbda375290f7ce75dbf967bf1d3fafcefbf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 36, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/06/0612/0의 개수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for t in range(int(input())):\n N, M = map(int, input().split())\n result = 0\n for a in range(N, M+1):\n for b in str(a):\n if b == '0':\n result += 1\n print(result)" }, { "alpha_fraction": 0.42491209506988525, "alphanum_fraction": 0.43897539377212524, "avg_line_length": 30.619047164916992, "blob_id": "a1c33e2efa84eb561f7805fc7c22f7176a2226f5", "content_id": "0d216d0f2d182c4901c28750870eab30848710d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2481, "license_type": "no_license", "max_line_length": 60, "num_lines": 63, "path": "/알고리즘/온라인저지/2022/03/0315/후보 추천하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nK = int(input())\npicks = list(map(int, input().split())) # 추천 후보\n\nresult = []\n\n# [후보번호, 추천수, 등록시간]\nfor i in range(K):\n # print(result)\n if len(result) < N: # 사진틀이 남았을 때\n # 이미 있는 후보를 추천하는지 확인\n in_result = False\n idx = 0\n for k in range(len(result)):\n if picks[i] == result[k][0]:\n in_result = True\n idx = k\n\n if not in_result: # 이미 있는 후보가 아닐 경우\n result.append([picks[i], 1, i]) # 후보 추가\n 
else: # 있는 후보일 경우\n result[idx][1] += 1 # 추천 +1\n \n else: # 사진틀이 남지 않았을 때\n # 이미 있는 후보를 추천하는지 확인\n in_result = False\n idx = 0\n for k in range(N):\n if picks[i] == result[k][0]:\n in_result = True\n idx = k\n\n if not in_result: # 이미 있는 후보가 아닐 경우\n # 후보 사진을 갈아끼워야 함\n recommended = 10**3 # 가상의 추천수 최대값\n recommend_idx = 0 # 가장 추천을 많이 받은 후보의 인덱스\n oldest_time = 10**3 # 가상의 등록순서 최대값\n oldest_idx = 0 # 가장 오래전에 등록된 후보의 인덱스\n for j in range(N): # 후보 사진들 중\n # 추천이 가장 적은 후보 찾기\n if result[j][1] < recommended:\n recommended = result[j][1]\n recommend_idx = j\n # 동시에 그 후보의 등록시간도 기록\n oldest_time = result[j][2]\n oldest_idx = j\n\n # 추천수가 가장 적은 후보가 여러명일 때\n elif result[j][1] == recommended:\n # 등록시간이 오래된 후보를 찾아서\n if oldest_time > result[j][2]:\n oldest_time = result[j][2]\n oldest_idx = j\n recommend_idx = oldest_idx # 인덱스 잡고\n # print(recommend_idx)\n result[recommend_idx] = [picks[i], 1, i] # 덮어씌우기\n else: # 있는 후보일 경우\n result[idx][1] += 1 # 추천 +1\n # print(result)\n\nresult.sort(key=lambda x:x[0]) # 후보번호순 정렬\nfor r in result:\n print(r[0], end=' ') # 출력" }, { "alpha_fraction": 0.504273533821106, "alphanum_fraction": 0.504273533821106, "avg_line_length": 13.75, "blob_id": "bc77b127a60d63d2c8fa532fe06e2044be22119b", "content_id": "ed4d430c0d4f8210b3f129b87f46b5a7bdabd75e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/02/0202/파티가 끝나고 난 뒤.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "L, P = map(int, input().split())\n\na = list(map(int, input().split()))\n\ns = L * P\n\nfor i in a:\n print(i-s, end=' ')" }, { "alpha_fraction": 0.4020356237888336, "alphanum_fraction": 0.4452926218509674, "avg_line_length": 16.909090042114258, "blob_id": "d549458b9e9d63a94f3b195afa84cadc1e4196f4", "content_id": "df21e5d886025b7a16483e2444febe214707943c", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 393, "license_type": "no_license", "max_line_length": 62, "num_lines": 22, "path": "/알고리즘/온라인저지/2021/08/0829/자리배정.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nC, R = map(int, sys.stdin.readline().split())\nK = int(sys.stdin.readline())\n\ndx = [0, 1, 0, -1]\ndy = [1, 0, -1, 0]\n\nhall = [[0]*C for _ in range(R)]\ni = 1\nx = y = k = 0\n\nwhile i < K:\n hall[y][x] = i\n i += 1\n if x+dx[k] >= C or y+dy[k] >= R or hall[y+dy[k]][x+dx[k]]:\n k = (k+1)%4\n x = (x+dx[k])\n y = (y+dy[k])\nif C*R < K:\n print(0)\nelse:\n print(x+1, y+1)" }, { "alpha_fraction": 0.5452961921691895, "alphanum_fraction": 0.5522648096084595, "avg_line_length": 29.263158798217773, "blob_id": "928021474cba99457c8b2b04dbd173afa07b11c6", "content_id": "7af088e857095e87d905978c083f85dad85a7720", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "no_license", "max_line_length": 60, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/11/1127/N과 M (6).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef dfs(start):\n if len(result) == M: # 수열이 완성됐다면\n print(*result) # 완성된 수열 출력\n # 이번 함수가 종료되고 나오면서 line14의 result.pop()에서 마지막 숫자 제거\n else: # 아직 수열이 완성되지 않았다면\n for i in range(start, N): # 수열에 더 들어올 남은 숫자 개수만큼 for\n if nums[i] not in result:\n result.append(nums[i]) # nums[i]를 집어넣고\n dfs(i+1) # 더 깊이 탐색\n result.pop() # 방금 넣었던 nums[i]를 빼주기\n\nN, M = map(int, input().rstrip().split())\nnums = sorted(list(map(int, input().rstrip().split())))\nresult = []\ndfs(0)" }, { "alpha_fraction": 0.5398229956626892, "alphanum_fraction": 0.5575221180915833, "avg_line_length": 27.33333396911621, "blob_id": "4e86b5df9ba8ea8532729f1fd18c3c26e12a8924", "content_id": "32fe6f431a0b351431b794a7a2608bd391a40d1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 42, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/09/0910/카드게임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A_card = list(map(int, input().split()))\nB_card = list(map(int, input().split()))\nA_win = B_win = draw = 0\nfor i in range(10):\n if A_card[i] > B_card[i]: A_win += 1\n elif A_card[i] < B_card[i]: B_win += 1\n else: draw += 1\nresult = ''\nif A_win > B_win: result = 'A'\nelif A_win < B_win: result = 'B'\nelse: result = 'D'\nprint(result)" }, { "alpha_fraction": 0.37383177876472473, "alphanum_fraction": 0.44859811663627625, "avg_line_length": 20.600000381469727, "blob_id": "dc3a2207bd6485006fb527f212e2f695d46ee3d5", "content_id": "86462cc82ad1dcfbdd0fa21605b722002876decd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/01/0130/2018 연세대학교 프로그래밍 경진대회.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfor k in range(1, 10101):\n if eval('k**2 + k + 1') == N:\n print(k)\n break" }, { "alpha_fraction": 0.47663551568984985, "alphanum_fraction": 0.514018714427948, "avg_line_length": 20.600000381469727, "blob_id": "8a4db5b859392937afc961ffa35a0e1d8a018746", "content_id": "ae664d974a4a4ea3c56059c66c0bc4c8296ea733", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/05/0519/양수 개수 세기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nfor i in list(input().split()):\n if i[0] not in ('-', '0'):\n result += 1\nprint(result)" }, { "alpha_fraction": 0.5199999809265137, "alphanum_fraction": 0.5400000214576721, "avg_line_length": 27.714284896850586, "blob_id": 
"b703a8efae28754d8127f136866de4f9b23a9c52", "content_id": "22d045fcfe189f3c1596b8d78964656cbc652c91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 56, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/02/0212/바구니 뒤집기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nresult = list(range(1, N+1))\nfor m in range(M):\n i, j = map(int, input().split())\n i -= 1\n result = result[:i] + result[i:j][::-1] + result[j:]\nprint(*result)" }, { "alpha_fraction": 0.5879828333854675, "alphanum_fraction": 0.5879828333854675, "avg_line_length": 22.399999618530273, "blob_id": "9934fd38e46d4c4fa6f3f55fefe0f20e43da3bf0", "content_id": "706fd732dc605207a0d99700cdd9217075dd96f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 45, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/11/1106/대회 자리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in range(int(input().rstrip())):\n P, M = map(int, input().rstrip().split())\n result = set()\n for p in range(P):\n result.add(int(input().rstrip()))\n print(P-len(result))" }, { "alpha_fraction": 0.4575645625591278, "alphanum_fraction": 0.4944649338722229, "avg_line_length": 14.11111068725586, "blob_id": "3b9fe55baa92335e61f7a37fb05ca6950d9a59ec", "content_id": "e4ba9d5c888fb9e574cb45970da91608383f4de6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 25, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/02/0209/특별한 날.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "M = int(input())\nD = int(input())\n\nresult = ''\n\nif M == 2 and D == 18:\n result = 'Special'\nelif M < 2:\n 
result = 'Before'\nelif M > 2:\n result = 'After'\nelif M == 2:\n if D < 18:\n result = 'Before'\n elif D > 18:\n result = 'After'\n\nprint(result)" }, { "alpha_fraction": 0.5528455376625061, "alphanum_fraction": 0.5691056847572327, "avg_line_length": 23.799999237060547, "blob_id": "b64a9e3afc161dd5947328d2ccd9b7b7e496658f", "content_id": "2787e3ca5e3b14d2e7ece980ae690358a87bbfd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/08/0822/Julka.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A = int(input()) # apple\nG = int(input()) # gap\nKlaudia = (A-G)//2 + G\nNatalia = (A-G)//2\nprint(Klaudia, Natalia, sep='\\n')" }, { "alpha_fraction": 0.40716612339019775, "alphanum_fraction": 0.4527687430381775, "avg_line_length": 17.117647171020508, "blob_id": "2eca37b61d7bafe52a8016d0fbf9e42d275383a8", "content_id": "800e905d6d85f9b3fff1857dc0a2769ddae40095", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 307, "license_type": "no_license", "max_line_length": 46, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/12/1204/카드 구매하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ncards = list(map(int, input().split()))\ndp = [0]*(N+1)\nfor i in range(1, N+1):\n for j in range(i):\n dp[i] = max(dp[i], dp[j]+cards[i-j-1])\nprint(dp[-1])\n\n\"\"\"\ndp[i] = max(\n dp[1] + cards[i-1],\n dp[2] + cards[i-2],\n ...\n dp[i-2] + cards[2],\n dp[i-1] + cards[1],\n)\n\"\"\"" }, { "alpha_fraction": 0.34794774651527405, "alphanum_fraction": 0.4048507511615753, "avg_line_length": 21.82978630065918, "blob_id": "633db1042372d0c4efcabbc0507c33465347af29", "content_id": "84d26e16237db07662ec7f66a36bfced49b387b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1072, 
"license_type": "no_license", "max_line_length": 58, "num_lines": 47, "path": "/알고리즘/프로그래머스/Level1/신규 아이디 추천.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def solution(new_id):\n answer = ''\n # print(new_id)\n new_id = new_id.lower()\n tmp = ''\n \n for n in new_id: # step 2\n if n in 'abcdefghijklmnopqrstuvwxyz0123456789-_.':\n tmp = tmp + n\n switch = False\n tmp2 = ''\n \n for t in tmp: # step 3\n if t == '.':\n if not switch:\n switch = True\n tmp2 = tmp2 + t\n else:\n tmp2 = tmp2 + t\n if switch:\n switch = False\n # print('step3', tmp2)\n \n if len(tmp2) >= 2:\n if tmp2[0] == '.': # step 4\n tmp2 = tmp2[1:]\n if tmp2[-1] == '.':\n tmp2 = tmp2[:len(tmp2)-1]\n else:\n tmp2 = ''\n # print('step4', tmp2)\n \n if tmp2 == '': # step 5\n tmp2 = 'a'\n \n if len(tmp2) >= 16: # step 6\n tmp2 = tmp2[:15]\n if tmp2[-1] == '.':\n tmp2 = tmp2[:len(tmp2)-1]\n \n while len(tmp2) <= 2: # step 7\n tmp2 = tmp2 + tmp2[-1]\n \n answer = tmp2\n return answer\n\nprint(solution(input()))" }, { "alpha_fraction": 0.5703883767127991, "alphanum_fraction": 0.5800970792770386, "avg_line_length": 23.235294342041016, "blob_id": "f79e58b556b0778ac092216898033296daf91886", "content_id": "620accbf8bdc7777fca0f32d1030651d97b3a867", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 36, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/03/0329/맞았는데 왜 틀리죠.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S1, S2 = map(int, input().split())\nresult = 'Accepted'\nis_wrong_answer = False\nis_why_wrong = False\nfor s in range(S1):\n A, B = map(int, input().split())\n if A != B:\n is_wrong_answer = True\nfor s in range(S2):\n A, B = map(int, input().split())\n if A != B:\n is_why_wrong = True\nif is_wrong_answer:\n result = 'Wrong Answer'\nelif is_why_wrong:\n result = 'Why Wrong!!!'\nprint(result)\n" }, { "alpha_fraction": 0.529411792755127, 
"alphanum_fraction": 0.5441176295280457, "avg_line_length": 33.5, "blob_id": "254eb6e75ec447388270de7ba79dc8a522fdb0a1", "content_id": "a1ea7e94ac60a7be519ef0ed1e5e0f0ee06a771f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/알고리즘/온라인저지/2023/04/0402/Piece of Cake.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, H, V = map(int, input().split())\nprint(max(N-H, H)*max(N-V, V)*4)" }, { "alpha_fraction": 0.46859902143478394, "alphanum_fraction": 0.49275362491607666, "avg_line_length": 22.11111068725586, "blob_id": "4ef3b33cfe25d3270e15b91a347fcc812901b50e", "content_id": "feff1f3d4a405339384f173ed943fe42ffdffc8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/05/0512/앵그리 창영.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, W, H = map(int, input().split())\nmax_match = W**2 + H**2\nresult = ['DA', 'NE']\nfor n in range(N):\n M = int(input())\n if M**2 <= max_match:\n print(result[0])\n else:\n print(result[1])" }, { "alpha_fraction": 0.5070063471794128, "alphanum_fraction": 0.522292971611023, "avg_line_length": 36.39682388305664, "blob_id": "82af38fbd1673953956c2203c119d5583a6aadd0", "content_id": "8622017e0465f39f96ac027c8e280cd75f66ef74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2997, "license_type": "no_license", "max_line_length": 85, "num_lines": 63, "path": "/알고리즘/온라인저지/2022/11/1119/로봇 시뮬레이션.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndirection_to_index = { # NESW를 0123으로 바꿔서 델타이동을 아래와 같이 할 것\n 'N': 0,\n 'E': 1,\n 'S': 2,\n 'W': 3,\n}\ndy, dx = [-1, 0, 1, 0], [0, 1, 0, 
-1]\n\nA, B = map(int, input().rstrip().split())\narr = [[-1]*A for _ in range(B)]\nN, M = map(int, input().rstrip().split())\nrobot_location = [(0, 0) for _ in range(N+1)] # n번 로봇의 좌표를 (y,x)꼴로 담을 배열 공간 N+1개만큼 생성\nfor n in range(1, N+1):\n x, y, direction = input().rstrip().split()\n # 예제처럼 왼쪽아래를 1,1로 두기 위해 아래와 같이 y좌표를 바꿔서 집어넣는다\n x, y, direction = int(x)-1, B-(int(y)), direction_to_index[direction]\n robot = n # 로봇 번호\n arr[y][x] = direction # 원본 배열에는 로봇이 바라보는 방향을 집어넣는다\n robot_location[robot] = (y, x)\n# 디버깅\n# for a in arr: print(a) \n# print()\norders = [] # 입력은 일단 다 받기 위해 리스트 생성\nfor m in range(M):\n robot, command, loop = input().rstrip().split()\n robot, loop = int(robot), int(loop)\n orders.append((robot, command, loop))\nresult = 'OK' # 명령들을 이상없이 수행하면 출력할 OK\nfor order in orders:\n if result != 'OK': break # 뭔가 심상치 않은 일이 생긴거야~\n robot, command, loop = order # ex) 1 F 3\n y, x = robot_location[robot] # 명령으로 들어온 로봇이 현재 배열에 위치한 좌표 y, x\n direction = arr[y][x] # 로봇이 보고 있는 방향\n if command == 'R': # 오른쪽으로 돌리고~\n arr[y][x] += loop\n arr[y][x] %= 4\n elif command == 'L': # 왼쪽으로 돌리고~\n for i in range(loop):\n arr[y][x] = (arr[y][x]+3)%4 # 오른쪽으로 세 번 돌린 것과 같다\n else: # 앞으로 가자!\n for i in range(loop):\n if result != 'OK': break # 뭔가 심상치 않은 일이 생긴거야~\n ny, nx = y+dy[direction], x+dx[direction] # 로봇의 다음 이동할 좌표\n if not (0<=ny<B) or not (0<=nx<A): # 벽에 부딪혔다 == 범위를 벗어났다\n result = f'Robot {robot} crashes into the wall'\n elif arr[ny][nx] != -1: # 다른 로봇과 부딪혔다\n crashed_robot = 0 # 부딪힌 로봇의 번호 초기화\n for n in range(N+1): # 부딧힌 로봇의 번호를 찾자\n if robot_location[n] == (ny, nx): # 로봇들 중 ny,nx에 위치한 로봇의 인덱스\n crashed_robot = n\n result = f'Robot {robot} crashes into robot {crashed_robot}'\n else: # 이상없이 앞으로 갈 수 있다\n arr[y][x], arr[ny][nx] = arr[ny][nx], arr[y][x] # 한 칸 앞으로\n y, x = ny, nx # 로봇의 새 좌표로 갱신\n robot_location[robot] = (y, x) # 로봇 좌표모음 배열도 갱신\n # 디버깅\n # for a in arr: print(a)\n # print()\nprint(result)" }, { "alpha_fraction": 0.4356435537338257, "alphanum_fraction": 
0.4752475321292877, "avg_line_length": 19.399999618530273, "blob_id": "15ff4d94e1891fe0b220191d9a09ad265789c018", "content_id": "343b101c94ec5dd399e1084be5e347259dbf1479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/08/0822/AFC 윔블던.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nif (N+M)%2 or N<M:\n print(-1)\nelse:\n print((N+M)//2, (N-M)//2)" }, { "alpha_fraction": 0.4752941131591797, "alphanum_fraction": 0.4964706003665924, "avg_line_length": 21.421052932739258, "blob_id": "a7c209f5e5c48c2d014adf5c1c83a19f38559c80", "content_id": "51d7f086064c595f90c3542c1a319a6b6c8a859f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 425, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/10/1002/The Game of Death.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in [0]*int(input().rstrip()):\n result = 0\n N = int(input().rstrip())\n death = [int(input().rstrip())-1 for _ in range(N)]\n visited = [0]*N\n i = 0\n flag = True\n while i != N-1:\n if not visited[i]:\n visited[i] = 1\n i = death[i]\n result += 1\n else: flag = False; break\n if flag: print(result)\n else: print(0)" }, { "alpha_fraction": 0.5112782120704651, "alphanum_fraction": 0.5413534045219421, "avg_line_length": 25.600000381469727, "blob_id": "19c40a7828639ff75b327da66f9547ea3800d701", "content_id": "e5fdab905d504f98992f4b6656d19555e911996e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/09/0910/등장하지 않는 문자의 합.py", "repo_name": "sorrow4468/BAEKJOON", 
"src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n result = sum(list(range(65, 91)))\n for i in input():\n result -= ord(i)\n print(result)\n" }, { "alpha_fraction": 0.43292683362960815, "alphanum_fraction": 0.4491869807243347, "avg_line_length": 19.54166603088379, "blob_id": "2c63a28fe7a01d76b65f641c60fd21c4d7f51f38", "content_id": "f22c3b4d9b547bd987fb6371e0dd408d042efc08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "no_license", "max_line_length": 39, "num_lines": 24, "path": "/알고리즘/온라인저지/2022/09/0910/공약수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def gcd(a, b):\n while b > 0:\n a, b = b, a%b\n return a\n\nN = int(input())\nGCD = 0\nif N == 2:\n A, B = map(int, input().split())\n GCD = gcd(A, B)\nelse:\n A, B, C = map(int, input().split())\n GCD = gcd(gcd(A, B), gcd(B, C))\nresult = []\nfor i in range(1, int(GCD**0.5)+1):\n if GCD%i == 0:\n A, B = GCD//i, i\n if A != B:\n result.append(A)\n result.append(B)\n else:\n result.append(A)\nfor i in sorted(result):\n print(i)" }, { "alpha_fraction": 0.5858823657035828, "alphanum_fraction": 0.5976470708847046, "avg_line_length": 19.285715103149414, "blob_id": "dc7f0693262fb040c1a57c1bf4ed3d3c04a7956f", "content_id": "a127e9330295a096304ebce14da7e7d9f0021771", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 677, "license_type": "no_license", "max_line_length": 59, "num_lines": 21, "path": "/알고리즘/온라인저지/2022/08/0816/한조서열정리하고옴ㅋㅋ.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nlst = list(map(int, input().split()))\nA, result, cnt = 0, 0, 0 # archor\nfor l in lst:\n # 봉우리 높이 갱신, 임시값 초기화, 결과값 갱신\n if l > A: A, cnt, result = l, 0, max(result, cnt)\n else: cnt += 1\nprint(max(result, cnt)) # 봉우리를 갱신하지 않고 for문이 끝난 경우가 있을 수 있음\n\n\"\"\"\n그리디 문제\n출발한 높이보다 더 높은 봉우리에 부딪히기 전까지\n몇개의 봉우리를 만나는지\n가장 많이 만난 
봉우리의 수는 몇인지를 구하는 문제\n\n최소최대값 갱신을 여태\nif a > b: a = b\n를 사용했었는데\na = max(a, b)\n로 바꾸는 것도 좋을 것 같다\n\"\"\"" }, { "alpha_fraction": 0.4749034643173218, "alphanum_fraction": 0.5057914853096008, "avg_line_length": 18.961538314819336, "blob_id": "8592907d76f8a09548097e8a7e07eb5c86cc7c84", "content_id": "d7f797841e08c56ee866d5b22de0da9bb67ad66c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 518, "license_type": "no_license", "max_line_length": 59, "num_lines": 26, "path": "/알고리즘/온라인저지/2022/03/0325/이게 분수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import copy\n\n\ndef rotate():\n global arr\n new = []\n for j in range(2):\n tmp = []\n for i in range(1, -1, -1):\n tmp.append(arr[i][j])\n new.append(tmp)\n arr = copy.deepcopy(new)\n\narr = [list(map(int, input().split())) for _ in range(2)]\n# print(arr)\nmaxx = 0\nresult = 0\n# rotate()\nfor i in range(4):\n A, B, C, D = arr[0][0], arr[0][1], arr[1][0], arr[1][1]\n fraction = A/C + B/D\n if fraction > maxx:\n maxx = fraction\n result = i\n rotate()\nprint(result)" }, { "alpha_fraction": 0.47598254680633545, "alphanum_fraction": 0.49344977736473083, "avg_line_length": 24.5, "blob_id": "67a6e7d6c2e588e28270de8ddc61ca99652a8708", "content_id": "6368c60cfea3a9e5c11ce12cf1e3d0d203d9c6ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 42, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/10/1012/골드바흐의 추측.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "n = int(1e4)\na = [False,False] + [True]*(n-1)\nprimes = []\nfor i in range(2, n+1):\n if a[i]:\n primes.append(i)\n for j in range(2*i, n+1, i):\n a[j] = False\nfor t in range(int(input())):\n N = int(input())\n result = []\n for prime in primes:\n if prime > N: break\n if N-prime in primes:\n tmp = sorted((N-prime, prime))\n if tmp not in result:\n 
result.append(tmp)\n print(*result[-1])" }, { "alpha_fraction": 0.5074999928474426, "alphanum_fraction": 0.5450000166893005, "avg_line_length": 21.27777862548828, "blob_id": "8c69bc58f58f32c8b4c9759198b0b727d2a8be7d", "content_id": "8b32f1fa68ac452a55b66255d9c7a94a23d609c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "no_license", "max_line_length": 51, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/11/1113/사나운 개.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef bark(start, stop):\n i = 1\n while i+start < 1000:\n for j in range(i, i+start):\n if j < 1000: \n dog[j] += 1\n i += start+stop\n\ndog = [0]*1000\nA, B, C, D = map(int, input().rstrip().split())\nvisitors = list(map(int, input().rstrip().split()))\nbark(A, B); bark(C, D)\nfor visitor in visitors:\n print(dog[visitor])" }, { "alpha_fraction": 0.44285714626312256, "alphanum_fraction": 0.5857142806053162, "avg_line_length": 34.5, "blob_id": "17aa27c1727ead5f890beeb81e8a693b2d756165", "content_id": "42a01015bfdfaf4d788dd64c8152c97dcc4577a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/08/0822/가희와 방어율 무시.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nprint(0 if A*(100-B) >= 10000 else 1)" }, { "alpha_fraction": 0.4842519760131836, "alphanum_fraction": 0.5, "avg_line_length": 20.25, "blob_id": "ff76b56b06a4083826bb3be605866b7eb3bcac70", "content_id": "2b1cc1f023a82ca889cede6b906e1f73938194d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/6. 
파이썬 SW문제해결 기본 Queue/5차시 6일차 - 회전.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(1, T+1):\n N, M = map(int, input().split())\n arr = list(map(int, input().split()))\n tmp = M%N\n for i in range(tmp):\n arr.append(arr.pop(0))\n \n result = arr[0]\n\n print('#{} {}'.format(t, result))" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5249999761581421, "avg_line_length": 19.5, "blob_id": "d4d09322343760f3a5c8aeda4522941377be0c19", "content_id": "da679b9fdffb389ffe147ca355fb3d95f2aaf169", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/알고리즘/온라인저지/2021/12/1213/팰린드롬인지 확인하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a = input()\nprint(int(bool(a==a[::-1])))" }, { "alpha_fraction": 0.49799197912216187, "alphanum_fraction": 0.5582329034805298, "avg_line_length": 15.666666984558105, "blob_id": "7203ec5f0c26c0c6d5310a0413777b202e5fb984", "content_id": "2b71e0077b0034f210a3ccced374c5b54a648c2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "no_license", "max_line_length": 39, "num_lines": 15, "path": "/알고리즘/[템플릿]/Dynamic Programming/점화식.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\ndp = [0]*36\ndp[0] = 1\nfor i in range(35):\n tmp = 0 \n for j in range(i+1):\n tmp += dp[j]*dp[i-j]\n dp[i+1] = tmp\nprint(dp[N])\n\n# https://www.acmicpc.net/problem/13699" }, { "alpha_fraction": 0.5250896215438843, "alphanum_fraction": 0.5286738276481628, "avg_line_length": 31.882352828979492, "blob_id": "b66769161b9f94148c43d7c128385a69cec94997", "content_id": "218fb84ce2e49a00a95dfa948f39e071472f6240", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 904, "license_type": "no_license", "max_line_length": 75, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/09/0903/재귀함수가 뭔가요.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def dfs(level):\n tmp = '____'*level\n if level != N:\n print(tmp + '\"재귀함수가 뭔가요?\"')\n print(tmp + '\"잘 들어보게. 옛날옛날 한 산 꼭대기에 이세상 모든 지식을 통달한 선인이 있었어.')\n print(tmp + '마을 사람들은 모두 그 선인에게 수많은 질문을 했고, 모두 지혜롭게 대답해 주었지.')\n print(tmp + '그의 답은 대부분 옳았다고 하네. 그런데 어느 날, 그 선인에게 한 선비가 찾아와서 물었어.\"')\n dfs(level+1)\n print(tmp + '라고 답변하였지.')\n else:\n print(tmp + '\"재귀함수가 뭔가요?\"')\n print(tmp + '\"재귀함수는 자기 자신을 호출하는 함수라네\"')\n print(tmp + '라고 답변하였지.')\n\nN = int(input())\nprint('어느 한 컴퓨터공학과 학생이 유명한 교수님을 찾아가 물었다.')\ndfs(0)" }, { "alpha_fraction": 0.4591836631298065, "alphanum_fraction": 0.4897959232330322, "avg_line_length": 18.799999237060547, "blob_id": "cbd5868577a721804567cf9d43bef35da98dcf67", "content_id": "bf88d1835cf4637319cbc2a8d5a20bc40488907f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/02/0210/The Fastest Sorting Algorithm In The World.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "i = 1\nwhile True:\n if input() == '0': break\n print(f'Case {i}: Sorting... 
done!')\n i += 1" }, { "alpha_fraction": 0.47719594836235046, "alphanum_fraction": 0.49070945382118225, "avg_line_length": 29.384614944458008, "blob_id": "76cd710fe9eb359047051b92247709ffe44f8471", "content_id": "87a312d802455a3a8008ed210fce9d89cca38f16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1446, "license_type": "no_license", "max_line_length": 60, "num_lines": 39, "path": "/알고리즘/온라인저지/2022/07/0710/적록색약.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque # BFS\nfrom copy import deepcopy # 원본 배열 복사하여 색약 배열로 변경하기 위함\n\n\ndef bfs(arr, y, x, color):\n Q = deque()\n Q.append((y, x))\n while Q:\n y, x = Q.popleft()\n if not arr[y][x]:\n continue\n arr[y][x] = 0\n for i in range(4):\n ny = y+dy[i]\n nx = x+dx[i]\n if 0<=ny<N and 0<=nx<N and arr[ny][nx] == color:\n # 탐색범위를 벗어나지 않으면서, 현재 탐색중인 색깔과 같으면\n Q.append((ny, nx))\n\n\ndy = [-1, 1, 0, 0] # 4방향 델타이동\ndx = [0, 0, -1, 1]\nN = int(input())\nnormal_arr = [list(input()) for _ in range(N)] # 원본배열\nCB_arr = deepcopy(normal_arr) # 원본 배열 복사하여 색약배열 초기화\nfor i in range(N): # 적록색약 배열\n for j in range(N):\n if CB_arr[i][j] in 'RG': # 빨강과 초록을 구분할 수 없으므로\n CB_arr[i][j] = 'A' # 임의의 문자 A로 통일\nnormal = CB = 0 # 일반인, 색약인(color blind) 결과값 초기화\nfor i in range(N):\n for j in range(N):\n if normal_arr[i][j]: # normal\n bfs(normal_arr, i, j, normal_arr[i][j])\n normal += 1 # 일반인이 볼 수 있는 구역 ++\n if CB_arr[i][j]: # color blind\n bfs(CB_arr, i, j, CB_arr[i][j])\n CB += 1 # 색약인이 볼 수 있는 구역 ++\nprint(normal, CB) # 출력" }, { "alpha_fraction": 0.5493230223655701, "alphanum_fraction": 0.5802707672119141, "avg_line_length": 27.72222137451172, "blob_id": "788c2f3361b962163e159ab6d2d1d800be952725", "content_id": "4048cd0a3e10aba860606d19dd3a232b39dfef17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 691, "license_type": "no_license", "max_line_length": 60, "num_lines": 18, 
"path": "/알고리즘/온라인저지/2023/03/0326/평범한 배낭.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, K = map(int, input().split())\nitems = [tuple(map(int, input().split())) for _ in range(N)]\ndp = [0]*(K+1)\nfor item in items:\n weight, value = item\n for i in range(K, weight-1, -1): # top-down\n dp[i] = max(dp[i], dp[i-weight]+value)\nprint(dp[-1])\n\n\"\"\"\ntop-down 방식의 문제이다\n현재 무게(dp[i])에서 얻을 수 있는 가장 큰 가치를\n아래 두 값 중 최대값으로 갱신한다\n1. 이미 구한 현재 무게의 가치 최대치(dp[i])\n2. 현재 물건의 무게만큼 더 담을 수 있던 시점의 가치 최대치에 현재 물건의 가치를 더한 값\nex) dp[7] = max(dp[7], dp[3]+items[4][value])\nex2) dp[6] = max(dp[6], dp[0]+items[6][value])\n\"\"\"\n" }, { "alpha_fraction": 0.5526315569877625, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 18.5, "blob_id": "9e1cf1567f1350a0add7a20eb09d7065d65d73b8", "content_id": "9f996e893be83d9d391e27560c8d8690ebcf97ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "no_license", "max_line_length": 21, "num_lines": 2, "path": "/알고리즘/온라인저지/2023/01/0105/선린인터넷고등학교 교가.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nprint(input()[N-5:N])" }, { "alpha_fraction": 0.4736842215061188, "alphanum_fraction": 0.5403508543968201, "avg_line_length": 19.428571701049805, "blob_id": "16d12e624fbd5e2fc50cb63e4dd5f0d2eb5b5f08", "content_id": "577b99230c0481c713fd41ade8d53bf54d193ad3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 69, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/03/0315/나이 계산하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "birth = list(map(int, input().split()))\nnow = list(map(int, input().split()))\n\nolds = [0, 0, 0]\n\nolds[2] = now[0] - birth[0]\nolds[1] = olds[2] + 1\nolds[0] = olds[2]\n\nif (now[1] < birth[1]) or (now[1] == birth[1] and now[2] < birth[2]):\n olds[0] 
-= 1\n\nfor old in olds:\n print(old)" }, { "alpha_fraction": 0.597597599029541, "alphanum_fraction": 0.6966966986656189, "avg_line_length": 21.266666412353516, "blob_id": "73d6e45e3c5530b2d80c0bbdb95cfd4d1ae7b02b", "content_id": "d9841906b6935555519fe1043e0f11a7d6cb210a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 61, "num_lines": 15, "path": "/알고리즘/[템플릿]/문자열 탐색/Brute Force.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import timeit\nstart = timeit.default_timer()\n\ntext = 'aawneifonoeiwmcl,xjfklsajdfklajewklfhaefnk1r7e8yiueniuencieh02cf12fh0v3ymn,cgjklo10f321hcqvyjkn9og4ml0f21hc3v9yjknqom0h1vcf2y93njkmof210hc3v9jkoymn0f21hcvko9jnym021hfok9cqv3jln,210hf9ocv3jklmny,qgf21h0kmn,jlocqv39cf30hkv9mn,ojlgfvc0hkx12mn,jol9210cf3hk9qvomn,jl10fh9cv3okjmn,g10cf3hv9knom0f9h3koncvm,02f1h9kcv3omn,j2f103vhc9knmjog0f2h19kncv3jmoyg0f21h9cv3njkgmoy0fh921ojkymncgv3,rlf21h0c3v9nyjkmgoh0n9f2yjkmcv13g4h0f21c3vn9kmjyg0hf219ncvjkm3og0f21hc3v9mnjky,o0f21hcvn9km3jy0h1vcf2nk9mj3021hnkf9m,cv3jhf210ncv3mjk9h0f219nycv3jkg0h9f21nycv3jk0h9f21nycv3kjg0fh219mnjkcv3,yogf20h1v3n9cg4kmf0h2913vcgf3210cvh9g4n20f31h9cvnkgf02h9c13vngtkr40fh921nkcvm30hn9kfmcv3h0nf1mk,j9cv3gf,mn12v0chkljox30cf21hkv3mn,9of0cmn,hkvjlo390hk9omnj,2f13vc0h9nk1mojfc3v0h9n12cfkmv3021h9cfnvyh0nf29kmcv13jh29fn1kmcv30f2h91ncvkm30h92nkf1mcv3o0h9f21nkmocv,30h9nk2fmcv130h9fknc12v30h9nk12fmcv320hc9f1nvk30f2h9c1vnkm30h9fnkc1vh9nkmjof12cv30h9fnkmcv12o,30h9f21nkcvmx30hf9n12ckv30h9nfkmc12v30hk9n,mfjloc1v0fh912vcnk3mh9f12vcn30h9nkcf12v30hf2n91mcv0hnf9c12v3h0n9mkj2cfv130h9nfc12v30h9fnkcv12m3gf219hcvn3kg0f21h9cvn3f219hcvk3nf021h9cnv39hf21cv30h9nf2c1vg3h09n1f2ycvg30h9fnc12vy8g3df09h2c1vg30h98y7nfcv213gh0987yfv2c13h0987yf2cv13ghy0987ncfv123h09872fyv134chyf7098cv2g09h87fv2cg13hy67098cv12gh09f21ycv3g4h09y87f12v3cg0h9821vc309h8f12cv309hfcv09hn12vcx309hn1f2cv30h9nf12cv30hn1cf2'\npattern = 
'cx309hn1f2cv30h9'\nresult = 0\n\nfor i in range(len(text)-len(pattern)):\n if text[i:i+len(pattern)] == pattern:\n result = 1\n break\nprint(result)\n\nend = timeit.default_timer()\nprint(f'{end-start:.5f}')" }, { "alpha_fraction": 0.5829383730888367, "alphanum_fraction": 0.6255924105644226, "avg_line_length": 22.44444465637207, "blob_id": "0e6b036ec70f7710d9cfe71f67aa2269538db18a", "content_id": "ddcb0bb13cd837b44f82b0c142d965f3c19c2a94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 48, "num_lines": 9, "path": "/알고리즘/온라인저지/2021/08/0822/단어 공부.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "word = input()\nword = word.upper()\nalphabet = [0] * 26\nfor wrd in word:\n alphabet[ord(wrd)-65] += 1\nif alphabet.count(max(alphabet)) > 1:\n print('?')\nelse:\n print(chr(alphabet.index(max(alphabet))+65))\n" }, { "alpha_fraction": 0.49484536051750183, "alphanum_fraction": 0.5103092789649963, "avg_line_length": 20.66666603088379, "blob_id": "83731e0c44d84a7ea51479ef3694e184b9c2e4c4", "content_id": "70f245015a9844025ba5de6dc1989533c6362a30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 48, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/07/0722/콘서트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nA = list(map(int, input().split())) # first sold\nA.sort()\nC = list(range(1, len(A)+1))\nfor i in range(len(A)):\n if A[i] != C[i]:\n print(C[i])\n exit()\nprint(N+1)" }, { "alpha_fraction": 0.36477985978126526, "alphanum_fraction": 0.4433962404727936, "avg_line_length": 28, "blob_id": "290ee7d9c6415599be247470c4d66c7bf1072241", "content_id": "365dd4d49ad36d75e82693d02b00d99834bc0738", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
318, "license_type": "no_license", "max_line_length": 45, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/07/0731/투자의 귀재 배주형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "H, Y = map(int, input().split())\ndp = [0]*(Y+1)\ndp[0] = H\nfor i in range(1, Y+1):\n if i-1 >= 0 and dp[i-1]:\n dp[i] = max(int(dp[i-1]*1.05), dp[i])\n if i-3 >= 0 and dp[i-3]:\n dp[i] = max(int(dp[i-3]*1.2), dp[i])\n if i-5 >= 0 and dp[i-5]:\n dp[i] = max(int(dp[i-5]*1.35), dp[i])\nprint(dp[Y])" }, { "alpha_fraction": 0.5499181747436523, "alphanum_fraction": 0.5908346772193909, "avg_line_length": 24.5, "blob_id": "2cac20762e0868fa5e3a1325ce224952afafdbc6", "content_id": "e3b1c02714410d9e8969d7b4e3f0449b223c77e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 999, "license_type": "no_license", "max_line_length": 52, "num_lines": 24, "path": "/알고리즘/온라인저지/2021/08/0807/소수 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\" 초기 풀이\nN개의 숫자 중 소수는 몇개인가\n소수인지 아는 법?\n약수가 1과 자기 자신\n주어질 수들은 1000이하 라는 조건\n1000까지, 주어진 수들을, 이중순회 하면서\n주어진 수를 1부터 주어진 수까지 나눌 때\n주어진 수까지 나머지가 안 생기면 소수\n중간에 나머지가 생기면(True) not소수\n\"\"\"\n\n# 최종 코드\nN = int(input()) # 숫자 개수 입력받고\nnumbers = list(map(int, input().split())) # 숫자들 입력받고\ncount = 0 # 소수 개수를 셀 count 초기화\nfor number in numbers: # 입력받은 숫자들을 순회하면서\n for i in range(2, 1001): # 2부터 1000까지 나누면서 검사\n if number % i == 0: # 나머지가 없고\n if number == i: # number본인이면\n count += 1 # 소수이므로 count에 1 저장\n else: # 나머지는 0이지만 number본인이 아니라면\n break # 약수임\n\nprint(count) # 소수 개수 출력" }, { "alpha_fraction": 0.31092438101768494, "alphanum_fraction": 0.3613445460796356, "avg_line_length": 14, "blob_id": "b2910816929c4b50f6fa19e77b8a7dd7e3f42fe4", "content_id": "b30eff541e516d477a4b4828bd67f159edfb3e7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, 
"path": "/알고리즘/온라인저지/2022/01/0131/별 찍기 - 6.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\ni = (N*2)-1\n\nfor n in range(N):\n a = ((N*2-1)-i)//2\n print(' ' * a, '*' * i, sep='')\n i -= 2" }, { "alpha_fraction": 0.515539288520813, "alphanum_fraction": 0.5265082120895386, "avg_line_length": 25.095237731933594, "blob_id": "40fb92975b17618fbc3938de80cfabf9d6427fbc", "content_id": "bb2187291662d2f2cb5f5d8b3f41d3b835829936", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 57, "num_lines": 21, "path": "/알고리즘/온라인저지/2022/06/0629/스타트와 링크.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from itertools import combinations as comb\n\nN = int(input())\nscope = N\ncount = scope//2\ntmp = list(comb(list(range(scope)), count))\nresult = 1e9\narr = [list(map(int, input().split())) for _ in range(N)]\nfor i in range(len(tmp)//2):\n start = tmp[i]\n link = tmp[-i-1]\n S = L = 0\n for j in range(N):\n for k in range(N):\n if j in start and k in start:\n S += arr[j][k]\n elif j in link and k in link:\n L += arr[j][k]\n if abs(S-L) < result:\n result = abs(S-L)\nprint(result)" }, { "alpha_fraction": 0.5777778029441833, "alphanum_fraction": 0.5777778029441833, "avg_line_length": 22, "blob_id": "b6267902303e193af17f093aba9b58e04cde2c0a", "content_id": "a97fd0489f708bdb650613bd101cfe2a1b8a09de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/알고리즘/온라인저지/2023/05/0501/2 桁の整数 (Two-digit Integer).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print(input(), end='')\nprint(input(), end='')" }, { "alpha_fraction": 0.41632652282714844, "alphanum_fraction": 0.5020408034324646, "avg_line_length": 21.363636016845703, "blob_id": 
"ab3773a5e5cc5e2a3b83f3cb4450bc2391b0ea89", "content_id": "554f2f5d0a09a75a6434832d5062bdaa5d34154c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 61, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/06/0619/핸드폰 번호 궁합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A = input()\nB = input()\ntmp1 = ''\ntmp2 = ''\nfor i in range(8):\n tmp1 = tmp1 + A[i] + B[i]\nfor j in range(16, 2, -1):\n for i in range(1, j):\n tmp2 = tmp2 + str((int(tmp1[i-1]) + int(tmp1[i]))%10)\n tmp1, tmp2 = tmp2, ''\nprint(tmp1)" }, { "alpha_fraction": 0.4017094075679779, "alphanum_fraction": 0.42307692766189575, "avg_line_length": 15.785714149475098, "blob_id": "f3faf142176edca1e070e889a5cb844b4563b897", "content_id": "0510e5a6b193ea492f4c84a9907960eca107a65c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 234, "license_type": "no_license", "max_line_length": 23, "num_lines": 14, "path": "/알고리즘/온라인저지/2023/01/0107/A와 B.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = list(input())\nT = list(input())\nresult = 0\nwhile len(T) > 0:\n if S == T: \n result = 1\n break\n flag = False\n if T[-1] == 'B':\n flag = True\n T.pop()\n if flag:\n T = T[::-1] \nprint(result)" }, { "alpha_fraction": 0.557603657245636, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 23.22222137451172, "blob_id": "4804ad55b076211b17bd44ce228a85fef7ad49a2", "content_id": "e25a0d90b0350e1da12009848b64374f6ca76739", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 41, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/10/1013/폭죽쇼.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, C = map(int, input().split())\nhanabi = [int(input()) for _ in range(N)]\nresult = 0\nfor i 
in range(1, C+1):\n fire = False\n for h in hanabi:\n if not i%h: fire = True\n result += int(fire)\nprint(result)" }, { "alpha_fraction": 0.4517958462238312, "alphanum_fraction": 0.47826087474823, "avg_line_length": 26.894737243652344, "blob_id": "ac615c5a0a07b933c2b7f57c756d577fed07411c", "content_id": "380a248caed932d91fe75b6cb140dc686158fd9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 529, "license_type": "no_license", "max_line_length": 70, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/06/0629/하키.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "W, H, X, Y, P = map(int, input().split())\nLB = [X, Y] # left bottom\nRT = [X+W, Y+H] # right top\nR = H//2\nresult = 0\nfor p in range(P):\n tmp = list(map(int, input().split()))\n square = True\n for i in range(2):\n if tmp[i] < LB[i] or tmp[i] > RT[i]:\n square = False\n if square:\n result += 1\n x, y = tmp\n if (abs(x-X)**2 + abs(y-(Y+R))**2)**0.5 <= R and not square:\n result += 1\n elif (abs(x-(X+W))**2 + abs(y-(Y+R))**2)**0.5 <= R and not square:\n result += 1\nprint(result)" }, { "alpha_fraction": 0.5574468374252319, "alphanum_fraction": 0.5936170220375061, "avg_line_length": 17.84000015258789, "blob_id": "c961d8ba7bcbaa0fdaf899d553714d2837233899", "content_id": "39a00d1bd7deeaf4ddd03513e2bd939d4fc31eb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 480, "license_type": "no_license", "max_line_length": 54, "num_lines": 25, "path": "/알고리즘/온라인저지/2022/10/1006/N과 M (8).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef dfs(depth, start):\n if depth == M: \n print(*result) \n return\n for i in range(start, N):\n result.append(arr[i])\n dfs(depth+1, i)\n result.pop()\n\nN, M = map(int, input().rstrip().split())\narr = sorted(list(map(int, input().rstrip().split())))\nresult = []\ndfs(0, 
0)\n\n\"\"\"\n<참고한 링크>\nhttps://tmdrl5779.tistory.com/27\nhttps://honggom.tistory.com/110\n\"\"\"\n\n# https://www.acmicpc.net/problem/15657" }, { "alpha_fraction": 0.5089285969734192, "alphanum_fraction": 0.5178571343421936, "avg_line_length": 11.55555534362793, "blob_id": "439c4b6618ae305a4189c95b7f9c7c0da9d5c7ab", "content_id": "10bb0d84c03a13e5afe4002c152c99b8f4e4b776", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 33, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/02/0220/이상한 곱셈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = input().split()\n\nresult = 0\n\nfor a in A:\n for b in B:\n result += int(a) * int(b)\n\nprint(result)" }, { "alpha_fraction": 0.4029850661754608, "alphanum_fraction": 0.5074626803398132, "avg_line_length": 21.66666603088379, "blob_id": "29dfb69b90dd1b65a0e90ddbc4c6b17968c17777", "content_id": "c8ce52b0d69142867374148adfd0a72f3d332fb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 67, "license_type": "no_license", "max_line_length": 32, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/08/0831/욱 제.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nM = (B-A)/400\nprint(1/(10**M + 1))" }, { "alpha_fraction": 0.4801762104034424, "alphanum_fraction": 0.5330396294593811, "avg_line_length": 13.25, "blob_id": "e5ff3bd3d073f6cb8c8c2b977c57b6f5ea6df32b", "content_id": "d3de0384006bb7c71be5da3c17a3151020d848e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 28, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/01/0131/상근날드.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "burger = 2000\ndrink = 2000\n\nfor i in range(3):\n b = int(input())\n if b < burger:\n burger = 
b\n\nfor i in range(2):\n d = int(input())\n if d < drink:\n drink = d\n\nresult = burger + drink - 50\n\nprint(result)" }, { "alpha_fraction": 0.5483193397521973, "alphanum_fraction": 0.5651260614395142, "avg_line_length": 19.7391300201416, "blob_id": "a1917bdd9a5d900a5822a8786e57cb6c37b09636", "content_id": "a1b80d0a6f610c2938dba0c84aa3fea048f68cf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "no_license", "max_line_length": 61, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/09/0904/조깅.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, T = map(int, input().split())\nrunner = [tuple(map(int, input().split())) for _ in range(N)]\nstack = []\ncnt = 1\nfor r in runner[::-1]:\n tmp = r[0] + (r[1]*T)\n if stack and tmp < stack[-1]:\n cnt += 1\n if stack and tmp >= stack[-1]:\n stack.append(stack[-1])\n else:\n stack.append(tmp)\n print(tmp, stack)\nprint(cnt)\n\n\"\"\"\n맨 선두에 있는 주자부터 이동한다\n당연히 맨 선두 주자는 하나의 그룹이 된다\n그 다음 주자를 이동한다\n그 다음 주자의 최고 이동거리가 선두주자보다\n같거나 클 경우 한 그룹으로 붙는다\n아닐 경우 다른 그룹이 된다\n\"\"\"" }, { "alpha_fraction": 0.5477477312088013, "alphanum_fraction": 0.5819819569587708, "avg_line_length": 24.272727966308594, "blob_id": "017792240767bab4ae12d0a40b4590c7320b74bc", "content_id": "37ed1e402f31098d85c28e29bb387e9e058e0f78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 41, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/05/0508/경고.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from datetime import datetime\n\na = input()\nb = input()\ntime_1 = datetime.strptime(a,\"%H:%M:%S\")\ntime_2 = datetime.strptime(b,\"%H:%M:%S\")\nday_over = False\ntime_interval = time_2 - time_1\nif str(time_interval)[0] == '-':\n day_over = True\nif a == b: # 문제를 잘 읽자\n print('24:00:00')\nelif day_over:\n if len(str(time_interval)) == 16:\n 
print(str(time_interval)[8:])\n else:\n print('0'+str(time_interval)[8:])\nelse: \n if len(str(time_interval)) == 16:\n print(str(time_interval))\n else:\n print('0'+str(time_interval))" }, { "alpha_fraction": 0.3164556920528412, "alphanum_fraction": 0.5, "avg_line_length": 13.454545021057129, "blob_id": "eb5a34f3f2de84201e13e021f1c737a16ebf134a", "content_id": "e8e84516f0835ad9f5da5c6327e8dcbe400e3184", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 158, "license_type": "no_license", "max_line_length": 29, "num_lines": 11, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/24차시 2. 자료구조 – 리스트, 튜플 - 연습문제 26.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "l1 = [1,3,6,78,35,55]\nl2 = [12,24,35,24,88,120,155]\n\nresult = []\n\nfor i in l1:\n for j in l2:\n if i == j:\n result.append(i)\n\nprint(result)" }, { "alpha_fraction": 0.49264705181121826, "alphanum_fraction": 0.5073529481887817, "avg_line_length": 21.83333396911621, "blob_id": "70c1cd7a2bb9310b15a978145f0c16fb472f3cbd", "content_id": "d9ac805ef6aecb37307f98271f8949dae5827ef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 43, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/09/0903/최대공약수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def gcd(a, b):\n while b > 0: a, b = b, a%b\n return a\n\nA, B = map(int, input().split())\nfor _ in range(gcd(A, B)): print(1, end='')" }, { "alpha_fraction": 0.41432225704193115, "alphanum_fraction": 0.4271099865436554, "avg_line_length": 19.157894134521484, "blob_id": "0561318cd2ea1df41e34a61550740e705a751eba", "content_id": "7147f7869569e7d603149fabfa9c3b6196721106", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 36, 
"num_lines": 19, "path": "/알고리즘/온라인저지/2021/08/0828/그룹 단어 체커.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\nT = int(sys.stdin.readline())\ncnt = 0\nfor tc in range(T):\n word = sys.stdin.readline()\n i = 0\n char = []\n for i in range(len(word)):\n if word[i] not in char:\n char.append(word[i])\n else:\n if word[i] == word[i-1]:\n pass\n else:\n cnt -= 1\n break\n cnt += 1\nprint(cnt)\n " }, { "alpha_fraction": 0.316546767950058, "alphanum_fraction": 0.3381294906139374, "avg_line_length": 21.280000686645508, "blob_id": "a45d6082fea13752146098dc84b130239464b4b7", "content_id": "6e8aef03e9d560715519221159be584d49cf0c69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 556, "license_type": "no_license", "max_line_length": 37, "num_lines": 25, "path": "/알고리즘/온라인저지/2022/08/0827/기술 연계마스터 임스.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "O = False # combo over\nN = int(input())\nresult = 0\nS = [[], []] # stack\nfor i in list(input()):\n if O: break\n try: # 1~9\n i = int(i)\n result += 1\n except: # LRSK\n if i == 'L': S[0].append(i)\n elif i == 'S': S[1].append(i)\n elif i == 'R':\n if S[0]:\n S[0].pop()\n result += 1\n else:\n O = True\n elif i == 'K':\n if S[1]:\n S[1].pop()\n result += 1\n else:\n O = True\nprint(result)" }, { "alpha_fraction": 0.5037146806716919, "alphanum_fraction": 0.5304605960845947, "avg_line_length": 27.08333396911621, "blob_id": "48bd548b5ab7f33d8d5218ca7c64abe0142a5026", "content_id": "bdcfd737cde95d84d9a549d4d841842cc684a239", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 673, "license_type": "no_license", "max_line_length": 46, "num_lines": 24, "path": "/알고리즘/온라인저지/2022/06/0629/덱.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\nimport sys\ndef input():\n return sys.stdin.readline()\n\nD = deque()\nfor 
i in range(int(input())):\n tmp = input().split()\n if tmp[0] == 'push_front':\n D.appendleft(tmp[1])\n elif tmp[0] == 'push_back':\n D.append(tmp[1])\n elif tmp[0] == 'pop_front':\n print(D.popleft()) if D else print(-1)\n elif tmp[0] == 'pop_back':\n print(D.pop()) if D else print(-1)\n elif tmp[0] == 'size':\n print(len(D))\n elif tmp[0] == 'empty':\n print(0) if D else print(1)\n elif tmp[0] == 'front':\n print(D[0]) if D else print(-1)\n elif tmp[0] == 'back':\n print(D[-1]) if D else print(-1)" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.5408163070678711, "avg_line_length": 32, "blob_id": "cc5fb66e151c00cf4e5c20f61aad802a99a8351f", "content_id": "2b183588dd92a4764d19d055326bdd9a3ce59460", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 42, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/08/0822/Cupcake Party.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "B = [int(input()) for _ in range(2)] # box\nC = B[0]*8 + B[1]*3 # cake\nprint(C-28 if C-28>0 else 0)" }, { "alpha_fraction": 0.5188679099082947, "alphanum_fraction": 0.5283018946647644, "avg_line_length": 25.75, "blob_id": "ab1fb14bc1b9389596579769312148f6fd1fe4c9", "content_id": "7669e22c1729ef7a5dab25856f24e70d0d91a234", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/알고리즘/온라인저지/2023/06/0610/Schronisko.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n W, H = map(int, input().split())\n result = (W*H)//2\n print(result)" }, { "alpha_fraction": 0.46543779969215393, "alphanum_fraction": 0.4930875599384308, "avg_line_length": 23.22222137451172, "blob_id": "ebf3291e911e0b358a1f3e934a91711c31eaceb0", "content_id": "6afe7d28382ed1531f307f68a819ac33ec570cfe", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/06/0611/Basketball One-on-One.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "record = input()\nA, B = 0, 0\nfor i in range(0, len(record), 2):\n player = record[i]\n if player == 'A':\n A += int(record[i+1])\n if player == 'B':\n B += int(record[i+1])\nprint('A' if A>B else 'B')" }, { "alpha_fraction": 0.4163934290409088, "alphanum_fraction": 0.4557377099990845, "avg_line_length": 18.125, "blob_id": "5a40390bcbf1a6bc222c41fe67d1e623600ece70", "content_id": "c45c6c5226a89096a61c131b6645ca87f7e67d7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 42, "num_lines": 16, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/1. 파이썬 SW문제해결 기본 List1/9차시 1일차 - 구간합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(1, T+1):\n N, M = map(int, input().split())\n\n nums = list(map(int, input().split()))\n\n a = 99999999\n b = 0\n\n for i in range(len(nums)-M+1):\n tmp = sum(nums[i:i+M])\n a = min(a, tmp)\n b = max(b, tmp)\n \n print('#{} {}'.format(t, b-a))" }, { "alpha_fraction": 0.48852458596229553, "alphanum_fraction": 0.4961748719215393, "avg_line_length": 26.75757598876953, "blob_id": "9c7102b660fd0ed42f774be11c0c0e8b6dead4a9", "content_id": "0fe0d3be48b674b56a567a25977eb59e448cd1ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1179, "license_type": "no_license", "max_line_length": 44, "num_lines": 33, "path": "/알고리즘/온라인저지/2022/03/0322/택배 배송.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import heapq\nimport sys\nINF = sys.maxsize # 가상의 최대값\n\ndef dijkstra(start):\n q = [] # 
방문가능한 지점\n heapq.heappush(q, (0, start)) # 출발점 입력\n dis[start] = 0 # 출발지점은 거리가 0\n while q: # 다익스트라\n d, now = heapq.heappop(q)\n # print(d, now) # 디버깅\n if dis[now] < d: # 최소거리가 될 수 없으면\n continue # 백트래킹\n # print(graph[now]) # 디버깅\n for v, w in graph[now]:\n # 들고 있는 최소거리 + 이동할 거리\n cost = d + w \n if cost < dis[v]: # 새로운 최소값이면\n dis[v] = cost # 갱신\n # 다음 이동지점에 갱신된 최소값 추가\n heapq.heappush(q, (cost, v))\n\nN, M = map(int, input().split())\n# 각 노드에서 이동 가능한 지점들 리스트\ngraph = [[] for _ in range(N+1)]\ndis = [INF]*(N+1) # 각 노드별 최소 이동거리 DP\nfor _ in range(M):\n a, b, c = map(int, input().split())\n # 양방향 방문가능 노드 추가\n graph[a].append((b, c))\n graph[b].append((a, c))\ndijkstra(1) # 1부터 출발\nprint(dis[N]) # 출력" }, { "alpha_fraction": 0.43065693974494934, "alphanum_fraction": 0.45985400676727295, "avg_line_length": 12.800000190734863, "blob_id": "350afad8dc5fad2c5884f3a9d81d26e146e40066", "content_id": "1a347220dc61b354c911629c18916106a693b0a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 26, "num_lines": 10, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/7차시 2. 
자료구조 – 리스트, 튜플 - 연습문제 6.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "x = int(input())\n\ns = set()\n\nfor i in range(1, x//2+1):\n if x%i == 0:\n s.add(i)\n s.add(int(x/i))\n\nprint(sorted(list(s)))" }, { "alpha_fraction": 0.41600000858306885, "alphanum_fraction": 0.42399999499320984, "avg_line_length": 13.764705657958984, "blob_id": "bb27e22a09bc517898ad3450c9e511d09c976978", "content_id": "d6dfe442099ccde164c0a3a5786b95c6309ff058", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 35, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/02/0214/나는 행복합니다~.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "i = 0\n\nN, M, K = map(int, input().split())\n\nseat_found = False\n\nfor n in range(N):\n for m in range(M):\n if i == K:\n print(n, m)\n seat_found = True\n break\n\n i += 1\n \n if seat_found:\n break" }, { "alpha_fraction": 0.43609023094177246, "alphanum_fraction": 0.4661654233932495, "avg_line_length": 8.571428298950195, "blob_id": "3c4ef75b41fde13a31379380d6636163b80a9adc", "content_id": "7746808e50c3875e740ed4226dfdc8b18946cacf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 18, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/02/0227/2의 제곱인가.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nresult = 0\n\ntmp = 1\n\nwhile tmp <= N:\n if tmp == N:\n result = 1\n break\n\n tmp *= 2\n\nprint(result)" }, { "alpha_fraction": 0.3854489028453827, "alphanum_fraction": 0.39783281087875366, "avg_line_length": 39.125, "blob_id": "0a2580fc047b86ba36227f1208ca1e4f511e9578", "content_id": "ce13783f39bb810fceecd3de1903746ec1cfe9df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 852, "license_type": 
"no_license", "max_line_length": 76, "num_lines": 16, "path": "/알고리즘/온라인저지/2021/08/0816/소수 구하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\n\nn = B # 소수를 찾을 범위\na = [False,False] + [True]*(n-1) # 0 ~ n 까지의 숫자 리스트\n # 일단 True로 만들고, 소수가 아닌 숫자를 찾아서 False로 지움\nprimes=[] # 소수 집합\nfor i in range(2,n+1): # 2 ~ n까지 반복\n if a[i]: # 2부터 시작, 해당 숫자가 지워지지 않고 남아있는 소수라면 \n primes.append(i) # 소수 리스트에 추가\n for j in range(2*i, n+1, i): # 해당 소수의 배수들을 \n a[j] = False # 리스트에서 전부 False로 만들기\n # bool타입이라 연산이 빠름\n\nfor prime in primes:\n if prime >= A:\n print(prime)\n " }, { "alpha_fraction": 0.45392492413520813, "alphanum_fraction": 0.4778156876564026, "avg_line_length": 21.615385055541992, "blob_id": "df05f1bf667cb0cda4f97786e746f79cf2b993c9", "content_id": "b0af1f962490f09538cfb35add0f08b487f91a05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 36, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/07/0710/백설 공주와 일곱 난쟁이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "D = [int(input()) for _ in range(9)]\nresult = []\n# print(D)\nsumm = sum(D)\nfor i in range(9):\n for j in range(i+1, 9):\n tmp = summ - (D[i]+D[j])\n if tmp == 100:\n result.append(D[i])\n result.append(D[j])\nfor d in D:\n if d not in result:\n print(d)" }, { "alpha_fraction": 0.40758293867111206, "alphanum_fraction": 0.44075828790664673, "avg_line_length": 25.5, "blob_id": "363bb21c3f2e22172097fb633cb56d3799d46711", "content_id": "2d600e0163c0b82fb9657c52ff96506a750e902a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 42, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/01/0102/CTP공국으로 이민 가자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for tc in range(int(input())-1, -1, -1):\n input()\n 
for i in input().split():\n try:\n print(chr(int(i)+64), end=' ')\n except:\n print(ord(i)-64, end=' ')\n if tc: print()" }, { "alpha_fraction": 0.44133099913597107, "alphanum_fraction": 0.457092821598053, "avg_line_length": 39.85714340209961, "blob_id": "fa544fa0b76292b50da0baa81a91806d284d1e9a", "content_id": "a61788dfffe20d13f84b0babbee0afe581c07f05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 719, "license_type": "no_license", "max_line_length": 71, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/05/0521/텔레프라임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "num, new = map(int, input().split())\nnew_num = int(str(new)+str(num))\nn = new_num+2 # 소수를 찾을 범위\na = [False,False] + [True]*(n-1) # 0 ~ n 까지의 숫자 리스트\nprimes=[] # 소수 집합\nfor i in range(2,n+1): # 2 ~ n까지 반복\n if a[i]: # 2부터 시작, 해당 숫자가 지워지지 않고 남아있는 소수라면 \n primes.append(i) # 소수 리스트에 추가\n for j in range(2*i, n+1, i): # 해당 소수의 배수들을 \n a[j] = False # 리스트에서 전부 False로 만들기\nif num in primes and new_num in primes:\n print('Yes')\nelse:\n print('No')" }, { "alpha_fraction": 0.2628205120563507, "alphanum_fraction": 0.33012819290161133, "avg_line_length": 18.5625, "blob_id": "335759fcf8cf506a8d419fb209bafe5040fcf324", "content_id": "da6f98c844aaa8c240122d9f6f4a5bafc14a29ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 29, "num_lines": 16, "path": "/알고리즘/온라인저지/2023/01/0118/미국 스타일.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n A, B = input().split()\n A = float(A)\n if B == 'kg':\n A *= 2.2046\n B = 'lb'\n elif B == 'lb':\n A *= 0.4536\n B = 'kg'\n elif B == 'l':\n A *= 0.2642\n B = 'g'\n else:\n A *= 3.7854\n B = 'l'\n print(f'{A:.4f} {B}')" }, { "alpha_fraction": 0.4202898442745209, "alphanum_fraction": 0.4202898442745209, "avg_line_length": 16.375, "blob_id": 
"eb37dc7370a0ca2e71a4687fd0785ac680ce3dc2", "content_id": "b08b12ad8fba64415c31f3bd6c1fd3d0f9079b96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 22, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/05/0530/Triangles.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n N = int(input())\n if not N:\n break\n line = '*'\n for i in range(N):\n print(line)\n line += '*'" }, { "alpha_fraction": 0.4774535894393921, "alphanum_fraction": 0.5623342394828796, "avg_line_length": 10.8125, "blob_id": "0982f4b5aaf63ff0f28cde1a6aab477a1674b0c0", "content_id": "7f2296dab404e575a6064a70cfc0bd2875dc7298", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 533, "license_type": "no_license", "max_line_length": 39, "num_lines": 32, "path": "/알고리즘/온라인저지/2021/10/1011/이장님 초대.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n1. 묘목 하나 심는데 1일\n2. 묘목 n개 구입 -> n일\n3. 이장님을 초대하기위해, 대신 묘목들이 완전히 자란 후에\n 이장님을 초대하기 위한 최소 시간\n\n1. 묘목의 수\n2. 각 묘목별 걸리는 시간\n\n1. 4 3 3 2 -> \n2. 
39 39 38 35 20 9\n\n2 3 4 5 ....\n\n값을 정렬하고\nfor문 돌려서\nrange(2, N) 더해주고\nmax\n\"\"\"\n\nN = int(input())\ntrees = list(map(int, input().split()))\n\ntrees.sort(reverse=True)\n\nj = 0\n\nfor i in range(2, 2+N):\n trees[j] += i\n j += 1\n\nprint(max(trees))" }, { "alpha_fraction": 0.4636015295982361, "alphanum_fraction": 0.49042144417762756, "avg_line_length": 22.81818199157715, "blob_id": "5bd9de97cedd2b2d77f669fdbacd7e8ed53947eb", "content_id": "362cac01835180fbc040a9f32de10e22c1b7c482", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 261, "license_type": "no_license", "max_line_length": 50, "num_lines": 11, "path": "/알고리즘/온라인저지/2023/02/0217/동혁 피자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "pizza = 0\nwhile True:\n tmp = input()\n if tmp == '0': break\n pizza += 1\n R, W, L = map(int, tmp.split())\n A = (R*2)**2\n B = W**2 + L**2\n result = 'fits'\n if A < B: result = 'does not fit'\n print(f'Pizza {pizza} {result} on the table.')" }, { "alpha_fraction": 0.4188311696052551, "alphanum_fraction": 0.4350649416446686, "avg_line_length": 21.071428298950195, "blob_id": "f7b1a290c7e4774b35dd4cfd4dab62b8cd0cea8c", "content_id": "b2a8b0ba3775b22aba9eb7c882cc08e8e7c17d24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "no_license", "max_line_length": 39, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/09/0903/GCD 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def gcd(a, b):\n while b > 0:\n a, b = b, a % b\n return a\n\nfor _ in range(int(input())):\n N = list(map(int, input().split()))\n n = N.pop(0)\n result = 0\n for i in range(n-1):\n for j in range(i+1, n):\n A, B = N[i], N[j]\n result += gcd(A, B)\n print(result)" }, { "alpha_fraction": 0.5523809790611267, "alphanum_fraction": 0.5523809790611267, "avg_line_length": 20.200000762939453, "blob_id": 
"44afbf6170a27dd2156fe7384baf0a7fbc7a2d1e", "content_id": "8858f1bd875530e71be45f8d76be62dbe69ce996", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 47, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/02/0205/더하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n N = int(input())\n print(sum(list(map(int, input().split()))))" }, { "alpha_fraction": 0.5377049446105957, "alphanum_fraction": 0.5639344453811646, "avg_line_length": 22.538461685180664, "blob_id": "18e38a05eb5c0363aa91182ddd6581008f43b212", "content_id": "b2879343159e3055f8d2bd7e2a875e8a108a544a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 40, "num_lines": 13, "path": "/알고리즘/[템플릿]/이분탐색/숫자 카드 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nN_list = list(map(int, input().split()))\ndct = {}\nfor n in N_list:\n try: dct[n] += 1\n except: dct[n] = 1\nM = int(input())\nM_list = list(map(int, input().split()))\nfor m in M_list:\n try: print(dct[m], end=' ')\n except: print(0, end=' ')\n\n# https://www.acmicpc.net/problem/10816" }, { "alpha_fraction": 0.6299999952316284, "alphanum_fraction": 0.6399999856948853, "avg_line_length": 24.04166603088379, "blob_id": "ed8e3a359486ee79ee287374a0a3dcfced3892b0", "content_id": "9b4607b5c2ad5116cfe9ee31e522da1875ead236", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1054, "license_type": "no_license", "max_line_length": 52, "num_lines": 24, "path": "/알고리즘/온라인저지/2021/08/0814/로프.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n로프는 k개\nk개의 로프로 w만큼을 나눠서 들어올릴 때\n각 로프에 가해지는 하중은 w/k\n로프의 최대하중이 w/k를 넘기면 안됌\n모든 로프에 걸리는 하중은 w/k로 같고\n제일 하중이 낮은 로프가 이를 버텨내야 한다\n그 
때의 w값\n큰거부터 내려가야 한다\n하중이 큰 로프부터 한개 두개 세개... 하다가 \n최대하중이 계속 증가하는데 꺾이는 순간이 온다\n\"\"\"\n\nN = int(input()) # 줄의 개수\nropes = [] # 로프들 초기화\nfor n in range(N): # 로프 개수만큼 반복하면서\n ropes.append(int(input())) # 로프들 입력받기\nropes.sort() # 정렬하고\nropes.reverse() # 뒤집어서 중량이 큰 로프가 앞으로 오도록\nmax_weight = ropes[0] # 제일 중량이 큰 로프 혼자 있을때가 최대중량 초기값\nfor n in range(1, N+1): # 로프 1개부터 N개까지\n if max_weight <= ropes[n-1]*n: # 최대하중이 기존보다 크면\n max_weight = ropes[n-1]*n # 새로 저장\nprint(max_weight) # 최대하중 출력" }, { "alpha_fraction": 0.5228426456451416, "alphanum_fraction": 0.5279187560081482, "avg_line_length": 23.625, "blob_id": "78c03d4ee5954b29091254c63a1bc4b01fb4a986", "content_id": "2562e818da9f5bc9d3096591a92aeb0e6147331c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 38, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/05/0515/Teleportation.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, X, Y = map(int, input().split())\nA, B = min((A, B)), max((A, B))\nX, Y = min((X, Y)), max((X, Y))\nresult = 0\nresult += abs(A-X)\nresult += abs(B-Y)\nresult = min(result, abs(A-B))\nprint(result)\n" }, { "alpha_fraction": 0.5198019742965698, "alphanum_fraction": 0.5396039485931396, "avg_line_length": 22.823530197143555, "blob_id": "28356683acc71a1d4cf2f62566db06e8e818ac8c", "content_id": "433f200d53edac9eb713303f72be19b3a751fe46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 50, "num_lines": 17, "path": "/알고리즘/온라인저지/2021/08/0822/일곱 난쟁이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "dwarf = []\nfor n in range(9):\n dwarf.append(int(input()))\ndwarf.sort()\nall_dwarf = sum(dwarf)\nfound = False\nfor i in range(8):\n for j in range(i+1, 9):\n if all_dwarf - dwarf[i] - dwarf[j] == 100:\n dwarf.pop(dwarf.index(dwarf[i]))\n 
dwarf.pop(dwarf.index(dwarf[j-1]))\n found = True\n break\n if found:\n break\nfor dwa in dwarf:\n print(dwa)" }, { "alpha_fraction": 0.6240601539611816, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 21.33333396911621, "blob_id": "fe069c22940eb8350c9b348e5840ac763e43904e", "content_id": "b671fe63d1017fc1f2fb97b0763fd18d400307b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 67, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/09/0927/수 정렬하기 5.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nfor i in sorted([int(input().rstrip()) for _ in [0]*(N)]): print(i)" }, { "alpha_fraction": 0.41981130838394165, "alphanum_fraction": 0.4245283007621765, "avg_line_length": 24, "blob_id": "406883a14d39be71fce2082477af3ea690f2226f", "content_id": "7f7c7ebde5e46310c8521e68c8ce6ba9c320f581", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 478, "license_type": "no_license", "max_line_length": 33, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/07/0726/사과 담기 게임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nJ = int(input())\nresult, s, e = 0, 1, M\nfor j in range(J):\n A = int(input())\n if s<=A<=e: # 움직이지 않아도 될 때\n continue\n else: # 움직여야 할 때\n if A < s: # 좌로 움직일 때\n move = s-A\n result += move \n s, e = s-move, e-move\n elif e < A: # 우로 움직일 때\n move = A-e\n result += move\n s, e = s+move, e+move\nprint(result)" }, { "alpha_fraction": 0.5605095624923706, "alphanum_fraction": 0.5796178579330444, "avg_line_length": 18.75, "blob_id": "3cc4f0f3d54594d150c93db2f40675d4e4531984", "content_id": "32f7d0c62b800c8ecdd547111f4506fd4f183b05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": 
"no_license", "max_line_length": 32, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/03/0331/치킨 두 마리 (...).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nchicken = int(input())\nresult = 0\nif A+B >= chicken*2:\n result = A + B - chicken*2\nelse:\n result = A + B\nprint(result)" }, { "alpha_fraction": 0.4131578803062439, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 20.11111068725586, "blob_id": "f430985660094bc7d47a683bb9725283234680e9", "content_id": "b794a39b2a7880a7bf66339cf5dd7823f749af52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 380, "license_type": "no_license", "max_line_length": 46, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/07/0708/APC는 왜 서브태스크 대회가 되었을까.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, L, K = map(int, input().split())\nP = [] # problems\nfor n in range(N):\n P.append(tuple(map(int, input().split())))\nP.sort(key=lambda x:x[1])\n# print(P)\nsolved = [0] * N\nk = 0\nresult = 0\nfor i in range(N):\n if k < K:\n if P[i][1] <= L:\n k += 1\n result += 140\n elif P[i][0] <= L:\n k += 1\n result += 100\nprint(result)\n" }, { "alpha_fraction": 0.5524079203605652, "alphanum_fraction": 0.5920680165290833, "avg_line_length": 19.823530197143555, "blob_id": "bf1163c08244c46a46f3850c5903bf1b1a346313", "content_id": "010af6cc6b1b79c32ba17ecb51454a88548f8267", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 353, "license_type": "no_license", "max_line_length": 38, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/07/0714/통계학.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import Counter\n\n\nN = int(input())\narr = [int(input()) for _ in range(N)]\narr.sort()\ncommon = Counter(arr).most_common()\nprint(round(sum(arr)/N))\nprint(arr[N//2])\nif len(common) > 1:\n if common[0][1] == common[1][1]:\n 
print(common[1][0])\n else:\n print(common[0][0])\nelse:\n print(common[0][0])\nprint(arr[-1]-arr[0])" }, { "alpha_fraction": 0.43497759103775024, "alphanum_fraction": 0.5112107396125793, "avg_line_length": 14.928571701049805, "blob_id": "e6afa6eb2a7d74036e05bb93544773c00958b112", "content_id": "4dcf91f2192056d02af4a63d7a9ed7ce4385c60d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/32차시 3. 자료구조 – 셋, 딕셔너리 - 연습문제 7.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "sen = input()\n\nLETTERS = 0\nDIGITS = 0\n\nfor s in sen:\n if 65 <= ord(s) <= 90 or 97 <= ord(s) <= 122:\n LETTERS += 1\n\n if 48 <= ord(s) <= 57:\n DIGITS += 1\n\nprint('LETTERS', LETTERS)\nprint('DIGITS', DIGITS)\n" }, { "alpha_fraction": 0.5794947743415833, "alphanum_fraction": 0.6032689213752747, "avg_line_length": 34.47368240356445, "blob_id": "a8bb59cbaa059a14642f588bf9a0edd2183f933b", "content_id": "4eb850d20c76f3329b153f7175f89a2c10d9f70a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 963, "license_type": "no_license", "max_line_length": 62, "num_lines": 19, "path": "/알고리즘/온라인저지/2021/08/0801/숫자의 개수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "numbers = [] # 입력받을 숫자의 리스트 초기화\nfor i in range(3): # A, B, C 세개의 숫자를 입력받음\n numbers += [int(input())] # 입력받아서 리스트에 추가\nmultipled = 1 # 다 곱한 값의 초기값 1:O, 0:X\nfor number in numbers: # multipled에 입력받은 숫자 곱해서 집어넣었고\n multipled *= number\nmultipled = str(multipled) # 곱한 값을 str로 변경\ncounts = [] # 숫자들을 셀 리스트 초기화\nfor i in range(10):\n counts.append([]) # 각 카운트들을 담을 빈 리스트 생성\nfor num in multipled:\n for i in range(10):\n if num == str(i): # 순회중인 숫자가 0~9중에 일치하는 수를\n counts[i] += [1] # 확인해서 count 한 번\nfor i in range(10):\n counts[i] = sum(counts[i]) 
# 카운트한 숫자는 [1, 1, 1] 이렇게 되 있으니까\n # 다 더한 값을 카운트에 재설정\nfor count in counts:\n print(count) # 카운트값들을 출력" }, { "alpha_fraction": 0.49572649598121643, "alphanum_fraction": 0.5341880321502686, "avg_line_length": 25.11111068725586, "blob_id": "e8e4392364571628917a3e8b86ca3b88fe5f9f8f", "content_id": "efbd706ec6b1f59f020c6bf048f711077419379f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 234, "license_type": "no_license", "max_line_length": 81, "num_lines": 9, "path": "/알고리즘/온라인저지/2021/08/0815/다이얼.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "dial = {2:\"ABC\", 3:\"DEF\", 4:\"GHI\", 5:\"JKL\", 6:\"MNO\", 7:\"PQRS\", 8:\"TUV\", 9:\"WXYZ\"}\nword = input()\nresult = 0\nfor wor in word:\n for dil in dial:\n if wor in dial[dil]:\n result += dil\nresult += len(word)\nprint(result)" }, { "alpha_fraction": 0.468137264251709, "alphanum_fraction": 0.48774510622024536, "avg_line_length": 24.5625, "blob_id": "0eb013050782576e1f3e44ae5f04c6376de07098", "content_id": "5bd2afb0741b3f7734ba81b9477db1b908f9fd68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 61, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/08/0801/먹을 것인가 먹힐 것인가.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor t in range(T):\n N, M = map(int, input().split())\n A = sorted(list(map(int, input().split())), reverse=True)\n B = sorted(list(map(int, input().split())), reverse=True)\n i = j = 0\n result = 0\n while i<N and j<M:\n if A[i] > B[j]:\n result += M-j\n i += 1\n else:\n j += 1\n print(result)\n\n# https://www.acmicpc.net/problem/7795" }, { "alpha_fraction": 0.5077399611473083, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 17, "blob_id": "5b5300c6298d92ab3a09c9dfa936d9fde03121f1", "content_id": "0514a5505f54889b5c52f45d17e6f983f206dba8", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 49, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/10/1002/죽음의 게임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, K = map(int, input().rstrip().split())\ndeath = [int(input().rstrip()) for _ in range(N)]\nresult = 0\ni = 0\nvisited = [0]*N\nwhile i != K:\n if not visited[i]:\n visited[i] = 1\n i = death[i]\n result += 1\n else: \n print(-1)\n exit()\nprint(result)" }, { "alpha_fraction": 0.465568870306015, "alphanum_fraction": 0.47604790329933167, "avg_line_length": 30.85714340209961, "blob_id": "79d38458c02f609cfabab69330fb1079df89c8eb", "content_id": "f02a7b28896863fc8213a4dcf0fcd48b0aa43beb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 668, "license_type": "no_license", "max_line_length": 67, "num_lines": 21, "path": "/알고리즘/온라인저지/2021/08/0815/블랙잭_영남이 덕분에 성공.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\ncards = list(map(int, input().split()))\nblackjack = cards[0]+cards[1]+cards[2]\ncards.sort()\nfor i in range(N-2):\n if cards[i] > M:\n break\n for j in range(i+1, N-1):\n if cards[i] + cards[j] > M:\n break\n for k in range(j+1, N):\n if cards[i] + cards[j] + cards[k] > M:\n break\n is_this_blackjack = cards[i]+cards[j]+cards[k]\n if is_this_blackjack == M:\n print(M)\n exit()\n else:\n if abs(M - is_this_blackjack) < abs(M - blackjack):\n blackjack = is_this_blackjack\nprint(blackjack)" }, { "alpha_fraction": 0.46859902143478394, "alphanum_fraction": 0.48792269825935364, "avg_line_length": 28.714284896850586, "blob_id": "e348f06afbca14ad86467dff0bdedcc723d81c71", "content_id": "2af9d999be9272ef6be2366c639661bde4d74c62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, 
"license_type": "no_license", "max_line_length": 47, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/06/0626/나무 조각.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "arr = list(map(int, input().split()))\ntmp = sorted(arr)\nwhile arr != tmp:\n for i in range(len(arr)-1):\n if arr[i] > arr[i+1]:\n arr[i], arr[i+1] = arr[i+1], arr[i]\n print(*arr)" }, { "alpha_fraction": 0.4787878692150116, "alphanum_fraction": 0.5030303001403809, "avg_line_length": 15.600000381469727, "blob_id": "34f9eb7ded60f0191d8fd16de5a0ae1f7e98551b", "content_id": "1404cebde3f33f92f0402c17e3fe95af36b1dd90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 44, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/02/0202/나는 요리사다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "winner = 0\nmax_p = 0\n\nfor i in range(1, 6):\n p = sum(list(map(int, input().split())))\n if p > max_p:\n max_p = p\n winner = i\n\nprint(winner, max_p)" }, { "alpha_fraction": 0.4573054909706116, "alphanum_fraction": 0.4775458574295044, "avg_line_length": 25.81355857849121, "blob_id": "2c384c00de3e5dd1f84484a6d49227fc4fddaa2b", "content_id": "bc436ae94342912704409000914fd95ad726f772", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1667, "license_type": "no_license", "max_line_length": 87, "num_lines": 59, "path": "/알고리즘/온라인저지/2022/10/1006/빙산.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\ndef count_iceberg():\n Q = deque()\n visited = [[0]*M for _ in range(N)]\n iceberg = 0\n for i in range(N):\n for j in range(M):\n if arr[i][j] and not visited[i][j]:\n iceberg += 1\n Q.append((i, j))\n visited[i][j] = 1\n while Q:\n y, x = Q.popleft()\n for k in range(4):\n ny, nx = y+dy[k], 
x+dx[k]\n if 0<=ny<N and 0<=nx<M and not visited[ny][nx] and arr[ny][nx]:\n Q.append((ny, nx))\n visited[ny][nx] = 1\n return iceberg\n\ndef check_seawater():\n tmp_Q = deque()\n for i in range(N):\n for j in range(M):\n if arr[i][j] == 0:\n tmp_Q.append((i, j))\n return tmp_Q\n\ndef melt_iceberg(seawater_Q):\n while seawater_Q:\n y, x = seawater_Q.popleft()\n for k in range(4):\n ny, nx = y+dy[k], x+dx[k]\n if 0<=ny<N and 0<=nx<M and arr[ny][nx]>0:\n arr[ny][nx] -= 1\n return\n\nN, M = map(int, input().rstrip().split())\narr = [list(map(int, input().rstrip().split())) for _ in range(N)]\nQ = deque()\nresult = 0\nwhile count_iceberg() == 1:\n result += 1\n seawater_Q = check_seawater()\n melt_iceberg(seawater_Q)\nif count_iceberg() == 0: print(0)\nelse: print(result)\n\n\"\"\"\n두 덩어리로 분리되는 최초의 시간을 구해야 한다\n끝까지 두덩이가 되지 않고 통째로 녹아버린다면 0을 출력\n\"\"\"\n\n# https://www.acmicpc.net/problem/2573" }, { "alpha_fraction": 0.40229883790016174, "alphanum_fraction": 0.4195402264595032, "avg_line_length": 13.583333015441895, "blob_id": "f76f08f07fa306f409656706f53ca18b8910d1ea", "content_id": "3d7cbdc71f22f4b2a939ffcc55b02f72b4aa0f3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/04/0425/鉛筆 (Pencils).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, A, B, C, D = map(int, input().split())\nE = F = 0\nif N%A:\n E = N//A + 1\nelse:\n E = N//A\nif N%C:\n F = N//C + 1\nelse:\n F = N//C\n# print(E, F)\nprint(min(E*B, F*D))" }, { "alpha_fraction": 0.4746543765068054, "alphanum_fraction": 0.5414746403694153, "avg_line_length": 24.58823585510254, "blob_id": "8aea8a168501d007b53551db291f8b8489e335ee", "content_id": "4b563852e71defe80557232cf3a7742768afebff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "no_license", 
"max_line_length": 63, "num_lines": 17, "path": "/알고리즘/[템플릿]/Dynamic Programming/다이나믹이 뭐예요.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\ndy, dx = [0, 1, 1], [1, 0, 1] # right, down, right-down\nmod = int(1e9)+7\n\nN, M = map(int, input().rstrip().split())\ndp = [[0]*M for _ in range(N)]\ny, x = 0, 0\nfor i in range(N): dp[i][0] = 1\nfor i in range(M): dp[0][i] = 1\nfor i in range(1, N):\n for j in range(1, M):\n dp[i][j] = (dp[i-1][j] + dp[i][j-1] + dp[i-1][j-1])%mod\nprint(dp[N-1][M-1])\n\n# https://www.acmicpc.net/problem/14494" }, { "alpha_fraction": 0.4393063485622406, "alphanum_fraction": 0.4797687828540802, "avg_line_length": 23.85714340209961, "blob_id": "dcce28229834651ee2cb5885a10cd5d127c02bca", "content_id": "0142b136fa2f5a1d91410735d0b0e5621fbd3aca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 36, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/04/0418/겨울왕국 티켓 예매.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "H, W = ord('L')-65+1, 4\nfor _ in range(int(input())):\n N, M = map(int, input().split())\n result = -1\n if N>=H and M>=W:\n result = (H-1)*M+4\n print(result)" }, { "alpha_fraction": 0.5508981943130493, "alphanum_fraction": 0.5928143858909607, "avg_line_length": 32.599998474121094, "blob_id": "9e42b3e0d516df438a5618438b30ec94cba49b34", "content_id": "be538d3c8e476c6150e5f104359db4b5b0abf6d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 54, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/04/0422/2017 아주대학교 프로그래밍 경시대회 (Small).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "scores = []\nfor i in range(1, int(input())+1):\n scores.append(list(map(int, input().split()))+[i])\nscores.sort(key=lambda x:(-x[0], 
x[1], x[2]))\nprint(scores[0][3])" }, { "alpha_fraction": 0.56886225938797, "alphanum_fraction": 0.5888223648071289, "avg_line_length": 19.91666603088379, "blob_id": "99ce2fba60e98428a56234311f4fe940be51a67d", "content_id": "6da09cb928f19fe48d933f0138bf19213368fed5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 739, "license_type": "no_license", "max_line_length": 58, "num_lines": 24, "path": "/알고리즘/온라인저지/2022/08/0808/가희와 키워드.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\nK = set(input().rstrip() for _ in range(N)) # keyword\nfor _ in range(M): \n tmp = set(input().rstrip().split(',')) # 개행문자('\\n') 제거\n K -= tmp\n print(len(K))\n\n\"\"\"\n시간 : 21분\n풀이\n 시간은 1.5초지만 메모리가 넉넉한 문제이다\n 제출언어로 PyPy를 사용한다\n 빠른 입력을 위해 \n input = sys.stdin.readline도 사용한다\n N과 M은 둘 다 200,000으로 큰 편이다\n set 클래스인 A와 B에 대해서 A-B가 가능하다\n 키워드를 담은 set(K)에서\n 가희가 메모로 작성하여 없앤 키워드를 빼주고\n 매번 글을 쓰고 남은 키워드 개수를 출력한다\n\"\"\"" }, { "alpha_fraction": 0.38064515590667725, "alphanum_fraction": 0.4580645263195038, "avg_line_length": 21.14285659790039, "blob_id": "4c6a6ea46ea8b3ba87b302e233d4dda96120f30d", "content_id": "ea0813fdf249da916305677791e3f4216ee45468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/06/0606/나누기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nF = int(input())\ntmp = ((N//100)*100)\nif tmp%F:\n print((str((tmp//F)*F + F)+'0')[-3:-1])\nelse:\n print((str((tmp//F)*F)+'0')[-3:-1])\n" }, { "alpha_fraction": 0.47983869910240173, "alphanum_fraction": 0.5080645084381104, "avg_line_length": 23.899999618530273, "blob_id": "a9352abad8ba5a90237010f928c484c9e9ae5e13", "content_id": "6d71cd04fc81339dea1d96560eacc403deb8a8a5", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 248, "license_type": "no_license", "max_line_length": 45, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/10/1003/피보나치.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor t in range(1, int(input().rstrip())+1):\n P, Q = map(int, input().rstrip().split())\n a, b = 1, 1\n for _ in range(P-2): a, b = b, (a+b)%Q\n if Q == 1: b = 0\n print('Case #{}: {}'.format(t, b))" }, { "alpha_fraction": 0.5232323408126831, "alphanum_fraction": 0.5535353422164917, "avg_line_length": 19.66666603088379, "blob_id": "8c016928c58eebc240fdafd98c40c8b88ce75ba0", "content_id": "1de2a8d0af9cafcce41caa85f8e9de5288d65ade", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 495, "license_type": "no_license", "max_line_length": 48, "num_lines": 24, "path": "/알고리즘/온라인저지/2022/09/0912/별 찍기 - 10.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from copy import deepcopy\n\nN = int(input())\nfor k in range(1, 8): \n if N == 3**k: break\nwidth = k\nsquare = ['*'*3] + ['* *'] + ['*'*3]\n\nlevel = 0\ntmp, result = deepcopy(square), deepcopy(square)\n\nwhile level < k-1:\n level += 1\n result = []\n for s in tmp:\n result.append(s*3)\n for s in tmp:\n result.append(s + ' '*(3**level) + s)\n for s in tmp:\n result.append(s*3)\n tmp = deepcopy(result)\nfor r in result: print(r)\n\n# https://www.acmicpc.net/problem/2447" }, { "alpha_fraction": 0.38513514399528503, "alphanum_fraction": 0.46621620655059814, "avg_line_length": 23.83333396911621, "blob_id": "a5a0909b799e9689e0f91e691f8f9ba86ad0570c", "content_id": "cc4d6bab3d888eca1eab59ff3f69b05bfdf2f427", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 37, "num_lines": 6, "path": 
"/알고리즘/온라인저지/2022/07/0731/피보나치 수 4.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ndp = [0, 1]\nif N > 1: dp += [0] * (N-1)\nfor i in range(2, N+1):\n dp[i] = dp[i-1] + dp[i-2]\nprint(dp[-1]) if N != 0 else print(0)" }, { "alpha_fraction": 0.4324324429035187, "alphanum_fraction": 0.45945945382118225, "avg_line_length": 14.904762268066406, "blob_id": "0607d3ad476e524f828a81dfafafe94373438a87", "content_id": "d436c730334632964476d89869140b03e0deb0e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 44, "num_lines": 21, "path": "/알고리즘/온라인저지/2022/09/0919/영식이와 친구들.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, M, L = map(int, input().rstrip().split())\narr = [0]*N\narr[0] += 1\nresult = 0\nnow = 0\nwhile max(arr)<M:\n if arr[now]%2:\n now += L\n now %= N\n arr[now] += 1\n else:\n now -= L\n now += N\n now %= N\n arr[now] += 1\n result += 1\nprint(result)" }, { "alpha_fraction": 0.4039604067802429, "alphanum_fraction": 0.4534653425216675, "avg_line_length": 20.04166603088379, "blob_id": "646918ea1308c5f15f208b7b96d6660a3721416a", "content_id": "f112be1d9387d246c604d6daeaec79e46cb0f63a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 45, "num_lines": 24, "path": "/알고리즘/온라인저지/2021/12/1216/사분면.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nquadrant = [0 for _ in range(5)]\n\nfor n in range(N):\n x, y = map(int, input().split())\n\n if x == 0 or y == 0:\n quadrant[4] += 1\n elif x > 0 and y > 0:\n quadrant[0] += 1\n elif x < 0 and y > 0:\n quadrant[1] += 1\n elif x < 0 and y < 0:\n quadrant[2] += 1\n elif x > 0 and y < 0:\n quadrant[3] += 1\n\nfor i in range(5):\n if i == 4:\n print('AXIS: 
{}'.format(quadrant[i]))\n break\n\n print('Q{}: {}'.format(i+1, quadrant[i]))\n" }, { "alpha_fraction": 0.43220338225364685, "alphanum_fraction": 0.4406779706478119, "avg_line_length": 18.83333396911621, "blob_id": "99d0d9955da0c0454dffdc8b47e34b235f92a036", "content_id": "5dff70e872b27fb4fc862bd469c7b8302f780991", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 35, "num_lines": 6, "path": "/알고리즘/온라인저지/2021/08/0822/달팽이는 올라가고 싶다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, V = map(int, input().split())\nif (V-B)%(A-B):\n day = (V-B)//(A-B) + 1\nelse:\n day = (V-B)//(A-B)\nprint(day)" }, { "alpha_fraction": 0.4656488597393036, "alphanum_fraction": 0.49618321657180786, "avg_line_length": 17.85714340209961, "blob_id": "a4d8a7d0425bf41d1b4dab4e05b638f08a814997", "content_id": "d31bee524d636eb83c4d45382d134d78cb59e384", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/05/0514/악마의 제안.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K, N = map(int, input().split())\nresult = -1\nif N != 1:\n A, B = N*K, N-1\n result = A//B\n if A%B: result += 1\nprint(result)" }, { "alpha_fraction": 0.48695650696754456, "alphanum_fraction": 0.4956521689891815, "avg_line_length": 37.66666793823242, "blob_id": "f65717f7346d74f0d215de30f95fa609bf014c73", "content_id": "0a0d7886d8cc34161109c11e0f6ddce4f11f7e69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 47, "num_lines": 3, "path": "/알고리즘/온라인저지/2023/01/0110/폰 노이만.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n N, D, A, B, F = map(float, 
input().split())\n print(int(N), f'{D/(A+B)*F:.6f}')" }, { "alpha_fraction": 0.50971919298172, "alphanum_fraction": 0.5356371402740479, "avg_line_length": 32.07143020629883, "blob_id": "e48be24025c536ebdceb261d16ff635c3a969686", "content_id": "6e922edcac4f86837ea7be48f87e3feff172e3e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 647, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/알고리즘/온라인저지/2021/07/0731/3 6 9 게임의 왕이 되자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def three_six_nine(n):\n n = str(n) # 입력받은 숫자를 str로 변환\n tsn_list = \"369\" # 369를 확인하는 str값 설정\n result = \"\" # 초기 공백 결과값\n for num in n: # str로 바꿔준 n을 순회\n if num in tsn_list: # n의 각 숫자가 3,6,9일때\n result += \"X\" # 결과값에 박수 한 번\n if result == \"\": # 순회 다 해도 박수가 없으면\n result += n # 원본 숫자 출력\n return result # 결과값 반환\n\nn = int(input()) # 숫자를 입력받고\nfor i in range(1, n+1): # 1부터 입력값까지 반복\n print(three_six_nine(i), sep = \"\") # 공백단위로 출력\n" }, { "alpha_fraction": 0.4336734712123871, "alphanum_fraction": 0.4591836631298065, "avg_line_length": 23.625, "blob_id": "6f55fdc4fa3f64482286104e6f5c8e29a6518397", "content_id": "6fca9a427475178a5820519b77c8d8817aa9f804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 42, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/05/0516/Gauß.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for i in range(int(input())):\n N, M = map(int, input().split())\n A = M-N+1\n B = A//2\n result = (N+M)*B\n if A%2:\n result += (N+M)//2\n print(f'Scenario #{i+1}:\\n{result}\\n')" }, { "alpha_fraction": 0.529629647731781, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 19.846153259277344, "blob_id": "96c8e1747d340169bc1136820623445af8cba9a6", "content_id": "68595d9aeb4d10e64a30f224de3ef1fc6865ab91", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 35, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/06/0609/농구 경기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "table = [0]*26\nN = int(input())\nfor n in range(N):\n first = ord(input()[0]) - 97\n table[first] += 1\n# print(table)\nresult = ''\nfor i in range(len(table)):\n if table[i] >= 5:\n result = result + chr(i+97)\nif not result:\n result = 'PREDAJA'\nprint(result)" }, { "alpha_fraction": 0.5793871879577637, "alphanum_fraction": 0.5905292630195618, "avg_line_length": 17.947368621826172, "blob_id": "6e7f56da0c750d5af0b5bca0a6c98c34e02af949", "content_id": "677b4a8a3a6eda7df600de055b12fa66c8aaf0b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/10/1009/트로피 진열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from audioop import reverse\nfrom re import A\nimport sys\n\ninput = sys.stdin.readline\n\ndef check(arr):\n result = 1\n biggest = arr[0]\n for a in arr:\n if a>biggest:\n biggest = a\n result += 1\n return result\n\nN = int(input().rstrip())\narr = [int(input().rstrip()) for _ in range(N)]\nprint(check(arr))\nprint(check(arr[::-1]))" }, { "alpha_fraction": 0.4727272689342499, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 17.66666603088379, "blob_id": "3c76ccd668a06618fb744c0b812588f8680ba0ec", "content_id": "ebc7e8efc6274496dce58dcbf1c56a471c28a293", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 40, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/09/0925/악수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a=b=1\nfor _ in [0]*int(input()):a,b=b,(a+b)%10\nprint(a)" }, { 
"alpha_fraction": 0.48275861144065857, "alphanum_fraction": 0.5344827771186829, "avg_line_length": 18.5, "blob_id": "ac158ef99ff55cce4aff99a3223a8eae61624491", "content_id": "bf295b928eeb3bd4627d3c55c86de34dab745a28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/09/0913/직사각형을 만드는 방법.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = 0\nfor i in range(1, int(N**0.5)+1):\n tmp = N//i - (i-1)\n result += tmp\nprint(result)" }, { "alpha_fraction": 0.4781849980354309, "alphanum_fraction": 0.5340313911437988, "avg_line_length": 29.210525512695312, "blob_id": "a582a64625e08ef546c5a39c294549eadc67eec7", "content_id": "4ee4c1599337cd4e7bff1692e1ac1632418d564a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 573, "license_type": "no_license", "max_line_length": 51, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/12/1209/Hawk eyes.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def mix(order):\n if order == 'A':\n result[0], result[1] = result[1], result[0]\n elif order == 'B':\n result[0], result[2] = result[2], result[0]\n elif order == 'C':\n result[0], result[3] = result[3], result[0]\n elif order == 'D':\n result[2], result[1] = result[1], result[2]\n elif order == 'E':\n result[3], result[1] = result[1], result[3]\n elif order == 'F':\n result[2], result[3] = result[3], result[2]\n\nresult = [1, 0, 0, 2]\nfor order in input():\n mix(order)\nprint(result.index(1)+1)\nprint(result.index(2)+1)" }, { "alpha_fraction": 0.4952380955219269, "alphanum_fraction": 0.5047619342803955, "avg_line_length": 34.33333206176758, "blob_id": "d13e07760ff14e0820d2b3e13f68136399c9f6d7", "content_id": "6f85bace3f0ad7ceb858c0e6dd546bb6f85db37e", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 59, "num_lines": 3, "path": "/알고리즘/온라인저지/2023/01/0123/자기복제수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n N = input()\n print('YES' if N == str(int(N)**2)[-len(N):] else 'NO')" }, { "alpha_fraction": 0.5040650367736816, "alphanum_fraction": 0.5203251838684082, "avg_line_length": 16.714284896850586, "blob_id": "d95b7e68328c000663165f74331bdbc84165d25d", "content_id": "f4bedaf6d33277c377491ee45a430952c15dc240", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 24, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/08/0828/사장님 도박은 재미로 하셔야 합니다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nwhile True:\n money = int(input())\n if money == -1:\n print(result)\n break\n result += money" }, { "alpha_fraction": 0.38461539149284363, "alphanum_fraction": 0.39835163950920105, "avg_line_length": 23.266666412353516, "blob_id": "59233dc2edb549cf11a3b9d7ff72d384abd73fbd", "content_id": "42df7147547984d6c79ef895e8e339399ad1d12e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 37, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/09/0903/쌍의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n N = int(input())\n result = []\n for n in range(1, (N//2)+1):\n A, B = n, N-n\n if A != B:\n result.append((A, B))\n print(f'Pairs for {N}: ', end='')\n cnt = 0\n for r in result:\n print(*r, end='')\n cnt += 1\n if cnt != len(result):\n print(', ', end='')\n print()\n" }, { "alpha_fraction": 0.37823835015296936, "alphanum_fraction": 0.4715026021003723, "avg_line_length": 16.18181800842285, "blob_id": 
"238202db72b4d41a4c4798e5e5990080d3fcea72", "content_id": "e9271e35f72d9f4f9ef706e9c0eaed0201a0c034", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 60, "num_lines": 11, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/3차시 2. 자료구조 – 리스트, 튜플 - 연습문제 1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "scores = [(90, 80), (85, 75), (90, 100)]\n\ns = 0\navg = 0\ni = 0\n\nfor score in scores:\n i += 1\n s = sum(score)\n avg = s/2\n print('{}번 학생의 총점은 {}점이고, 평균은 {}입니다.'.format(i, s, avg))\n " }, { "alpha_fraction": 0.41265398263931274, "alphanum_fraction": 0.4440089464187622, "avg_line_length": 23.81944465637207, "blob_id": "d741dd72442868550a9ea209d9a53dc92f11e34e", "content_id": "0ffdf49d206be3ec3d03d3a838fa02ee9223f007", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2162, "license_type": "no_license", "max_line_length": 66, "num_lines": 72, "path": "/알고리즘/온라인저지/2022/07/0709/섬의 개수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n배열을 탐색하다가 섬을 발견하면\n 섬의 개수 ++\n 섬을 바다로 만든다 (1 -> 0) # BFS (DFS로 하는데 메모리초과가 계속 남 ㅜㅜ)\n 발견한 섬의 개수를 출력\n\"\"\"\nfrom collections import deque # BFS용 double ended queue\n\n\ndef bfs(y, x): # 섬을 바다로 만드는 BFS함수\n Q.append((y, x))\n while Q:\n y, x = Q.popleft()\n if arr[y][x] == 0:\n continue\n arr[y][x] = 0\n for i in range(8):\n ny = y+dy[i]\n nx = x+dx[i]\n if 0<=ny<H and 0<=nx<W and arr[ny][nx]:\n Q.append((ny, nx))\n\n\ndy = [-1, 1, 0, 0, 1, 1, -1, -1] # 대각선 포함한 8방향 델타이동\ndx = [0, 0, -1, 1, 1, -1, 1, -1]\nwhile True:\n W, H = map(int, input().split())\n if W == 0 and H == 0: # 테스트케이스 입력 종료조건\n break\n arr = [list(map(int, input().split())) for _ in range(H)] # 지도\n result = 0 # 섬의 개수\n Q = deque()\n for h in range(H):\n for w in range(W):\n if arr[h][w]: # 섬을 발견하면\n bfs(h, w) # 섬을 바다로 만들기\n result 
+= 1 # 섬의 개수 ++\n print(result) # 섬의 개수 출력\n\n\n# 아래는 재귀함수를 활용한 DFS로 풀었는데\n# 메모리초과를 피하지 못했습니다ㅜㅜ\n# 재귀를 계속 돌면서 함수 내 메모리를 전부 사용한 것 같은데...\n# import sys\n# sys.setrecursionlimit(10**9)\n\n\n# def dfs(y, x): # 섬을 바다로 만들어 주면서 섬 개수 세기\n# if arr[y][x] == 0:\n# return\n# arr[y][x] = 0\n# for i in range(8):\n# ny = y+dy[i]\n# nx = x+dx[i]\n# if 0<=ny<H and 0<=nx<W and arr[ny][nx]:\n# dfs(ny, nx)\n\n\n# dy = [-1, 1, 0, 0, 1, 1, -1, -1]\n# dx = [0, 0, -1, 1, 1, -1, 1, -1]\n# while True:\n# W, H = map(int, input().split())\n# if W == 0 and H == 0:\n# break\n# arr = [list(map(int, input().split())) for _ in range(H)]\n# result = 0\n# for h in range(H):\n# for w in range(W):\n# if arr[h][w]:\n# dfs(h, w)\n# result += 1\n# print(result)" }, { "alpha_fraction": 0.6880733966827393, "alphanum_fraction": 0.6926605701446533, "avg_line_length": 23.22222137451172, "blob_id": "47bb257df616eea53fa1b0c255d8eb2e401a4624", "content_id": "eef34007a192acd17c7e1d1fd6d1446f3336b7fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 54, "num_lines": 9, "path": "/알고리즘/온라인저지/2021/08/0829/줄 세우기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\nstudent_count = int(sys.stdin.readline())\nnumbers = list(map(int, sys.stdin.readline().split()))\nresult = []\nfor i in range(len(numbers)):\n result.insert(numbers[i], i+1)\nresult.reverse()\nprint(*result)\n" }, { "alpha_fraction": 0.610859751701355, "alphanum_fraction": 0.6199095249176025, "avg_line_length": 13.800000190734863, "blob_id": "a1b88c0bd5eb6f171e0f1a722e4580becb9a96df", "content_id": "36e164cbf19f52bc2e011cd5c5bf8818164c3e4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 46, "num_lines": 15, "path": "/알고리즘/온라인저지/2021/10/1011/등수 매기기.py", "repo_name": "sorrow4468/BAEKJOON", 
"src_encoding": "UTF-8", "text": "N = int(input())\n\nstudents = []\n\nfor n in range(N):\n students.append(int(input()))\n\nstudents.sort()\n\ngrades = list(range(1, N+1))\n\nfor i in range(N):\n students[i] = abs(students[i] - grades[i])\n\nprint(sum(students))" }, { "alpha_fraction": 0.42553192377090454, "alphanum_fraction": 0.44964540004730225, "avg_line_length": 22.53333282470703, "blob_id": "e673c66160ac51884acdad2ec216ce05c088c4fe", "content_id": "888c4cccd1a6c17a27069030cc66f0e164909e9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 853, "license_type": "no_license", "max_line_length": 71, "num_lines": 30, "path": "/알고리즘/온라인저지/2021/12/1230/소수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nM = int(input())\n\nn = M # 소수를 찾을 범위\na = [False,False] + [True]*(n-1) # 0 ~ n 까지의 숫자 리스트\nprimes=[] # 소수 집합\nfor i in range(2,n+1): # 2 ~ n까지 반복\n if a[i]: # 2부터 시작, 해당 숫자가 지워지지 않고 남아있는 소수라면 \n primes.append(i) # 소수 리스트에 추가\n for j in range(2*i, n+1, i): # 해당 소수의 배수들을 \n a[j] = False # 리스트에서 전부 False로 만들기\n\nresult1 = 0\nresult2 = 0\n\nfind_min = False\n\nfor p in primes:\n if p >= N and not find_min:\n find_min = True\n result2 = p\n \n if find_min:\n result1 += p\n \nif find_min:\n print(result1)\n print(result2)\nelse:\n print(-1)" }, { "alpha_fraction": 0.5068492889404297, "alphanum_fraction": 0.5479452013969421, "avg_line_length": 23.44444465637207, "blob_id": "efa0b162be31747c5eb691fa0043689dd2f81ea9", "content_id": "9c6111bbca13f81616f44bc003140ae2da84bd9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/04/0411/일우는 야바위.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, X, K = map(int, input().split())\ncups = [0] * N\ncups[X-1] = 1\n# print(cups)\nfor k in range(K):\n A, B = 
map(int, input().split())\n cups[A-1], cups[B-1] = cups[B-1], cups[A-1]\n# print(cups)\nprint(cups.index(1)+1)" }, { "alpha_fraction": 0.36966824531555176, "alphanum_fraction": 0.3909952640533447, "avg_line_length": 14.666666984558105, "blob_id": "cbff5d09413cda50063a553d3e903c920f8c95a8", "content_id": "f5d4f196e220852b6acac9e5b8f7eac537e95b46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 422, "license_type": "no_license", "max_line_length": 31, "num_lines": 27, "path": "/알고리즘/온라인저지/2021/09/0910/Router.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\nN = int(input())\nq = [0] * N\np = 0\nwhile True:\n a = int(input())\n if a == -1:\n break\n elif a == 0:\n q.pop(0)\n q.append(0)\n p -= 1\n else:\n if q[-1]:\n continue\n else:\n q[p] = a\n p += 1\nif sum(q):\n for i in q:\n if i:\n print(i, end=' ')\nelse:\n print('empty')" }, { "alpha_fraction": 0.3734939694404602, "alphanum_fraction": 0.46987950801849365, "avg_line_length": 15.800000190734863, "blob_id": "abf22fbc7546b5dce3b3c7c12c1c33d646b445a7", "content_id": "56fdd128b42e70b803ad84da59d56a0183d09851", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, "license_type": "no_license", "max_line_length": 20, "num_lines": 5, "path": "/알고리즘/온라인저지/2021/08/0822/나머지.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "d = [0] * 42\nfor _ in range(10):\n n = int(input())\n d[n%42] = 1\nprint(sum(d))" }, { "alpha_fraction": 0.6906779408454895, "alphanum_fraction": 0.6991525292396545, "avg_line_length": 20.545454025268555, "blob_id": "1678dd85d89e4ab51acaca81b93f39ff5697ae19", "content_id": "8a91f777956e088fe72d41e4e5867696bbdf25bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "no_license", "max_line_length": 67, 
"num_lines": 11, "path": "/알고리즘/온라인저지/2022/11/1117/나는 친구가 적다 (Small).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nword = input().rstrip()\nkeyword = input().rstrip()\ntmp = ''\nfor w in word:\n if w in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':\n tmp = tmp+w\nprint(1 if keyword in tmp else 0)" }, { "alpha_fraction": 0.4435215890407562, "alphanum_fraction": 0.5033222436904907, "avg_line_length": 21.296297073364258, "blob_id": "f253cc01c49cec93efa65ca86cee73b567a4d9eb", "content_id": "e827bfd15aa21396a3e6a19e9826297b9f03598f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 834, "license_type": "no_license", "max_line_length": 55, "num_lines": 27, "path": "/알고리즘/온라인저지/2021/10/1016/1로 만들기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\nN = int(input())\n\nif N == 1: # 입력값이 1이면 0을 곧바로 반환합니다\n print(0)\n exit()\n\n# 1에서부터 거꾸로 탐색합니다\nq = deque()\nq.append((1, 0)) # 시작하는 1, 연산횟수 0부터 시작\n\ncheck = set() # 중복연산 방지\n\nwhile q:\n a = q.popleft()\n\n for i in [a[0]*3, a[0]*2, a[0]+1]: # 1에서 출발하기 때문에\n # 연산들을 거꾸로 실행\n if i == N: # N을 찾으면\n print(a[1]+1) # 연산횟수 1회 더하여 출력\n exit() # 바로 코드를 종료\n\n if i not in check and 1 <= i <= 1000000: # 가지치기\n # N은 1000000을 넘지 않는 자연수입니다\n check.add(i) # 범위를 만족하면 check에 추가\n q.append((i, a[1]+1)) # BFS 대기열에 추가\n" }, { "alpha_fraction": 0.5248869061470032, "alphanum_fraction": 0.5429864525794983, "avg_line_length": 19.18181800842285, "blob_id": "2a1de6f8c097c6714747f8e0678e0d1c898d5695", "content_id": "00fb7bbec6973bed5f12409192aa35717c360b42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 41, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/07/0730/2+1 세일.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = 
int(input())\nP = [] # price\nfor n in range(N): P.append(int(input()))\nP.sort(reverse=True)\nresult = 0\nfor i in range(0, N, 3):\n try:\n result += sum(P[i:i+2])\n except:\n result += P[i:]\nprint(result)" }, { "alpha_fraction": 0.6243094205856323, "alphanum_fraction": 0.6464088559150696, "avg_line_length": 17.200000762939453, "blob_id": "6ffa92dcb21acea0b85b62c5f8d82324f598fe8e", "content_id": "49a1f55543b7824778e1c831d4ecee780df33ecf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 42, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/02/0222/과목선택.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "science = [int(input()) for _ in range(4)]\nsocial = [int(input()) for _ in range(2)]\n\nscience.sort(reverse=True)\n\nresult = 0\n\nresult += sum(science[:3]) + max(social)\n\nprint(result)" }, { "alpha_fraction": 0.44897958636283875, "alphanum_fraction": 0.47755101323127747, "avg_line_length": 16.5, "blob_id": "2320c53a56dae959300b621d051ecf26fc1b0564", "content_id": "bfd1c879c6c46531fc4137f9119a5cd8f1e761d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 42, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/02/0207/최댓값.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "maxx = 0\nmaxi = 0\nmaxj = 0\n\nfor i in range(9):\n line = list(map(int, input().split()))\n \n for j in range(9):\n if line[j] > maxx:\n maxx = line[j] \n maxi, maxj = i, j\n\nprint(maxx)\nprint(maxi+1, maxj+1)\n" }, { "alpha_fraction": 0.5443925261497498, "alphanum_fraction": 0.5630841255187988, "avg_line_length": 18.5, "blob_id": "7251d55610e6ac81de3323a94b8e9e71609b7b9d", "content_id": "23c599716e0e3a4cbef555028c2e8e7c810a33b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, 
"license_type": "no_license", "max_line_length": 41, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/11/1104/찍기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nstudent = [\n 'ABCABCABCABC',\n 'BABCBABCBABC',\n 'CCAABBCCAABB',\n]\nN = int(input().rstrip())\nanswer = input().rstrip()\nresult = [0, 0, 0]\nfor i in range(N):\n for j in range(3):\n if answer[i] == student[j][i%12]:\n result[j] += 1\nIDs = ['Adrian', 'Bruno', 'Goran']\nmaxx = max(result)\nprint(maxx)\nfor i in range(3):\n if result[i] == maxx:\n print(IDs[i])" }, { "alpha_fraction": 0.5270935893058777, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 28.14285659790039, "blob_id": "30a25931616b2ef6937dfe891deaea72cdaa0fb8", "content_id": "a4114167277cdbab29ad9692a1ccb0f1650882e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 203, "license_type": "no_license", "max_line_length": 41, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/11/1129/이교수님의 시험.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ncorrect = [1, 2, 3, 4, 5]\nfor answer in range(1, N+1):\n arr = list(map(int, input().split()))\n if arr[:5] != correct: continue\n if arr[5:] != correct: continue\n print(answer)" }, { "alpha_fraction": 0.4822694957256317, "alphanum_fraction": 0.4893617033958435, "avg_line_length": 19.285715103149414, "blob_id": "b52f1a55c23402cab543b69b2946c3f2ac303f27", "content_id": "53291c653d7f2677189c43a7d0753c415b3cea22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 141, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/09/0903/최소공배수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def gcd(a, b):\n while b > 0: a, b = b, a%b\n return a\n\nA, B = map(int, input().split())\nGCD = gcd(A, B)\nprint(GCD * (A//GCD) * (B//GCD))" 
}, { "alpha_fraction": 0.607594907283783, "alphanum_fraction": 0.607594907283783, "avg_line_length": 21.714284896850586, "blob_id": "ccd944d4e831fa17d2728e36fc20227465893322", "content_id": "fff69a3e69e485f7b533b783c534d9f107c50694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/알고리즘/온라인저지/2021/07/0730/[기초-산술연산] 정수 2개 입력받아 거듭제곱 계산하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# 두 변수를 입력받아서 각각 저장한다\n# .split()이 하는 일이, 결국 input()받은 건 하나의 str인데\n# 그걸 공백으로 쪼개서 각각 저장하는 기능을 한거다\na, b = input().split()\n# **는 거듭제곱\nc = int(a)**int(b) \nprint(c)" }, { "alpha_fraction": 0.3464052379131317, "alphanum_fraction": 0.379084974527359, "avg_line_length": 17.399999618530273, "blob_id": "28bc9eb73169acef4c8c9c1818213a68de320365", "content_id": "280983134c50c67bcad0e8a608eafb2e11a3e8e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 36, "num_lines": 25, "path": "/알고리즘/온라인저지/2022/02/0205/문자열 분석.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "upper = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nlower = 'abcdefghijklmnopqrstuvwxyz'\nnum = '0123456789'\n\n\nwhile True:\n try:\n a = b = c = d = 0\n \n sen = input()\n \n for s in sen:\n if s in upper:\n b += 1\n elif s in lower:\n a += 1\n elif s in num:\n c += 1\n elif s == ' ':\n d += 1\n\n print(a, b, c, d)\n\n except:\n break" }, { "alpha_fraction": 0.589353621006012, "alphanum_fraction": 0.5969581604003906, "avg_line_length": 21, "blob_id": "a99a7269d47c7ddda4b4fe485464bebd81571fd5", "content_id": "d2f992a14ec8ec0cc55ed229b128dfa6bac8492e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 38, "num_lines": 12, "path": 
"/알고리즘/온라인저지/2022/09/0923/최대 힙.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom heapq import heappush, heappop\n\ninput = sys.stdin.readline\n\nHQ = []\nfor _ in range(int(input().rstrip())):\n num = int(input().rstrip())\n if num: heappush(HQ, (-num, num))\n else: \n if HQ: print(heappop(HQ)[1])\n else: print(0)" }, { "alpha_fraction": 0.5037453174591064, "alphanum_fraction": 0.5599250793457031, "avg_line_length": 27.891891479492188, "blob_id": "a5fa93ea7e5093b470e1054bbaed2db7d3a1dbe6", "content_id": "8ac4b3475278057bf810ea88ea1b01a2dfe29a4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1720, "license_type": "no_license", "max_line_length": 72, "num_lines": 37, "path": "/알고리즘/온라인저지/2021/08/0807/셀프 넘버.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n함수 d(75) = 75 + 7 + 5 = 75 + 12 + 87\n87의 생성자(이전항) = 75\n생성자(이전항)가 없는 숫자 = 셀프 넘버\n셀프넘버 = 자연수 - 셀프넘버가 아닌 수\n셀프넘버가 아닌 수 \n = 생성자가 있는 수\n = N까지 함수d 돌려서 나온 값들\n셀프넘버가 아닌 수 리스트를 만들고 \nN까지와, 셀프넘버아닌수 리스트를 이중순회 하면서\n리스트안에 없으면 출력\n\"\"\"\n\nN = 10000 # 순회 범위\n\ndef Kaprekar(n): # n본인과 각 자리수들을 더하는 함수\n # Kaprekar를 통해 만들어진 값은 생성자가 존재하는 \"셀프넘버가 아닌 숫자\"\n # 10의 자리 : n + n%10 + (n//10)%10\n # 100의 자리 : n + n%10 + (n//10)%10 + (n//100)%10\n def Kapre(n): # 각 자릿수를 더하는 재귀함수\n result = 0 # 결과값 초기화\n if n < 10: # 한 자리수 일때\n result += n # 결과값에 저장\n else: # 두 자리 이상일 때\n return n%10 + Kapre(n//10) # 1의 자리수 값과 재귀호출 반환\n return result # 각 자리수를 더한 값을 반환\n return n + Kapre(n) # 처음 input값 n과, 각자리 숫자들을 더한 값을 반환\n\nnot_self_numbers = [] # 셀프넘버가 아닌 수 리스트 초기화\nfor i in range(N + 1): # 10000보다 작거나 \"같은\" 셀프넘버가 아닌 수\n not_self_numbers.append(Kaprekar(i)) # 함수를 통과한 값은 셀프넘버가 아니므로 리스트에 추가\n\nfor i in range(1, N + 1): # 1부터 N까지 순회하면서 \n if i in not_self_numbers: # 순회중인 i가 셀프넘버가 아닌 값이면\n continue # 지나가고\n else: # 셀프넘버이면\n print(i) # 출력" }, { "alpha_fraction": 0.5553505420684814, "alphanum_fraction": 0.5664206743240356, 
"avg_line_length": 21.625, "blob_id": "6866b7ac06b560a1c19e07e04b50482992001c05", "content_id": "b2f657285aea0b24f2f8816b1b2e43ca6829ca6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "no_license", "max_line_length": 38, "num_lines": 24, "path": "/알고리즘/온라인저지/2022/04/0413/홀짝 칵테일.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from itertools import combinations\n\nnums = list(map(int, input().split()))\nnums.sort(reverse=True)\nodd_drinks = []\neven_drinks = []\nfor i in range(1, len(nums)+1):\n tmp = list(combinations(nums, i))\n # print(tmp)\n for t in tmp:\n a = 1\n for j in t:\n # print(j)\n a *= j\n if a%2:\n odd_drinks.append(a)\n else:\n even_drinks.append(a)\nodd_drinks.sort(reverse=True)\neven_drinks.sort(reverse=True)\nif odd_drinks:\n print(odd_drinks[0])\nelse:\n print(even_drinks[0])" }, { "alpha_fraction": 0.4548022747039795, "alphanum_fraction": 0.5310734510421753, "avg_line_length": 19.882352828979492, "blob_id": "826f2e0b1f0d7be685d16d669370a2121c3fb13b", "content_id": "17ddaa4e5886a233da9c6e5d04d7ca4c2d087e0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 54, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/08/0817/터렛.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import math\n\nfor t in range(int(input())):\n x1, y1, r1, x2, y2, r2 = map(int, input().split())\n D = math.sqrt((x1-x2)**2 + (y1-y2)**2) # distance\n result = 0\n if D == 0 and r1 == r2: result = -1\n elif D == abs(r1-r2) or D == r1+r2: result = 1\n elif abs(r1-r2) < D < r1+r2: result = 2\n print(result)\n\n\"\"\"\n일치\n내접or외접\n두 점에서 접함\n접점이 없음\n\"\"\"" }, { "alpha_fraction": 0.4150943458080292, "alphanum_fraction": 0.5408805012702942, "avg_line_length": 15, "blob_id": "4db976a06b8552e9211984575f2439dd85eefb38", "content_id": 
"339afa166635ef03f3067fe8809b8fffa294d297", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "no_license", "max_line_length": 36, "num_lines": 10, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/23차시 2. 자료구조 – 리스트, 튜플 - 연습문제 25.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "arr = [12, 24, 35, 70, 88, 120, 155]\n\nresult = []\n\nfor i in range(len(arr)):\n if i+1 in (1, 5, 6):\n continue\n result.append(arr[i])\n\nprint(result)" }, { "alpha_fraction": 0.3530927896499634, "alphanum_fraction": 0.3814432919025421, "avg_line_length": 17.5238094329834, "blob_id": "e2db528c1480c4016cc9246fe3ea7f95a803c4f9", "content_id": "9ff41b953fd1ab9019498f67abbb1722e07fe3aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 59, "num_lines": 21, "path": "/알고리즘/온라인저지/2021/08/0828/크로아티아 알파벳.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ncroatia = ['c=', 'c-', 'dz=', 'd-', 'lj', 'nj', 's=', 'z=']\n\nword = sys.stdin.readline()\ni = 0\ncnt = 0\nwhile i < len(word)-1:\n try:\n if word[i:i+2] in croatia:\n cnt += 1\n i += 2\n elif word[i:i+3] in croatia:\n cnt += 1\n i += 3\n else:\n cnt += 1\n i += 1\n except:\n pass\nprint(cnt)" }, { "alpha_fraction": 0.43718594312667847, "alphanum_fraction": 0.46063652634620667, "avg_line_length": 21.148147583007812, "blob_id": "54edaa97c63bca5054a0217098d040de1eab138d", "content_id": "713cf37b692aa3b7f8e51735aa999e7a49bf450a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 791, "license_type": "no_license", "max_line_length": 45, "num_lines": 27, "path": "/알고리즘/온라인저지/2021/10/1016/로또.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def dfs(x, cnt):\n if cnt == 6: # 고른 숫자가 6개면\n for i in 
range(k): # 고른 숫자들을 전체 조회할 것\n if visited[i]: # 고른 숫자가 있으면\n print(tmp[i], end=' ') # 출력\n print() # 줄바꿈\n return # 해당 재귀 종료\n \n for i in range(x, k): # 숫자를 하나씩 선택할건데\n visited[i] = 1 # 해당 숫자 선택\n dfs(i+1, cnt+1) # 다음 숫자 선택하러\n visited[i] = 0 # DFS\n # 갔다가 찍고 되돌아오는 부분\n\nwhile True:\n tmp = list(map(int, input().split()))\n \n if tmp[0] == 0:\n break\n\n k = tmp.pop(0)\n\n visited = [0] * k\n\n dfs(0, 0) # 0번째숫자부터, 고른숫자 0개부터\n\n print() # 매 케이스마다 줄바꿈" }, { "alpha_fraction": 0.6242774724960327, "alphanum_fraction": 0.647398829460144, "avg_line_length": 25, "blob_id": "5d02de840e1810bee1bc8859696aef301538ca82", "content_id": "8153b9946e9f4cc9719b256788318b07a3c23787", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 68, "num_lines": 20, "path": "/알고리즘/온라인저지/2022/09/0923/주유소.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\ndistances = list(map(int, input().rstrip().split()))\nfuels = list(map(int, input().rstrip().split()))\nrefuel = []\nfuel_min = int(1e9)\nfor i in range(len(fuels)-1):\n if fuels[i] < fuel_min:\n refuel.append(i)\n fuel_min = min(fuel_min, fuels[i])\nrefuel.append(len(fuels)-1)\nresult = 0\nfor i in range(len(refuel)-1):\n result += sum(distances[refuel[i]:refuel[i+1]])*fuels[refuel[i]]\nprint(result)\n\n# https://www.acmicpc.net/problem/13305" }, { "alpha_fraction": 0.5873016119003296, "alphanum_fraction": 0.60317462682724, "avg_line_length": 31, "blob_id": "6c9f1ec9b1179afb2db22d83dee28f8b321b9179", "content_id": "669aa37d9cdac846ad1d6eb75a09f87d863db0ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "no_license", "max_line_length": 48, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/12/1216/팰린드롬.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": 
"word = input()\nprint('true' if word == word[::-1] else 'false')" }, { "alpha_fraction": 0.375342458486557, "alphanum_fraction": 0.40547946095466614, "avg_line_length": 11.620689392089844, "blob_id": "f490660458e0370fec87bcbf961d418b46b51017", "content_id": "e15b2614de980a1cc5eceb4912d06050d4cf1742", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 29, "num_lines": 29, "path": "/알고리즘/온라인저지/2022/02/0202/완전제곱수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "M = 0\nN = 0\n\nfor i in range(2):\n if i == 0:\n M = int(input())\n elif i == 1:\n N = int(input())\n\ni = 0\nminn = 0\nsumm = 0\n\nwhile True:\n i += 1\n pow = i ** 2\n if pow > N:\n break\n\n if pow >= M and pow <= N:\n if not minn:\n minn = pow\n \n summ += pow\n\nif minn:\n print(summ, minn)\nelse:\n print(-1)" }, { "alpha_fraction": 0.5176470875740051, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 27.44444465637207, "blob_id": "aa0168d82441c37bb6f8fabef8dc0b5fb7d9b688", "content_id": "7382192a602227f3eecf0e2f673d6ea44beb7f7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/03/0320/출석 이벤트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nP = int(input())\nresult = P\nif N >= 5: result = min(result, P-500)\nif N >= 10: result = min(result, int(P*0.9))\nif N >= 15: result = min(result, P-2000)\nif N >= 20: result = min(result, int(P*0.75))\nif result < 0: result = 0\nprint(result)" }, { "alpha_fraction": 0.3758741319179535, "alphanum_fraction": 0.3916083872318268, "avg_line_length": 25, "blob_id": "7e78d2d3721326c4b6eeb84f3c425d2935b33d06", "content_id": "7a1bcbed171434aca047eb05209df4a41a0aeefd", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 696, "license_type": "no_license", "max_line_length": 43, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/01/0122/베르트랑 공준.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n N = int(input())\n if N == 0: # 종료 조건\n break\n \n n = 2 * N # 2n까지 소수를 구할 것\n\n # 에라토스테네스의 체\n a = [False,False] + [True]*(n-1)\n primes = [] \n for i in range(2,n+1): \n if a[i]: \n primes.append(i) \n for j in range(2*i, n+1, i): \n a[j] = False \n\n # 2n까지 구한 소수들을 순회하다가\n for i in range(len(primes)):\n if primes[i] > N: # n보다 커지는 구간이 나오면\n # 소수 전체 길이에서 구간시작 인덱스 빼기\n print(len(primes)-i)\n break\n" }, { "alpha_fraction": 0.5144927501678467, "alphanum_fraction": 0.5362318754196167, "avg_line_length": 22.16666603088379, "blob_id": "ce3b70fb30164523498f445735fbf9deef507826", "content_id": "c217edc639b1569b71d6be5692bdbd88a76ee996", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 34, "num_lines": 6, "path": "/알고리즘/온라인저지/2021/07/0730/[기초-비트시프트연산] 정수 1개 입력받아 2배 곱해 출력하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "n = int(input())\n# 숫자를 비트단위로 밀어버리는 << >>\n# << : 숫자는 2진수이고 왼쪽으로 한 칸 밀면 값이 두배\n# >> : 오른쪽으로 한 칸 밀면 값이 반\n# <<로 n만큼 밀어주면 값이 2**n배\nprint(n << 1)" }, { "alpha_fraction": 0.5432432293891907, "alphanum_fraction": 0.5540540814399719, "avg_line_length": 29.91666603088379, "blob_id": "c7ce8376699516bddee8c368aa83a4d086b35aac", "content_id": "b97bbfe3f8ab86b75ec6203d36274f4e5c574882", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 536, "license_type": "no_license", "max_line_length": 49, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/07/0729/추월.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ndct = {input(): i for i in range(N)} # 터널에 들어간 순서\nresult = 0\ncheck = [0] * N # 나간 차 기록 
list\nfor n in range(N): # 나가는 차 입력\n tmp = input() # 나간 차 번호판\n IN = dct[tmp] # 나간 차의 터널에 들어간 순서\n OUT = sum(check[:IN]) # 먼저 나간 차량 수\n # 내 앞 차량들의 수가 터널을 들어갈 때 보다 나올 때 더 적으면\n if OUT < IN: result += 1 # 추월차량\n check[IN] = 1 # 해당 차는 터널을 빠져나옴\nprint(result)" }, { "alpha_fraction": 0.4465116262435913, "alphanum_fraction": 0.47441861033439636, "avg_line_length": 21.275861740112305, "blob_id": "d4fc4dabb61772057e2a0bad918233a8765f6dba", "content_id": "2ce9a90dbb0b8aa8fabece610bf1379001e5d0d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 52, "num_lines": 29, "path": "/알고리즘/온라인저지/2023/01/0130/언더프라임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def prime_list(size):\n sieve = [True]*(size+1)\n for i in range(2, int(size**0.5)+1):\n if sieve[i]:\n for j in range(i+i, size+1, i):\n sieve[j] = False\n return [i for i in range(2, size+1) if sieve[i]]\n \ndef factorization(x):\n i = 0\n d = primes[i]\n answer = 0\n while d <= x:\n if x % d == 0:\n answer += 1\n x = x // d\n else:\n i += 1\n d = primes[i]\n return answer\n\n\nprimes = prime_list(int(1e6))\nA, B = map(int, input().split())\nresult = 0\nfor i in range(A, B+1):\n if factorization(i) in primes:\n result += 1\nprint(result)" }, { "alpha_fraction": 0.5034482479095459, "alphanum_fraction": 0.5241379141807556, "avg_line_length": 30.071428298950195, "blob_id": "06289fbfbae63f6e9b36ad9db0c8e7e31ad7e8d4", "content_id": "272a8dc4c42092081f93f2e9b133b763c4e46720", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "no_license", "max_line_length": 74, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/04/0417/덩치.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nwnhs = [] # 덩치 리스트\nresult = [] # 덩치 순위 리스트\nfor n in range(N): # N명의\n wnhs.append(tuple(map(int, 
input().split()))) # 덩치 입력\n# N * N \nfor i in range(N): # 5명의 덩치를 비교\n cnt = 1 # 덩치를 비교하기 전까지는 모두가 덩치 1등\n for j in range(N):\n # 본인보다 덩치가 큰 사람이 있다면\n if i != j and wnhs[i][0] < wnhs[j][0] and wnhs[i][1] < wnhs[j][1]:\n cnt += 1 # 덩치 순위 +1\n result.append(cnt) # 덩치 순위 추가\nprint(*result) # 출력\n" }, { "alpha_fraction": 0.4611872136592865, "alphanum_fraction": 0.465753436088562, "avg_line_length": 15.923076629638672, "blob_id": "0360dc695cbb6f3df34b3f8bbd4f3d819d225da5", "content_id": "45ee1628f5cd5b2014cd257ca784bb976959e9e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 36, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/07/0713/초6 수학.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def gcd(a, b):\n while b > 0:\n a, b = b, a%b\n return a\n\ndef lcm(a, b):\n return a * b // gcd(a, b)\n\n\nT = int(input())\nfor t in range(T):\n a, b = map(int, input().split())\n print(lcm(a, b), gcd(a, b))" }, { "alpha_fraction": 0.6290322542190552, "alphanum_fraction": 0.6344085931777954, "avg_line_length": 22.375, "blob_id": "b09966a700262a2e7d52e1556dce7e7280ef0c46", "content_id": "ffe128c874e93f0744925cc3c78a03e8db3e5aa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 54, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/09/0926/주차의 신.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in range(int(input().rstrip())):\n input()\n parking = list(map(int, input().rstrip().split()))\n print((max(parking)-min(parking))*2)" }, { "alpha_fraction": 0.5405405163764954, "alphanum_fraction": 0.5540540814399719, "avg_line_length": 36.5, "blob_id": "d068498d57067002d6b4a9d7abd0b7c50f8a764a", "content_id": "45369a96aab0c046dff54ae2e956e53a2d8c5bcb", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 43, "num_lines": 2, "path": "/알고리즘/온라인저지/2023/02/0201/태보태보 총난타.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "taebo = input().split('(^0^)')\nfor t in taebo:print(t.count('@'), end=' ')" }, { "alpha_fraction": 0.44485294818878174, "alphanum_fraction": 0.4595588147640228, "avg_line_length": 17.133333206176758, "blob_id": "2bff4bba422a29e81afd0c499d371eb13617907d", "content_id": "1d2ed8a1dbd88d007d2b53d1ae54b0e7b915c031", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 37, "num_lines": 15, "path": "/알고리즘/온라인저지/2021/12/1213/Yangjojang of The Year.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor tc in range(T):\n N = int(input())\n\n YOTY = ''\n YOTY_drink = 0\n\n for n in range(N):\n univ = input().split()\n if int(univ[1]) > YOTY_drink:\n YOTY_drink = int(univ[1])\n YOTY = univ[0]\n \n print(YOTY)\n" }, { "alpha_fraction": 0.5190274715423584, "alphanum_fraction": 0.5528541207313538, "avg_line_length": 27.696969985961914, "blob_id": "337f3d6032a84f3f6f256e26fa64e9529febc53d", "content_id": "0af2cfe37964ac2ca47cea61c259c80e7d0811fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1444, "license_type": "no_license", "max_line_length": 63, "num_lines": 33, "path": "/알고리즘/온라인저지/2022/03/0321/지름길.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n세준이는 지름길을 타고 이동하지 않는다\n그저 일자로 직진하면서\n지름길이 있는지, 있다면 그 지름길을 지나갈 때\n운전거리가 얼마나 줄어드는지\"만\" 확인한다\n세준이는 일자로 간다\n\"\"\"\nfrom collections import deque\nN, D = map(int, input().split())\ntmp = [list(map(int, input().split())) for _ in range(N)]\ntmp.sort(key=lambda x:x[0]) # 출발점 기준 정렬\nroad = deque(tmp) # 지름길들을 담아놓음\n# 문제의 조건 : D < 
10000\ndp = [int(1e4) for _ in range(D+1)] # 1e4 = 10000.0, 인덱스 위한 D+1\ndp[0] = 0 # 시작은 0\nfor i in range(D): # i = 세준이 위치\n # 현재 위치에서 갈 수 있는 지름길이 있을 때\n while road and road[0][0] == i:\n x = road.popleft() # 지름길을 꺼내와서\n if x[1] > D: # 목적지를 넘어가면\n continue # 패스\n # 지름길의 끝에 거리 갱신\n dp[x[1]] = min(dp[i] + x[2], dp[x[1]])\n # print(i, x[1], x[2]) # 디버깅\n # print(dp) # 디버깅\n # 다음 칸의 거리는, \n # min(이전 칸에서 한 칸 이동한 거리, 지름길 타고 넘어온 거리)\n dp[i+1] = min(dp[i] + 1, dp[i+1])\n# print(dp) # 디버깅\n# 세준이는 지름길을 타고 이동하지는 않았지만\n# 일자로 이동하면서 파악한 지름길들을\n# 적절히 활용하여 목적지에 도착할 수 있는 최소거리를\nprint(dp[D]) # 출력" }, { "alpha_fraction": 0.45771142840385437, "alphanum_fraction": 0.4776119291782379, "avg_line_length": 11.5625, "blob_id": "6b44f1c26bc1694260a900740cec958cc533fbb0", "content_id": "d2294410e46b0bfc5e3ebff1936a7a471da62a61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 33, "num_lines": 16, "path": "/알고리즘/온라인저지/2021/08/0818/factorial.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\ndef factorial(n):\n if n == 1:\n return n\n else:\n return n * factorial(n-1)\n\nprint(factorial(10))\n\n\"\"\"\n재귀함수\ndef 재귀():\n if 기저:\n return \n else: 기저 아닐 때\n return \n\"\"\"" }, { "alpha_fraction": 0.5607843399047852, "alphanum_fraction": 0.5686274766921997, "avg_line_length": 18.69230842590332, "blob_id": "ea47adba7c599fa078efed07cc42df5d61a30cfd", "content_id": "3780edbfff855187e8c2ea17809f8031268ea7d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 36, "num_lines": 13, "path": "/알고리즘/온라인저지/2023/03/0310/터널의 입구와 출구.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nM = int(input())\nresult = M\ntunnel = M\nzero = False\nfor n in range(N):\n A, B = map(int, input().split())\n tunnel += A\n tunnel -= B\n result 
= max(result, tunnel)\n if tunnel<0:\n zero = True\nprint(0 if zero else result)" }, { "alpha_fraction": 0.3801652789115906, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 16.428571701049805, "blob_id": "93a89efe6cf24f975835fc33fbe557ddf3ac1963", "content_id": "80ebc00fe5083bddb4ad90b44e732aa68cc3d698", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 121, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/알고리즘/온라인저지/2021/08/0817/n자리m진수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "n = 3\nm = 5\nlist1 = [0] * n\nfor i in range(n-1, -1, -1):\n for j in range(m):\n list1[i] = j\n print(list1)" }, { "alpha_fraction": 0.4235033392906189, "alphanum_fraction": 0.47671839594841003, "avg_line_length": 18.65217399597168, "blob_id": "57de5c6c3ad49f19a73bd2362dc4941abfb2e509", "content_id": "37d2d331e7763b95045dcc0855bc3648e3777501", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 50, "num_lines": 23, "path": "/알고리즘/온라인저지/2023/04/0430/사격 내기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "targets = [2**i for i in range(10)]\nscores = [[0]*10 for _ in range(2)] # A, B\nA, B = map(int, input().split())\n\nfor i in range(9, -1, -1):\n target = targets[i]\n\n if A>=target:\n scores[0][i] = 1\n A -= target\n\n if B>=target:\n scores[1][i] = 1\n B -= target\n\nresult = 0\n\nfor i in range(10):\n C, D = scores[0][i], scores[1][i]\n if (C == 0 and D == 1) or (C == 1 and D == 0):\n result += 2**i\n\nprint(result)" }, { "alpha_fraction": 0.5214285850524902, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 22.5, "blob_id": "2c343fed58989b578dd151f5ffb06ffd9f26faa9", "content_id": "76c0e12ba87461bd5a2e8ebd3c608b00364a4f74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 140, "license_type": "no_license", "max_line_length": 50, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/04/0421/등차수열에서 항 번호 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a, d, k = map(int, input().split())\nresult = 1\nif not (not (k-a)%d and (k-a)//d>=0): result = 'X'\nelse:\n result += (k-a)//d\nprint(result)" }, { "alpha_fraction": 0.4129793643951416, "alphanum_fraction": 0.4410029351711273, "avg_line_length": 20.90322494506836, "blob_id": "6063291ea43f7d10fe605f7e76e63e8b23cbea94", "content_id": "da1c175bd9ba8bc0cfd99e8f2e72439d5ce592c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "no_license", "max_line_length": 69, "num_lines": 31, "path": "/알고리즘/온라인저지/2023/01/0114/소수의 연속합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def prime_list(size):\n sieve = [True]*(size+1)\n for i in range(2, int(size**0.5)+1):\n if sieve[i]:\n for j in range(i+i, size+1, i):\n sieve[j] = False\n return [i for i in range(2, size+1) if sieve[i]]\n\nN = int(input())\nif N == 1: \n print(0)\nelse:\n primes = prime_list(N)\n s, e, now = 0, 0, primes[0]\n max_len = len(primes)-1\n result = 0\n\n while 1:\n if (now < N and e == max_len) or (now > N and s == max_len): \n break\n\n if now == N: result += 1\n\n if now < N:\n e += 1\n now += primes[e]\n else:\n now -= primes[s]\n s += 1\n\n print(result)" }, { "alpha_fraction": 0.45562130212783813, "alphanum_fraction": 0.48520711064338684, "avg_line_length": 20.25, "blob_id": "d01f224b12ab11fe1bb6b01e606aa0c0a313fed9", "content_id": "faa4cb73ff00fe6efb934eb4118157c22a3558e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 169, "license_type": "no_license", "max_line_length": 51, "num_lines": 8, "path": "/알고리즘/[템플릿]/[SAMPLE]/요세푸스 점화식.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def josephus(N, K):\n if 
N == 1:\n return 1\n else:\n return ((josephus(N-1, K) + K - 1) % N) + 1\n\nN, K = map(int, input().split())\nprint(josephus(N, K))" }, { "alpha_fraction": 0.44131454825401306, "alphanum_fraction": 0.5164319276809692, "avg_line_length": 16.83333396911621, "blob_id": "77162f597988f1cd5c68f752ba3f351b923a1d35", "content_id": "3eb225596b2edab4c87d42f51e7fdb48a3b526c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 43, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/09/0925/피보나치는 지겨웡~.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nmod = int(1e9)+7\ndp = [0] * (N+30)\ndp[0], dp[1] = 1, 1\nif N>1:\n for i in range(2, N+1):\n dp[i] = (dp[i-1] + dp[i-2] + 1)%mod\nprint(dp[N])" }, { "alpha_fraction": 0.5226730108261108, "alphanum_fraction": 0.5680190920829773, "avg_line_length": 23.705883026123047, "blob_id": "c39f32e1a111a6514d83f81d806d3b3e6657b395", "content_id": "c40715d5e7a78b6aac50bd161fbbc600c031fa6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 419, "license_type": "no_license", "max_line_length": 69, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/10/1014/숫자 맞추기 게임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ni = 0\nwhile True:\n i += 1\n N = int(input())\n if N == 0: break\n result = [N]\n result.append(3*result[0])\n if not result[1]%2: result.append(result[1]//2)\n else: result.append((result[1]+1)//2)\n result.append(3*result[2])\n result.append(result[3]//9)\n odd_or_even = ['even', 'odd']\n print('{}. 
{} {}'.format(i, odd_or_even[result[1]%2], result[4]))" }, { "alpha_fraction": 0.5649546980857849, "alphanum_fraction": 0.5709969997406006, "avg_line_length": 22.714284896850586, "blob_id": "5a6d2af43eb0b99afe6aee4ba1acea1c10e42a76", "content_id": "311d1ed22c5fa7099f0ca8756e5984c4508bde93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 53, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/07/0729/회사에 있는 사람.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ncompany = dict()\nresult = []\nfor n in range(N):\n name, el = input().split() # name, enter or leave\n if el == 'enter':\n company[name] = 1\n else: # el == 'leave\n company[name] = 0\nfor c in company:\n if company[c]:\n result.append(c)\nresult.sort(reverse=True)\nfor r in result: print(r)" }, { "alpha_fraction": 0.4114285707473755, "alphanum_fraction": 0.4342857003211975, "avg_line_length": 16.600000381469727, "blob_id": "7e784ab2b441b4d2715de9d992295f0d9c209fe7", "content_id": "d265a542ec94b42237a4858ae33bfd461f72b59d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 175, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/알고리즘/온라인저지/2023/04/0416/골뱅이 찍기 - ㅁ.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfor _ in range(N):\n print('@'*5*N)\n\nfor _ in range(3):\n for _ in range(N):\n print('@'*N + ' '*N*3 + '@'*N)\n\nfor _ in range(N):\n print('@'*5*N)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5067567825317383, "avg_line_length": 11.416666984558105, "blob_id": "95e8fdb6d0c7e348dd586bf8641d859b05b01d99", "content_id": "26d4f7d2ab2731c99c92fbe6e9749ae0409e9744", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 35, 
"num_lines": 12, "path": "/알고리즘/온라인저지/2022/03/0304/X に最も近い値 (The Nearest Value).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "X, L, R = map(int, input().split())\n\nresult = 0\n\nif X < L:\n result = L\nelif L <= X < R:\n result = X\nelif X >= R:\n result = R\n\nprint(result)" }, { "alpha_fraction": 0.5120290517807007, "alphanum_fraction": 0.5265547037124634, "avg_line_length": 30.042253494262695, "blob_id": "e8ca497a22fb1acf8a146a8d32c6b0d1c9532e69", "content_id": "2614ee9a9dc9c1182908090ab43282e5d07b38a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3597, "license_type": "no_license", "max_line_length": 72, "num_lines": 71, "path": "/알고리즘/온라인저지/2021/10/1003/연구소.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# 모든 0에 벽을 세 개 세워보고 가장 안전영역을 많이 확보가능한게 정답\nimport copy # 2차원 배열 복사를 위한 import copy, deepcopy를 사용함\nfrom collections import deque # bfs로 바이러스를 퍼뜨릴 때 사용할 deque\n\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\n\nres = 0 # 결과값 초기화\n\ndef virus(): # 바이러스가 퍼진다!\n global res # 함수 밖에 있는 전역변수 res를 사용\n\n tmp = copy.deepcopy(arr) # 입력받은 배열 arr을 완전히 복제할 것\n # 벽을 세우고 허물고 할건데\n # 매번 세우는 벽의 위치가 바뀌는데 원본 배열을 들고 수정하기보다\n # 배열을 복사해서 백업을 따놓고 \n # \"여기여기 벽을 세운거로는 바이러스를 못막는구나\"는 결과가 확인이 되면\n # 해당 복사는 지우고 다시 원본 배열을 복사해서 또 벽을 세우고\n # 그 벽에 바이러스 퍼뜨려보고를 반복\n # 그런데 2차원 배열을 복사해야 하기 때문에 deepcopy를 사용함\n\n for i in range(N): # 바이러스 시작점을\n for j in range(M): # 찾자\n if tmp[i][j] == 2: # 해당 칸이 바이러스면\n q.append([i, j]) # 이동할 칸에 추가\n # 배열의 처음 바이러스 위치를 q에 담음\n # 이후 아래 while문을 반복하면서 바이러스를 퍼뜨림\n \n while q: # 바이러스가 퍼질 곳이 남아있다면\n x, y = q.popleft() # 맨 앞에 좌표부터 꺼내서\n for i in range(4): # 상하좌우 전부 확인\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < N and 0 <= ny < M: # 연구소를 벗어나지 않는 범위에서\n if tmp[nx][ny] == 0: # 바이러스를 퍼뜨릴 수 있으면\n tmp[nx][ny] = 2 # 받아라 바이러스!\n q.append([nx,ny]) # 해당 좌표의 상하좌우에 또 바이러스가 퍼지도록 q에 담기\n\n cnt = 0 # 지금 세워진 벽에 대하여 바이러스가 얼마나 퍼졌는지 확인하는 변수 초기화\n \n for t in tmp:\n cnt += 
t.count(0) # 연구소 해당 줄에서 확인된 안전구역의 수\n \n res = max(res, cnt) # 전역변수 res와 비교하여 지금 세워진 벽에서의 안전구역이 더 많은지 비교하고 저장\n # if cnt > res: 와 같은 문장을 max()를 사용하였음\n\ndef wall(x):\n if x == 3: # 벽을 세 개 세웠다면\n virus() # 바이러스를 퍼뜨려보자!\n return # 바이러스를 다 퍼뜨리고 해당 재귀함수 종료, 다시 다른 벽을 세워보자\n \n for i in range(N):\n for j in range(M):\n if arr[i][j] == 0: # 벽을 세울 수 있으면\n arr[i][j] = 1 # 벽을 세우고\n wall(x+1) # 다음 벽을 세우자\n arr[i][j] = 0 # 다른 위치에 벽을 세울 수 있도록 위에서 세웠던 벽을 허물자\n\n\nN, M = map(int, input().split()) # 연구소의 가로와 세로 크기\n\narr = [list(map(int, input().split())) for _ in range(N)] # 연구소 상황\n\nq = deque() # 바이러스를 퍼뜨릴 q\n\nwall(0) # 벽 0개, 벽 1개, 벽 2개, 벽 3개, 그 시작은 0개\n\nprint(res) # 3개의 벽을 세운 각각의 경우에 대하여 얻은 안전구역의 수가 \n# 기존에 얻은 안전구역의 수 보다 클 때마다 갱신해주어서\n# 결국 벽을 세우는 모든 경우의 수에서 퍼뜨려 본 바이러스에 대해 확인된 안전구역의 최대 수가 담겨있는\n# res를 출력" }, { "alpha_fraction": 0.3029460906982422, "alphanum_fraction": 0.40744858980178833, "avg_line_length": 28.508195877075195, "blob_id": "d23b424b695ddca33848acb446292289d6eceafa", "content_id": "77dc69334f426303535d90ba79f99570fa121ac1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1871, "license_type": "no_license", "max_line_length": 72, "num_lines": 61, "path": "/알고리즘/SW역량테스트/2021.10.19 A형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(1, T+1):\n N = int(input())\n arr = list(map(int, input().split()))\n \n result = []\n\t# 제곱까지 갈 필요없다 \n # A + B + C + D 가 가장 크면 결과값이 가장 클거다\n # 결과값만 (A+B)**2 + (C+D)**2 를 출력해주면 된다\n \n stack = []\n tmp = 0\n \n visited = [0] * N\n for i in range(N-6):\n stack.append(arr[i])\n visited[i-1], visited[i], visited[i+1] = 1, 1, 1\n for j in range(i, N-4):\n if visited[j] == 0:\n stack.append(arr[j])\n visited[j-1], visited[j], visited[j+1] = 1, 1, 1\n for k in range(j, N-2):\n if visited[k] == 0:\n stack.append(arr[k])\n visited[k-1], visited[k], visited[k+1] = 1, 1, 1\n for l in range(k, N):\n if visited[l] == 0:\n 
stack.append(arr[l])\n if sum(stack) > tmp:\n tmp = sum(stack)\n result = [s for s in stack]\n stack.pop()\n visited[k-1], visited[k], visited[k+1] = 0, 0, 0\n stack.pop()\n visited[j-1], visited[j], visited[j+1] = 0, 0, 0\n stack.pop()\n visited[i-1], visited[i], visited[i+1] = 0, 0, 0\n stack.pop()\n A = result[0]\n B = result[1]\n C = result[2]\n D = result[3]\n ans1 = (A+B)**2 + (C+D)**2\n ans2 = (A+D)**2 + (B+C)**2\n print('#{} {}'.format(t, (max(ans1, ans2))))\n\n\"\"\"\ninput↓\n5\n10\n80 90 65 55 90 60 40 35 30 25\n8\n30 25 70 55 95 75 90 20\n10\n60 85 45 25 15 70 55 70 85 35\n15\n80 30 35 95 45 85 30 25 100 85 10 60 80 30 5\n20\n45 30 5 85 55 85 10 5 75 60 15 65 45 50 75 80 15 10 50 90\n\"\"\"" }, { "alpha_fraction": 0.5087719559669495, "alphanum_fraction": 0.5131579041481018, "avg_line_length": 18.08333396911621, "blob_id": "428207c8cf1ee70c7ee238b38e71097991566a36", "content_id": "3495bb730745928ad09ef6a3a88ee66ca990c0d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/09/0927/LCM.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef gcd(a, b):\n while a%b: a, b = b, a%b\n return b\n\nfor _ in [0]*int(input().rstrip()):\n a, b = map(int, input().rstrip().split())\n G = gcd(a, b)\n print(G * (a//G) * (b//G))" }, { "alpha_fraction": 0.35199999809265137, "alphanum_fraction": 0.3733333349227905, "avg_line_length": 28.431371688842773, "blob_id": "b466e0cfb7ad746b112160b5d36d5d0e19ee6ea8", "content_id": "6aa886b707e82f1b1194d8f5819883bcec187dad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "no_license", "max_line_length": 71, "num_lines": 51, "path": "/알고리즘/온라인저지/2021/10/1016/섬의 개수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": 
"from collections import deque\n\ndx = [-1, 1, 0, 0, -1, 1, -1, 1] # 8방향\ndy = [0, 0, -1, 1, -1, -1, 1, 1]\n\nwhile True: # 0 0 을 받을때까지\n w, h = map(int, input().split())\n if w+h == 0: # 종료조건\n break\n\n arr = [] # 섬과 육지\n\n for i in range(h):\n arr.append(list(map(int, input().split())))\n \n q = deque()\n cnt = 0 # 섬의 개수 초기화\n\n # 배열 전체를 확인하면서\n # 육지가 나오면, 그 육지를 바다로 바꿔버리고\n # 주변 8칸에 또 육지가 있는지 즉, 이어져 있는 섬인지 확인할 것\n for i in range(h):\n for j in range(w): # 배열을 돌면서\n if arr[i][j]: # 육지를 찾았다?\n q.append((j, i)) # 탐색할 육지에 추가\n \n while q: # 탐색할 육지가 있을 때\n tmp = q.popleft() # 육지 좌표\n a = tmp[0]\n b = tmp[1]\n\n if arr[b][a] == 0: # 가지치기 조건\n # 탐색하려던 육지좌표였지만\n # 다른 육지를 탐색하던 중 바다로 만들었다면\n continue\n\n arr[b][a] = 0 # 해당 육지를 바다로 만들어버리자\n\n for k in range(8): # 8방향 BFS\n nx = a+dx[k]\n ny = b+dy[k]\n\n if 0 <= nx < w and 0 <= ny < h and arr[ny][nx]:\n # 범위를 벗어나지 않고, 8방향중 하나로 이동할 수 있는 육지면\n # 즉 이어져 있는 섬이면\n q.append((nx, ny)) # 탐색할 육지에 추가\n \n cnt += 1 # while문을 통해 섬을 전부 바다로 만들었다\n # 해당 섬은 사라졌고, 우리는 섬이 하나 존재하였음을 확인할 수 있음\n\n print(cnt) # 확인된 섬의 개수" }, { "alpha_fraction": 0.4591836631298065, "alphanum_fraction": 0.5102040767669678, "avg_line_length": 16.909090042114258, "blob_id": "cd5f995fb6ac426c5ddf43031f89092e2d7a6112", "content_id": "2db238478f8b0dcd8f134b8fca176992f7a2013d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 42, "num_lines": 11, "path": "/알고리즘/온라인저지/2021/08/0818/피보나치 DP.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def fibo(n):\n table[0] = 0\n table[1] = 1\n for i in range(2, n+1):\n table[i] = table[i-1] + table[i-2]\n return table[n]\n\nn = int(input())\ntable = [0] * (n+1)\nfibo(n)\nprint(table)" }, { "alpha_fraction": 0.48480844497680664, "alphanum_fraction": 0.5178335309028625, "avg_line_length": 38.894737243652344, "blob_id": "5a0c286545912e91c0322d1a679639448076947d", "content_id": 
"b791b4f7e42266ff0e62b35b55e2ade764f34e44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1061, "license_type": "no_license", "max_line_length": 82, "num_lines": 19, "path": "/알고리즘/온라인저지/2023/02/0218/동전 바꿔주기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nk = int(input())\n\ncoin = [(0, 0)] + [tuple(map(int, input().split())) for _ in range(k)]\n\ndp = [[0 for _ in range(T+1)] for _ in range(k+1)] # 가로 세로 첫 줄은 0원\ndp[0][0] = 1 # 0원을 만들 수 있는 경우는 1\n\nfor i in range(1, k+1): # 1번 동전, 2번 동전, i번 동전, ..., k번 동전\n val, cnt = coin[i] # 동전 가치, 동전 개수\n for cost in range(T+1): # 0원부터 T원까지, i번 동전을 사용하여 만들 수 있는 지 확인\n dp[i][cost] = dp[i-1][cost] # 앞서 i-1번 동전을 사용한 경우를 미리 불러오기\n for c in range(1, cnt+1): # val원의 동전 1개, 2개, c개, ..., cnt개\n if cost-c*val >= 0: # dp갱신 최소 조건\n dp[i][cost] += dp[i-1][cost-c*val] # i번 동전을 c개 사용하여 갱신할 수 있는 지점 확인\n else: # cost원을 i번 동전을 사용하여 갱신할 수 없을 경우에\n break\n\nprint(dp[k][T]) # k개 종류의 동전을 사용하여 T원을 만들 수 있는 경우 출력" }, { "alpha_fraction": 0.4470588266849518, "alphanum_fraction": 0.5235294103622437, "avg_line_length": 21.217391967773438, "blob_id": "9c60d8546374beebadf583a3b641b66324185435", "content_id": "8ed6bb7e1450dc849d5a2e1c201cbd62754ab930", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 510, "license_type": "no_license", "max_line_length": 55, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/09/0930/투명.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef cover(x1, y1, x2, y2):\n for i in range(y1, y2):\n for j in range(x1, x2):\n arr[i][j] += 1\n\ndef check():\n result = 0\n for i in range(100):\n for j in range(100):\n if arr[i][j]>M: result += 1\n return result\n\nN, M = map(int, input().rstrip().split())\narr = [[0]*100 for _ in [0]*100]\nfor n in range(N):\n x1, y1, x2, y2 = map(int, input().rstrip().split())\n x1, y1 
= x1-1, y1-1\n cover(x1, y1, x2, y2)\nprint(check())" }, { "alpha_fraction": 0.4022066295146942, "alphanum_fraction": 0.4403209686279297, "avg_line_length": 16.821428298950195, "blob_id": "56de27a6c50eb6ff30fda4a2d7e790024a6ead59", "content_id": "7745686dc6aacf67012a4de2fe9e9ef67a12b7f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 997, "license_type": "no_license", "max_line_length": 59, "num_lines": 56, "path": "/알고리즘/온라인저지/2023/03/0326/소문난 칠공주.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\n\ndef check(arr):\n visited = [[1]*5 for _ in range(5)]\n\n for a in arr:\n y, x = a\n visited[y][x] = 0\n \n Q = deque([(arr[0])])\n y, x = arr[0]\n visited[y][x] = 1\n cnt = 1\n\n while Q:\n y, x = Q.popleft()\n\n for k in range(4):\n ny, nx = y+dy[k], x+dx[k]\n\n if 0<=ny<5 and 0<=nx<5 and not visited[ny][nx]:\n visited[ny][nx] = 1\n cnt += 1\n Q.append((ny, nx))\n\n return True if cnt == 7 else False\n\n\ndef DFS(depth, idx, YDY_cnt):\n global result\n\n if YDY_cnt>=4:\n return\n \n if depth == 7:\n if check(arr):\n result += 1\n return\n \n for i in range(idx, 25):\n y, x = i//5, i%5\n arr.append((y, x))\n DFS(depth+1, i+1, YDY_cnt+(students[y][x]=='Y'))\n arr.pop()\n\n\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\nstudents = [input() for _ in range(5)]\nresult = 0\narr = []\n\nDFS(0, 0, 0)\n\nprint(result)" }, { "alpha_fraction": 0.3855421543121338, "alphanum_fraction": 0.42168673872947693, "avg_line_length": 12.833333015441895, "blob_id": "a0287260e483bfcfd22abc5f83b6f18ec381a343", "content_id": "a4c4f1302cda1fe46e86647f3171462bca1edc1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 47, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/01/0131/윷놀이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "status = {\n 3: 'A',\n 2: 
'B',\n 1: 'C',\n 0: 'D',\n 4: 'E',\n}\n\nfor t in range(3):\n yoot = sum(list(map(int, input().split())))\n\n print(status[yoot])\n" }, { "alpha_fraction": 0.5993788838386536, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 23.846153259277344, "blob_id": "b4dd59ad31b139590cd5fe8a186685fc05fd712d", "content_id": "339424802e35ba25baad225d69b201bb73ac3c61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 46, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/09/0927/생일.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nstudents = []\nfor _ in [0]*int(input().rstrip()):\n student = input().rstrip().split()\n name = [student[0]]\n birthday = list(map(int, student[1:]))\n students.append(name+birthday)\nstudents.sort(key=lambda x:(x[3], x[2], x[1]))\nprint(students[-1][0])\nprint(students[0][0])" }, { "alpha_fraction": 0.5993788838386536, "alphanum_fraction": 0.6055900454521179, "avg_line_length": 22, "blob_id": "f7f9719db4d0990842179ad982a603f13b5e2fbb", "content_id": "4edfda9f41d66242116e5d1dc7f971f1865d86c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 500, "license_type": "no_license", "max_line_length": 53, "num_lines": 14, "path": "/알고리즘/온라인저지/2021/08/0806/최댓값.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n숫자를 다 받아서\n리스트에 넣고\n최댓값 찾아서\n리스트 순회하면서\n최댓값과 일치하는 요소의 인덱스(길이)\n\"\"\"\nnumbers = [] # 입력받을 리스트 초기화\nfor i in range(9):\n numbers.append(int(input())) # Enter단위로 값들 입력받기\nfor i in range(len(numbers)): # 리스트를 순회\n if numbers[i] == max(numbers): # 최댓값과 순회중인 값이 같으면\n print(numbers[i]) # 값 출력\n print(i + 1) # 인덱스 출력\n" }, { "alpha_fraction": 0.5245901346206665, "alphanum_fraction": 0.5450819730758667, "avg_line_length": 21.272727966308594, "blob_id": 
"013500daa75a28d9df9280f425be067085431846", "content_id": "3aa32378aa3f76ba13012d6bd642be2c152c4c3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "no_license", "max_line_length": 49, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/09/0927/UCPC는 무엇의 약자일까.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nUCPC = ('U', 'C', 'P', 'C')\nsen = input().rstrip()\ni, j = 0, 0 # sen, UCPC\nfor i in range(len(sen)):\n if j == 4: break\n if sen[i] == UCPC[j]: j += 1\nprint('I love UCPC' if j == 4 else 'I hate UCPC')" }, { "alpha_fraction": 0.5296167135238647, "alphanum_fraction": 0.5644599199295044, "avg_line_length": 18.16666603088379, "blob_id": "630ea95461ddb7917c077c3276b5a3275079e936", "content_id": "1308689d224d9e0af4c833909361ba79370d6145", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 842, "license_type": "no_license", "max_line_length": 39, "num_lines": 30, "path": "/알고리즘/온라인저지/2022/08/0809/IOIOI.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input())\nM = int(input())\nS = input().rstrip()\nresult = 0\nlst = []\nfor i in range(len(S)):\n if S[i] == 'I': lst.append(i)\ncnt = 0\nfor i in range(len(lst)-1):\n if lst[i+1] - lst[i] == 2: cnt += 1\n else: cnt = 0\n if cnt >= N: result += 1\nprint(result)\n\n\"\"\"\n시간 : 1시간 초과, 정답 풀이 확인\n풀이\n 제한 시간 1초, 3<=M<=1,000,000 이므로\n O(n) 이하로 코드 동작을 마쳐야 한다\n 전체 문자열을 한 번만 탐색한다\n I의 인덱스만 모은 리스트를 생성하여\n 리스트에서 두개씩 원소를 비교할 때\n 앞 원소와 뒷 원소의 차가 2이면 'IOI'이다\n 해당 패턴이 N번 이상일 경우 문제에서 원하는 정답이며\n 패턴의 연속됨이 끊어질 시, 카운트를 0으로 돌린다\n\"\"\"" }, { "alpha_fraction": 0.416879802942276, "alphanum_fraction": 0.4578005075454712, "avg_line_length": 36.28571319580078, "blob_id": "b3fc416b28ab2adb3f6bf3f2277d199c06b7ef21", "content_id": "ee44b5ac630e1c264d7efc125e6f2af612d45326", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1112, "license_type": "no_license", "max_line_length": 62, "num_lines": 21, "path": "/알고리즘/온라인저지/2022/07/0705/어린 왕자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for t in range(int(input())):\n x1, y1, x2, y2 = map(int, input().split())\n result = 0\n for n in range(int(input())): # 각 행성을 입력\n cx, cy, cr = map(int, input().split())\n d1 = (x1-cx)**2 + (y1-cy)**2 # 출발점과 행성 중심간의 거리\n d2 = (x2-cx)**2 + (y2-cy)**2 # 도착점과 행성 중심간의 거리\n d3 = cr**2 # 행성 반지름의 제곱\n if d3 > d1 and d3 > d2: # 같은 행성 안에 있으면\n pass # 행성 진입/이탈할 필요 없음\n \"\"\"\n 위의 if문을 거치면서 아래 세가지 경우만 남게됨\n 1. 출발점 도착점 다 행성 밖에 있는 경우, 진입/이탈 없이 행성 사이를 움직일 수 있음\n 2. 출발점은 행성 안에, 도착점은 행성 밖에 있는 경우, 진입/이탈이 필요함\n 3. 출발점은 행성 밖에, 도착점은 행성 안에 있는 경우, 진입/이탈이 필요함\n \"\"\"\n elif d3 > d1: # Case 2\n result += 1\n elif d3 > d2: # Case 3\n result += 1\n print(result)" }, { "alpha_fraction": 0.5817825794219971, "alphanum_fraction": 0.5905974507331848, "avg_line_length": 35.5, "blob_id": "eea6199cacd1f5b860638b05aa8f49d4d3ecd82e", "content_id": "216f7156a462c7bf4272d7c619f483ba8141feb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1413, "license_type": "no_license", "max_line_length": 82, "num_lines": 28, "path": "/알고리즘/온라인저지/2022/08/0828/최단경로.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nimport heapq # 탐색 시 가장 작은 값을 탐색하기 위함\n\ninput = sys.stdin.readline\nINF = int(1e9) # 가상의 최대값\n\ndef dijkstra(start):\n distance[start] = 0 # 시작점에서 시작점으로 가는 최단 경로값은 0\n heapq.heappush(Q, (0, start))\n while Q:\n dist, now = heapq.heappop(Q) # 가장 작은 값을 heappop으로 꺼내기\n for next, weight in data[now]: # 현재 노드에서 갈 수 있는 노드와 가중치들을 순회하면서\n cost = dist + weight # 경로값은 들고 있는 경로값 + 가중치\n if cost < distance[next]: # 더 작은 경로값이 발견되면\n distance[next] = cost # 최소 경로값 갱신\n heapq.heappush(Q, (cost, next)) # 해당 경로와 다음 노드를 heappush로 가장 
앞에 추가\n\nV,E = map(int, input().split())\nstart = int(input())\ndata = [[] for _ in range(V+1)]\nfor _ in range(E):\n u, v, w = map(int, input().split())\n data[u].append((v, w)) # 노드 u에서 갈 수 있는 노드v와 그 때의 가중치 w\ndistance = [INF]*(V+1) # 가상의 최대 경로값 배열\nQ = [] # heapq의 메소드만 사용함, deque와 다르게, 일반적인 리스트 선언\ndijkstra(start) # start부터 출발\nfor d in distance[1:]: # 0번은 인덱스 맞추기용\n print(d if d != INF else 'INF') # 값이 있으면 해당 경로값, 없으면 'INF'" }, { "alpha_fraction": 0.27731093764305115, "alphanum_fraction": 0.45798319578170776, "avg_line_length": 6.645161151885986, "blob_id": "76f37429951da3fc3b96b574ff6f9dec15b24f6a", "content_id": "0515a00605c5d4b36b1eddd4cc8c18c103990e6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 36, "num_lines": 31, "path": "/알고리즘/온라인저지/2022/02/0220/BABBA.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n0\n1\n10\n101\n10110\n\n1 1\n1 2\n2 3\n3 4\n5 5\n8 6\n13 7\n21 8\n34 9\n\n\"\"\"\n\ndp = [0, 1, 1]\n\nK = int(input())\n\n\nif K <= 2:\n print(dp[K-1], dp[K])\nelse:\n for i in range(3, K+1):\n dp.append(dp[i-1] + dp[i-2])\n\n print(dp[K-1], dp[K])\n\n" }, { "alpha_fraction": 0.6800000071525574, "alphanum_fraction": 0.6800000071525574, "avg_line_length": 20.571428298950195, "blob_id": "a445ac1655a5ab010de8db283caa34f5ef8845ca", "content_id": "bd9fdd21d4147459afb08a274b3d3c39fe8fdd96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 150, "license_type": "no_license", "max_line_length": 42, "num_lines": 7, "path": "/알고리즘/온라인저지/2021/09/0906/약수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\ncnt = input()\nmeasures = list(map(int, input().split()))\nprint(max(measures) * min(measures))" }, { "alpha_fraction": 0.44408944249153137, "alphanum_fraction": 0.4792332351207733, 
"avg_line_length": 21.39285659790039, "blob_id": "da3f588cc31ef7876c18d65114326d7ece2bf304", "content_id": "c8c2855bd3d46d54488879aaecdf4e7f51fcfa1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "no_license", "max_line_length": 52, "num_lines": 28, "path": "/알고리즘/온라인저지/2022/09/0923/달팽이2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\ndy, dx = [0, 1, 0, -1], [1, 0, -1, 0] # 우 하 좌 상\n\ndef check_goal(y, x):\n flag = True\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<N and 0<=nx<M and not arr[ny][nx]: \n flag = False\n return flag\n\nN, M = map(int, input().rstrip().split())\nresult = 0\narr = [[0]*M for _ in range(N)]\ny, x = 0, 0\ni = 0\nwhile True:\n if check_goal(y, x): break\n arr[y][x] = 1\n ny, nx = y+dy[i], x+dx[i]\n if not (0<=ny<N and 0<=nx<M) or arr[ny][nx]:\n i = (i+1)%4\n ny, nx = y+dy[i], x+dx[i]\n result += 1\n y, x = ny, nx\nprint(result)" }, { "alpha_fraction": 0.487458199262619, "alphanum_fraction": 0.5367892980575562, "avg_line_length": 25.600000381469727, "blob_id": "d93a2c9ef1eae9a91d35a61c425c3d58469dd79f", "content_id": "cb2abe94c08ea03fad894c28e015757c199ea403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1764, "license_type": "no_license", "max_line_length": 60, "num_lines": 45, "path": "/알고리즘/온라인저지/2022/08/0816/외계인의 기타 연주.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, P = map(int, input().split()) # 놀랍게도 프렛은 영어도 Fret이다\nstack, result = [[] for _ in range(7)], 0 # 기타줄은 여섯줄 인덱스 맞추기\nfor n in range(N):\n S, F = map(int, input().split()) # string, fret\n if not stack[S]:\n stack[S].append(F)\n result += 1\n else: # stack[S]\n if F > stack[S][-1]:\n stack[S].append(F)\n result += 1\n elif F < stack[S][-1]:\n # 같거나 더 낮은 프렛을 만날 때 까지 pop\n while 
stack[S] and F < stack[S][-1]:\n stack[S].pop()\n result += 1\n # 누른 손을 모두 떼거나, 더 낮은 프렛을 만났을 경우\n if not stack[S] or stack[S][-1] < F:\n stack[S].append(F)\n result += 1\n else: # F == stack[-1]\n pass\nprint(result)\n\n\"\"\"\n예제 1번처럼 8 10 12 10 5 를 연주할 때\n실제로 외계인처럼 누른다면\n현실에선 5 8 10 12 -12 -10 -8 순으로 눌렀다 떼며 답이 7이 된다\n하지만 입력 순서상 5는 맨 뒤에 있으므로\n8을 입력받는 시점에서 5를 동시에 눌러도 되는지 알 방법이 없다\n때문에 컴퓨터적으로 사고하여, 마지막에 5를 추가하여도\n앞에서 눌렀던 셈 치는 코드가 필요하다\n위 코드는 아래 순서로 스택에 들어간다\n<8 10 12 -12 -10 -8 5>\n\n그리고 P의 범위가 300,000 이면서, 시간이 1초이므로\n50만줄의 입력을 받으면서 30만줄의 for문을 돌며\n다음 프렛을 찾아 인덱스를 1씩 다르게 하면서 탐색하면\n아마 시간초과가 날 것이다\n어떻게든, 스택에 넣은 값 자체를 특정할 방법이 필요하다\n\"\"\"" }, { "alpha_fraction": 0.5530864000320435, "alphanum_fraction": 0.5679012537002563, "avg_line_length": 27.928571701049805, "blob_id": "e6a730bc96ba79ef86cdf08137bed6de339a839e", "content_id": "56f0f9ed81e97092fc3e4de814dd163fef478223", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 599, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/07/0701/국영수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "arr = [] # 성적표 배열\nfor n in range(int(input())):\n tmp = input().split() # 성적을 입력받아서\n for i in range(4): # 성적을 int형으로 변환\n try: tmp[i] = int(tmp[i])\n except: pass\n arr.append(tmp) # 성적표에 추가\n\"\"\"\nlambda에서 정렬 자체를 역순서로 할 수는 없지만\n숫자의 경우 값을 음수(-)로 만들어주어 역정렬을 할 수 있음\n\"\"\"\narr.sort(key=lambda x:(-x[1], x[2], -x[3], x[0]))\n# 국어는 내림차순, 영어는 오름차순, 수학은 내림차순, 이름은 오름차순\nfor a in arr: print(a[0]) # 출력\n" }, { "alpha_fraction": 0.4117647111415863, "alphanum_fraction": 0.4313725531101227, "avg_line_length": 11.875, "blob_id": "27953f7bacf33bfd49c55f8cd077f9bc44426fbe", "content_id": "2d7b633b75247fd53bbc973b58aba81054a78612", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": 
"/알고리즘/온라인저지/2022/04/0420/Do Not Touch Anything.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "R, C, N = map(int, input().split())\nA = R//N\nB = C//N\nif R%N:\n A += 1\nif C%N:\n B += 1\nprint(A*B)" }, { "alpha_fraction": 0.4895104765892029, "alphanum_fraction": 0.503496527671814, "avg_line_length": 10.076923370361328, "blob_id": "ff6702f9bd9c541e47518944a4afe8f5d30baa52", "content_id": "56e7f3aaedc20022e5747e268b7c4a49c5eb81f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/02/0205/개수 세기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nnums = list(map(int, input().split()))\n\nv = int(input())\n\ncnt = 0\n\nfor n in nums:\n if n == v:\n cnt += 1\n\nprint(cnt)" }, { "alpha_fraction": 0.4139534831047058, "alphanum_fraction": 0.43255814909935, "avg_line_length": 10.368420600891113, "blob_id": "203df9bd5c722c2a01dcf2c1ba965b08869761df", "content_id": "8306c95e2519b1c9d869cd48f2b8c2440baebc3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 23, "num_lines": 19, "path": "/알고리즘/온라인저지/2023/04/0429/골뱅이 찍기 - ㅂ.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = '', ''\nN = int(input())\nB = '@'*N*5\nA = '@'*N+' '*N*3+'@'*N\nfor _ in range(N*2):\n print(A)\nfor _ in range(N):\n print(B)\nfor _ in range(N):\n print(A)\nfor _ in range(N):\n print(B)\n\n\"\"\"\nN*2\nN\nN\nN\n\"\"\"" }, { "alpha_fraction": 0.5045045018196106, "alphanum_fraction": 0.5180180072784424, "avg_line_length": 19.272727966308594, "blob_id": "7259ab5a0266e03c755b7ba4e385b478d1ea84ae", "content_id": "658cfca99f77129d49ca9c5aefbd719ecf0bad2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 268, "license_type": "no_license", "max_line_length": 41, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/05/0515/9진수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def solution(n, q):\n rev_base = ''\n\n while n > 0:\n n, mod = divmod(n, q)\n rev_base += str(mod)\n\n return rev_base[::-1] \n # 역순인 진수를 뒤집어 줘야 원래 변환 하고자하는 base가 출력\n \nprint(solution(int(input()), 9))" }, { "alpha_fraction": 0.5092024803161621, "alphanum_fraction": 0.5153374075889587, "avg_line_length": 19.5, "blob_id": "f507a01a3e564b4ecadfa739fe964a692341cf00", "content_id": "e250fb777eb523ecd0fbdda35b2fd2a87aca0a4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/07/0718/스네이크버드.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, L = map(int, input().split())\nF = list(map(int, input().split())) # fruits\nF.sort()\n# print(F)\nfor i in range(len(F)):\n if L >= F[i]:\n L += 1\nprint(L)" }, { "alpha_fraction": 0.3952000141143799, "alphanum_fraction": 0.4607999920845032, "avg_line_length": 23.076923370361328, "blob_id": "e64a9cf0bad1968308abb2d0c09ae1669ed3578a", "content_id": "35432e00413a7350e379792f62638331312a493a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 839, "license_type": "no_license", "max_line_length": 44, "num_lines": 26, "path": "/알고리즘/온라인저지/2022/07/0701/다음 순열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nM = list(map(int, input().split()))\nfind = False\n\"\"\"\nex) 1 2 3 5 4\n1. 뒤에서부터 숫자 두개씩 비교하다가\n2. 뒤에 숫자(5)가 앞에 숫자(3)보다 클 때\n3. 다시 맨 뒤에서부터 시작하여\n4. 2에서 비교하여 더 작았던 앞에 숫자(3)보다 큰 수(4)를 찾으면\n5. 둘(3, 4)의 자리를 바꿔줌 (1 2 3 5 4 -> 1 2 4 5 3)\n6. 찾았던 앞의 숫자보다 큰 수(4)의 뒤쪽을 오름차순 정렬\n7. 
앞부분과 정렬된 뒷부분을 붙여서 출력\n\"\"\"\nfor i in range(N-1, 0, -1):\n if M[i-1] < M[i]:\n for j in range(N-1, 0, -1):\n if M[i-1] < M[j]:\n M[i-1], M[j] = M[j], M[i-1]\n M = M[:i] + sorted(M[i:])\n find = True\n break\n if find:\n print(*M)\n break\nif not find:\n print(-1)" }, { "alpha_fraction": 0.4789915978908539, "alphanum_fraction": 0.4789915978908539, "avg_line_length": 17.384614944458008, "blob_id": "8074689f4ae5b2edcb3a8d64bf2847a50000c767", "content_id": "eea30838651b793d58bd3eeddf644a2fe5c1d848", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 29, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/07/0722/問題 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nC = dict() # change\nfor n in range(N):\n a, b = input().split()\n C[a] = b\n# print(C)\nM = int(input())\nfor m in range(M):\n tmp = input()\n try:\n print(C[tmp], end='')\n except:\n print(tmp, end='')" }, { "alpha_fraction": 0.5716292262077332, "alphanum_fraction": 0.608146071434021, "avg_line_length": 34.650001525878906, "blob_id": "bb0629795a0b51670cc09952b4bdf91ce479881f", "content_id": "5ce30a4eea87a21da6c806f5af0ccf381931c804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 946, "license_type": "no_license", "max_line_length": 107, "num_lines": 20, "path": "/알고리즘/[템플릿]/BFS/돌다리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\nA, B, N, M = map(int, input().split())\nQ = deque() # BFS\nQ.append((N, 0)) # 시작점, 초기 이동횟수\nresult = 0\nvisited = [0] * 100001 # 돌다리 방문배열\nwhile Q: # 항상 도달할 수 있는 경우만 주어짐\n now, move = Q.popleft() # 현재 위치, 현재까지 이동한 횟수\n if visited[now]: continue # 방문한 적이 있는 돌다리면 continue\n visited[now] = 1 # 현재 돌다리를 방문함\n if now == M: # 목적지에 도착했을 때\n result = move # result = 가지고 있는 이동횟수\n break # while문 종료\n nexts = [now-1, now+1, now-A, 
now+A, now-B, now+B, now*A, now*B] # 6가지 경우의 다음 이동 돌다리\n for next in nexts:\n if 0<=next<=100000 and not visited[next]: Q.append((next, move+1)) # 돌다리 범위를 만족하며, 방문한 적 없는 돌다리 일 때\nprint(result)\n\n# https://www.acmicpc.net/problem/12761" }, { "alpha_fraction": 0.27020201086997986, "alphanum_fraction": 0.30050504207611084, "avg_line_length": 13.107142448425293, "blob_id": "8a2b493d0d8f18f56bcced407938e46ec7a2cba7", "content_id": "166b67762360623e07b0c1664dce0d533c46d062", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "no_license", "max_line_length": 28, "num_lines": 28, "path": "/알고리즘/온라인저지/2022/01/0131/분해합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nif N == 1:\n print(0)\nelse:\n t = 0\n if N >= 100:\n t = len(str(N)) * 10\n M = N - t\n else:\n M = 1\n\n while True:\n M += 1\n\n a = 0\n\n for i in str(M):\n a += int(i)\n \n temp = M + a\n\n if temp == N:\n print(M)\n break\n elif M == N:\n print(0)\n break\n\n" }, { "alpha_fraction": 0.5116279125213623, "alphanum_fraction": 0.5193798542022705, "avg_line_length": 25, "blob_id": "92f1ea5be288fb165f1d2a6d7dc4bca694bf866d", "content_id": "63ed72d7ba78b8d3e8a54ca0904235bc0eff6e66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/08/0811/영수증.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "X = int(input())\nfor n in range(int(input())):\n A, B = map(int, input().split())\n X -= A*B\nprint('Yes' if X == 0 else 'No')" }, { "alpha_fraction": 0.5787500143051147, "alphanum_fraction": 0.5912500023841858, "avg_line_length": 21.25, "blob_id": "f6d7040d8c7b10f5f2be29e13d4a4fb8b2ee7688", "content_id": "ba1afa8c9b5126ad93a7418a7417249cc777098b", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1318, "license_type": "no_license", "max_line_length": 49, "num_lines": 36, "path": "/알고리즘/온라인저지/2022/10/1021/퇴사.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nT, P = [], []\nfor _ in range(N):\n a, b = map(int, input().rstrip().split())\n T.append(a)\n P.append(b)\ndp = P[:] + [0] # 최대 수익을 동적으로 기록할 DP테이블\nfor i in range(N-1, -1, -1): # Top-Down\n if T[i]+i > N: dp[i] = dp[i+1]\n else: dp[i] = max(dp[i+1], P[i] + dp[i+T[i]])\nprint(dp[0])\n\n\"\"\"\ni일에 시작한 일이 근무일을 넘길 경우\n해당 일은 처리할 수 없고\n그 다음날의 일 처리량과 동일하다\n반대로 근무일 안에 끝낼 수 있을 경우\n그 일을 할 수도 있고 안 할 수도 있는데\n안 할 경우는 dp[i+1]\n할 경우는 P[i] + dp[i+T[i]]\n\n안 할 경우에 dp[i+1]인 이유는\n그 일을 하지 않았기 때문에 dp[i]의 일 처리량과 \n하지 않고 넘어간 다음날인 dp[i+1]의 일 처리량이 동일하다\n일을 할 경우 P[i]는 일 처리량이고\ndp[i+T[i]]는 \n일 처리에 필요한 시간이 경과한 날의 일 처리량이 된다\n이 경우 시간 안에 일 처리가 가능하므로 \nP[i]의 일 처리량을 dp[i+T[i]]에 얹은 값을 계산한다\n그리고 이 둘의 크기를 비교하여 큰 값을 최대 일처리량으로 기록하면서\n위에서부터 내려오는 Top Down 방식으로 값들을 계산하는 DP문제이다\n\"\"\"" }, { "alpha_fraction": 0.5945945978164673, "alphanum_fraction": 0.5995085835456848, "avg_line_length": 17.454545974731445, "blob_id": "b7a16541ff899b6ba9782f9071ba9a307bf80b6a", "content_id": "0037be3b9b28282c33058dc545935b136f2d1fe4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 407, "license_type": "no_license", "max_line_length": 56, "num_lines": 22, "path": "/알고리즘/온라인저지/2021/12/1219/나는야 포켓몬 마스터 이다솜 딕셔너리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\nprint(dir(list))\n\ndef input():\n return sys.stdin.readline().rstrip()\n\nN, M = map(int, input().split())\n\npokemon_by_key = {}\n\nfor i in range(1, N+1):\n pokemon_by_key.update({i:input()})\n\nkey_by_pokemon = {v:k for k,v in pokemon_by_key.items()}\n\nfor i in range(M):\n quiz = input()\n try:\n print(pokemon_by_key.get(int(quiz)))\n except:\n 
print(key_by_pokemon.get(quiz))\n\n" }, { "alpha_fraction": 0.34756097197532654, "alphanum_fraction": 0.46341463923454285, "avg_line_length": 14, "blob_id": "ba97a7dd945e24785657bf6ff0e70fc181c0e252", "content_id": "b40bfc6e287e9a1cc07fb86a4778801d0484fb89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/알고리즘/온라인저지/2023/04/0424/푸앙이와 종윤이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\na = 100-N\nb = 100-M\nc = 100-(a+b)\nd = a*b\nq, r = 0, d\nif d>=100:\n q = d//100\n r = d%100\nprint(a, b, c, d, q, r)\nprint(c+q, r)" }, { "alpha_fraction": 0.5315315127372742, "alphanum_fraction": 0.5585585832595825, "avg_line_length": 21.200000762939453, "blob_id": "8e194c58ae5422e5c68b9561f10156419d31472c", "content_id": "00b147ee084f780920ce525d3b8b756d2ce2a702", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/04/0429/고려대학교에는 공식 와인이 있다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "C, K, P = map(int, input().split())\nresult = 0\nfor c in range(1, C+1):\n result += K*c + P*c*c\nprint(result)\n" }, { "alpha_fraction": 0.35353535413742065, "alphanum_fraction": 0.3717171847820282, "avg_line_length": 34.21428680419922, "blob_id": "061029b4a371f1f9012baeb7cdbc008c84452e12", "content_id": "22f4649966cefbd36d3574ac94c518f9bf6a162d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 619, "license_type": "no_license", "max_line_length": 73, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/01/0122/에라토스테네스의 체.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "n, K = map(int, input().split()) # 소수를 찾을 범위\na = [False,False] + 
[True]*(n-1) # 0 ~ n 까지의 숫자 리스트\n\ncnt = 0\n\nfor i in range(2,n+1): # 2 ~ n까지 반복\n if a[i]: # 2부터 시작, 해당 숫자가 지워지지 않고 남아있는 소수라면 \n for j in range(i, n+1, i): # 해당 소수의 배수들을 \n if a[j] == False:\n continue\n a[j] = False # 리스트에서 전부 False로 만들기\n cnt += 1\n if cnt == K:\n print(j)\n \n" }, { "alpha_fraction": 0.4202898442745209, "alphanum_fraction": 0.4492753744125366, "avg_line_length": 22.33333396911621, "blob_id": "17cdb11de8ad3783d9e84e8805c3aefba77aab0d", "content_id": "2dc507afdfedcf9457e5eb6c536ead88e1d3f3c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/알고리즘/온라인저지/2021/08/0822/별 찍기-2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfor n in range(1, N+1):\n print(' '*(N-n) + '*'*n)" }, { "alpha_fraction": 0.5575757622718811, "alphanum_fraction": 0.5575757622718811, "avg_line_length": 19.75, "blob_id": "f4294d5d6b4de59098ca89d3aba34ddd94a26c94", "content_id": "6a5d45da9a80ba69f44f2ee97726b54a7ed96104", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 34, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/06/0629/단어 정렬.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "arr = []\nfor i in range(int(input())):\n tmp = input()\n if tmp not in arr:\n arr.append(tmp)\narr.sort(key=lambda x:(len(x), x))\nfor a in arr:\n print(a)" }, { "alpha_fraction": 0.5632184147834778, "alphanum_fraction": 0.5747126340866089, "avg_line_length": 28.33333396911621, "blob_id": "1a06f0853f5266ee5c5b0c9319013d9fd2e53e9b", "content_id": "e60abd7b79ccd82cf48acfb67f5f2ddffdff3bd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 56, "num_lines": 3, "path": 
"/알고리즘/온라인저지/2023/03/0321/chino_shock.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "emoji = input()\nA, B, C = len(emoji), emoji.count(':'), emoji.count('_')\nprint(A+B+C*5)" }, { "alpha_fraction": 0.4745098054409027, "alphanum_fraction": 0.4901960790157318, "avg_line_length": 20.33333396911621, "blob_id": "7ea15ac34ef9ca7d47a55a57ff4598a20a00b35c", "content_id": "0b286336f46876208a70ea17495ed87e9909fd46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 32, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/06/0629/N과 M (3).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def comb(scope, depth):\n for i in range(1, scope+1):\n result.append(i)\n if depth == M:\n print(*result)\n else:\n comb(scope, depth+1)\n result.pop()\n\nN, M = map(int, input().split())\nresult = []\ncomb(N, 1)" }, { "alpha_fraction": 0.542553186416626, "alphanum_fraction": 0.5957446694374084, "avg_line_length": 18, "blob_id": "c358a177c15233102b38e07c6a3b064a3210b7ae", "content_id": "5c3cc3130797f09fac40523b37ab5a51801c2571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94, "license_type": "no_license", "max_line_length": 28, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/05/0529/Silnia.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = 1\nfor i in range(1, N+1):\n result = (result * i)%10\nprint(result)" }, { "alpha_fraction": 0.5284237861633301, "alphanum_fraction": 0.5684754252433777, "avg_line_length": 21.14285659790039, "blob_id": "2108fd8730ec7aabce617c2bd650b3624d7a761b", "content_id": "d8c841403a56129bc00fc191a1699d6b7aad9d55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 942, "license_type": "no_license", "max_line_length": 65, "num_lines": 35, "path": 
"/알고리즘/온라인저지/2022/10/1006/알파벳.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\ndef idx(char):\n return ord(char)-65\n\ndef dfs(y, x, move):\n global result\n result = max(result, move)\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<R and 0<=nx<C and not visited[idx(arr[ny][nx])]:\n visited[idx(arr[ny][nx])] = 1\n dfs(ny, nx, move+1)\n visited[idx(arr[ny][nx])] = 0\n\nR, C = map(int, input().rstrip().split())\narr = [list(input().rstrip()) for _ in ' '*R]\nvisited = [0]*26\nresult = 0\nvisited[idx(arr[0][0])] = 1\ndfs(0, 0, 0)\nprint(result+1)\n\n\"\"\"\n처음으로 스스로 풀은 DFS문제인 것 같다\n채점속도가 좀 느려서, 이게 정답이 될 까 걱정이 되었는데\n신기하게도 정답이었다\nBFS에서 다음 탐색점을 Q에 넣듯이\nDFS에서 다음 탐색점을 재귀로 함수를 실행해주는 코드를 짰다\n\"\"\"\n\n# https://www.acmicpc.net/problem/1987" }, { "alpha_fraction": 0.5095541477203369, "alphanum_fraction": 0.5286624431610107, "avg_line_length": 12.166666984558105, "blob_id": "06216a2f0b31a3d73f481036f1f3689354744ac5", "content_id": "c8e691b25d02a00a9ad4e9c6ff0f6eacc64323f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 39, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/02/0201/지능형 기차.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "now = 0\nresult = 0\n\nfor t in range(4):\n off, on = map(int, input().split())\n\n now += (on-off)\n\n if now > result:\n result = now\n\nprint(result)" }, { "alpha_fraction": 0.5405405163764954, "alphanum_fraction": 0.5540540814399719, "avg_line_length": 24, "blob_id": "ad398029b41d2da3a29c2af8888bf4db5aba3c32", "content_id": "545a900c46a7c019e3624689f29bbf34b2e061cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 43, "num_lines": 3, "path": "/알고리즘/온라인저지/2023/04/0425/세 막대.py", 
"repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a, b, c = sorted(map(int, input().split()))\nc = min(a+b-1, c)\nprint(a+b+c)" }, { "alpha_fraction": 0.5180055499076843, "alphanum_fraction": 0.5401661992073059, "avg_line_length": 20.294116973876953, "blob_id": "39f0691870a91e042aa0eb719eb0081409ed4987", "content_id": "f19df6d5b76809d291dfdd7c9ce74b8a66a2f290", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 361, "license_type": "no_license", "max_line_length": 56, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/10/1002/주몽.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nM = int(input().rstrip())\nparts = sorted(list(map(int, input().rstrip().split())))\ni, j = 0, N-1 # j = len(parts)-1\nresult = 0\nwhile i < j:\n armor = parts[i] + parts[j]\n if armor > M: j -= 1\n elif armor < M: i += 1\n else: # armor == M\n result += 1\n i += 1\nprint(result)" }, { "alpha_fraction": 0.3636363744735718, "alphanum_fraction": 0.3903743326663971, "avg_line_length": 18.736841201782227, "blob_id": "fb91cec79cb413fbfb77819d88139a8a8c92ffd2", "content_id": "b772bc963ec2c86a938600626ac70de2ab349537", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "no_license", "max_line_length": 43, "num_lines": 19, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/2. 
파이썬 SW문제해결 기본 List2/6차시 2일차 - 부분집합의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A = list(range(1, 12+1))\n\nT = int(input())\n\nfor t in range(1, T+1):\n N, K = map(int, input().split())\n\n n = len(A)\n cnt = 0\n\n for i in range(1<<n):\n tmp = set()\n for j in range(n):\n if i&(1<<j):\n tmp.add(A[j])\n if sum(tmp) == K and len(tmp) == N:\n cnt += 1\n \n print('#{} {}'.format(t, cnt))" }, { "alpha_fraction": 0.5641025900840759, "alphanum_fraction": 0.5641025900840759, "avg_line_length": 38.33333206176758, "blob_id": "4cce9e73830accd3be9ffda20d56baf5b4d74d53", "content_id": "bd80c64d884a281d6f4f3e3780b6cd0261d8c05d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 50, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/12/1212/No Brainer.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n X, Y = map(int, input().split())\n print('MMM BRAINS' if X >= Y else 'NO BRAINS')" }, { "alpha_fraction": 0.5709969997406006, "alphanum_fraction": 0.5951661467552185, "avg_line_length": 19.75, "blob_id": "1b8501c01b316fa7164780968061f4d469977a32", "content_id": "00c35731198f426cc622a34edef0f905a0504ddd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 48, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/07/0729/생태학.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndct = dict() # tree dict\nlst = [] # tree list\ncnt = 0\nwhile True:\n tree = input().rstrip() # EOFerror\n if not tree: break\n if tree not in lst: lst.append(tree)\n try: dct[tree] += 1\n except: dct[tree] = 1\n cnt += 1\nlst.sort()\nfor l in lst: print(f'{l} {dct[l]/cnt*100:.4f}')" }, { "alpha_fraction": 0.2801932394504547, "alphanum_fraction": 
0.4202898442745209, "avg_line_length": 7.666666507720947, "blob_id": "63323988debfddaee78c316b13cb184e01a9213c", "content_id": "5a4c742db5d7bf1ea2a261497de4bc949c7c55e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 36, "num_lines": 24, "path": "/알고리즘/온라인저지/2022/02/0227/중앙 이동 알고리즘.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n0: 4\n1: 4+5 = 9\n2: 9+ \n\n4\n4*4-4-3\n9*4-8-3\n\n\ndp[i] = dp[i-1] * 4 - (4*i+3)\n\"\"\"\n\nN = int(input())\n\ndp = [4]\n\ntmp = 4\n\nfor i in range(1, N+1):\n dp.append(dp[i-1] * 4 - (tmp+3))\n tmp *= 2\n\nprint(dp[N])" }, { "alpha_fraction": 0.390625, "alphanum_fraction": 0.41796875, "avg_line_length": 20.41666603088379, "blob_id": "9ce4ab6184a811f534692c36c4041a29c47f4d23", "content_id": "82f6b3471e5948c4e54bfe2cde3b3255a58e1601", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 43, "num_lines": 12, "path": "/알고리즘/[템플릿]/[SAMPLE]/순열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def perm(arr, i):\n if i == len(arr)-1:\n print(arr)\n else:\n for j in range(i, len(arr)):\n arr[i], arr[j] = arr[j], arr[i]\n perm(arr, i+1)\n arr[i], arr[j] = arr[j], arr[i]\n\narr = [1, 2, 3, 4]\n\nperm(arr, 0)" }, { "alpha_fraction": 0.4354838728904724, "alphanum_fraction": 0.4838709533214569, "avg_line_length": 27.69230842590332, "blob_id": "4384994712fc5fa0bb80eba91bb5822135369efe", "content_id": "360d428d6fbd6914cfee579287d0909925f85eef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 50, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/12/1220/알파벳 전부 쓰기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n arr = 
[0]*26\n for s in input():\n char = s.lower()\n if 97 <= ord(char) < 97+26:\n arr[ord(char)-97] = 1\n result = 'missing '\n for i in range(26):\n if not arr[i]:\n result = result + chr(i+97)\n if i == 25 and result != 'missing ': break\n else: result = 'pangram'\n print(result)" }, { "alpha_fraction": 0.31512194871902466, "alphanum_fraction": 0.49073171615600586, "avg_line_length": 17.654544830322266, "blob_id": "b81a12e175829dd03bfea8ec38d16255fc844ade", "content_id": "a09448283dafa4c99af9c9eeb112e1bd39535488", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1395, "license_type": "no_license", "max_line_length": 45, "num_lines": 55, "path": "/알고리즘/온라인저지/2021/10/1003/이진 딸기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "strawberry = {\n 0: '0010',\n 1: '0001',\n 2: '0010',\n 3: '0011',\n 4: '0100',\n 5: '0101',\n 6: '0110',\n 7: '0111',\n 8: '1000',\n 9: '1001',\n 10: '1010',\n 11: '1011',\n 12: '1100',\n 13: '1101',\n 14: '1110',\n 15: '1111',\n 16: '1110',\n 17: '1101',\n 18: '1100',\n 19: '1011',\n 20: '1010',\n 21: '1001',\n 22: '1000',\n 23: '0111',\n 24: '0110',\n 25: '0101',\n 26: '0100',\n 27: '0011',\n}\n# 이진딸기의 1번째는 VVV딸기 이다\n# 그리고 쭉 올라갔다가 16부터 내려오는데\n# 14번 올라가고 14번 내려오는 구조이다\n# 때문에 28로 나눠준 나머지를 딕셔너리에서 호출하면 문제가 없을 줄 알았는데\n# 계속 KeyError가 발생했다\n# 핵심은 딕셔너리의 0번째 VV딸기V\n# 0번째의 의미는 14번째 내려온 마지막 딸기이고\n# 그래야 0번째의 다음인 1번째가 VVV딸기 로 다시 순환하는 구조가 된다\n\nT = int(input())\n\nfor t in range(T):\n N = int(input()) # N번째 이진딸기는?\n\n tmp = strawberry[N%28] # 이진딸기는 28번마다 돌아온다\n\n result = '' # 이진수를 V와 딸기로 바꿔줄 result 초기화\n\n for m in tmp: # 구한 이진수를 0을 V로, 1을 딸기로\n if m == '0':\n result = result + 'V'\n else:\n result = result + '딸기'\n \n print(result) # 출력" }, { "alpha_fraction": 0.4923076927661896, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 15.5, "blob_id": "3eb675aca58724d68fb67f8e338fbf0763204839", "content_id": "2351068e420af35d7c600af0053eb7969d08dbea", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/알고리즘/온라인저지/2023/04/0420/Correct.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "H, M = map(int, input().split())\nH -= 9\nend = H*60 + M\nprint(end)" }, { "alpha_fraction": 0.4695121943950653, "alphanum_fraction": 0.4878048896789551, "avg_line_length": 15.5, "blob_id": "93dde88295a4bb46d520d68ff064c2a1feecef08", "content_id": "38980f5ee2a48ab4c2065f60d39cb7740edeef37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 29, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/06/0621/창영이의 일기장.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "text = input()\nresult = ''\ni = 0\nwhile i < len(text):\n result = result + text[i]\n if text[i] in 'aeiou':\n i += 3\n else:\n i += 1\nprint(result)" }, { "alpha_fraction": 0.4457831382751465, "alphanum_fraction": 0.45783132314682007, "avg_line_length": 19.8125, "blob_id": "84068a22a874430eae2c5c944d6cdfd30619c888", "content_id": "3505f0d0476c096669a5d6ad11b182972bd6b191", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 33, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/07/0712/FBI.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "FBI = False\nresult = []\nfor i in range(1, 6):\n agent = input()\n for j in range(len(agent)-2):\n tmp = agent[j:j+3]\n # print(tmp, i)\n if tmp == 'FBI':\n result.append(i)\n if not FBI:\n FBI = True\n break\nif FBI:\n print(*result)\nelse:\n print('HE GOT AWAY!')" }, { "alpha_fraction": 0.4423791766166687, "alphanum_fraction": 0.4535315930843353, "avg_line_length": 15.625, "blob_id": "6f7c16d4028512598eabab7441e33063d8601006", 
"content_id": "5b60554b8961f2fd293049d2922dcd246163c52e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 269, "license_type": "no_license", "max_line_length": 36, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/02/0203/세로읽기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "arr = []\n\nmax_len = 0\n\nfor _ in range(5):\n tmp = input()\n arr.append(tmp)\n if len(tmp) > max_len:\n max_len = len(tmp)\n\nfor j in range(max_len):\n for i in range(5):\n try:\n print(arr[i][j], end='')\n except:\n pass\n\n\n\n" }, { "alpha_fraction": 0.4575471580028534, "alphanum_fraction": 0.49528300762176514, "avg_line_length": 33.16666793823242, "blob_id": "dcadb0a567a6595d5ee1452cc2a0189a1d8de76e", "content_id": "c751dc85954af198a31d4341b52ffd2c80c2d2bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 58, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/08/0805/고급 수학.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfor n in range(1, N+1):\n T = sorted(list(map(int, input().split()))) # triangle\n print(f'Scenario #{n}:')\n if T[0]**2 + T[1]**2 == T[2]**2: print('yes\\n')\n else: print('no\\n') " }, { "alpha_fraction": 0.41310539841651917, "alphanum_fraction": 0.45868945121765137, "avg_line_length": 21, "blob_id": "02eef63d382e36b2a9fe2a1b068fdf29cd2628e4", "content_id": "f3c6b58474c784056a47322e63510f06f93c0c42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 38, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/05/0504/문문문.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nM = int(input())\nif N < 6:\n result = []\n door = [0, 1, 0, 1, 0, 1]\n door__reverse = [1, 0, 1, 0, 1, 0]\n if M == 0:\n for i in door[1:N]:\n 
result.append(i)\n else:\n for i in door__reverse[1:N]:\n result.append(i)\n for r in result:\n print(r)\nelse:\n print('Love is open door')" }, { "alpha_fraction": 0.5007451772689819, "alphanum_fraction": 0.5089418888092041, "avg_line_length": 28.844444274902344, "blob_id": "786d645a26c67e6c19fb89e89208741593fbd1fb", "content_id": "3f7baa6cac484d8096ae7ee5f12f5d56bd4df98d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2010, "license_type": "no_license", "max_line_length": 56, "num_lines": 45, "path": "/알고리즘/온라인저지/2022/01/0116/단어 뒤집기 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "sen = input() # 입력\n\nword = '' # 단어 혹은 태그\n\nresult = [] # 단어 혹은 태그 단위로 짤라서 담을 리스트\n\nin_tag = False # 입력값을 돌면서, 현재 스캔중인 단어가 태그인지 확인\n\nfor s in sen: # 입력값을 하나씩 순회\n if s == '<': # 태그의 시작\n if word: # 단어가 추가되던 중이었으면\n result.append(word) # 추가중이던 단어를 끊어주고 리스트에 추가\n word = s # 태그를 담으면서 다시 시작\n else: # 시작 혹은 바로 앞에서 단어를 끊어줘서 단어가 없는 경우\n word = word + s # 태그를 담으면서 시작\n in_tag = True # 태그를 스캔중이므로 True\n elif s == '>': # 태그의 끝\n word = word + s # '>'를 담고\n result.append(word) # 리스트에 태그를 추가\n word = '' # 단어 초기화\n in_tag = False # 태그에 담는 것이 종료되었음\n elif s == ' ' and not in_tag: # 공백이면서 태그에 담는 중이 아닐 때\n result.append(word) # 태그가 아닌 그냥 단어를 리스트에 담고\n word = '' # 단어 초기화\n else: # 단어 혹은 태그를 스캔중\n word = word + s # 단어에 담아줌\n\nif word: # 스캔이 끝난 마지막 단어\n result.append(word) # 리스트에 추가\n\n# 첫번째 단어 출력\nif result[0][0] == '<': # 태그이면\n print(result[0], end='') # 그대로 출력\nelse: # 단어이면\n print(result[0][::-1], end='') # 뒤집어서 출력\n \nfor i in range(1, len(result)): # 두번째 단어부터 끝까지 출력\n # 이전 단어가 태그가 아니고 지금 단어도 태그가 아니면\n if result[i-1][0] != '<' and result[i][0] != '<':\n print(end=' ') # 단어 사이 공백\n \n if result[i][0] == '<': # 지금 단어가 태그이면\n print(result[i], end='') # 그대로 출력\n else: # 단어이면\n print(result[i][::-1], end='') # 뒤집어서 출력" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.45864662528038025, 
"avg_line_length": 21.16666603088379, "blob_id": "1658dd10e7fb802435fb29c7e0d063e06eb5422c", "content_id": "ea17ebb1f085499a5a56a9da762b78d88a2f1d37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 39, "num_lines": 12, "path": "/알고리즘/온라인저지/2021/08/0804/직각삼각형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a, b, c = 1, 1, 1\nresult = []\nwhile a + b + c != 0:\n a, b, c = map(int, input().split())\n if a + b + c == 0:\n continue\n if a**2 + b**2 == c**2:\n result.append('right')\n else:\n result.append('wrong')\nfor res in result:\n print(res)\n" }, { "alpha_fraction": 0.4943181872367859, "alphanum_fraction": 0.5, "avg_line_length": 34.400001525878906, "blob_id": "eb334300f8a8a8cc088ff330ddea0e83074d7b45", "content_id": "65db4c10510e82dac7f462f520ebf386f6c69b51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 49, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/02/0220/페르시아의 왕들.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n a, b, c, d = map(int, input().split())\n if a == b == c == d == 0: break\n result = list(map(abs, [a-c, a-d, b-c, b-d]))\n print(min(result), max(result))" }, { "alpha_fraction": 0.40454545617103577, "alphanum_fraction": 0.5068181753158569, "avg_line_length": 26.5625, "blob_id": "ff4979ad8153e978225ba7a802e2ddd0b4e0d396", "content_id": "85d9292fdababfd5ce85ccaba5f092e4c1194f9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 440, "license_type": "no_license", "max_line_length": 82, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/07/0731/이름 궁합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# stroke\nS = [3, 2, 1, 2, 3, 3, 2, 3, 3, 2, 2, 1, 2, 2, 1, 2, 2, 2, 1, 2, 1, 1, 1, 2, 2, 
1]\nname1 = input()\nname2 = input()\nfor n in name1:\n tmp = ord(n)-65\narr = []\nfor i in range(len(name1)):\n arr.append(S[ord(name1[i])-65])\n arr.append(S[ord(name2[i])-65])\nfor i in range(len(arr)-2):\n tmp = [0] * (len(arr)-1)\n for j in range(len(arr)-1):\n tmp[j] = (arr[j]+arr[j+1])%10\n arr = tmp\nfor a in arr: print(a, end='')" }, { "alpha_fraction": 0.4482758641242981, "alphanum_fraction": 0.4482758641242981, "avg_line_length": 29, "blob_id": "65755037053fe2641595e2b65032c463e4f2a3ea", "content_id": "51ae396b233e359eecdeb7ed9e3e6015e9de53b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "no_license", "max_line_length": 29, "num_lines": 1, "path": "/알고리즘/온라인저지/2022/02/0202/10926.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print(input(), '??!', sep='')" }, { "alpha_fraction": 0.47012731432914734, "alphanum_fraction": 0.48481881618499756, "avg_line_length": 30.9375, "blob_id": "ae7b01266b37342a33029e99aa49457af6cc73f7", "content_id": "04b86364e9567f0cdab6949218ed5fe73a24f614", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1247, "license_type": "no_license", "max_line_length": 55, "num_lines": 32, "path": "/알고리즘/온라인저지/2022/04/0426/맥주 마시면서 걸어가기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\ndef bfs():\n q = deque()\n q.append([home[0], home[1]]) # 집에서부터\n while q: # 갈 수 있는 편의점이 있는 동안\n x, y = q.popleft() # 출발\n # 목적지에 도착할 수 있으면\n if abs(x - goal[0]) + abs(y - goal[1]) <= 1000:\n print(\"happy\") # 축제에 갈 수 있다\n return # 축제에 갈 수 있으므로 코드 종료\n for i in range(N): # N개의 편의점 중\n if not visited[i]: # 간 적 없는 편의점\n nx, ny = store[i] # 편의점의 좌표\n # 갈 수 있는 편의점이면\n if abs(x - nx) + abs(y - ny) <= 1000:\n q.append((nx, ny)) # 편의점 좌표 추가\n visited[i] = 1 # 편의점 방문처리\n print(\"sad\") # 다 돌아도 못갔네 ㅠㅠ\n 
return\n\nfor t in range(int(input())):\n N = int(input())\n home = list(map(int, input().split()))\n store = []\n for i in range(N): # N개의 편의점\n store.append(tuple(map(int, input().split())))\n goal = list(map(int, input().split()))\n visited = [0 for _ in range(N+1)] #home 제외\n bfs() # 집부터 출발" }, { "alpha_fraction": 0.49082568287849426, "alphanum_fraction": 0.5229358077049255, "avg_line_length": 14.642857551574707, "blob_id": "d1793eb3a256c98d0d4d964c94bbeb59984bc905", "content_id": "7cbf1e0b5698b0a1ff5625602bcd0f834822bc08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 38, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/03/0303/3 つの整数 (Three Integers).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "nums = list(map(int, input().split()))\n\nmaxx = 0\nresult = 0\n\ndp = [0] * 101\n\nfor i in range(len(nums)):\n dp[nums[i]] += 1\n if dp[nums[i]] > maxx:\n maxx = dp[nums[i]]\n result = nums[i]\n\nprint(result)" }, { "alpha_fraction": 0.36567163467407227, "alphanum_fraction": 0.4029850661754608, "avg_line_length": 9.307692527770996, "blob_id": "9f5addd664048bdc94fb450d53bdc1456d1b5623", "content_id": "539d8f05fe3151e4f1237dba61b42aead6f2ede6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 20, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/01/0131/별 찍기 - 13.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\ni = 1\n\nfor n in range(N):\n print('*' * i)\n i += 1\n\ni -= 1\n\nfor n in range(N-1):\n i -= 1\n print('*' * i)\n" }, { "alpha_fraction": 0.5736196041107178, "alphanum_fraction": 0.5858895778656006, "avg_line_length": 22.35714340209961, "blob_id": "50ffdc2329f799fb580123515896053517243ee1", "content_id": "b59fc2983b85993026aa08a64effb612946b710b", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/10/1021/Base Conversion.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nA, B = map(int, input().rstrip().split())\nM = int(input().rstrip())\narr = list(map(int, input().rstrip().split()))[::-1]\nnum = 0\nfor i in range(M): num += A**i*arr[i]\nbase = 1\nwhile base*B < num: base *= B\nwhile base >= 1:\n print(num//base, end=' ')\n num, base = num%base, base//B" }, { "alpha_fraction": 0.5883134007453918, "alphanum_fraction": 0.6095617413520813, "avg_line_length": 25, "blob_id": "dfbe9f83925dfca4a2bc17c77991267c91d9b191", "content_id": "5df77e2a65b40d704d7b42ab10918c1692c79ad5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1125, "license_type": "no_license", "max_line_length": 66, "num_lines": 29, "path": "/알고리즘/온라인저지/2022/08/0805/입국심사.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\ntime = sorted(list(int(input()) for _ in range(N)))\nstart = 0\n# time의 최소값 * 사람 수 => 최대 시간 \nend = min(time) * M # Worst Case : 모든 입국 심사자가 1번 심사대를 통과하려고 떼쓰는 경우\nresult = 0\nwhile start <= end :\n mid = (start + end) // 2\n I = 0 # immigration : 입국심사\n for t in time: I += mid // t # mid초 동안 심사대를 통과할 수 있는 사람 수\n if I >= M: # mid초 동안 M명 이상 심사대를 통과할 수 있음\n end = mid-1 # 더 작은 시간 탐색\n result = mid # 매개 변수 탐색 : 최소값 저장\n else: start = mid+1 # mid초 동안 M명 이상 심사대를 통과할 수 없음\nprint(result)\n\n\"\"\"\n시간을 기준으로 이분탐색한다\n예제1같이 28초의 경우, 1번 심사대가 4명을 심사한다\n\"이번 mid초 동안 몇명이 심사대를 통과할 수 있는가\"를 확인하여\nstart<=end 인 동안 M명을 심사할 수 있는 시간초를\n이분탐색으로 줄여나간다\n\"\"\"\n\n# https://www.acmicpc.net/problem/3079" }, { "alpha_fraction": 0.4375, "alphanum_fraction": 0.5071428418159485, "avg_line_length": 24.5, 
"blob_id": "3b7ac71da6cfe9bb043ce6ab0649f76aefa6e35c", "content_id": "233bbf1b7d284f843c7da65fb83ebe712e31d0a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 560, "license_type": "no_license", "max_line_length": 61, "num_lines": 22, "path": "/알고리즘/[템플릿]/문자열 탐색/Boyer Moore.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import timeit\nstart = timeit.default_timer()\n\ntext = 'aawneifonoeiwmcl,xjfklsajdfklajewklfhaefnk1r7e8yiueniuencieh02cf12fh0v3ymn,cgjklo10f321hcqvyjkn9og4ml0f21hc3v9yjknqom0h1vcf2y93njkmof210hc3v9jkoymn0f21hcvko9jnym021hfok9cqv3jln,210hf9ocv3jklmny,qgf21h0kmn,jlocqv39cf30hkv9mn,ojlgfvc0hkx12mn,jol9210cf3hk9qvomn,jl10fh9cv3okjmn,g10cf3hv9knom0f9h3koncvm,02f1h9kcv3omn,j2f103vhc9knmjog0f2h19kncv3jmoyg0f21h9cv3njkgmoy0fh921ojkymncgv3,rlf21h0c3v9nyjkmgoh0n9f2yjkmcv13g4h0f21c3vn9kmjyg0hf219ncvjkm3og0f21hc3v9mnjky,o0f21hcvn9km3jy0h1vcf2nk9mj3021hnkf9m,cv3jhf210ncv3mjk9h0f219nycv3jkg0h9f21nycv3jk0h9f21nycv3kjg0fh219mnjkcv3,yogf20h1v3n9cg4kmf0h2913vcgf3210cvh9g4n20f31h9cvnkgf02h9c13vngtkr40fh921nkcvm30hn9kfmcv3h0nf1mk,j9cv3gf,mn12v0chkljox30cf21hkv3mn,9of0cmn,hkvjlo390hk9omnj,2f13vc0h9nk1mojfc3v0h9n12cfkmv3021h9cfnvyh0nf29kmcv13jh29fn1kmcv30f2h91ncvkm30h92nkf1mcv3o0h9f21nkmocv,30h9nk2fmcv130h9fknc12v30h9nk12fmcv320hc9f1nvk30f2h9c1vnkm30h9fnkc1vh9nkmjof12cv30h9fnkmcv12o,30h9f21nkcvmx30hf9n12ckv30h9nfkmc12v30hk9n,mfjloc1v0fh912vcnk3mh9f12vcn30h9nkcf12v30hf2n91mcv0hnf9c12v3h0n9mkj2cfv130h9nfc12v30h9fnkcv12m3gf219hcvn3kg0f21h9cvn3f219hcvk3nf021h9cnv39hf21cv30h9nf2c1vg3h09n1f2ycvg30h9fnc12vy8g3df09h2c1vg30h98y7nfcv213gh0987yfv2c13h0987yf2cv13ghy0987ncfv123h09872fyv134chyf7098cv2g09h87fv2cg13hy67098cv12gh09f21ycv3g4h09y87f12v3cg0h9821vc309h8f12cv309hfcv09hn12vcx309hn1f2cv30h9nf12cv30hn1cf2'\npattern = 'cx309hn1f2cv30h9'\nresult = 0\nfor i in range(len(pattern)-1, len(text)):\n if text[i] == pattern[-1]:\n j = 2\n while True:\n if pattern[-j] == text[i-j+1]:\n if pattern[-j] == 
pattern[0]:\n result = 1\n break\n else:\n j += 1\n else:\n break\nprint(result)\n\nend = timeit.default_timer()\nprint(f'{end-start:.5f}')" }, { "alpha_fraction": 0.41304346919059753, "alphanum_fraction": 0.43478259444236755, "avg_line_length": 12.214285850524902, "blob_id": "ffcae2b2ea2bd33339f2b2c3bddbd8f677e1b196", "content_id": "cd9fcff1e7d6f32499dac4c28a97511849274d99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 184, "license_type": "no_license", "max_line_length": 17, "num_lines": 14, "path": "/알고리즘/온라인저지/2023/03/0301/Serious Problem.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 'yee'\ninput()\nA = B = 0\nfor i in input():\n try:\n int(i)\n A += 1\n except:\n B += 1\nif A > B:\n result = 2\nelif A < B:\n result = 'e'\nprint(result)" }, { "alpha_fraction": 0.4534534513950348, "alphanum_fraction": 0.462462455034256, "avg_line_length": 21.200000762939453, "blob_id": "40e375e89993ca1942025715ea253267fc8551b8", "content_id": "5409bf21be262397c0d4c974e508471a782fb264", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 40, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/07/0720/사탕.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor t in range(T):\n J, N = map(int, input().split())\n boxes = []\n for n in range(N):\n R, C = map(int, input().split())\n boxes.append(R*C)\n boxes.sort(reverse=True)\n result = 0\n for b in boxes:\n result += 1\n J -= b\n if J <= 0: \n break\n print(result)\n" }, { "alpha_fraction": 0.5605095624923706, "alphanum_fraction": 0.5700637102127075, "avg_line_length": 16.5, "blob_id": "0bcf0d9dff5fe7298f0c4c2ec8fc06926f08a53f", "content_id": "884814b5d254ce909297348cecb5fdbe91365a38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 314, 
"license_type": "no_license", "max_line_length": 40, "num_lines": 18, "path": "/알고리즘/온라인저지/2021/12/1219/나는야 포켓몬 마스터 이다솜.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ndef input():\n return sys.stdin.readline().rstrip()\n\nN, M = map(int, input().split())\n\npokemon = ['' for _ in range(N+1)]\n\nfor i in range(1, N+1):\n pokemon[i] = input()\n\nfor i in range(M):\n quiz = input()\n try:\n print(pokemon[int(quiz)])\n except:\n print(pokemon.index(quiz))" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 30, "blob_id": "ae6ce872f24bc84ec03d94cba203a17771d16183", "content_id": "3a01f41c7c43067ff8cb734d3515164593d6617f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "no_license", "max_line_length": 30, "num_lines": 1, "path": "/알고리즘/온라인저지/2023/05/0518/Metronome.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print(f'{int(input())/4:.2f}')" }, { "alpha_fraction": 0.5620437860488892, "alphanum_fraction": 0.5620437860488892, "avg_line_length": 9.538461685180664, "blob_id": "4cf8c09ae7960615c4e93b074666fc75788e6757", "content_id": "af0b0129450d154c35f65fb94a8c3412f0f6110f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 23, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/02/0220/유학 금지.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\nCAMBRIDGE\n\"\"\"\n\nCAMBRIDGE = \"CAMBRIDGE\"\n\ntext = input()\n\nfor t in text:\n if t in CAMBRIDGE:\n continue\n\n print(t, end='')\n" }, { "alpha_fraction": 0.6423357725143433, "alphanum_fraction": 0.6496350169181824, "avg_line_length": 21.83333396911621, "blob_id": "25dec903ad9b237344443376aeb8357521268347", "content_id": "ee3b5143d6677a78558dee9a5c3e266882fc0a2f", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 70, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/09/0927/수 정렬하기 4.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nfor i in sorted([int(input()) for _ in [0]*N], reverse=True): print(i)\n" }, { "alpha_fraction": 0.356890469789505, "alphanum_fraction": 0.5088339447975159, "avg_line_length": 14.666666984558105, "blob_id": "aeecd25ecb028a7c2365763e459744c7a103d160", "content_id": "8b66caccc86483bbaac78a69a634cd3700783891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 42, "num_lines": 18, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/28차시 3. 자료구조 – 셋, 딕셔너리 - 연습문제 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "data = {\n \"TV\": 2000000,\n \"냉장고\": 1500000,\n \"책상\": 350000,\n \"노트북\": 1200000,\n \"가스레인지\": 200000,\n \"세탁기\": 1000000,\n}\n\ntmp = []\n\nfor d in data:\n tmp.append((d, data[d]))\n\ntmp.sort(key=lambda x: x[1], reverse=True)\n\nfor t in tmp:\n print('{}: {}'.format(t[0], t[1]))\n\n" }, { "alpha_fraction": 0.43852460384368896, "alphanum_fraction": 0.5204917788505554, "avg_line_length": 23.5, "blob_id": "d8c02b23a1fca34cd56c7fa838e04c0cfd921aea", "content_id": "2f33f623c8d764324547bfb1400d9a5dcc0f06f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 328, "license_type": "no_license", "max_line_length": 34, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/04/0417/영화감독 숌.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ncnt = 0 # 몇번째 666인가\nsix3 = 666 # 666부터 출발\nwhile True:\n if '666' in str(six3): # 종말숫자면\n cnt += 1 # 카운트 +1\n if cnt == N: # N번째 종말숫자면\n print(six3) # 값을 출력\n break # while문 
종료\n six3 += 1 # 다음 숫자는 종말숫자인가" }, { "alpha_fraction": 0.5828220844268799, "alphanum_fraction": 0.5828220844268799, "avg_line_length": 19.5, "blob_id": "57349ba4ebc810e5a39c9ebba1996660e8bcc571", "content_id": "0a230881870fde291ae6ce511324df1e28564e1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 74, "num_lines": 8, "path": "/알고리즘/온라인저지/2021/12/1216/배열 합치기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\n\nresult = list(map(int, input().split())) + list(map(int, input().split()))\n\nresult.sort()\n\nfor r in result:\n print(r, end=' ')" }, { "alpha_fraction": 0.65625, "alphanum_fraction": 0.65625, "avg_line_length": 20.27777862548828, "blob_id": "615b8cd9e0d514fbb85db3d79476159cb87492d4", "content_id": "6c6d32a70084ea885405874a6ce436bec54d6565", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 808, "license_type": "no_license", "max_line_length": 52, "num_lines": 18, "path": "/README.md", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# Just Do It \n\n# 목표\n\n- 배운 내용들을 매일 기록\n- 覚えた内容を毎日記録する\n- Note that I studied today for everyday\n\n# 쉴 때 보기 좋은 명언들\n\n- 건강한 몸에 건강한 정신이 깃든다\n- 하루를 쉬면 메꾸는 데에 삼일이 걸린다\n- 해결할 수 있는 일은 고민하지 말고 하면 되고, 해결할 수 없는 일은 고민해도 소용이 없다\n- 우생마사 : 장마비에 떠내려가는, 소는 가만히 있다가 살고, 말은 발버둥치다가 죽는다\n- 노력이란, 보이지 않는 곳에서 나만 알게 아무도 모르게 하는 것을 말한다\n- 뭘 해야할지, 뭐가 하고 싶은지 모르겠을 땐, 내일 죽는 상상을 해라\n- 완벽하고자 하는 생각이, 모든 일을 시작조차 할 수 없게 만든다\n- \n" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.5459558963775635, "avg_line_length": 20.799999237060547, "blob_id": "f56e43d383e12710c160419ba79b493029795abb", "content_id": "dd6eb87d5b53d1d6980d15e3b29eddd388acf63f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 588, "license_type": "no_license", 
"max_line_length": 58, "num_lines": 25, "path": "/알고리즘/온라인저지/2023/01/0107/공유기 설치.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def install_wifi(distance):\n prev = houses[0]\n cnt = 1\n for house in houses[1:]:\n if house>=prev+distance:\n cnt += 1\n prev = house\n return cnt\n\nN, C = map(int, input().rstrip().split())\nhouses = sorted([int(input().rstrip()) for _ in range(N)])\nresult = 0\nstart, end = result, houses[-1]\nwhile start<=end:\n mid = (start + end)//2\n if install_wifi(mid)>=C:\n result = max(result, mid)\n start = mid+1\n else: \n end = mid-1\nprint(result)\n\n\"\"\"\n다음 집을 탐색할 때, 다음 집이 위치한 인덱스만 확인함\n\"\"\"" }, { "alpha_fraction": 0.4517241418361664, "alphanum_fraction": 0.5103448033332825, "avg_line_length": 27.700000762939453, "blob_id": "ec91fe98ce55b86a0b053d18b3a65926ba84023f", "content_id": "edcfae794bfa825bceda9713217885b3c7d6aff3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "no_license", "max_line_length": 47, "num_lines": 10, "path": "/알고리즘/온라인저지/2023/02/0211/창영마을.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "orders = input()\nballs = [1, 0, 0]\nfor order in orders:\n if order == 'A':\n balls[0], balls[1] = balls[1], balls[0]\n if order == 'B':\n balls[2], balls[1] = balls[1], balls[2]\n if order == 'C':\n balls[0], balls[2] = balls[2], balls[0]\nprint(balls.index(1)+1) " }, { "alpha_fraction": 0.3638676702976227, "alphanum_fraction": 0.44783714413642883, "avg_line_length": 18.649999618530273, "blob_id": "a192af52606681438f5eb3e7625baa58b3c14c68", "content_id": "3141aa1157817671aba41ef539528d58d47b0d1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 393, "license_type": "no_license", "max_line_length": 40, "num_lines": 20, "path": "/알고리즘/온라인저지/2022/03/0317/전자레인지.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "n = 
[int(input()) for _ in range(5)]\n# print(n)\n\"\"\"\n- to -\n- to +\n0 to +\n+ to +\n\"\"\"\nresult = 0\nif n[0] < 0 and n[1] < 0:\n result += abs(n[0]-n[1]) * n[2]\nelif n[0] < 0 and n[1] > 0:\n result += abs(n[0])*n[2] + n[1]*n[4]\n result += n[3]\nelif n[0] == 0 and n[1] > 0:\n result += n[3]\n result += n[1]*n[4]\nelif n[0] > 0 and n[1] > 0:\n result += abs(n[0]-n[1])*n[4]\nprint(result)\n" }, { "alpha_fraction": 0.5310077667236328, "alphanum_fraction": 0.5426356792449951, "avg_line_length": 23.619047164916992, "blob_id": "6a52531cd8767e066e66f9dbf13716785c3bb0b6", "content_id": "76abbdf341eba27ccc5492f413d44e96d9524ba8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 818, "license_type": "no_license", "max_line_length": 52, "num_lines": 21, "path": "/알고리즘/온라인저지/2021/08/0807/OX퀴즈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\nO가 연속되는지 알 수 있는 법?\n순회할 때\n이번 값이 O인지 확인하고\n몇번째 O인지 확인해서\n인덱스만큼 더해주기\n\"\"\"\n\nT = int(input()) # 테스트 케이스 개수\n\nfor i in range(T): # 테스트 케이스 개수만큼 반복하면서\n O_and_X = input() # OX를 입력받고\n count = 0 # 연속된 O의 개수를 세는 count를 초기화 하고\n result = 0 # 결과값을 0으로 초기화하고\n for O_or_X in O_and_X: # OX를 순회하면서\n if O_or_X == \"O\": # 순회중인 값이 O면\n count += 1 # 연속된 O의 개수 +1\n result += count # O가 연속된 횟수값을 result에 저장\n else: # 순회중인 값이 X일때\n count = 0 # 연속된 O의 카운트를 초기화\n print(result) # 결과값 출력" }, { "alpha_fraction": 0.5285714268684387, "alphanum_fraction": 0.5690476298332214, "avg_line_length": 29.071428298950195, "blob_id": "6a8184efd36129f8ece5ac409441526270aa4a09", "content_id": "749bc57666400fa979e42739a2e2e4e77503cf0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 592, "license_type": "no_license", "max_line_length": 70, "num_lines": 14, "path": "/알고리즘/온라인저지/2021/08/0805/더하기 사이클.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a = int(input())\n\ndef calc(a): # 제시된 계산 규칙을 함수로 
정의하고\n result = ((a % 10) + (a // 10)) % 10 + (a % 10) * 10\n return result\n\ncalc_list = [a] # 계산된 수들을 저장할 리스트의 0번째를 a로 초기화\ni = 0 # while문 돌리기 위한 i\nwhile True:\n calc_list.append(calc(calc_list[i])) # 함수를 적용하여 리스트에 추가\n i += 1\n if calc_list[0] == calc_list[::-1][0]: # 첫항(입력값)과 계산해서 추가한 값이 같을 때\n break # while문 종료하고\nprint(len(calc_list)-1) # 사이클 수 구해서 출력" }, { "alpha_fraction": 0.5888158082962036, "alphanum_fraction": 0.5986841917037964, "avg_line_length": 16.941177368164062, "blob_id": "5248cb7ed3065ee3510e209caf6327ce40009b88", "content_id": "39021b7056c88b8116066430bf30cd2b6dafa090", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "no_license", "max_line_length": 36, "num_lines": 17, "path": "/알고리즘/온라인저지/2023/02/0226/잃어버린 괄호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def new_sum(exp):\n plus_split = exp.split('+')\n answer = 0\n\n for i in range(len(plus_split)):\n answer += int(plus_split[i])\n\n return answer\n\nexps = input()\nminus_split = exps.split('-')\nresult = new_sum(minus_split[0])\n\nfor a in minus_split[1:]:\n result -= new_sum(a)\n\nprint(result)" }, { "alpha_fraction": 0.42236700654029846, "alphanum_fraction": 0.43431052565574646, "avg_line_length": 26.117647171020508, "blob_id": "6a01dd2355498daeb346647a4782aca2f8c0ae0d", "content_id": "9d5ddc206c6ab609a5daf0e5aef0059790911c3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1313, "license_type": "no_license", "max_line_length": 59, "num_lines": 34, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/1. 
파이썬 SW문제해결 기본 List1/7차시 1일차 - 전기버스.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(1, T+1):\n K, N, M = map(int, input().split())\n charge = list(map(int, input().split())) # 충전소 위치\n\n stop = [0] * (N+1) # 정류장 초기값\n\n for c in charge: # 충전소가 있는 정류장 정보를 갱신\n stop[c] = 1\n \n now = 0 # 현재 위치(초기 출발위치)\n cnt = 0 # 충전 횟수\n\n while now+K < N: # 목적지에 도달하기 전까지\n tmp = now # now값을 조작할 것이므로\n # 조작 전 now값을 tmp에 저장\n\n for i in range(now+K, now-1, -1): # 갈 수 있는 가장 멀리부터 \n # 현재 위치까지 거꾸로 확인하기\n if stop[i]: # 충전소가 발견되면\n # 뒤에서 탐색하였기 때문에\n # 가장 먼저 발견되는 충전소는 \n # 갈 수 있는 충전소 중 가장 멀리있는 충전소\n now = i # 해당 충전소의 위치로 이동\n cnt += 1 # 충전 한번 하였음\n break # 해당 for문 종료\n \n if i == tmp: # 충전소를 못찾고 결국 제자리로 돌아오면\n # 목적지에 도달할 수 없으므로\n cnt = 0 # 갈 수 없음\n break # while 종료\n\n print('#{} {}'.format(t, cnt))" }, { "alpha_fraction": 0.5458333492279053, "alphanum_fraction": 0.5708333253860474, "avg_line_length": 15.066666603088379, "blob_id": "e364e49c186d6d6d978eb12641affaa6327193f4", "content_id": "0114afa6ea0afd667649979e2a698807808ad673", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 30, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/09/0930/문서 검색.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nword = input().rstrip()\npattern = input().rstrip()\nL = len(pattern)\ni = 0\nresult = 0\nwhile i<len(word)-L+1:\n if pattern == word[i:i+L]:\n result += 1\n i += L-1\n i += 1\nprint(result)" }, { "alpha_fraction": 0.43933823704719543, "alphanum_fraction": 0.4595588147640228, "avg_line_length": 23.772727966308594, "blob_id": "0e4a9dadd61ed42e09b2c17899fe22e42b0bbe31", "content_id": "3d3e5eaccd4d1095fd249c561a3dce8edda33415", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "no_license", 
"max_line_length": 61, "num_lines": 22, "path": "/알고리즘/[템플릿]/Floyd-Warshall/플로이드.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "INF = int(1e9)\nN = int(input())\nM = int(input())\narr = [[INF]*N for _ in range(N)]\nfor m in range(M):\n a, b, c = map(int, input().split())\n a, b = a-1, b-1\n if c < arr[a][b]:\n arr[a][b] = c\nfor k in range(N):\n for a in range(N):\n for b in range(N):\n arr[a][b] = min(arr[a][b], arr[a][k] + arr[k][b])\nfor i in range(N):\n for j in range(N):\n if i == j:\n arr[i][j] = 0 \n if arr[i][j] == INF:\n arr[i][j] = 0\nfor a in arr: print(*a)\n\n# https://www.acmicpc.net/problem/11404" }, { "alpha_fraction": 0.3787878751754761, "alphanum_fraction": 0.469696968793869, "avg_line_length": 10.166666984558105, "blob_id": "27dfa310cf5c134b8c673ce3891b7e38f7b724e9", "content_id": "f6936d15b61d136d2965137e2654e1a9d222f551", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "no_license", "max_line_length": 25, "num_lines": 6, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/10차시 2. 
자료구조 – 리스트, 튜플 - 연습문제 11.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "l = [1, 1]\n\nwhile len(l) < 10:\n l.append(l[-1]+l[-2])\n\nprint(l)" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6380952596664429, "avg_line_length": 16.66666603088379, "blob_id": "d86568666bb62b27a98706524533232e230ae619", "content_id": "ee35419877ac6335611130b249c0443dee68370e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 44, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/09/0927/소수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nA, B, N = map(int, input().rstrip().split())\nprint(A*10**N//B%10)" }, { "alpha_fraction": 0.48179271817207336, "alphanum_fraction": 0.5070028305053711, "avg_line_length": 28.83333396911621, "blob_id": "72948bf732ad1b06a77f373173cc6c7a71b896c5", "content_id": "074cc9b03882bb36fc868b03a201bf39170fb280", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 357, "license_type": "no_license", "max_line_length": 47, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/04/0427/성적 통계.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K = int(input())\nfor k in range(1, K+1):\n score = list(map(int, input().split()))\n score = score[1:]\n # print(score)\n score.sort()\n A, B, C = score[-1], score[0], 0\n for i in range(len(score)-1):\n if score[i+1]-score[i] > C:\n C = score[i+1]-score[i]\n print(f'Class {k}')\n print(f'Max {A}, Min {B}, Largest gap {C}')" }, { "alpha_fraction": 0.4864864945411682, "alphanum_fraction": 0.5027027130126953, "avg_line_length": 12.214285850524902, "blob_id": "50db3dc051ffeb499789bdebe9c6972ce0196442", "content_id": "c742b893d0132977f18561320ed2fb467d5c9b56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 185, "license_type": "no_license", "max_line_length": 35, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/02/0217/와글와글 숭고한.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S, K, H = map(int, input().split())\n\nOK_check = True\n\ndictt = {\n S:'Soongsil',\n K:'Korea',\n H:'Hanyang',\n}\n\nif S+K+H >= 100:\n print('OK')\nelse:\n print(dictt[min(S,K,H)])\n" }, { "alpha_fraction": 0.29646018147468567, "alphanum_fraction": 0.36283186078071594, "avg_line_length": 9.318181991577148, "blob_id": "ab0030daa1105a84aa65971ad823cfb253656a81", "content_id": "f030c5c666abf3cefc5b4e7675cd1a05e2f77628", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 30, "num_lines": 22, "path": "/알고리즘/온라인저지/2021/12/1216/학점계산.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "grade = input()\n\nalpha = {\n 'A': 4,\n 'B': 3,\n 'C': 2,\n 'D': 1,\n 'F': 0,\n '+': 0.3,\n '0': 0.0,\n '-': -0.3,\n},\n\nresult = 0\n\n\n\nfor g in grade:\n result += alpha[0][g]\n \n\nprint('{:.1f}'.format(result))" }, { "alpha_fraction": 0.5574324131011963, "alphanum_fraction": 0.5810810923576355, "avg_line_length": 21.846153259277344, "blob_id": "99c9c1246ff25e1c097054b33b3f12118b46ab2c", "content_id": "01593ab99ddf4a86b60918067f2df032eb7496c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 40, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/07/0724/박 터뜨리기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, K = map(int, input().split())\nbuckets = list(range(1, K+1))\nbuckets.sort(reverse=True)\nresult = 0\nif N < sum(buckets):\n result = -1\nelse:\n idx = 0\n for i in range(N-sum(buckets)):\n buckets[idx] += 1\n idx = (idx+1)%K\n result = max(buckets) - min(buckets)\nprint(result)" }, { "alpha_fraction": 
0.47999998927116394, "alphanum_fraction": 0.5600000023841858, "avg_line_length": 12, "blob_id": "e1c9a5a5f55a6a89d996f6c031e82a407e76b61b", "content_id": "896b5a18df2d6a376fc438ed4705848bee82883f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25, "license_type": "no_license", "max_line_length": 13, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/03/0303/알고리즘 수업 - 알고리즘의 수행 시간 1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "n = input()\nprint('1\\n0')" }, { "alpha_fraction": 0.4524714946746826, "alphanum_fraction": 0.47148290276527405, "avg_line_length": 19.30769157409668, "blob_id": "07e478c87a1bf9ae98e19879597e23fcd35a33a3", "content_id": "c393e820a6830fd2cd06c7bd863ace94c7bae03b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 37, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/06/0613/한조서열정리하고옴ㅋㅋ.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr = list(map(int, input().split()))\nresult = 0\nfor i in range(N-1):\n tmp = 0\n for j in range(i+1, N):\n if arr[i] > arr[j]:\n tmp += 1\n else:\n break\n if tmp > result:\n result = tmp\nprint(result)" }, { "alpha_fraction": 0.48148149251937866, "alphanum_fraction": 0.5259259343147278, "avg_line_length": 21.66666603088379, "blob_id": "9eed69c5e4aa7e34a28382e842de976a2869d204", "content_id": "4a26191c9a63789601a44c9aabc5453a1261c66d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/01/0126/숫자 빈도수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, D = map(int, input().split())\ncount = [0]*10\nfor i in range(1, N+1):\n for j in str(i):\n count[int(j)] += 1\nprint(count[D])" }, { "alpha_fraction": 
0.4688427448272705, "alphanum_fraction": 0.48367953300476074, "avg_line_length": 23.14285659790039, "blob_id": "026a47c7470f95d4eafa3003bf65d72e93e1160e", "content_id": "a55d6cc7bf2f99d3f6171ebffa2a0c568d5257d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 337, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/10/1012/모든 순열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def perm(arr, i):\n if i == len(arr)-1:\n result.append(arr[:])\n else:\n for j in range(i, len(arr)):\n arr[i], arr[j] = arr[j], arr[i]\n perm(arr, i+1)\n arr[i], arr[j] = arr[j], arr[i]\n\narr = list(range(1, int(input())+1))\nresult = []\nperm(arr, 0)\nresult.sort()\nfor r in result: print(*r)" }, { "alpha_fraction": 0.4663677215576172, "alphanum_fraction": 0.4708520174026489, "avg_line_length": 19.18181800842285, "blob_id": "7c212d2d82f9343ea7fefebf8833825a7779c863", "content_id": "d32d308af643480b5a6601c5c1689ce512db4f39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 35, "num_lines": 11, "path": "/알고리즘/온라인저지/2021/09/0906/최소공배수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\nT = int(input())\nfor t in range(T):\n A, B = map(int,input().split())\n C, D = A, B\n while A % B != 0:\n A, B = B, A % B\n print(B * (C//B) * (D//B))\n\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 24.5, "blob_id": "5ff55148ee570657acc29af2a02533fef31505ed", "content_id": "2bee23986f9ac5b03e49f9e95b10df7be3ad9d58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 42, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/06/0615/성 지키기.py", "repo_name": 
"sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\ncastle = [list(input()) for _ in range(N)]\nrow = set()\ncol = set()\nfor i in range(N):\n for j in range(M):\n if castle[i][j] == 'X':\n row.add(i)\n col.add(j)\nprint(max(N-len(row), M-len(col)))" }, { "alpha_fraction": 0.40956342220306396, "alphanum_fraction": 0.4615384638309479, "avg_line_length": 16.851852416992188, "blob_id": "304a204e877399af57ba8e5710d35031fd4b2386", "content_id": "a58b9822e891db72acab15cd6af7f7906a7371b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "no_license", "max_line_length": 33, "num_lines": 27, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/5차시 2. 자료구조 – 리스트, 튜플 - 연습문제 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "all = set()\n\nfor i in range(1, 10):\n for j in range(1, 10):\n all.add(i * j)\n\nall.discard(1)\n\nfor i in range(3, 82, 3):\n for j in range(7, 82, 7):\n if i in all:\n all.discard(i)\n elif j in all:\n all.discard(j)\n\nresult = [[] for _ in range(8)]\n\nfor i in range(2, 10):\n tmp = 0\n for j in range(i, 82, i):\n tmp += 1\n if j in all:\n result[i-2].append(j)\n if tmp == 9:\n break\n\nprint(result)" }, { "alpha_fraction": 0.4434782564640045, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 37.66666793823242, "blob_id": "d46f46b724b9a3f336f8acf2bcf04ad0269117bb", "content_id": "6cef050bb8a8e946af7af24c7058a9da02a45040", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 42, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/08/0822/Contest Timing.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "D, H, M = map(int, input().split())\nresult = (D-11)*24*60 + (H-11)*60 + (M-11)\nprint(result if result >= 0 else -1)" }, { "alpha_fraction": 0.5293244123458862, 
"alphanum_fraction": 0.5567928552627563, "avg_line_length": 29.636363983154297, "blob_id": "afdd97f08677ba672abe91eff28bba9755249878", "content_id": "9047eb822e4dbf74caacb7a59760aee5f82e5f8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1845, "license_type": "no_license", "max_line_length": 69, "num_lines": 44, "path": "/알고리즘/[템플릿]/Dijkstra/미로만들기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom heapq import heappush, heappop\n\ninput = sys.stdin.readline # 일종의 루틴 같은 것\ndy, dx = [1, 0, -1, 0], [0, 1, 0, -1] # 델타이동\n\ndef dijkstra():\n heappush(Q, (0, 0, 0)) # change, y, x\n visited[0][0] = 1 # 0, 0 출발\n while Q:\n change, y, x = heappop(Q) # heapq 특성상, 튜플등에선 맨 앞 값을 기준으로 정렬한다\n if y == x == N-1: break # 방을 바꿔가며 어찌저찌 목표지점에 도착\n delta_move(change, y, x) # 4방향 델타이동\n return change # line 11의 change를 의미함, while문을 break했을 때의 change\n\ndef delta_move(change, y, x):\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<N and 0<=nx<N and not visited[ny][nx]:\n visited[ny][nx] = 1\n if room[ny][nx] == 0: # 검은 방 : 통과 불가능, 흰 방으로 바꿈\n heappush(Q, (change+1, ny, nx)) # 방 바꾸면서 가자\n else: # 흰 방 : 통과 가능\n heappush(Q, (change, ny, nx))\n\nN = int(input().rstrip())\nroom = [list(map(int, input().rstrip())) for _ in range(N)]\nvisited = [[0]*N for _ in range(N)]\nQ = []\nprint(dijkstra())\n\n\"\"\"\n핵심\n1. visited[i][j]에 대해서, 한 차원 더 방문배열을 만들면서\n \"방을 몇 번 바꾸면서 지나왔는가\"는 기록할 필요가 없다\n 어차피 i,j번 방이, 검은 방을 흰색 방으로 바꿔야 갈 수 있다면\n 방을 바꾸지 않고는, change+1 하지 않고는 해당 지점에 닿을 수 없다\n2. 
room[N-1][N-1]에 도달하던 말던 상관없이\n 방을 바꾸는 횟수를 최소로 하여 목표까지 도달해야 하기 때문에\n Q[0]에 집어 넣은 change의 값이 작은 것 부터 탐색한다\n 그래서 이 문제는 다익스트라다\n\"\"\"\n\n# https://www.acmicpc.net/problem/2665" }, { "alpha_fraction": 0.26778241991996765, "alphanum_fraction": 0.3012552261352539, "avg_line_length": 18.58333396911621, "blob_id": "8f863018ae81c5a8c18c4a55312dcc8ab2345523", "content_id": "f9333004ce1914d8555f56fb19f2d2d027e093b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "no_license", "max_line_length": 51, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/06/0605/별 찍기 - 21.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n# N = 3\nif N == 1:\n tmp = ['*']\nelse:\n if N%2:\n tmp = [['*']*((N//2)+1), ['']+['*']*(N//2)]\n else:\n tmp = [['*']*(N//2), ['']+['*']*(N//2)]\nfor i in range(N):\n for t in tmp:\n print(*t)\n " }, { "alpha_fraction": 0.42578125, "alphanum_fraction": 0.4375, "avg_line_length": 29.760000228881836, "blob_id": "bd11aa1d53509053977f0630b933b88bc3f823b4", "content_id": "42e49d8d3627b2b5a02e76c9d14c53049d683ceb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 884, "license_type": "no_license", "max_line_length": 102, "num_lines": 25, "path": "/알고리즘/온라인저지/2021/08/0823/암호 만들기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "L, C = map(int, input().split())\narr = list(input().split())\n\nn = len(arr)\nresult = []\nfor i in range(1<<n): # 모든 암호(부분집합)들 중에서\n tmp = []\n for j in range(n):\n if i&(1<<j):\n tmp.append(arr[j])\n if len(tmp) == L: # 길이가 L이고\n if ('a' in tmp) or ('e' in tmp) or ('i' in tmp) or ('o' in tmp) or ('u' in tmp): # 모음이 하나이상 있고\n count = 0\n for tm in tmp:\n if tm in 'bcdfghjklmnpqrstvwxyz': # 자음이\n count += 1\n if count >= 2: # 두개 이상일 때\n tmp.sort() # 정렬해서\n tmp2 = ''\n for t in tmp:\n tmp2 = tmp2 + t # 단어로 만들어서\n result.append(tmp2) # 추가\nresult.sort() 
# 추가한 값들을 정렬\nfor res in result:\n print(res) # 출력" }, { "alpha_fraction": 0.5122873187065125, "alphanum_fraction": 0.5406427383422852, "avg_line_length": 23.090909957885742, "blob_id": "14a7acb5a0e885f230591d75bbe486596f1a885f", "content_id": "63c65bd4e38b399ee5c9286ff7aefd13d709135a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "no_license", "max_line_length": 47, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/09/0925/퇴사.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nT, P, dp = [], [], []\nfor i in range(N):\n a, b = map(int, input().rstrip().split())\n T.append(a)\n P.append(b)\n dp.append(b)\ndp.append(0)\nfor i in range(N-1, -1, -1):\n if T[i]+i > N: # 이 날 시작한 일이 퇴사일을 넘길 경우\n dp[i] = dp[i+1]\n else: # 퇴사일 전에 일을 마칠 수 있는 경우\n # 그 일을 마친 날 기준, 해당 일에 얻을 수 있는 최대 이익\n dp[i] = max(dp[i+1], P[i] + dp[i+T[i]])\nprint(dp[0])\n\n# https://www.acmicpc.net/problem/14501\n# https://pacific-ocean.tistory.com/199" }, { "alpha_fraction": 0.4275362193584442, "alphanum_fraction": 0.4492753744125366, "avg_line_length": 22.16666603088379, "blob_id": "c978fe442dfc40753c7577772655ca0dc7717d4f", "content_id": "b0ab13b81175361078d58bc389b049bec3b78fca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/06/0607/시그마.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a, b = map(int, input().split())\nn_max = max(a, b)\nn_min = min(a, b)\nn = n_max - n_min\ns = (n * (n + 1)) // 2\nprint(s + (n_min * (n + 1)))" }, { "alpha_fraction": 0.46060606837272644, "alphanum_fraction": 0.539393961429596, "avg_line_length": 19.75, "blob_id": "d598e479161cc6c290b07b9acb6e4cf8c628bfba", "content_id": "0505780c3b58618328f0c2a780b57edaea1bb717", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 39, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/04/0409/Bicycle.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "nums = [int(input()) for _ in range(5)]\na, x, b, y, T = nums\nresult = [a, b]\nA = max(0, T-30)\nB = max(0, T-45)\nresult[0] += A*x*21\nresult[1] += B*y*21\nprint(*result)" }, { "alpha_fraction": 0.4301075339317322, "alphanum_fraction": 0.44301074743270874, "avg_line_length": 21.190475463867188, "blob_id": "5794ecc16106255d36e5ffb0e94f809ecea77980", "content_id": "62cea035ba915366f744c7cec0135fdc2a67dee9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 639, "license_type": "no_license", "max_line_length": 38, "num_lines": 21, "path": "/알고리즘/온라인저지/2021/08/0823/A와 B.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = list(input())\nT = list(input())\nresult = 0\n# A를 추가하거나, 뒤집고 B를 추가하는 두가지만 가능\n# 뒤집어서 생각하면\n# T에서 A를 빼거나, B를 빼고 뒤집어가면서 S와 일치하는지 확인\nwhile True:\n if S == T: # 만들 수 있으면 \n result = 1 # 1\n break\n if T[-1] == 'A': # 맨 뒷글자가 A면\n T.pop() # 빼고\n if not T: # 다 빼서 T가 []가 되면\n break # 종료\n elif T[-1] == 'B': # 맨 뒷글자가 B면\n T.pop() # 빼고\n if not T: # T == []\n break # 종료\n T = T[::-1] # 뒤집고\n\nprint(result)" }, { "alpha_fraction": 0.4858299493789673, "alphanum_fraction": 0.4898785352706909, "avg_line_length": 19.66666603088379, "blob_id": "ff138b5dec70e26398ec97f58cbfb4c00e03dbb3", "content_id": "bc0518ef6ce8ac139bff759b42cf85a1e81ae49f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 247, "license_type": "no_license", "max_line_length": 32, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/09/0903/분수 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def gcd(a, b):\n while b > 0: a, b = b, a%b\n return 
a\n\nA, B = map(int, input().split())\nC, D = map(int, input().split())\nGCD = gcd(B, D)\nLCM = GCD * (B//GCD) * (D//GCD)\nE = A*(LCM//B) + C*(LCM//D)\nF = LCM\nGCD = gcd(E, F)\nprint(E//GCD, F//GCD)" }, { "alpha_fraction": 0.47058823704719543, "alphanum_fraction": 0.48235294222831726, "avg_line_length": 20.375, "blob_id": "41ad2a1b99efda3ebf55d6138dd53de5a467e2fc", "content_id": "4b674e4f0b78191770d60272502900b1d2a06045", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 39, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/06/0608/Triathlon.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nfor _ in range(int(input())):\n A, D, G = map(int, input().split())\n C = A*(D+G)\n if A == D+G:\n C *= 2\n result = max(result, C)\nprint(result)" }, { "alpha_fraction": 0.5520833134651184, "alphanum_fraction": 0.5729166865348816, "avg_line_length": 11.125, "blob_id": "b0ce847b856a2d50c8685c18365bea0960a930e6", "content_id": "7555147602ad2e5b10807573ffd8473cb148b393", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 26, "num_lines": 8, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/31차시 3. 
자료구조 – 셋, 딕셔너리 - 연습문제 6.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nresult = {}\n\nfor i in range(1, N+1):\n result.update({i:i*i})\n\nprint(result)" }, { "alpha_fraction": 0.5396419167518616, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 29.153846740722656, "blob_id": "6d55bc0f1c426a8b9e25e7dc0b2705df8df0c1ea", "content_id": "a504f7b1a131bc599c52f2d28d771db8ce7ef5f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 57, "num_lines": 13, "path": "/알고리즘/온라인저지/2023/01/0114/텔레프라임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def prime_list(size):\n sieve = [True]*(size+1)\n for i in range(2, int(size**0.5)+1):\n if sieve[i]:\n for j in range(i+i, size+1, i):\n sieve[j] = False\n return [i for i in range(2, size+1) if sieve[i]]\n\nold, new = input().split()\nnew = int(new+old)\nold = int(old)\nprimes = prime_list(int(1e7))\nprint('Yes' if old in primes and new in primes else 'No')" }, { "alpha_fraction": 0.45781466364860535, "alphanum_fraction": 0.46473029255867004, "avg_line_length": 33.47618865966797, "blob_id": "b66fa1c79978d5f3526c6d26667576eaa4f5a9cb", "content_id": "70ea43d18493bdb22ee55fdea47dad285faf1747", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 947, "license_type": "no_license", "max_line_length": 71, "num_lines": 21, "path": "/알고리즘/온라인저지/2022/09/0908/마니또.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "case = 1 # 테스트 케이스\nwhile True:\n N = int(input()) # 사람 수\n if N == 0: break # while 종료\n result = [] # 결과를 담을 배열\n for n in range(N):\n A, B = input().split() # 사람 A, 사람 B\n flag = False # 이미 있는 그룹인지 확인할 것\n for i in range(len(result)): # 마니또 그룹들 중에서\n if A in result[i] or B in result[i]: # 둘 중 하나라도 속하는 그룹이 있다면\n flag = True # 둘 다 결국 같은 그룹\n # set()로 중복처리하면서 사람들을 다 추가할 
것\n result[i].add(A)\n result[i].add(B)\n if not flag: # 둘 다 속한 마니또그룹이 없으면\n result.append(set()) # 그룹 추가\n # 사람들 추가\n result[-1].add(A)\n result[-1].add(B)\n print(case, len(result)) # 출력\n case += 1 # 다음 케이스" }, { "alpha_fraction": 0.6487804651260376, "alphanum_fraction": 0.6487804651260376, "avg_line_length": 21.88888931274414, "blob_id": "4e298261d501b46768387138f205e6f9478322d3", "content_id": "35838888439273c23c5bcea09c070a224a3e2e18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 46, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/05/0513/빅데이터 정보보호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "input()\nS = input()\nbigdata, security = S.count('b'), S.count('s')\nresult = 'bigdata? security!'\nif bigdata > security:\n result = 'bigdata?'\nif bigdata < security:\n result = 'security!'\nprint(result)" }, { "alpha_fraction": 0.5222222208976746, "alphanum_fraction": 0.5444444417953491, "avg_line_length": 10.375, "blob_id": "c5ca30f1159baf3e0f52aa88315cc76c79e7cdce", "content_id": "10cd16d61291bfde9bf077d8920231e5a3be715b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "no_license", "max_line_length": 23, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/02/0209/나머지와 몫이 같은 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nresult = 0\n\nfor n in range(1, N):\n result += N * n + n\n\nprint(result)" }, { "alpha_fraction": 0.48404255509376526, "alphanum_fraction": 0.5106382966041565, "avg_line_length": 17.899999618530273, "blob_id": "6284f1d16b5fdb96c570593733abc628c28c1b2c", "content_id": "6a15fcaa6c84350c0702f6e59de77f95da8b1c48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, 
"path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/15차시 2. 자료구조 – 리스트, 튜플 - 연습문제 17.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import math\nfrom math import pi\n\nx = list(map(int, input().split(', ')))\n\nfor i in x:\n if i == x[-1]:\n print(round(2*pi*i, 2))\n else:\n print(round(2*pi*i, 2), end=', ')" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 41.5, "blob_id": "b87ce35f7bcca3781963da20eb25a75eae60cc8c", "content_id": "fb992ead3d912680f659de9b016e792100b27bcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 54, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/08/0803/N번째 큰 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n print(sorted(list(map(int, input().split())))[-3])" }, { "alpha_fraction": 0.6113074421882629, "alphanum_fraction": 0.6466431021690369, "avg_line_length": 20.846153259277344, "blob_id": "e078c5b357834ad5ddec44806f3f7258212c18dd", "content_id": "d45d7b03417b1e995045c02b6b663957f23a8816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 63, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/09/0927/점수 계산.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nscores = []\nfor i in range(1, 9): scores.append([int(input().rstrip()), i])\nscores.sort(key=lambda x:-x[0])\nresult = [0]\nfor score in scores[:5]:\n result[0] += score[0]\n result.append(score[1])\nprint(result[0])\nprint(*sorted(result[1:]))" }, { "alpha_fraction": 0.6132264733314514, "alphanum_fraction": 0.6432865858078003, "avg_line_length": 30.25, "blob_id": "06560b7b8febe621f12edc8b9953ec9beeb50a9a", "content_id": 
"a643fca562ba1f7faff32c4078ff86e8701c9cf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "no_license", "max_line_length": 49, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/07/0721/카우버거.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "B, C, D = map(int, input().split())\nburger = list(map(int, input().split()))\nside = list(map(int, input().split()))\ndrink = list(map(int, input().split()))\nburger.sort(reverse=True)\nside.sort(reverse=True)\ndrink.sort(reverse=True)\nmin_set = min(len(burger), len(side), len(drink))\nresult1 = sum(burger) + sum(side) + sum(drink)\nprint(result1)\nresult2 = result1\nfor i in range(min_set):\n result2 -= burger[i] * 0.1\n result2 -= side[i] * 0.1\n result2 -= drink[i] * 0.1\nprint(f'{result2:.0f}')" }, { "alpha_fraction": 0.4595959484577179, "alphanum_fraction": 0.502525269985199, "avg_line_length": 19.894737243652344, "blob_id": "923a6ece9e13afe4039da11aca1ae3d0d5c3a066", "content_id": "f068a3cc2301a75db56adc5d254709600f7b352f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "no_license", "max_line_length": 38, "num_lines": 19, "path": "/알고리즘/온라인저지/2023/01/0112/부재중 전화.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, L, D = map(int, input().split())\nsong_len_max = N*L + (N-1)*5\nring_len_max = (song_len_max//D + 2)*D\narr1 = [0]*ring_len_max\narr2 = [0]*ring_len_max\ni = 0\nfor n in range(N):\n for l in range(L):\n arr1[i] = 1\n i += 1\n i += 5\ni = 0\nwhile i < ring_len_max:\n arr2[i] += 1\n i += D\nfor i in range(ring_len_max):\n if not arr1[i] and arr2[i]:\n print(i)\n break" }, { "alpha_fraction": 0.5312855243682861, "alphanum_fraction": 0.5711035132408142, "avg_line_length": 22.157894134521484, "blob_id": "a1d70113e03cad99468197ae840eb4a3bf77bd7d", "content_id": "820462ac5123397507d7eb36808454f54fd40c6b", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1185, "license_type": "no_license", "max_line_length": 66, "num_lines": 38, "path": "/알고리즘/온라인저지/2022/10/1006/내리막 길.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\ndef dfs(y, x):\n if y == N-1 and x == M-1: return 1\n if visited[y][x] != -1: return visited[y][x]\n visited[y][x] = 0\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<N and 0<=nx<M:\n if arr[ny][nx] < arr[y][x]:\n visited[y][x] += dfs(ny, nx)\n return visited[y][x]\n\nN, M = map(int, input().rstrip().split())\narr = [list(map(int, input().rstrip().split())) for _ in range(N)]\nvisited = [[-1]*M for _ in range(N)]\nprint(dfs(0, 0))\n\n\"\"\"\n탐색하면서 처리하는 방문 배열이 그냥 배열이 아닌 DP배열이다\n1.방문배열 초기화는 -1\n2.방문처리는 0\n3.해당 지점을 경유하여 목적지에 도달 가능한 경로 경우의 수\n등으로 방문배열을 채워나간다\n3번이 채워지는 원리가 신박하다\n목적지에 도달할 경우 line 7을 통해서\n지나온 모든 경로에 1을 더해준다\n결과적으로 시작점에서 봤을 때\n목적지까지 갈 수 있는 경로의 수가 visited[0][0]에 들어오게 된다\n\n<참고한 링크>\nhttps://ca.ramel.be/70\n\"\"\"\n\n# https://www.acmicpc.net/problem/1520" }, { "alpha_fraction": 0.4149560034275055, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 19.696969985961914, "blob_id": "03a49c42fbbf838e945fa3e9afba076faae20fdf", "content_id": "a10d8d54523e780736fbbf23ca8e8c896ba10d83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "no_license", "max_line_length": 56, "num_lines": 33, "path": "/알고리즘/SWEA/LEARN/Course/3. Programming Advanced/2. 
파이썬 SW문제해결 응용 구현 02 완전 검색/3차시 2일차 - 최소합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "dx = [1, 0]\ndy = [0, 1]\n\ndef dfs(x, y, tmp):\n global result\n if x == N-1 and y == N-1:\n if tmp < result:\n result = tmp\n\n if tmp > result:\n return\n\n for i in range(2):\n visited[y][x] = 1\n nx = x+dx[i]\n ny = y+dy[i]\n if 0<=nx<N and 0<=ny<N and visited[ny][nx] == 0:\n dfs(nx, ny, tmp+arr[ny][nx])\n visited[y][x] = 0\n\nTC = int(input())\n\nfor tc in range(1, TC+1):\n N = int(input())\n arr = []\n for n in range(N):\n arr.append(list(map(int, input().split())))\n \n visited = [[0]*N for _ in range(N)]\n\n result = 99999999\n dfs(0, 0, arr[0][0])\n print('#{} {}'.format(tc, result))" }, { "alpha_fraction": 0.631147563457489, "alphanum_fraction": 0.6434426307678223, "avg_line_length": 23.5, "blob_id": "d5afc7a1eff0d28be6d0ecc2d95fcc5fece9ba38", "content_id": "f36655b0d474b9897db3e1ac6ca2a93cd3c61cb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "no_license", "max_line_length": 53, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/11/1115/멀티탭 충분하니.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, K = map(int, input().rstrip().split())\nmulti_taps = list(map(int, input().rstrip().split()))\nresult = 0\nfor multi_tap in multi_taps:\n result += (multi_tap+1)//2\nprint('YES' if result >= N else 'NO')" }, { "alpha_fraction": 0.5072463750839233, "alphanum_fraction": 0.52173912525177, "avg_line_length": 24.875, "blob_id": "db041a3de5ca13a121bf7e3874c9b158945e489a", "content_id": "c8aefa21b27dd2bd48461e3c4ed514df90a6ca31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 36, "num_lines": 8, "path": "/알고리즘/온라인저지/2021/08/0816/단어순서 뒤집기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", 
"text": "T = int(input())\nfor t in range(1, T+1):\n sentence = list(input().split())\n sentence = sentence[::-1]\n print(f'Case #{t}: ', end='')\n for sen in sentence:\n print(sen, end=' ')\n print()\n" }, { "alpha_fraction": 0.4270557165145874, "alphanum_fraction": 0.4482758641242981, "avg_line_length": 24.200000762939453, "blob_id": "0950086e4df027a87da9f72a88612c0b44f783c9", "content_id": "0c19491eebbb31027b188c1e05a90b4700ce7b8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 35, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/07/0726/카약.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "R, C = map(int, input().split())\nresult = [0] * 9\narr = [input() for _ in range(R)]\nrank = 1\nfor c in range(C-2, 0, -1):\n goal = False\n for r in range(R):\n if arr[r][c] != '.':\n tmp = int(arr[r][c])-1\n if not result[tmp]:\n result[tmp] += rank\n goal = True\n if goal:\n rank += 1\nfor r in result: print(r)" }, { "alpha_fraction": 0.4096728265285492, "alphanum_fraction": 0.4267425239086151, "avg_line_length": 31.720930099487305, "blob_id": "eff56f39e6216551a91040ec11b88dbc94b7e175", "content_id": "99b9cf72495fae81c4f766c8c247b5a1ab52497a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1782, "license_type": "no_license", "max_line_length": 62, "num_lines": 43, "path": "/알고리즘/온라인저지/2022/05/0501/사탕 게임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\nN = int(sys.stdin.readline())\narr = [list(map(str, input())) for _ in range(N)]\nresult = 0 # 결과 최대값\n\ndef check(arr): # 현재 배열에서 최대값을 확인함\n cnt = 0 # 배열에서 확인할 최대값 초기화\n for i in range(N):\n row = 1 # 행, 가로\n col = 1 # 열, 세로\n # 최대길이를 파악하는 방향은 한 쪽 방향이다\n # 하지만 배열의 모든 좌표에 대해서 진행하므로 문제없음!\n for j in range(N-1): \n if arr[i][j] == arr[i][j+1]: # 연속할 경우\n row += 1 # 길이++\n else: # 연속하지 않을 경우 다시 1부터\n 
cnt = max(cnt, row) # 갱신 한 번 해주고\n row = 1\n \n if arr[j][i] == arr[j+1][i]: # 연속할 경우\n col += 1 # 길이++\n else: # 연속하지 않을 경우 다시 1부터\n cnt = max(cnt, col) # 갱신 한 번 해주고\n col = 1\n cnt = max(cnt, row, col) # 기존cnt, 행, 열의 각각 값들 중 최대값 갱신\n return cnt\n\nfor i in range(N):\n for j in range(N-1):\n # 위치를 바꿔서 같으면 옮길 필요가 없음\n # 가로 옮겨보고\n if arr[i][j] != arr[i][j+1]:\n arr[i][j], arr[i][j+1] = arr[i][j+1], arr[i][j]\n result = max(result, check(arr)) # 최대값 갱신\n arr[i][j], arr[i][j+1] = arr[i][j+1], arr[i][j]\n # 세로 옮겨보고\n if arr[j][i] != arr[j+1][i]:\n arr[j][i], arr[j+1][i] = arr[j+1][i], arr[j][i]\n result = max(result, check(arr)) # 최대값 갱신\n arr[j][i], arr[j+1][i] = arr[j+1][i], arr[j][i]\n \nprint(result) # 최대값 출력" }, { "alpha_fraction": 0.41886791586875916, "alphanum_fraction": 0.4226415157318115, "avg_line_length": 16.733333587646484, "blob_id": "13d4e056522da7a1ab73c6acbb5bb962584bb156", "content_id": "a24058580efd4ebe8cfa97031cf4552245cf5044", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 265, "license_type": "no_license", "max_line_length": 33, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/10/1024/1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nwhile True:\n try:\n N = int(input().rstrip())\n num = ''\n while True:\n num += '1'\n if not int(num)%N:\n print(len(num))\n break\n except:\n break" }, { "alpha_fraction": 0.6846153736114502, "alphanum_fraction": 0.6846153736114502, "avg_line_length": 20.83333396911621, "blob_id": "61fdbf44f745f827825cec12a425775c956e1fb1", "content_id": "1692136e8c7d63df93d433c17a60cf8696c13433", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 31, "num_lines": 6, "path": "/알고리즘/온라인저지/2021/08/0814/숫자의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = 
int(input())\nstr_numbers = input()\nnumbers = []\nfor number in str_numbers:\n numbers.append(int(number))\nprint(sum(numbers))" }, { "alpha_fraction": 0.4690265357494354, "alphanum_fraction": 0.5132743120193481, "avg_line_length": 17.91666603088379, "blob_id": "a7774dbeb12de0cfd5f0d62c89b9e8f1af0caaea", "content_id": "1fc940e0e8e0a452e53152b2f0d569dc9cb3de6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 47, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/06/0627/가장 많은 글자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ninput = sys.stdin.read\n\nS = input().replace('\\n', '').replace(' ', '')\nA = [0] * 26\nfor s in S:\n A[ord(s)-97] += 1\nresult = ''\nfor i in range(26):\n if A[i] == max(A):\n result += chr(i+97)\nprint(result)" }, { "alpha_fraction": 0.44999998807907104, "alphanum_fraction": 0.5, "avg_line_length": 19.14285659790039, "blob_id": "2b46e39cc66ae1e35dcac213f6bd4b7fd67fc808", "content_id": "83160b63ed88087627e24e4076fe7dcb2c6ba1fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 33, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/11/1128/Fan Death.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr = set()\nfor n in range(1, int(N**0.5)+1):\n if not N%n:\n arr.add(n)\n arr.add(N//n)\nprint(sum(arr)*5-24)" }, { "alpha_fraction": 0.35854342579841614, "alphanum_fraction": 0.39355742931365967, "avg_line_length": 27.600000381469727, "blob_id": "3b1968678fe2e4a856e62524966e1638eb5f8d4a", "content_id": "3dabdc7f6d8b5db1bae7cc5f2170f0863cb442cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "no_license", "max_line_length": 57, "num_lines": 25, "path": "/알고리즘/온라인저지/2022/09/0913/겉넓이 
구하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "dy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\nN, M = map(int, input().split())\narr = [list(map(int, input().split())) for _ in range(N)]\nzero = 0\nresult = 0\nfor i in range(N):\n for j in range(M):\n if arr[i][j] == 0: \n zero += 1\n continue\n now = arr[i][j]\n while now > 0:\n face = 4 # 도형의 면\n if now == arr[i][j]: face += 1\n y, x = i, j\n for k in range(4):\n ny, nx = y+dy[k], x+dx[k]\n if 0<=ny<N and 0<=nx<M:\n if arr[ny][nx] >= now:\n face -= 1\n result += face\n now -= 1 \nprint(result + N*M - zero)\n\n# https://www.acmicpc.net/problem/16931" }, { "alpha_fraction": 0.3983488082885742, "alphanum_fraction": 0.4241486191749573, "avg_line_length": 23.871795654296875, "blob_id": "0f7533770f36470f311fd00597c8e4f0d14b5d4e", "content_id": "f1bb2b8be5d7b200bf8e1e5c53cc1bf2d7d1b042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 63, "num_lines": 39, "path": "/알고리즘/온라인저지/2023/03/0326/부등호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K = int(input())\nsign = input().split()\nvisited = [0]*10 # 0~9\nmaxx, minn = \"\", \"\"\n\ndef check(a, b, K):\n # 1<2 : True\n # '1'<'2' : True\n if K == '<':\n return a<b # T or F\n if K == '>':\n return a>b # T or F\n\ndef recur(idx, num):\n global maxx, minn\n\n if idx == K+1:\n if not minn: # 가장 먼저 부등호를 완성한 숫자가 최소값\n minn = num\n else: # 최소값을 찾았을 경우, 이후 나오는 숫자들로 최대값 갱신\n maxx = num\n return # 아래 for문 스킵\n \n for i in range(10):# 0부터 9까지 \n if not visited[i]:\n # 디버깅\n # if idx != 0:\n # print(*list(num))\n # print('', *sign[:len(num)-1])\n # print(num[-1], sign[idx-1], i)\n # print()\n if idx == 0 or check(num[-1], str(i), sign[idx-1]):\n visited[i] = 1 # visited 찍고\n recur(idx+1, num+str(i)) # 재귀\n visited[i] = 0 # visited 풀고\n\nrecur(0, \"\")\nprint(maxx)\nprint(minn)" }, { "alpha_fraction": 0.5955334901809692, 
"alphanum_fraction": 0.6054590344429016, "avg_line_length": 24.25, "blob_id": "68a6e018e663eb982d1eabf625c83c6169e6c248", "content_id": "e0cb7ed285af67c14e5d46d3fdec392e1251b184", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 403, "license_type": "no_license", "max_line_length": 51, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/10/1012/결혼식.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, M = int(input().rstrip()), int(input().rstrip())\nresult = set()\nfriends = [[] for _ in range(N+1)]\nfor m in range(M):\n a, b = map(int, input().rstrip().split())\n friends[a].append(b)\n friends[b].append(a)\nfor friend in friends[1]:\n if friend != 1: result.add(friend)\n for f in friends[friend]:\n if f != 1: result.add(f)\nprint(len(result))" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.5476190447807312, "avg_line_length": 23.14285659790039, "blob_id": "6efab427f96bb5b28fc2b3725472540debacbc24", "content_id": "68db41f4fd197a5828bbaa97b184754efc3b0e81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/02/0213/Hard choice.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C = map(int, input().split())\nD, E, F = map(int, input().split())\nresult = 0\nif A < D: result += D-A\nif B < E: result += E-B\nif C < F: result += F-C\nprint(result)" }, { "alpha_fraction": 0.44144144654273987, "alphanum_fraction": 0.477477490901947, "avg_line_length": 21.299999237060547, "blob_id": "f5aab2e47674738ce8a2b4f19b23a49541cb49fe", "content_id": "527f9082c4d4ae09ca5bad082fde6232ae9f2356", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 222, "license_type": "no_license", "max_line_length": 43, "num_lines": 
10, "path": "/알고리즘/온라인저지/2022/07/0727/공 포장하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "B = list(map(int, input().split())) # balls\nresult = tmp = min(B)\nfor i in range(3):\n B[i] -= tmp\n if B[i]:\n result += B[i]//3\n B[i] = B[i]%3\nL = [0, 1, 1, 2, 2] # left\nresult += L[sum(B)]\nprint(result)" }, { "alpha_fraction": 0.4736842215061188, "alphanum_fraction": 0.4912280738353729, "avg_line_length": 11.052631378173828, "blob_id": "e931bd5324ad62edf679af734eee4071fd8f5a63", "content_id": "a93b9af30875fe75e014aa075a63b4894967bca0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 19, "num_lines": 19, "path": "/알고리즘/온라인저지/2021/12/1216/개표.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "V = int(input())\n\nvotes = input()\n\navote = 0\nbvote = 0\n\nfor v in votes:\n if v == 'A':\n avote += 1\n else:\n bvote += 1\n\nif avote > bvote:\n print('A')\nelif bvote > avote:\n print('B')\nelse:\n print('Tie')" }, { "alpha_fraction": 0.4975288212299347, "alphanum_fraction": 0.51729816198349, "avg_line_length": 19.965517044067383, "blob_id": "2f8509ba839ca9c2bee8defb38e9a574aaa5e282", "content_id": "b3299775651bb70fc39fbb1ff50ae1470aa3d6e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 607, "license_type": "no_license", "max_line_length": 42, "num_lines": 29, "path": "/알고리즘/온라인저지/2022/09/0918/누울 자리를 찾아라.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef lay_check(arr):\n cnt = 0\n for i in range(N):\n size = 0\n for j in range(N):\n if arr[i][j] == '.': size += 1\n else:\n if size>=2: cnt += 1 \n size = 0\n if size>=2: cnt += 1\n result.append(cnt)\n\nN = int(input().rstrip())\narr = [input().rstrip() for _ in range(N)]\nrotated_arr = []\nresult = []\nlay_check(arr)\nfor j in 
range(N):\n tmp = ''\n for i in range(N):tmp += arr[i][j]\n rotated_arr.append(tmp)\nlay_check(rotated_arr)\nprint(*result)\n\n# https://www.acmicpc.net/problem/1652" }, { "alpha_fraction": 0.3913043439388275, "alphanum_fraction": 0.43478259444236755, "avg_line_length": 22.5, "blob_id": "c918e74bf5070db4fa9b5749a001e13aea856f58", "content_id": "b37021c44aa5d293946be4bf1ac24cb1d9edc18e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "no_license", "max_line_length": 29, "num_lines": 2, "path": "/알고리즘/온라인저지/2023/06/0601/Робинзон Крузо.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nprint('V'*(N//5) + 'I'*(N%5))" }, { "alpha_fraction": 0.45483359694480896, "alphanum_fraction": 0.49286845326423645, "avg_line_length": 14.023809432983398, "blob_id": "de6e50f47d4beb06b171f8efb430021b74c431a9", "content_id": "16102c2cb104c07a67a5e02beb8f56200af909c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 755, "license_type": "no_license", "max_line_length": 34, "num_lines": 42, "path": "/알고리즘/온라인저지/2022/01/0116/달팽이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# 상우하좌\n\ndy = (-1, 0, 1, 0)\ndx = (0, 1, 0, -1)\n\n\"\"\"\nN이 홀수일때 짝수일때 각각 스타트포인트 잡아서\n델타 이동으로 배열을 채우면서\nK의 좌표를 찾아서 저장 후\n배열 다 채우고 배열 출력\nK의 좌표 출력\n\"\"\"\n\nN = int(input())\nK = int(input())\n\nstart_x = start_y = N//2\n\narr = [[0] * N for _ in range(N)]\n\narr[start_y][start_x] = 1\n\ndir = 0\na = b = 0\n\nfor i in range(2, N**2 + 1):\n start_x = start_x + dx[dir]\n start_y = start_y + dy[dir]\n\n arr[start_y][start_x] = i\n if i == K:\n a = start_y\n b = start_x\n\n tp_x = start_x + dx[(dir+1)%4]\n tp_y = start_y + dy[(dir+1)%4]\n if not arr[tp_y][tp_x]:\n dir = (dir+1)%4\n\nfor ar in arr:\n print(*ar)\nprint(a+1, b+1) " }, { "alpha_fraction": 0.5961538553237915, "alphanum_fraction": 0.6346153616905212, 
"avg_line_length": 38.25, "blob_id": "63ecef5e152618b9d7235593cf70011bb53e205e", "content_id": "8a3110d3ba02a71c216274ca12c34588e9630ed3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 42, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/08/0822/Which Alien.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = int(input()), int(input())\nif N>=3 and M<=4: print('TroyMartian')\nif N<=6 and M>=2: print('VladSaturnian')\nif N<=2 and M<=3: print('GraemeMercurian')" }, { "alpha_fraction": 0.37226277589797974, "alphanum_fraction": 0.43795621395111084, "avg_line_length": 9.615385055541992, "blob_id": "3aa8fc59e05056b739e44176f099e12bea154b3b", "content_id": "f57e1d95598c7fe21e78206aabeffbc5636e0f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 32, "num_lines": 13, "path": "/알고리즘/온라인저지/2021/12/1213/오븐 시계.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nC = int(input())\n\nB += C\n\nwhile B >= 60:\n A += 1\n B -= 60\n\nwhile A >= 24:\n A -= 24\n\nprint(A, B)" }, { "alpha_fraction": 0.43923240900039673, "alphanum_fraction": 0.4648187756538391, "avg_line_length": 25.11111068725586, "blob_id": "0444435160a7b188ae9f1669ae6ea59875494cde", "content_id": "d97f7129a9f8d908c353630127d807ec40727d31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 469, "license_type": "no_license", "max_line_length": 39, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/06/0629/역사.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\narr = [[0]*N for _ in range(N)]\nfor m in range(M):\n a, b = map(int, input().split())\n arr[a-1][b-1] = 1\nfor k in range(N):\n for a in range(N):\n for b in 
range(N):\n if arr[a][k] and arr[k][b]:\n arr[a][b] = 1\nfor s in range(int(input())):\n a, b = map(int, input().split()) \n result = 0\n if arr[a-1][b-1]:\n result = -1\n elif arr[b-1][a-1]:\n result = 1\n print(result)" }, { "alpha_fraction": 0.48275861144065857, "alphanum_fraction": 0.517241358757019, "avg_line_length": 29, "blob_id": "9a5efa2adf415e3529731e08057e9b907438940e", "content_id": "58b612214433657cd93f0c755b081f6b72e4c8d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "no_license", "max_line_length": 29, "num_lines": 1, "path": "/알고리즘/온라인저지/2022/03/0303/알고리즘 수업 - 알고리즘의 수행 시간 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print(input(), '\\n1', sep='')" }, { "alpha_fraction": 0.3238866329193115, "alphanum_fraction": 0.4048582911491394, "avg_line_length": 19.66666603088379, "blob_id": "49fb93bcdf813804e9c7e3a59962d99f53b3352e", "content_id": "49d52541f15d698e1b1747ad7e0a21556e901b8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 247, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/06/0608/날짜 계산.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "E, S, M = map(int, input().split())\ne, s, m = 1, 1, 1\nresult = 1\nwhile (E, S, M) != (e, s, m):\n e, s, m, result = e+1, s+1, m+1, result+1\n if e > 15:\n e -= 15\n if s > 28:\n s -= 28\n if m > 19:\n m -= 19\nprint(result)" }, { "alpha_fraction": 0.4895833432674408, "alphanum_fraction": 0.5104166865348816, "avg_line_length": 18.399999618530273, "blob_id": "b8c6cf6f4868450d5e6fc7163152b793bcb29ab5", "content_id": "10ada1d6d7f292fade523ca3f2e22409ba86ecce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 29, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/04/0404/골뱅이 찍기 
- ㄴ.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nA = '@'*N\nB = '@'*N*5\nfor _ in range(N*4): print(A)\nfor _ in range(N): print(B)" }, { "alpha_fraction": 0.4689265489578247, "alphanum_fraction": 0.5819209218025208, "avg_line_length": 18.77777862548828, "blob_id": "90b991c4aaec2c8237d80882a5e922cc5b7ad393", "content_id": "0d79f3edf4f4b0339ebc88136a43287911e2ec3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 42, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/09/0924/피보나치 수 7.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\ndp = [0, 1] + [0]*int(1e7)\nfor i in range(2, int(1e7)):\n dp[i] = (dp[i-1] + dp[i-2])%1000000007\nprint(dp[N])" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.550000011920929, "avg_line_length": 40, "blob_id": "25320a376f968d5665ab580f85c95e7cb634f43f", "content_id": "fdf095285bc58fe316824fe78fc545d918c3d9ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 40, "num_lines": 1, "path": "/알고리즘/온라인저지/2023/05/0525/연세여 사랑한다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print(abs(ord('I') - ord(input())) + 84)" }, { "alpha_fraction": 0.45145630836486816, "alphanum_fraction": 0.4805825352668762, "avg_line_length": 19.700000762939453, "blob_id": "3e8b8ac326a8e9b9496253ea6028961509c3f71f", "content_id": "ae48678ee8951689ee285d6d3b1893bc1f686660", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 39, "num_lines": 10, "path": "/알고리즘/온라인저지/2023/03/0307/Telemarketer or not.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A = 
int(input())\nB = int(input())\nC = int(input())\nD = int(input())\nresult = 0\nif A == 8 or A == 9:\n if D == 8 or D == 9:\n if C == B:\n result = 1\nprint('ignore' if result else 'answer')" }, { "alpha_fraction": 0.5178267955780029, "alphanum_fraction": 0.5432937145233154, "avg_line_length": 25.81818199157715, "blob_id": "9a925dfadfcceee1232c6bd95c91e977cb6debb5", "content_id": "431dab2af24902a5bb5522932a290f45a46e11fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 589, "license_type": "no_license", "max_line_length": 66, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/10/1002/점프왕 쩰리 (Small).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\ndy, dx = [1, 0], [0, 1] # down, right\n\nN = int(input().rstrip())\narr = [list(map(int, input().rstrip().split())) for _ in range(N)]\nvisited = [[0]*N for _ in range(N)]\nQ = deque()\nQ.append((0, 0))\nvisited[0][0] = 1\nwhile Q:\n y, x = Q.popleft()\n if y == x == N-1: print('HaruHaru'); break\n jump = arr[y][x]\n for i in range(2):\n ny, nx = y+dy[i]*jump, x+dx[i]*jump\n if 0<=ny<N and 0<=nx<N and not visited[ny][nx]:\n Q.append((ny, nx))\n visited[ny][nx] = 1\nelse: print('Hing')" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5844155550003052, "avg_line_length": 18.375, "blob_id": "59f2a147b4f30cd611c1aed4b4f5bd3c2b19dc26", "content_id": "075e3d74778d18a082f4728e126641b58522499c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/06/0614/Terms of Office.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "X = int(input())\nY = int(input())\nresult = []\nwhile X<=Y:\n result.append(X)\n X += 60\nfor r in result:\n print(f'All positions change in year {r}')" }, { 
"alpha_fraction": 0.39401495456695557, "alphanum_fraction": 0.39650872349739075, "avg_line_length": 21.33333396911621, "blob_id": "b8342aac27029be3feb201ce23223cdb1f5b2497", "content_id": "49da4f87e2b5b62de30dfe68753a8e419bdb7aa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 401, "license_type": "no_license", "max_line_length": 27, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/06/0603/계산기 프로그램.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nwhile True:\n order = input()\n if order == '=':\n print(result)\n break\n elif order in '+-*/':\n num = int(input())\n if order == '+':\n result += num\n elif order == '-':\n result -= num\n elif order == '*':\n result *= num\n elif order == '/':\n result //= num\n else:\n result = int(order)" }, { "alpha_fraction": 0.4428044259548187, "alphanum_fraction": 0.468634694814682, "avg_line_length": 13.315789222717285, "blob_id": "f1c7939216383a759564141cee45f9acd79ec359", "content_id": "324bc01165c18301cd23e57547b71e1ef02deeb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 25, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/02/0202/홀수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "summ = 0\nminn = 100\nis_odd = False\n\nfor _ in range(7):\n num = int(input())\n\n if num%2:\n if num < minn:\n minn = num\n summ += num\n if not is_odd:\n is_odd = True\n\nif is_odd:\n print(summ)\n print(minn)\nelse:\n print(-1)" }, { "alpha_fraction": 0.4083601236343384, "alphanum_fraction": 0.5112540125846863, "avg_line_length": 14.600000381469727, "blob_id": "922aab98faeb3f187637a572273b34bfb2c699c5", "content_id": "4233d6e3a376f1e84c11e9a7ba29844a1ac9c91a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 60, 
"num_lines": 20, "path": "/알고리즘/온라인저지/2022/01/0131/2007년.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n1월 1일은 월요일\nx월y일이 1년중 몇번째 날인지 구해서\nmod 7\n\"\"\"\n\nx, y = map(int, input().split())\n\nday = y-1\n\nif x > 1:\n month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n for i in range(x-1):\n day += month[i]\n\nweek = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']\n\nweekday = day % 7\n\nprint(week[weekday])" }, { "alpha_fraction": 0.3866666555404663, "alphanum_fraction": 0.41777777671813965, "avg_line_length": 21.600000381469727, "blob_id": "1e4e93491a0ad54e41f6bef17b123ec3824b2adf", "content_id": "19fb859599c4ff8adb51f9abfaa7902a8f5cbf71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 59, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/12/1214/전북대학교.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nif not N%2:\n print('I LOVE CBNU')\nelse:\n print('*'*N)\n space = N//2\n print(' '*space + '*')\n for _ in range(N//2):\n space -= 1\n print(' '*space + '*'+' '*(((N//2)-space)*2-1)+'*')" }, { "alpha_fraction": 0.4701492488384247, "alphanum_fraction": 0.503731369972229, "avg_line_length": 25.899999618530273, "blob_id": "253cdc08615cd0b512edb5495c66af2d3dea533d", "content_id": "77e65dfe341c120ae8b96a7b4532a60b542bb290", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 53, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/06/0629/정수 삼각형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr = []\nfor n in range(N):\n tmp = [0] + list(map(int, input().split())) + [0]\n arr.append(tmp)\nfor i in range(1, len(arr)):\n for j in range(1, len(arr[i])-1):\n tmp = arr[i-1][j-1:j+1]\n arr[i][j] += max(tmp)\nprint(max(arr[N-1]))" }, { 
"alpha_fraction": 0.5648535490036011, "alphanum_fraction": 0.5648535490036011, "avg_line_length": 29, "blob_id": "f62db0002cfb38af8f6b306601e4e0571bf435e8", "content_id": "1624de62176ce8987f705c340191854b2512eda3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "no_license", "max_line_length": 49, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/05/0511/Gnome Sequencing.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print('Gnomes:')\nfor i in range(int(input())):\n tmp = list(map(int, input().split()))\n A, B = sorted(tmp), sorted(tmp, reverse=True)\n result = 'Unordered'\n if (tmp == A or tmp == B):\n result = 'Ordered'\n print(result)" }, { "alpha_fraction": 0.42489269375801086, "alphanum_fraction": 0.4849785268306732, "avg_line_length": 13.5625, "blob_id": "79fb8f5445a103d423c659d163b5c05ea05a5ed4", "content_id": "65a7d37d19f3761289a19ddea3f19d2b678e45b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 22, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/02/0212/대표값.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "dp = [0] * (101)\nsumm = 0\nmode = 0\nmaxx = 0\n\nfor i in range(10):\n num = int(input())\n summ += num\n tmp = num//10\n dp[tmp] += 1\n if dp[tmp] > maxx:\n maxx = dp[tmp]\n mode = num\n\nprint(summ//10)\nprint(mode)\n" }, { "alpha_fraction": 0.47252747416496277, "alphanum_fraction": 0.593406617641449, "avg_line_length": 17.266666412353516, "blob_id": "6794d53ef724942b96a757a3175d018697dbc51c", "content_id": "ed6acb1c3707dbc92a48a87e54463165b8256a7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 49, "num_lines": 15, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/27차시 3. 
자료구조 – 셋, 딕셔너리 - 연습문제 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "phone = {\n '홍길동': '010-1111-1111',\n '이순신': '010-1111-2222',\n '강감찬': '010-1111-3333',\n}\n\nprint('아래 학생들의 전화번호를 조회할 수 있습니다.')\nfor name in phone:\n print(name)\n\nprint('전화번호를 조회하고자 하는 학생의 이름을 입력하십시오.')\n\ntmp = input()\n\nprint('{}의 전화번호는 {}입니다.'.format(tmp, phone[tmp]))" }, { "alpha_fraction": 0.4720279574394226, "alphanum_fraction": 0.5262237787246704, "avg_line_length": 18.724138259887695, "blob_id": "0354f11f81581962fb45da14e33130478adaf459", "content_id": "27d3003d4bdbe29d2f8fb37d261bd9d0e1e2e7d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 49, "num_lines": 29, "path": "/알고리즘/온라인저지/2021/12/1216/주사위 게임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nb = []\n\nfor n in range(N):\n\n a = list(map(int, input().split()))\n\n dice_num_count = [0 for _ in range(6)]\n\n for i in a:\n dice_num_count[i-1] += 1\n\n same_or_max = 0\n result = 0\n\n if 3 in dice_num_count:\n same_or_max = dice_num_count.index(3) + 1\n result = 10000 + same_or_max * 1000\n elif 2 in dice_num_count:\n same_or_max = dice_num_count.index(2) + 1\n result = 1000 + same_or_max * 100\n else:\n same_or_max = max(a)\n result = same_or_max * 100\n\n b.append(result)\n\nprint(max(b))\n" }, { "alpha_fraction": 0.5839999914169312, "alphanum_fraction": 0.5839999914169312, "avg_line_length": 30.5, "blob_id": "16acc665d09f482b029015338164b1b4238dcdf9", "content_id": "0200e9845c3719ce8ca2a36a90dac62e9991f9ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 125, "license_type": "no_license", "max_line_length": 40, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/05/0513/헬멧과 조끼.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nA = 
max(list(map(int, input().split())))\nB = max(list(map(int, input().split())))\nprint(A+B)" }, { "alpha_fraction": 0.48040884733200073, "alphanum_fraction": 0.5332197546958923, "avg_line_length": 24.565217971801758, "blob_id": "a7b1ad3bd5f6f3aace344e928f3a49bc1f52287c", "content_id": "4b542233ffd8b4d91ce9879265dba67ece455330", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 587, "license_type": "no_license", "max_line_length": 55, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/10/1006/데스 나이트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\ndy = [-2, -2, 0, 0, 2, 2]\ndx = [-1, 1, -2, 2, -1, 1]\n\nN = int(input().rstrip())\nr1, c1, r2, c2 = map(int, input().rstrip().split())\nvisited = [[0]*N for _ in range(N)]\nQ = deque()\nQ.append((r1, c1, 0))\nvisited[r1][c1] = 1\nresult = -1\nwhile Q:\n y, x, move = Q.popleft()\n if y == r2 and x == c2: result = move; break\n for i in range(6):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<N and 0<=nx<N and not visited[ny][nx]:\n Q.append((ny, nx, move+1))\n visited[ny][nx] = 1\nprint(result)" }, { "alpha_fraction": 0.4341636896133423, "alphanum_fraction": 0.4590747356414795, "avg_line_length": 11.260869979858398, "blob_id": "d4906c3d32a50e78c72d07b6d4757dbb4504e31c", "content_id": "ebfb47b421a99388f94e3edbc9dfe08b03977fe9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 26, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/02/0205/방학 숙제.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "L = int(input())\nA = int(input())\nB = int(input())\nC = int(input())\nD = int(input())\n\nmaxx = 0\n\nA_by_C = 0\nif A%C == 0:\n A_by_C = A//C\nelse:\n A_by_C = A//C + 1\n\nB_by_D = 0\nif B%D == 0:\n B_by_D = B//D\nelse:\n B_by_D = B//D + 1\n\nmaxx = max(A_by_C, 
B_by_D)\n\nprint(L - maxx)" }, { "alpha_fraction": 0.5180723071098328, "alphanum_fraction": 0.5397590398788452, "avg_line_length": 19.799999237060547, "blob_id": "3a6f1801bf11ea0f85e7d4176457e043d2a30130", "content_id": "5f35053c1cb3fd11e653a6f94f48fff514f29dcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 415, "license_type": "no_license", "max_line_length": 42, "num_lines": 20, "path": "/알고리즘/온라인저지/2022/11/1124/비밀 이메일.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nmessage = input().rstrip()\nlength = len(message)\ndivs = []\nfor i in range(1, int((length+1)**0.5)+1):\n if not length%i:\n divs.append((i, length//i))\nR, C = divs[-1]\narr = [[0]*C for _ in range(R)]\nk = 0\nfor j in range(C):\n for i in range(R):\n arr[i][j] = message[k]\n k += 1\nfor i in range(R):\n for j in range(C):\n print(arr[i][j], end='')" }, { "alpha_fraction": 0.40611353516578674, "alphanum_fraction": 0.4192139804363251, "avg_line_length": 27.75, "blob_id": "46b0ea7e5dea5af9c462951e28037163c08e78b2", "content_id": "b64acdfb4d99a1a4ab99823f4a72e7027b2dbb45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "no_license", "max_line_length": 51, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/03/0319/Viva la Diferencia.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n A, B, C, D = map(int, input().split())\n if A == B == C == D == 0: break\n result = 0\n while not (A == B == C == D):\n A, B, C, D = map(abs, (A-B, B-C, C-D, D-A))\n result += 1\n print(result)" }, { "alpha_fraction": 0.38591548800468445, "alphanum_fraction": 0.42323943972587585, "avg_line_length": 40.79411697387695, "blob_id": "52abcdd05eb4a43523da52633f7ed4d691a9f081", "content_id": "c9174e807f57df57b0599eb474c53b3c9c9a902a", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 1832, "license_type": "no_license", "max_line_length": 89, "num_lines": 34, "path": "/알고리즘/온라인저지/2022/09/0908/십자뒤집기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from itertools import combinations as comb\n\ncoordinate = [None, \n (0, 0), (0, 1), (0, 2),\n (1, 0), (1, 1), (1, 2),\n (2, 0), (2, 1), (2, 2),\n] # 3x3 사각형의 좌표들\ndy, dx = [0, -1, 1, 0, 0], [0, 0, 0, -1, 1] # 자기 자신 + 상하좌우\n\nP = int(input())\nfor p in range(P):\n answer = [list(input()) for _ in range(3)] # 정답 배열\n result = 0 # 최소 클릭 횟수\n for i in range(1, 10): # 누르는 횟수 1부터 9까지 브루트포스\n tmp = list(comb(range(1, 10), i)) # i번 누를 때 나올 수 있는 조합\n for t in tmp: # 클릭할 수 있는 조합들 중에서\n if result: break # 정답을 찾았다면 종료\n arr = [list('.'*3) for _ in range(3)] # 답안지\n for j in t: # 클릭할 수 있는 조합들에서\n coor = coordinate[j] # 클릭할 좌표\n y, x = coor[0], coor[1] # 좌표의 y, x\n for k in range(5): # 자기 자신과 상하좌우, 총 5지점의 색을 뒤집는다\n ny, nx = y+dy[k], x+dx[k] # 색을 뒤집을 좌표들\n if 0<=ny<3 and 0<=nx<3: # 새로 얻은 좌표들은 사각형 안에 있어야 한다 (3x3 사각형이니 망정이지;;)\n if arr[ny][nx] == '*': # * -> .\n arr[ny][nx] = '.'\n elif arr[ny][nx] == '.': # . 
-> *\n arr[ny][nx] = '*'\n flag = True # 구한 답안과 정답을 비교할 것\n for l in range(3):\n if arr[l] != answer[l]: # 답안과 정답이 다른 줄이 나오면 그 답안은 정답이 아님\n flag = False\n if flag: result = i # 답안의 모든 줄이 정답과 일치하면 해당 클릭 수를 결과로 저장\n print(result)" }, { "alpha_fraction": 0.4691357910633087, "alphanum_fraction": 0.5020576119422913, "avg_line_length": 33.57143020629883, "blob_id": "e44458d7fa8a4c1738fd8abaa98b90feb23228ce", "content_id": "7fd03f61d54ca516e7a787c0e9b200a813552eee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/알고리즘/[템플릿]/에라토스테네스의 체/에라토스테네스의 체.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def prime_list(size):\n sieve = [True]*(size+1)\n for i in range(2, int(size**0.5)+1):\n if sieve[i]:\n for j in range(i+i, size+1, i):\n sieve[j] = False\n return [i for i in range(2, size+1) if sieve[i]]\n\n" }, { "alpha_fraction": 0.6521739363670349, "alphanum_fraction": 0.6645962595939636, "avg_line_length": 21, "blob_id": "0c031accf3d3c6e39bc91b862adc603140989c13", "content_id": "9f604bea7c74a63f660f966f7279e1d92f993c98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 809, "license_type": "no_license", "max_line_length": 54, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/09/0911/연속합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nnums = list(map(int, input().split()))\nresult = [nums[0]]\nfor i in range(len(nums)-1):\n result.append(max(result[i]+nums[i+1], nums[i+1]))\nprint(max(result))\n\n\"\"\"\n한 시간을 머리를 박았는데\n접근도 못하고\n답지를 봤더니 이렇게 허무할 줄은 몰랐다\n실패요인은\nDP라는 태그를 보고 그 계산값들을 저장하면서\n들고 간다는 것 까지는 이해하였지만\n어떻게 들고 가는지, 그 과정을 이해하지 못하였다\nresult[i] : min(여기까지 끌고온 연속합, i번째 값)\nresult[i]+nums[i+1] : 구해온 연속합에 다음 수를 더한 값\nnums[i+1] : 현재 값\nresult에는, 최대값이 될 수 있는\n구한 연속값들이 모두 들어있고\n그 중에 최대값을 max(result)를 통해 출력하는 문제였다\n\"\"\"" }, 
{ "alpha_fraction": 0.43529412150382996, "alphanum_fraction": 0.4588235318660736, "avg_line_length": 24.600000381469727, "blob_id": "6836023895af0b8ed822741f6e47bfcd2fbba9a5", "content_id": "06d6f4eab9a644c2e2c7960d4188da1e1f821089", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/11/1130/팬그램.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n S = input()\n if S == '*': break\n alphabet = [0]*26\n for s in S:\n if s == ' ': continue\n tmp = ord(s)-ord('a')\n if not alphabet[tmp]:\n alphabet[tmp] = 1\n print('Y' if sum(alphabet) == 26 else 'N')" }, { "alpha_fraction": 0.45945945382118225, "alphanum_fraction": 0.5135135054588318, "avg_line_length": 37, "blob_id": "bdeb59730db9e56be932d7b2172b19cf633ab808", "content_id": "bc6f838a66be475816fcf39be4354abbbfc2027f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37, "license_type": "no_license", "max_line_length": 37, "num_lines": 1, "path": "/알고리즘/온라인저지/2022/03/0303/알고리즘 수업 - 알고리즘의 수행 시간 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print(int(input())**2, '\\n2', sep='')" }, { "alpha_fraction": 0.47445255517959595, "alphanum_fraction": 0.5328466892242432, "avg_line_length": 16.25, "blob_id": "6e7252763012fbec27dabf8250f9295df022dbea", "content_id": "0e14d78425ec89f745a3548b14287661365b5102", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 34, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/02/0205/과제 안 내신 분.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "table = [False] * 30\n\nfor _ in range(28):\n table[int(input()) - 1] = True\n\nfor i in range(30):\n if not table[i]:\n print(i+1)" }, { "alpha_fraction": 
0.5622775554656982, "alphanum_fraction": 0.5836299061775208, "avg_line_length": 30.33333396911621, "blob_id": "745c75bad8df0de66f3cbda545a86f8bc47ca668", "content_id": "6e8a3ab642459928f90c7f561d8e4587166a75d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 427, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/알고리즘/온라인저지/2021/07/0731/언제까지 더해야 할까.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a = int(input())\nlast_num = 1 # 출력값, 초기값은 1\nsum_all = 0 # 1부터 전부 더한 값\nwhile sum_all < a: # 전부 더한 값이 입력값보다 작을 때\n sum_all += last_num # 1부터 하나씩 더한다\n if sum_all >= a: # 전부 더한 값이 입력값보다 크거나 같으면\n break # while문 종료\n last_num += 1 # 그렇지 않으면 출력값을 계속 더하고\nprint(last_num) # 출력" }, { "alpha_fraction": 0.5659340620040894, "alphanum_fraction": 0.5879120826721191, "avg_line_length": 17.299999237060547, "blob_id": "2efc53685c3ed08b1f3678f1d782519287602ee0", "content_id": "9a8f13950ad3c6e88096fcb24d97ce383d45657b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 33, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/10/1025/일반 화학 실험.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nnow = float(input().rstrip())\nwhile True:\n D = float(input().rstrip())\n if D >= 999: break\n print('{:.2f}'.format(D-now))\n now = D" }, { "alpha_fraction": 0.5165125727653503, "alphanum_fraction": 0.5640686750411987, "avg_line_length": 26.071428298950195, "blob_id": "9cada81095d39a18099e37bc206f644d9c3ef7bb", "content_id": "ed8d61ebda3f947fa9a950051f2d7f595ea6b84c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1263, "license_type": "no_license", "max_line_length": 57, "num_lines": 28, "path": "/알고리즘/온라인저지/2021/08/0808/A→B.py", "repo_name": 
"sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\nA, B = 시작값, 결과값\n2를 곱하거나, 맨 뒤에 1을 넣거나\nA -> B로 가는 연산의 최솟값\nB의 1의자리가 1이면 직전 연산에서 1추가를 실행한것\n 1을 빼주고\nB의 1의자리가 1이 아니고 2의배수면 직전 연산에서 2 곱하기를 실행한것\n 2를 나눠주고\nB를 시작으로 A까지 가는 코드를 구성\n만들 수 없는 경우를 찾는법?\n\n\"\"\"\n\nimport sys # sys.stdin.readline()\nA, B = map(int, sys.stdin.readline().split()) # A, B 입력받고\ncount = 0 # 결과값 초기화\nwhile B != A: # A와 B가 같아질때까지 while문 반복\n if B%10 == 1: # B의 1의자리수가 1이면 직전 연산에서 맨 뒤에 1을 추가한 것\n B //= 10 # 10으로 나눈 몫을 반환하여 1을 제거\n count += 1 # 연산 카운트 +1\n else: # B의 1의자리수가 1이 아니면 직전연산에서 *2를 한 것\n B /= 2 # 2로 나눠주고\n count += 1 # 연산 카운트 +1\n if B < A: # 연산을 계속했는데 B가 A로 가지 못하고 A보다 작아지면\n # B에서 A를 만들 수 없으니\n count = -2 # 결과값에 1 더해질 경우를 포함해 카운트를 -2로 저장\n break # while문 종료\nprint(count + 1) # 출력" }, { "alpha_fraction": 0.6086956262588501, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 46, "blob_id": "3f8f4d576c681084211daf1c804bbab36bf1e043", "content_id": "ff1166a7601f1cf1961ee8c7985298a8072a6e72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "no_license", "max_line_length": 46, "num_lines": 1, "path": "/알고리즘/온라인저지/2022/10/1012/SciComLove.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in [0]*int(input()): print('SciComLove')" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.5757575631141663, "avg_line_length": 21.33333396911621, "blob_id": "7b4e660a242fe7500a5022d395f2072085ebc240", "content_id": "26972a19119955f21f60d8d6574d6da40bbd0a52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "no_license", "max_line_length": 33, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/03/0303/알고리즘 수업 - 알고리즘의 수행 시간 5.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = int(input())**3\ndim = 3\nprint(result, f'\\n{dim}', sep='')" }, { "alpha_fraction": 
0.54347825050354, "alphanum_fraction": 0.5688405632972717, "avg_line_length": 22.08333396911621, "blob_id": "092361e9ca22699e381dc79b5d1e6e0f9c75edb5", "content_id": "19b84f2bbd1db18f132bd722f4b1b07111cd86b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 65, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/09/0927/단어 나누기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nword = input().rstrip()\nlength = len(word)\nchange = []\nfor i in range(1, length-1):\n for j in range(i+1, length):\n A, B, C = word[:i][::-1], word[i:j][::-1], word[j:][::-1]\n change.append(A+B+C)\nprint(sorted(change)[0])" }, { "alpha_fraction": 0.49787235260009766, "alphanum_fraction": 0.5063830018043518, "avg_line_length": 22.600000381469727, "blob_id": "a36d18925e9cecc6e41416d90b92752384c6d6aa", "content_id": "526b93b1d765c134fd51d4fd1ad71ac122c62394", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 43, "num_lines": 10, "path": "/알고리즘/온라인저지/2023/03/0305/스트릿 코딩 파이터.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C = map(int, input().split())\nN = int(input())\nresult = []\nfor n in range(N):\n tmp = 0\n for i in range(3):\n a, b, c = map(int, input().split())\n tmp += a*A + b*B + c*C\n result.append(tmp)\nprint(max(result))" }, { "alpha_fraction": 0.6053169965744019, "alphanum_fraction": 0.6237218976020813, "avg_line_length": 29.625, "blob_id": "032a1c34126b5743e55adfa7604edb8ad2ad1155", "content_id": "7663cd4bd0993df2e22bfe55b2d19dfb8125bca6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 525, "license_type": "no_license", "max_line_length": 60, "num_lines": 16, "path": "/알고리즘/[템플릿]/스택/에디터.py", "repo_name": 
"sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nleft = list(input().rstrip())\nright = []\n# 커서의 이동을 좌우 스택간에 이동으로 구현\nfor _ in range(int(input().rstrip())):\n order = input().rstrip().split()\n if order[0] == 'L' and left: right.append(left.pop())\n elif order[0] == 'D' and right: left.append(right.pop())\n elif order[0] == 'B' and left: left.pop()\n elif order[0] == 'P': left.append(order[1])\nfor a in left+list(reversed(right)): print(a, end='')\n\n# https://www.acmicpc.net/problem/1406" }, { "alpha_fraction": 0.2628205120563507, "alphanum_fraction": 0.5064102411270142, "avg_line_length": 7.722222328186035, "blob_id": "c66221e74c9419d93995a27c3eb93187e07d4bc5", "content_id": "2f1a3f40041c583117339146cf66f6b74172c645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 62, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/04/0430/거북이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n1 2 3 4\n4 1 3 2\n4 3\n1 2\n\n4 4 3 4\n3 4 4 4\n4 4\n3 4\n\n4 7 3 9\n3 4 7 9\n9 3 7 4\n\n\"\"\"\nA, B = map(int, sorted(list(map(int, input().split())))[:3:2])\nprint(A*B)" }, { "alpha_fraction": 0.4771573543548584, "alphanum_fraction": 0.4771573543548584, "avg_line_length": 18.600000381469727, "blob_id": "5dc97a961aa501a4c2fd9cd64dd1401654b29ee8", "content_id": "c945c5520f1cfd88e75f5e850a7ccf1556224d68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 44, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/02/0207/아.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "jh = input()\nhospital = input()\n\nfor i in range(max(len(jh), len(hospital))):\n if hospital[i] == 'h':\n print('go')\n break\n elif jh[i] == 'h':\n print('no')\n break\n\n" }, { "alpha_fraction": 
0.48695650696754456, "alphanum_fraction": 0.48695650696754456, "avg_line_length": 15.571428298950195, "blob_id": "7658a99d40944ffbe34d9ec696607b7f17ce61f3", "content_id": "c1dede77301ccf831ff37e2daa79b4bdd4128c70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 36, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/06/0624/더하기 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "num = ''\nwhile True:\n try:\n num += input()\n except:\n break\nprint(sum(map(int, num.split(','))))" }, { "alpha_fraction": 0.41743120551109314, "alphanum_fraction": 0.5458715558052063, "avg_line_length": 30.285715103149414, "blob_id": "6d76577f86b6a99b82b744e98fa6538275668345", "content_id": "1da8bb83990a2d96a684dccc896225612a0b5e87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 47, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/05/0501/전투 드로이드 가격.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "menu = [350.34, 230.90, 190.55, 125.30, 180.90]\nfor t in range(int(input())):\n tmp = list(map(int, input().split()))\n result = 0\n for i in range(5):\n result += menu[i]*tmp[i]\n print(f'${result:.2f}')" }, { "alpha_fraction": 0.4779411852359772, "alphanum_fraction": 0.5073529481887817, "avg_line_length": 18.571428298950195, "blob_id": "366b0e24c897ee6333a087120c087d9f128a9706", "content_id": "f9c8c725f26e6a75f0d111b67dba6205c20e32b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/12/1201/파인만.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n N = int(input())\n if not N: break\n result = 0\n for i in range(1, N+1):\n result += i**2\n 
print(result)" }, { "alpha_fraction": 0.4680851101875305, "alphanum_fraction": 0.4893617033958435, "avg_line_length": 26, "blob_id": "88aae1c374b2a8299c3aedaa7a9dd15c35bdfd79", "content_id": "cd3d9011527f102b81fe88ed16434b7aa5ddb8a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/09/0902/부호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(3):\n N = int(input())\n tmp = [int(input()) for __ in range(N)]\n result = '0'\n if sum(tmp) > 0: result = '+'\n elif sum(tmp) < 0: result = '-'\n print(result)" }, { "alpha_fraction": 0.5044247508049011, "alphanum_fraction": 0.5132743120193481, "avg_line_length": 13.125, "blob_id": "8ec2dcf6eaf614389371b39640dbc70948822c1a", "content_id": "79474395687b273dcf75c1b32592b13871a5c735", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/02/0207/캥거루 세마리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C = map(int, input().split())\n\nfurther = A\n\nif (B-A) < (C-B):\n further = C\n\nprint(abs(further - B) - 1)\n" }, { "alpha_fraction": 0.5304877758026123, "alphanum_fraction": 0.5609756112098694, "avg_line_length": 22.571428298950195, "blob_id": "c4bd9fa3164844d7772f84a2dcc3c5494cfc8726", "content_id": "369f44dc5b3ede4670c2661be681fdb01edb78ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 34, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/02/0203/치킨댄스를 추는 곰곰이를 본 임스 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nfor _ in range(int(input())):\n gifticon = input().split('D-')\n left_day = int(gifticon[1])\n 
if left_day <= 90:\n result += 1\nprint(result)" }, { "alpha_fraction": 0.46489858627319336, "alphanum_fraction": 0.4836193323135376, "avg_line_length": 20.399999618530273, "blob_id": "0ef99bed4963ca1319ec162dd9300c8de7196203", "content_id": "b0659c51ed07469826a2d5148dce5d7280db2c0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 36, "num_lines": 30, "path": "/알고리즘/온라인저지/2022/12/1204/효율적인 해킹.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\ndef bfs(i):\n Q = deque([i])\n cnt = 1 # i번 컴퓨터를 해킹하면서 bfs 시작\n visited = [0]*(N+1)\n visited[i] = 1\n while Q:\n i = Q.popleft()\n for nxt in arr[i]:\n if not visited[nxt]:\n visited[nxt] = 1\n Q.append(nxt)\n cnt += 1\n return cnt\n\nN, M = map(int, input().split())\narr = [[] for _ in range(N+1)]\nfor _ in range(M):\n a, b = map(int, input().split())\n arr[b].append(a)\n\nresult = [0]*(N+1)\nfor i in range(1, N+1):\n result[i] = bfs(i)\n\nmaxx = max(result)\nfor i in range(N+1):\n if result[i] == maxx:\n print(i, end=' ')" }, { "alpha_fraction": 0.40740740299224854, "alphanum_fraction": 0.4273504316806793, "avg_line_length": 18.55555534362793, "blob_id": "51d9052bffacada0481557ceb3c8a2f90d044d80", "content_id": "e1bb38ba2468cd75cdaa8b7308bb5bb1ff62de76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 43, "num_lines": 18, "path": "/알고리즘/온라인저지/2023/03/0326/선물할인.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, B, A = map(int, input().split())\nP = sorted(list(map(int, input().split())))\nresult = 0\nsumm = 0\n\nfor i in range(N):\n if summ+P[i]<=B:\n summ += P[i]\n result += 1\n else:\n if summ+(P[i]//2)<=B and A:\n summ += P[i]//2\n result += 1\n A -= 1\n else:\n break\n\nprint(summ, result)" }, { "alpha_fraction": 
0.44961240887641907, "alphanum_fraction": 0.4961240291595459, "avg_line_length": 15.25, "blob_id": "7d02f29c339aa84625434da74d4a5a46e08955e4", "content_id": "602f705c14dd98823c307d8c14242d70a038077a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 22, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/07/0702/문어.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = [1] * N\nfor i in range(N):\n if i%2:\n result[i] += 1\nif N%2:\n result[-1] = 3\nprint(*result)" }, { "alpha_fraction": 0.38655462861061096, "alphanum_fraction": 0.3949579894542694, "avg_line_length": 12.333333015441895, "blob_id": "e8189246c35cb2453ab369ba784bce1a8a14d4f6", "content_id": "5472326aee6ffe8dba35289e2ecb30b6d257de7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 26, "num_lines": 9, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/17차시 2. 
자료구조 – 리스트, 튜플 - 연습문제 19.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "x = input().split(', ')\n\nx.sort()\n\nfor i in x:\n if i == x[-1]:\n print(i)\n else:\n print(i, end=', ')" }, { "alpha_fraction": 0.4084506928920746, "alphanum_fraction": 0.46948355436325073, "avg_line_length": 29.428571701049805, "blob_id": "8fb0d65015489351f59c3fa13562c67141cd6053", "content_id": "a2904dc8e4465a471571900533211eb92e066499", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 63, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/01/0115/회의실 배정 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr = [0] + [list(map(int, input().split())) for _ in range(N)]\ndp = [0, arr[1][2]] + [0]*(N-1)\nif N > 1:\n for i in range(2, N+1):\n dp[i] = max(dp[i-1], dp[i-2]+arr[i][2])\nprint(dp[-1])\n" }, { "alpha_fraction": 0.5537189841270447, "alphanum_fraction": 0.5743801593780518, "avg_line_length": 19.25, "blob_id": "7402bb6b7ce4765d45a3ebf1e00e51558e2c8ea1", "content_id": "7ecc86937e4ab8ad6502a366fc9cabd98fe3ca7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "no_license", "max_line_length": 35, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/09/0930/문자열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nA, B = input().rstrip().split()\nresult = int(1e9)\nfor i in range(len(B)-len(A)+1):\n tmp = 0\n for j in range(len(A)):\n if A[j] != B[i+j]: tmp += 1\n result = min(result, tmp)\nprint(result)" }, { "alpha_fraction": 0.41860464215278625, "alphanum_fraction": 0.43410852551460266, "avg_line_length": 13.44444465637207, "blob_id": "18570ce7e55d191b03d44c85373b28eae8d8b29a", "content_id": "32d7a72d6582b61cadc90c420bdd210a3af3c09c", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 24, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/02/0213/홀수일까 짝수일까.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nfor n in range(N):\n K = int(input()[-1])\n\n if K % 2:\n print('odd')\n else:\n print('even')" }, { "alpha_fraction": 0.388127863407135, "alphanum_fraction": 0.4337899684906006, "avg_line_length": 43, "blob_id": "15c76e18da7ef3d0e60d69e6b4682a0e7e8c23b5", "content_id": "7822fba43ad12751db9f8901b2c2d8210c0a6292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 101, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/03/0324/Report Card Time.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "grades = ['F']*60 + ['D']*7 + ['D+']*3 + ['C']*7 + ['C+']*3 + ['B']*7 + ['B+']*3 + ['A']*7 + ['A+']*4\nfor _ in range(int(input())):\n name, score = input().split()\n score = int(score)\n print(name, grades[score])" }, { "alpha_fraction": 0.5822784900665283, "alphanum_fraction": 0.6202531456947327, "avg_line_length": 25.66666603088379, "blob_id": "20e363df146cc65785f831c994f973ddd722ff5f", "content_id": "782451d0c3693f09f8413c7344bec98deee2da18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79, "license_type": "no_license", "max_line_length": 45, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/09/0913/이진수 덧셈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nfor i in input().split(): result += int(i, 2)\nprint(bin(result)[2:])" }, { "alpha_fraction": 0.5087719559669495, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 31.125, "blob_id": "cea27185dc5e65a0b92ae2d4c144d7bea7dc4880", "content_id": "c2e1fd4b63d6c5ddbcd33fc3188fb9ff8d222ab7", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 58, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/10/1012/행렬 곱셈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def multiply(matrix1, matrix2): # 행렬 곱셈\n output = []\n for i in range(N):\n output.append([])\n for j in range(K):\n tmp = 0\n for k in range(M):\n tmp += matrix1[i][k] * matrix2[k][j]\n output[i].append(tmp)\n return output\n\nN, M = map(int, input().split())\nmat1 = [list(map(int, input().split())) for _ in range(N)]\nM, K = map(int, input().split())\nmat2 = [list(map(int, input().split())) for _ in range(M)]\nfor i in multiply(mat1, mat2): print(*i)" }, { "alpha_fraction": 0.49560633301734924, "alphanum_fraction": 0.5184534192085266, "avg_line_length": 16.272727966308594, "blob_id": "705dc9059b8facdf9f2ca190ae9ed19f9fe0af08", "content_id": "f7c8d52ad5f3357072414c6ccd31377de8913750", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 761, "license_type": "no_license", "max_line_length": 42, "num_lines": 33, "path": "/알고리즘/온라인저지/2022/01/0122/게으른 백곰.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, K = map(int, input().split())\n\npen = [0] * 1000001 # 우리(pen)의 범위\n\nnow = K # K에서 시작해야, 앞으로 K만큼, 뒤로 K만큼\n\nx_max = 0 # 양동이좌표 최대까지만 탐색\n\nfor n in range(N):\n g, x = map(int, input().split())\n\n pen[x] = g # 우리 채우기\n\n if x > x_max:\n x_max = x # 최대 양동이좌표 저장\n\nresult = window = sum(pen[:now+K+1]) # 초기값\n\nnow += 1 # 한 칸 앞으로\n\n# 백만 다 안돌고, 최대양동이까지만 탐색\nwhile now+K <= x_max: \n # 윈도우 한 칸 앞으로 슬라이딩~\n window -= pen[now-K-1]\n window += pen[now+K]\n \n # 최대값 갱신 가능한가?\n if window > result:\n result = window\n \n now += 1 # 한 칸 앞으로~\n\nprint(result)" }, { "alpha_fraction": 0.5423728823661804, "alphanum_fraction": 0.5706214904785156, "avg_line_length": 21.25, "blob_id": "020a634e0b21f6fe0cf69b69c0e67889a0e8e9b0", "content_id": 
"56786f068f246702eb3be17790ff967a6c41afe7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 57, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/08/0827/장신구 명장 임스.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "P, N = map(int, input().split())\nA = sorted(list(map(int, input().split()))) # accessories\nresult = 0\nfor a in A:\n if P >= 200: break\n P += a\n result += 1\nprint(result)" }, { "alpha_fraction": 0.375, "alphanum_fraction": 0.42500001192092896, "avg_line_length": 11.100000381469727, "blob_id": "6363fc300a50ebea8f976c000b06391c3de8ee32", "content_id": "e957ef52f3ee14b8d75c07c3cd75aae13d744c3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "no_license", "max_line_length": 20, "num_lines": 10, "path": "/알고리즘/온라인저지/2021/12/1213/평균 점수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a = []\n\nfor i in range(5):\n b = int(input())\n if b < 40:\n b = 40\n \n a.append(b)\n\nprint(int(sum(a)/5))" }, { "alpha_fraction": 0.4337811768054962, "alphanum_fraction": 0.44913628697395325, "avg_line_length": 18.33333396911621, "blob_id": "e899caf1c32df269b93f99c6533f014e2c85617f", "content_id": "ed8f87c6378c38c02c65ab7eddc4871475b280e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 35, "num_lines": 27, "path": "/알고리즘/온라인저지/2021/09/0905/스택 수열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\nn = int(sys.stdin.readline())\nori = list(range(2, n+1))\ntmp = [1]\nope = ['+']\n\nfor _ in range(n):\n if not tmp:\n tmp.append(ori.pop(0))\n ope.append('+')\n num = int(sys.stdin.readline())\n if num == tmp[-1]:\n tmp.pop()\n ope.append('-') \n elif num < tmp[-1]:\n print('NO')\n exit()\n else:\n 
while num != tmp[-1]:\n tmp.append(ori.pop(0))\n ope.append('+')\n tmp.pop()\n ope.append('-') \n\nfor o in ope:\n print(o)" }, { "alpha_fraction": 0.4351145029067993, "alphanum_fraction": 0.442748099565506, "avg_line_length": 13.55555534362793, "blob_id": "6c8c3bee863f15d8aa7cf4c603b586df546a6a76", "content_id": "2a876075d87ff865b35961e8b2b1628e73fcf8e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 31, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/02/0205/단어 뒤집기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n sen = input().split()\n\n for s in sen:\n print(s[::-1], end=' ')\n \n print()\n" }, { "alpha_fraction": 0.38461539149284363, "alphanum_fraction": 0.4472934603691101, "avg_line_length": 22.399999618530273, "blob_id": "074b00d44aaa41ee9fa6fa69becbc4f19affbe02", "content_id": "24b1a2cc7dea4199b1a971857a3f600c43c8f8a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 43, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/06/0628/애너그램.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor t in range(T):\n result = 'NOT '\n arr = input().split()\n word1 = [0] * 26\n word2 = [0] * 26\n for w in arr[0]:\n word1[ord(w)-97] += 1\n for w in arr[1]:\n word2[ord(w)-97] += 1\n if word1 == word2:\n result = ''\n print('{} & {} are {}anagrams.'.format(\n arr[0], arr[1], result\n ))\n" }, { "alpha_fraction": 0.5296523571014404, "alphanum_fraction": 0.5521472096443176, "avg_line_length": 21.272727966308594, "blob_id": "e693232d62d1b0adc245c7f3439a55e5de1f8b7d", "content_id": "6b94620fc13ed7dfe3dacfa0c0569e76587c675c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", 
"max_line_length": 52, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/08/0801/예산.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def binary(start, end):\n global result, M\n if start > end: return\n budget = (start + end) // 2\n tmp = 0\n for b in B: tmp += min(b, budget)\n if tmp <= M:\n if budget > result: \n result = budget\n binary(budget+1, end)\n else:\n binary(start, budget-1)\n\nN = int(input())\nB = sorted(list(map(int, input().split()))) # budget\nM = int(input())\nS, E = 0, M\nresult = 0\nbinary(S, E)\nprint(min(B[-1], result))\n\n# https://www.acmicpc.net/problem/2512" }, { "alpha_fraction": 0.5460993051528931, "alphanum_fraction": 0.588652491569519, "avg_line_length": 14.777777671813965, "blob_id": "7be936052a2461d09b971f70700ebb30381d2125", "content_id": "88747d0e3bee45dc09230b727dc8a5fbe3b4a0ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 141, "license_type": "no_license", "max_line_length": 32, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/10/1012/체스판 조각.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input())\nodd = False\nif N%2: N += 1; odd = True\nA = N//2 + 1\nprint(A**2 - A if odd else A**2)" }, { "alpha_fraction": 0.5694444179534912, "alphanum_fraction": 0.5972222089767456, "avg_line_length": 11.166666984558105, "blob_id": "7f04d15c1601c41bf900bb7b08e8278fd7c62552", "content_id": "c7d4b3cde04bbfe8da7d8ab56daf64a953f66e95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 26, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/02/0205/카드 게임.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\n\nfor _ in range(5):\n result += int(input())\n\nprint(result)" }, { "alpha_fraction": 0.5797545909881592, "alphanum_fraction": 0.6012269854545593, 
"avg_line_length": 19.4375, "blob_id": "e66de8337469520acf8e46cfc036e9f48f5a38e7", "content_id": "35fc1f6570233cbba1272e696ca1aba9a3e07330", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 43, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/07/0717/뒤집기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n블록(구간)의 개수가 적은 숫자를 뒤집기\n그냥 숫자의 개수가 적은 숫자를 뒤집기\n\"\"\"\narr = list(map(int, list(input())))\n# print(arr)\nswitch = True\nresult = [0, 0, arr.count(0), arr.count(1)]\nswitch = bool(arr[0])\nresult[switch] += 1\nfor a in arr:\n if a != switch:\n switch = not switch\n result[switch] += 1\n# print(result)\nprint(min(result))" }, { "alpha_fraction": 0.5202797055244446, "alphanum_fraction": 0.5398601293563843, "avg_line_length": 20.058822631835938, "blob_id": "d74e66e5cb5770d001c038d4a47cc1abeaf57cbf", "content_id": "90e11bea0639306b4ba7291ff619e95e2d698dc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 715, "license_type": "no_license", "max_line_length": 45, "num_lines": 34, "path": "/알고리즘/온라인저지/2022/09/0917/연결 요소의 개수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\ndef BFS(node):\n global result\n Q.append(node)\n visited[node] = 1\n cnt = 0\n while Q:\n now = Q.popleft()\n for next in arr[now]:\n if not visited[next]:\n Q.append(next)\n visited[next] = 1\n cnt += 1\n result -= cnt\n\nN, M = map(int, input().rstrip().split())\narr = [[] for _ in range(N+1)]\nvisited = [0] * (N+1)\nQ = deque()\nfor m in range(M):\n u, v = map(int, input().rstrip().split())\n arr[u].append(v)\n arr[v].append(u)\nresult = N\nfor i in range(1, N+1): \n BFS(i)\n if sum(visited) == N: break\nprint(result)\n\n# https://www.acmicpc.net/problem/11724" }, { "alpha_fraction": 0.6091370582580566, 
"alphanum_fraction": 0.624365508556366, "avg_line_length": 28.22222137451172, "blob_id": "a19d6079ab23c3e6fc7b464eb993672af5cb3c55", "content_id": "3dc3828d3ba8f786213e3d2b4717d683c66a2b59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 58, "num_lines": 27, "path": "/알고리즘/온라인저지/2021/08/0807/손익분기점.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\nA, B, C = 고정비용, 생산비용, 노트북 가격\n손익분기점 : 판매수입>총 생산비용(고정비용+생산비용)이 되는 시점\n생산대수를 1대부터 점점 증가시켜서(while)\n손익분기점을 만족하는 N값을 찾아서 출력\n할려고 했는데 예시 2만원에 생산해서 1만원에 파는 사람이 어딨냐고ㅋㅋㅋ\n문제 끝에 \"손익분기점이 발생하지 않으면 -1을 출력한다\"고 있네요\n문제 잘 읽겠습니다...\nwhile문으로 21억까지 +1 하면서 가려니까 시간이 너무 오래 걸려서\n직접 손익분기점을 구해서 올림을 통해 값을 주기로 했는데\n아무래도 math모듈을 불러올 수 없는 것 같다\n올림을 못쓰니, 생산대수를 근사한 값으로 구해서 하나씩 더하기로 했다\nZeroDivisionError가 나왔다, 테스트케이스중에 생산비용과 판매비용이 같은 케이스가 있다는건데\n이 상황을 추가하였다\n\"\"\"\n\nA, B, C = map(int, input().split()) # 고정비용, 가변비용, 판매가 입력\nif B >= C: # 손익분기점이 발생하지 않을 경우\n N = -1 # -1을\nelse: # 손익분기점이 발생할 경우\n N = round(A / (C-B)) - 1 # N일수도 있고 아닐수도 있는 근사값을 설정한다\n while True: # 반복문을 돌리면서\n if N > A / (C-B): # 손익분기점이라면\n break # while문을 마치고\n else: # 손익분기점이 아니라면\n N += 1 # +1 해서 다시 돌려보고\nprint(N) # 출력한다" }, { "alpha_fraction": 0.5302491188049316, "alphanum_fraction": 0.5480427145957947, "avg_line_length": 22.41666603088379, "blob_id": "d4b86e3647e17ab8a260fed4960e28cbe82c0817", "content_id": "42365851c22a6223934a063930f0543652bec03a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/알고리즘/온라인저지/2021/08/0809/수들의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nS = int(sys.stdin.readline()) # S값 입력받고\nN = 0 # 더해줄 N값 초기화\ncount = 0 # 최대 개수 초기화\nwhile True: # while문 반복하면서\n N += 1 # 1부터 S값에서 빼기 시작\n if N > S: # 종료조건\n break\n S -= N # S에서 N을 빼주면서 S를 
N보다 작게 만들때까지\n # 수의 개수를 \n count += 1 # 세어주고\nprint(count) # 출력\n" }, { "alpha_fraction": 0.38879457116127014, "alphanum_fraction": 0.5670627951622009, "avg_line_length": 13.75, "blob_id": "c60111bafab358f2f649917d396da8975699de37", "content_id": "bc3b74f247a613e52d434b182870b8577f540432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 815, "license_type": "no_license", "max_line_length": 46, "num_lines": 40, "path": "/알고리즘/온라인저지/2022/03/0323/저울.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\nex) 무게추 = [1 2 4 7 8 30]\n1 1 \n2 2\n3 2 1\n4 4\n5 4 1\n6 4 2\n7 4 2 1\n8 7 1\n9 7 2\n10 7 2 1\n11 7 4\n12 7 4 1\n13 7 4 2\n14 7 4 2 1\n15 8 7\n16 8 7 1\n17 8 7 2\n18 8 7 2 1\n19 8 7 4\n20 8 7 4 1\n21 8 7 4 2\n22 8 7 4 2 1\n23 X\n추들을 순회할 때\n진행중인 모든 추를 다 더한 것 + 1\n보다 큰 추가 있거나, 다 더하고 for문이 끝나면\n그 더한 값이 답이 된다\n\"\"\"\nN = int(input())\nweights = list(map(int, input().split())) # 추들\nweights.sort() # 작은 추부터 차례대로 더할 것\ntarget = 1 # 시작점, 혹은 목표 무게\nfor w in weights: # 추들을 순회하면서\n # print(target, w) # 디버깅\n if target < w: # 더 큰 추가 나왔으면\n break # 지금 보는 무게가 답이 된다\n target += w # 다음 추 더하기\nprint(target) # 목표 무게 출력" }, { "alpha_fraction": 0.42934781312942505, "alphanum_fraction": 0.489130437374115, "avg_line_length": 25.428571701049805, "blob_id": "2e05a75fe920cd1bd593fe893505df4c6787fbbe", "content_id": "ebe99b2037c4da7e92bfd309a96fa86a4ebd19e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 184, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/02/0207/주사위.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C = map(int, input().split())\ndp = [0]*100\nfor a in range(1, A+1):\n for b in range(1, B+1):\n for c in range(1, C+1):\n dp[a+b+c] += 1\nprint(dp.index(max(dp)))" }, { "alpha_fraction": 0.5071770548820496, "alphanum_fraction": 0.540669858455658, 
"avg_line_length": 26.933332443237305, "blob_id": "73d179e435cf6bdc2766ef7e1a1bcdcf49afe479", "content_id": "edb801b7562c9f481e7a70e2e1707e0cddc4967c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 52, "num_lines": 15, "path": "/알고리즘/온라인저지/2023/01/0127/2021은 무엇이 특별할까.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def prime_list(size):\n sieve = [True]*(size+1)\n for i in range(2, int(size**0.5)+1):\n if sieve[i]:\n for j in range(i+i, size+1, i):\n sieve[j] = False\n return [i for i in range(2, size+1) if sieve[i]]\n\nN = int(input())\nprimes = prime_list(5500)\nfor i in range(len(primes)-1):\n is_special = primes[i]*primes[i+1]\n if is_special > N:\n print(is_special)\n break" }, { "alpha_fraction": 0.4900990128517151, "alphanum_fraction": 0.5118811726570129, "avg_line_length": 23.658536911010742, "blob_id": "4ad5c46b849f6e36cb7a616fba3b64aa9d3b52d9", "content_id": "8c3fcf68a0b5d8dedb9d86e236ba8b6545b80940", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "no_license", "max_line_length": 61, "num_lines": 41, "path": "/알고리즘/온라인저지/2022/08/0828/녹색 옷 입은 애가 젤다지.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nimport heapq # 탐색 시 가장 작은 값을 탐색하기 위함\n\ninput = sys.stdin.readline\nINF = int(1e9) # 가상의 최대값\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\ndef dijkstra(y, x):\n global result\n heapq.heappush(Q, (y, x, arr[y][x]))\n visited[y][x] = arr[y][x]\n while Q:\n y, x, dist = heapq.heappop(Q)\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<N and 0<=nx<N:\n cost = dist + arr[ny][nx]\n if cost < visited[ny][nx]:\n visited[ny][nx] = cost\n heapq.heappush(Q, (ny, nx, cost))\n\ncase = 0\nwhile True:\n case += 1\n N = int(input())\n if N == 0: break\n arr = [list(map(int, input().split())) for _ in range(N)]\n 
visited = [[INF]*N for _ in range(N)]\n result = INF\n Q = []\n dijkstra(0, 0)\n print(f'Problem {case}: {visited[N-1][N-1]}')\n\n\"\"\"\n최소 경로값을 저장하는 배열 생성\n모든 지점에 대해 최소 경로값을 구하며 진행\n도착점 arr[N-1][N-1]의 최소 경로값을 출력\nheapq 자체의 빠른 속도만을 이용하였고\n별도의 백트래킹문은 넣지 않음\nDP처럼 모든 지점의 최소 경로값을 구하였음\n\"\"\"" }, { "alpha_fraction": 0.37060701847076416, "alphanum_fraction": 0.3993610143661499, "avg_line_length": 19.866666793823242, "blob_id": "0e67eea0bdc031f21e8a4f4afcabe7266281ef3b", "content_id": "d6f65d5ea36ffbaf01c7be72ffdfa6f63fdf04a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 43, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/05/0523/별 찍기 - 17.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nstart = N-1\nlast = N*2-1\nmiddle = 1\nfor n in range(N):\n if n == 0:\n print(' '*start, '*', sep='')\n start -= 1\n elif n == N-1:\n print('*'*last)\n else:\n print(' '*start, end='')\n print('*', ' '*middle, '*', sep='')\n middle += 2\n start -= 1\n" }, { "alpha_fraction": 0.553398072719574, "alphanum_fraction": 0.6019417643547058, "avg_line_length": 25, "blob_id": "82057bf2426ad090e54bfe566b443942d24d3865", "content_id": "44c94bdfc19d8e24d6f0813c2838c91b36121bf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 41, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/12/1207/Dog Treats.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nfor i in range(1, 4):\n result += int(input())*i\nprint('happy' if result >= 10 else 'sad')" }, { "alpha_fraction": 0.4206642210483551, "alphanum_fraction": 0.4206642210483551, "avg_line_length": 21.66666603088379, "blob_id": "b3ab30184962cd9a4b5fe008a15c925c0208187a", "content_id": "a5969ed2dae7213d8fc37a3fd74dd4467ec04da0", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 31, "num_lines": 12, "path": "/알고리즘/온라인저지/2023/05/0520/좋은놈 나쁜놈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n S = input()\n G = S.count('G')\n G += S.count('g')\n B = S.count('B')\n B += S.count('b')\n result = 'NEUTRAL'\n if G > B:\n result = 'GOOD' \n if G < B:\n result = 'A BADDY'\n print(f'{S} is {result}')" }, { "alpha_fraction": 0.46357616782188416, "alphanum_fraction": 0.5430463552474976, "avg_line_length": 15.88888931274414, "blob_id": "6bc66001c1f38c2876f76e0eb0910f11c8b75875", "content_id": "f80c9a225ea758ba8a491161bd7637abe8b5f464", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 29, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/09/0924/피보나치 비스무리한 수열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input())\ndp = [0, 1, 1, 1] + [0]*114\nfor i in range(4, N+1):\n dp[i] = dp[i-1] + dp[i-3]\nprint(dp[N])" }, { "alpha_fraction": 0.6756756901741028, "alphanum_fraction": 0.6756756901741028, "avg_line_length": 17.66666603088379, "blob_id": "e8068822513fa36a39f272bf33a7889d2ccbd218", "content_id": "3f30abd89d5cb6537429a0271139c5b6d6f350e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 63, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/09/0927/중복 빼고 정렬하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ninput()\nprint(*(sorted(list(set(map(int, input().rstrip().split()))))))" }, { "alpha_fraction": 0.5025996565818787, "alphanum_fraction": 0.5242634415626526, "avg_line_length": 24.66666603088379, "blob_id": 
"8090ca597ba66828e7c113c9c876ffcb9056804c", "content_id": "bc28444de5390de6bbf3d1e93422f6e6cfc24ee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1742, "license_type": "no_license", "max_line_length": 58, "num_lines": 45, "path": "/알고리즘/온라인저지/2021/10/1016/미로 탐색.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque # popleft() 사용할 것\n\ndx = (-1, 1, 0, 0) # 델타이동 속도를 조금이라도 높이기 위해\ndy = (0, 0, -1, 1) # tuple() 사용\n\nN, M = map(int, input().split())\n\narr = []\n\nfor n in range(N):\n arr.append(list(map(int,input()))) # 공백 없으니, split()없이\n\nq = deque()\n\nq.append((0, 0, 1)) # 출발지점, 출발점부터 거리 카운트함\n\nresult = 0 # 결과값 초기화\n\nwhile q: # == BFS\n tmp = q.popleft() # == pop(0)\n x = tmp[0] # 탐색할 x좌표\n y = tmp[1] # 탐색할 y좌표\n cnt = tmp[2] # 여기까지 오는데 걸린 거리\n if arr[y][x] == 0: # 이전 탐색에서 q에 담았었지만 꺼내보니 방문한 적이 있다면\n # 방문할 곳에 해당하여 q에 담았지만, 막상 차례가 되서 꺼내보니 탐색할 필요가 없다면\n # 아래 과정들을 또 진행하여 중복방문이 일어나지 않도록\n continue\n arr[y][x] = 0 # 방문처리\n # 처음에는 arr과 동일한 크기의 visited 배열을 만들어서 방문처리를 하였으나\n # 메모리가 넉넉하지 않은 문제임을 확인하였고\n # 원본 배열 자체에서 방문처리 하였음\n\n if x == M-1 and y == N-1: # 도착좌표\n result = cnt # 거리를 결과값에 저장\n break # 종료\n\n for i in range(4): # 탐색중인 좌표가 목적지가 아니라면 \n # 범위를 벗어나지 않고 방문한 적이 없는 주변 4칸 담을 것\n nx = x+dx[i] # 좌우\n ny = y+dy[i] # 상하\n if 0 <= nx < M and 0 <= ny < N and arr[ny][nx]:\n # 배열 안에서, 방문한 적이 없다면\n q.append((nx, ny, cnt+1)) # 방문할 곳에 추가\n\nprint(result)" }, { "alpha_fraction": 0.44117647409439087, "alphanum_fraction": 0.4632352888584137, "avg_line_length": 16.0625, "blob_id": "49a816c129f781ec7b37860f285b5f47cc57ef1b", "content_id": "fe5cb498bad4dbd73623cb0c32c22295e7a06efa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 31, "num_lines": 16, "path": "/알고리즘/온라인저지/2021/09/0906/수들의 합 5.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", 
"text": "import sys\ndef input():\n return sys.stdin.readline()\n\nN = int(input())\ncnt = 0\nfor n in range(1, N+1):\n tmp = N\n for i in range(n, N+1):\n tmp -= i\n if tmp < 0:\n break\n elif not tmp:\n cnt += 1\n break\nprint(cnt)" }, { "alpha_fraction": 0.37567567825317383, "alphanum_fraction": 0.4162162244319916, "avg_line_length": 25.5, "blob_id": "7d5253ea21502a7961567ea0b2277be8effc5554", "content_id": "8a0999cf925fcc4601bbb879bfca13ee051f9f36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 98, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/12/1218/e 계산.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print('n e')\nprint('- -----------')\nfor n in range(10):\n e = 0\n for i in range(n+1):\n factorial = 1\n s = 1\n for f in range(1, i+1):\n factorial *= f\n s /= factorial\n e += s\n print(n, end=' ')\n e = str(e).split('.')\n print(f'{float(\".\".join(e)):0.9f}' if len(e[1]) > 9 else \".\".join(e) if e[1] != '0' else e[0])" }, { "alpha_fraction": 0.5170068144798279, "alphanum_fraction": 0.5510203838348389, "avg_line_length": 20.14285659790039, "blob_id": "edf7d7c4f4e8471999694b198aa9de88c2d1ce95", "content_id": "6d5d6e2e360ff0a049fac299157028d5e03a24b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 147, "license_type": "no_license", "max_line_length": 38, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/03/0328/조교는 새디스트야!!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ncrew = list(map(int, input().split()))\nresult = 0\nfor i in range(1, N+1):\n if crew[i-1] != i:\n result += 1\nprint(result)" }, { "alpha_fraction": 0.4440000057220459, "alphanum_fraction": 0.47200000286102295, "avg_line_length": 26.88888931274414, "blob_id": "f9e34ae56861de691ac7e1326cba1073f51dd4e1", "content_id": "6b1c695f156f92bb4901fe11bdd9a7d5add97e99", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 41, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/09/0912/알파벳 거리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n X, Y = input().split()\n result = []\n for i in range(len(X)):\n y, x = ord(Y[i])-64, ord(X[i])-64\n tmp = y-x\n if tmp < 0: tmp += 26\n result.append(tmp)\n print('Distances:', *result)" }, { "alpha_fraction": 0.40692123770713806, "alphanum_fraction": 0.42243435978889465, "avg_line_length": 22.30555534362793, "blob_id": "f32533e75252f83d7c425f40079e5ea48479500e", "content_id": "7aa081ea63ae1b970f2fb5521172e8d8fbd9e856", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 906, "license_type": "no_license", "max_line_length": 63, "num_lines": 36, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/3. 파이썬 SW문제해결 기본 String/4차시 3일차 - 회문.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def palindrome(arr): # 팰린드롬을 찾아서 result에 추가하는 함수\n for i in range(N):\n for j in range(N-M+1):\n if not M%2:\n if arr[i][j:j+M//2] == arr[i][j+M:j+M//2-1:-1]:\n result.append(arr[i][j:j+M])\n break\n else:\n if arr[i][j:j+M//2] == arr[i][j+M:j+M//2:-1]:\n result.append(arr[i][j:j+M])\n break \n\n\nT = int(input())\n\nfor t in range(1, T+1):\n N, M = map(int, input().split())\n\n arr = tuple(input() for _ in range(N))\n\n result = []\n # 가로\n palindrome(arr)\n \n # 세로 : 배열을 90도 뒤집어서 똑같은 코드 실행\n arr = list(arr)\n\n tmp = ['' for _ in range(N)]\n \n for a in arr:\n for i in range(N):\n tmp[i] = tmp[i] + a[i]\n \n palindrome(tmp)\n\n print('#{} {}'.format(t, *result))" }, { "alpha_fraction": 0.5236220359802246, "alphanum_fraction": 0.539370059967041, "avg_line_length": 14.9375, "blob_id": "d80325e272700c470082b17f4ca8b3e9aed87e3a", "content_id": 
"19344b6ccbe56c6e9a9d3620ab6f9737f617d8a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 26, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/09/0903/30.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = input()\ntmp = 0\nzero = False\nresult = []\nfor n in N:\n n = int(n)\n tmp += n\n result.append(n)\n if n == 0: zero = True\ntmp %= 3\nresult.sort(reverse=True)\nif not tmp and zero:\n for r in result:\n print(r, end='')\nelse:\n print(-1)" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6285714507102966, "avg_line_length": 35, "blob_id": "2e0fc741a6f78fa8c60aa966b5dae4d6e733a5af", "content_id": "ad8efce2b6b676c0a67cf96ff1138971b7875b8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35, "license_type": "no_license", "max_line_length": 35, "num_lines": 1, "path": "/알고리즘/온라인저지/2023/04/0427/ゾロ目 (Same Numbers).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a,b=input();print(1 if a==b else 0)" }, { "alpha_fraction": 0.5246636867523193, "alphanum_fraction": 0.5650224089622498, "avg_line_length": 21.399999618530273, "blob_id": "783df0b75a5bf00a31f2c537249a898380d449fb", "content_id": "3defba8010f0187f79044cf1bb7e0fab8df521ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 42, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/11/1127/JOI와 IOI.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nresult = [0, 0] # JOI, IOI\nS = input().rstrip()\nfor i in range(len(S)-2):\n if S[i:i+3] == 'JOI': result[0] += 1\n elif S[i:i+3] == 'IOI': result[1] += 1\nfor r in result: print(r)" }, { "alpha_fraction": 0.4480408728122711, "alphanum_fraction": 0.475298136472702, 
"avg_line_length": 23.95652198791504, "blob_id": "ff2d095d9f15609aed06ca59aaadc5476b20dc01", "content_id": "1d324a72c061935a6e75debf0b2f886da0f4f503", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 807, "license_type": "no_license", "max_line_length": 57, "num_lines": 23, "path": "/알고리즘/온라인저지/2021/10/1003/부분수열의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, S = map(int, input().split())\n\narr = list(map(int, input().split()))\n\nn = len(arr)\n\ncnt = 0 # 부분집합의 합이 S가 될 때마다 +1\n\n# 비트연산 \"<<\"를 통해 부분집합을 구해줄 것\nfor i in range(1<<n): # 2의 len(arr)승 -> arr의 부분집합의 개수\n tmp = [] # 부분집합 초기화\n for j in range(n): \n if i&(1<<j): # 1<<j == arr에서 j번째 원소를 의미\n # 만약 i의 비트가 11100이면\n # tmp == 리스트의 0번째, 1번째, 2번째 원소를 가지는 arr의 부분집합\n tmp.append(arr[j]) # 해당 부분집합의 원소를 부분집합에 추가\n if sum(tmp) == S:\n if len(tmp) == 0: # 공집합은 예외처리\n pass\n else: # \n cnt += 1\n\nprint(cnt) # 출력\n \n" }, { "alpha_fraction": 0.4107142984867096, "alphanum_fraction": 0.5059523582458496, "avg_line_length": 20.125, "blob_id": "164d01b972c587fd9e803973aa97cc7360405223", "content_id": "95b727a63e6144247fdd53c127f0d95cb3b38906", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/02/0206/5와 6의 차이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = input().split()\n\nA5 = A.replace('6', '5')\nA6 = A.replace('5', '6')\nB5 = B.replace('6', '5')\nB6 = B.replace('5', '6')\n\nprint(int(A5) + int(B5), int(A6) + int(B6))" }, { "alpha_fraction": 0.5761589407920837, "alphanum_fraction": 0.6158940196037292, "avg_line_length": 15.88888931274414, "blob_id": "4771af0518336e6f867b278cf383ed248c21a2d1", "content_id": "21e87d2ea81b6865b0cdc77fc842dbd7d6cdfed6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 151, "license_type": "no_license", "max_line_length": 56, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/03/0301/팀 나누기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "players = list(map(int, input().split()))\n\nplayers.sort()\n\ntmp = [players[0] + players[3], players[1] + players[2]]\n\ntmp.sort()\n\nprint(tmp[1] - tmp[0])" }, { "alpha_fraction": 0.3425000011920929, "alphanum_fraction": 0.38749998807907104, "avg_line_length": 13.851851463317871, "blob_id": "5ea179f9fd0a5408d228219a09a8f79ec0caa652", "content_id": "d65adb19549de5b44d6cfcc3b5a01a7f8e25f272", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "no_license", "max_line_length": 34, "num_lines": 27, "path": "/알고리즘/온라인저지/2022/01/0108/1, 2, 3 더하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor tc in range(T):\n N = int(input())\n\n dp = [0 for _ in range(N+1)]\n\n dp[1] = 1\n if N == 1:\n print(dp[1])\n continue\n dp[2] = 2\n\n if N == 2:\n print(dp[2])\n continue\n\n dp[3] = 4\n if N == 3:\n print(dp[3])\n continue\n\n if N > 3:\n for i in range(4, N+1):\n dp[i] = sum(dp[i-3:i])\n\n print(dp[N])" }, { "alpha_fraction": 0.4372294247150421, "alphanum_fraction": 0.5064935088157654, "avg_line_length": 20.090909957885742, "blob_id": "166cfacd163b1397eeb72618e1d3b86aeed98d7e", "content_id": "ebb3675ac542628b3c0ea2815d6a111822966531", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/09/0901/진법 변환.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "number = input().split()\nN, B = number[0], int(number[1])\nresult = 0\nN = N[::-1]\nfor i in range(len(N)):\n O = ord(N[i]) # ord\n if 48<=O<=57: O -= 48\n elif 65<=O<=90: O -= 55\n tmp = B**i*O\n result += tmp\nprint(result)" 
}, { "alpha_fraction": 0.34688347578048706, "alphanum_fraction": 0.38482385873794556, "avg_line_length": 13.800000190734863, "blob_id": "702a8cfb42d9fba87b161cd0a659026b016b94e6", "content_id": "693ad1168fca5d1f5b1057bc0ce5a70242996049", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 38, "num_lines": 25, "path": "/알고리즘/온라인저지/2022/11/1116/문어 숫자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\noctopus = {\n '-': 0,\n '\\\\': 1,\n '(': 2,\n '@': 3,\n '?': 4,\n '>': 5,\n '&': 6,\n '%': 7,\n '/': -1,\n}\n\nwhile True:\n tmp = list(input().rstrip())[::-1]\n if tmp == ['#']: break\n j = 0\n result = 0\n for i in range(len(tmp)):\n result += 8**j*octopus[tmp[i]]\n j += 1\n print(result)" }, { "alpha_fraction": 0.4805825352668762, "alphanum_fraction": 0.553398072719574, "avg_line_length": 22, "blob_id": "7408283afe8e5563b6c779cdfcfd571b64bf4151", "content_id": "2ec7e41b36ba889233dbc8a4deb8e2ad10b4d5f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 43, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/10/1003/Generations of Tribbles.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndp = [1, 1, 2, 4] + [0]*66\nfor i in range(4, 70):\n dp[i] = dp[i-1]+dp[i-2]+dp[i-3]+dp[i-4]\nfor i in [0]*int(input().rstrip()):\n print(dp[int(input().rstrip())])" }, { "alpha_fraction": 0.6302083134651184, "alphanum_fraction": 0.6302083134651184, "avg_line_length": 26.571428298950195, "blob_id": "ef4864106946dbfe9ed1c1c4817f1a7280b6878c", "content_id": "348ce8e0edba8c5f35c4d7ae37f146713f0befaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", 
"max_line_length": 45, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/04/0419/특별한 학교 이름.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "school = {\n 'NLCS': 'North London Collegiate School',\n 'BHA': 'Branksome Hall Asia',\n 'KIS': 'Korea International School',\n 'SJA': 'St. Johnsbury Academy',\n}\nprint(school[input()])" }, { "alpha_fraction": 0.49814125895500183, "alphanum_fraction": 0.5018587112426758, "avg_line_length": 19.769229888916016, "blob_id": "8f8de9852e62d66ca0618f9ebab6440392a2673a", "content_id": "b2cc8304696776c735f7d87dd6327ea8be675e2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 269, "license_type": "no_license", "max_line_length": 50, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/03/0320/공.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "M = int(input())\nmoves = []\nfor m in range(M):\n moves.append(tuple(map(int, input().split())))\n# print(moves)\nball = 1\nfor move in moves:\n if ball in move:\n for m in move:\n if ball != m:\n ball = m\n break\nprint(ball)" }, { "alpha_fraction": 0.5317460298538208, "alphanum_fraction": 0.60317462682724, "avg_line_length": 24.399999618530273, "blob_id": "883d94378d5c373039dac2304b525982ef02e3be", "content_id": "ca2571aada4b8cc2472f5c372c587c947c754c3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/04/0414/덧셈과 곱셈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a, b = map(int, input().split())\nresult = 1\nfor i in range(a, b+1):\n result *= sum(list(range(1, i+1)))\nprint(result%14579)" }, { "alpha_fraction": 0.42352941632270813, "alphanum_fraction": 0.4470588266849518, "avg_line_length": 16, "blob_id": "49d41ac31357400bfdd1a8775289b8902afb7299", "content_id": "c7f3e81358032745e47a5d3d97330d8d638a5c62", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 36, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/05/0514/팩토리얼 진법.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "table = []\na = 1\nfor i in range(1, 6):\n table.append(a*i)\n a *= i\nwhile True:\n N = input()\n if N == '0':\n break\n M = len(N)\n N = N[::-1]\n result = 0\n for i in range(M):\n result += int(N[i])*table[i]\n print(result)\n" }, { "alpha_fraction": 0.450236976146698, "alphanum_fraction": 0.450236976146698, "avg_line_length": 14.142857551574707, "blob_id": "d52006b5c96a1762fe3eba34428b2789b71833b9", "content_id": "159853587b92910b72bf3805fe2299a14e2ac109", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 40, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/02/0213/얼마.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n S = int(input())\n N = int(input())\n \n result = S\n\n for n in range(N):\n q, p = map(int, input().split())\n\n result += q * p\n \n print(result)" }, { "alpha_fraction": 0.565517246723175, "alphanum_fraction": 0.5793103575706482, "avg_line_length": 35.5, "blob_id": "dcaf2cede807a7219785e05b4fcb672afea1098e", "content_id": "ac5f0fc9ac7cfb1c506886ef2f193d01ed64c6dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "no_license", "max_line_length": 76, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/08/0806/숫자 카드.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nC = set(map(int, input().split()))\nM = int(input())\nfor i in list(map(int, input().split())): print(1 if i in C else 0, end=' ')" }, { "alpha_fraction": 0.446393758058548, "alphanum_fraction": 0.4684860408306122, 
"avg_line_length": 32.4782600402832, "blob_id": "381502c937816e48575f7d696e5c8b885e71f4ff", "content_id": "12b2b6279a1808f853f9153bba8751e12fdff6db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1983, "license_type": "no_license", "max_line_length": 85, "num_lines": 46, "path": "/알고리즘/온라인저지/2022/09/0913/치즈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\nN, M = map(int, input().split())\narr = [list(map(int, input().split())) for _ in range(N)]\nresult = [0, int(1e9)] # [녹는데 걸리는 시간, 다 녹기 한 시간 전 남은 치즈 수]\nwhile True:\n melt = True # 다 녹았는지 확인할 것\n cheese = 0 # 지금 남아있는 치즈 수\n for i in range(N):\n for j in range(M):\n if arr[i][j]: # 하나라도 안 녹은 치즈가 있다면\n melt = False # 다 녹은 것은 아니지\n cheese += 1 # 치즈 개수 증가\n if melt: # 다 녹았다면\n for r in result: print(r) # 출력\n break\n else: # 다 안녹았다면 녹이러 가자!\n Q, cheese_Q = deque(), deque() # 배열탐색 deque, 녹일 치즈만 담을 deque\n visited = [[0]*M for _ in range(N)]\n Q.append((0, 0)) # 배열의 가장자리는 무조건 공기이다\n visited[0][0] = 1 \n while Q:\n y, x = Q.popleft()\n for k in range(4):\n ny, nx = y+dy[k], x+dx[k] # 상하좌우\n if 0<=ny<N and 0<=nx<M and not visited[ny][nx]:\n visited[ny][nx] = 1 # 치즈든 공기든, 탐색할 것\n if arr[ny][nx]: # 치즈\n cheese_Q.append((ny, nx)) # 녹일 치즈\n else: # 공기\n Q.append((ny, nx)) # 다음 탐색할 공기\n while cheese_Q: # 치즈를 녹이자\n y, x = cheese_Q.popleft()\n arr[y][x] = 0 # 치즈가 녹는다\n result[0] += 1 # 한 시간 경과\n result[1] = min(result[1], cheese) # 배열의 치즈들은 녹았지만, cheese는 녹기 전 치즈 수를 가지고 있다\n\n\"\"\"\n핵심\n1. 공기를 기준으로 주변 치즈를 녹인다\n2. 
녹이면서 진행하지 말고\n 녹일 치즈들을 한 데 모아, 한 번에 녹인다\n\"\"\"\n\n# https://www.acmicpc.net/problem/2636" }, { "alpha_fraction": 0.5240174531936646, "alphanum_fraction": 0.5327510833740234, "avg_line_length": 27.75, "blob_id": "568692f7dc0910a82bb61ea04e4b91c952fc2f5b", "content_id": "a0925f50f0880181d80ee561fe6e3c7ad38ba08d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "no_license", "max_line_length": 56, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/12/1210/대충 더해.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(str, sorted(list(map(int, input().split()))))\nresult = ''\nfor i in range(1, len(B)+1):\n try:\n result = str(int(A[-i]) + int(B[-i])) + result\n except:\n result = str(int(B[-i])) + result\nprint(result)" }, { "alpha_fraction": 0.4330601096153259, "alphanum_fraction": 0.4439890682697296, "avg_line_length": 21.90625, "blob_id": "dbdfccaf4c158bd8a1a414f13136687066f71142", "content_id": "c05feedc47e5fc56044244c8be8aaf5cbe0658de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 732, "license_type": "no_license", "max_line_length": 47, "num_lines": 32, "path": "/알고리즘/온라인저지/2022/04/0403/세 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import copy\nnums = list(map(int, input().split()))\noriginal = copy.deepcopy(nums)\nnums.sort()\nA, B, C = nums[0], nums[1], nums[2]\n# print(A, B, C)\nope = 1 # * or /\nif A+B == C:\n ope = 0 # + or -\n# print(ope)\n# print(nums, original)\nD, E, F = original[0], original[1], original[2]\nresult = ''\nif not ope:\n if D+E == F:\n result = f'{D}+{E}={F}'\n elif E+F == D:\n result = f'{D}={E}+{F}'\n elif D-E == F:\n result = f'{D}-{E}={F}'\n elif E-F == D:\n result = f'{D}={E}-{F}'\nelse:\n if D*E == F:\n result = f'{D}*{E}={F}'\n elif E*F == D:\n result = f'{D}={E}*{F}'\n elif D//E == F:\n result = f'{D}/{E}={F}'\n elif E//F == D:\n result = 
f'{D}={E}/{F}'\nprint(result)" }, { "alpha_fraction": 0.5584415793418884, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 12, "blob_id": "6480c6993f063ac21f93d51bed3aef955264d14b", "content_id": "e7dbee1d222b155f47cd53e5ad0a5f170309e999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/02/0202/영수증.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "bill = int(input())\n\nfor _ in range(9):\n bill -= int(input())\n\nprint(bill)" }, { "alpha_fraction": 0.4455958604812622, "alphanum_fraction": 0.46113988757133484, "avg_line_length": 18.399999618530273, "blob_id": "79ca20c7a273aad5dcbfaf7770e9f644cf683ce0", "content_id": "d4e88f61dc1833c454a77d3907e808cf7d29430b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/02/0220/ABC.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "nums = list(map(int, input().split()))\nnums.sort()\n\nfor i in input():\n if i == 'A':\n print(nums[0])\n elif i == 'B':\n print(nums[1])\n elif i == 'C':\n print(nums[2])" }, { "alpha_fraction": 0.4766763746738434, "alphanum_fraction": 0.5199222564697266, "avg_line_length": 22.397727966308594, "blob_id": "ac9780e815a2647a0816600313fad516f4576630", "content_id": "df70e96931cd8eb4b9b272fb63a1421e27eff755", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2602, "license_type": "no_license", "max_line_length": 68, "num_lines": 88, "path": "/알고리즘/온라인저지/2022/03/0314/톱니바퀴.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n반시계\n11001110\n-1\n10011101\n\n시계\n11001110\n1\n01100111\n\"\"\"\n\nfrom pprint import pprint\n\n\ndef clockwise(i): # 시계방향\n 
global gears, visited\n if i not in range(4): # 톱니 인덱스가 범위를 벗어나면\n return # 함수 종료\n # print(\"clockwise!\", i+1) # 디버깅\n try:\n if gears[i][2] != gears[i+1][6]: # 현재 톱니바퀴의 오른쪽 톱니바퀴와 극이 다르면\n visited[i] = 1 # 방문처리\n if not visited[i+1]: # 확인하지 않은 톱니바퀴에 대해\n counter_clockwise(i+1) # 오른쪽 톱니 반시계방향\n except:\n pass\n try:\n if gears[i][6] != gears[i-1][2]: # 현재 톱니바퀴의 왼쪽 톱니바퀴와 극이 다르면\n visited[i] = 1\n if not visited[i-1]:\n counter_clockwise(i-1) # 왼쪽 톱니 반시계방향\n except:\n pass\n tmp = [gears[i][-1]] + gears[i][:7] # 시계방향으로 돌려주기\n gears[i] = tmp\n\ndef counter_clockwise(i): # 반시계방향\n global gears, visited\n if i not in range(4): # 톱니 인덱스가 범위를 벗어나면\n return # 함수 종료\n # print('counter!!', i+1) # 디버깅\n try:\n if gears[i][2] != gears[i+1][6]: # 오른쪽 톱니와 극이 다르면\n visited[i] = 1\n if not visited[i+1]:\n clockwise(i+1) # 오른쪽 톱니 시계방향\n except:\n pass\n try:\n if gears[i][6] != gears[i-1][2]: # 왼쪽 톱니와 극이 다르면\n visited[i] = 1\n if not visited[i-1]:\n clockwise(i-1) # 왼쪽 톱니 시계방향\n except:\n pass\n tmp = gears[i][1:] + [gears[i][0]] # 반시계방향으로 돌려주기\n gears[i] = tmp\n\n\n# 값들 입력받기\ngears = [] # 톱니들\nfor _ in range(4):\n gears.append(list(map(int, input())))\nK = int(input())\norders = [] # 돌리는 톱니번호와 방향들\nfor k in range(K):\n orders.append(tuple(map(int, input().split())))\n\n# pprint(gears) # 디버깅\n\nfor order in orders: \n # print(order) # 디버깅\n visited = [0] * 4 # 매 톱니마다 방문배열 초기화\n if order[1] == 1:\n clockwise(order[0]-1) # 시계방향\n elif order[1] == -1:\n counter_clockwise(order[0]-1) # 반시계방향\n # pprint(gears) # 디버깅\n\nresult = 0 # 결과값 초기화\nnum = 1 # 결과값에 더할 변수값 초기화\nfor i in range(4):\n if gears[i][0]: # 12시 톱니가 1이면\n result += num # 결과값 갱신\n num *= 2 # 1 2 4 8 \n\nprint(result) # 결과값 출력" }, { "alpha_fraction": 0.4950000047683716, "alphanum_fraction": 0.5149999856948853, "avg_line_length": 21.33333396911621, "blob_id": "58c73bbd31ce80e07433c7117e77e02416d75334", "content_id": "0c95248971f536e888baadc3cac34581bae48eb6", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 38, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/05/0519/나이순 정렬.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "age = [[] for _ in range(201)]\nT = int(input())\nfor t in range(T):\n member = input().split()\n age[int(member[0])].append(member)\nfor a in age:\n if a:\n for i in a:\n print(*i)" }, { "alpha_fraction": 0.27710843086242676, "alphanum_fraction": 0.4518072307109833, "avg_line_length": 12.916666984558105, "blob_id": "6e33e24ff9174afac506797062913afb4a99e02e", "content_id": "6a33f7a4b542b779d8d4624dbc2d15fbf4c39976", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/05/0531/도미노.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a = 0\nresult = 0\nfor i in range(int(input()), -1, -1):\n a += 3\n result += i*a\nprint(result)\n\n\"\"\"\n2 = 0+1+2 = 0+3+9\n3=0+1+2+3 = 0+3+9+18 = 0+3+(3+6)+(3+6+9)\n\n\"\"\"" }, { "alpha_fraction": 0.4956521689891815, "alphanum_fraction": 0.4956521689891815, "avg_line_length": 16.769229888916016, "blob_id": "600a78a3dd5d9d751b8e1e3dab0ff4dbeef146f8", "content_id": "a2a76d0ad887dc2fa74cbee100a56ff7ccd0ec58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/알고리즘/온라인저지/2021/12/1216/TGN.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nfor n in range(N):\n r, e, c = map(int, input().split())\n\n fee = e-c\n\n if fee > r:\n print('advertise')\n elif fee < r:\n print('do not advertise')\n else:\n print('does not matter')" }, { "alpha_fraction": 0.5249999761581421, "alphanum_fraction": 0.550000011920929, "avg_line_length": 12.666666984558105, "blob_id": 
"ff196def3d709304e1b727e33236d0290332be74", "content_id": "dba783ef9e53faedbc43ed33d75b3c9466b98f24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 21, "num_lines": 3, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/38차시 4. 문자열 - 연습문제 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "tmp = input().split()\n\nprint(*tmp[::-1])" }, { "alpha_fraction": 0.4427710771560669, "alphanum_fraction": 0.45783132314682007, "avg_line_length": 21.200000762939453, "blob_id": "b45e1f3596602fda0343822d9f279cc6df6e2b55", "content_id": "1016c790054ead630c171542ec4a7acfaa766c1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 41, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/07/0723/폴리오미노.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "board = input().split('.')\n# print(board)\nresult = ''\nfor i in range(len(board)):\n if len(board[i])%2:\n result = -1\n break\n result += 'AAAA' * (len(board[i])//4)\n if len(board[i])%4:\n result += 'BB'\n if i != len(board)-1:\n result += '.'\n # if b == '':\n # result += '.'\nprint(result)" }, { "alpha_fraction": 0.5798816680908203, "alphanum_fraction": 0.5964497327804565, "avg_line_length": 24.636363983154297, "blob_id": "f700b080ff8632ffebd63c839c4583cca59f1a6e", "content_id": "0289e2c412d52bd79e3328ed9df2764663cba35e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1475, "license_type": "no_license", "max_line_length": 69, "num_lines": 33, "path": "/알고리즘/온라인저지/2022/08/0816/쇠막대기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "B, result, S, j = input(), 0, [], 0 # bar, result, stack, laser index\nL = [i for i in range(1, len(B)) if B[i-1]+B[i] == '()'] # laser\nfor i 
in range(len(B)):\n if B[i] == '(': S.append(B[i])\n elif B[i] == ')':\n if i == L[j]: \n S.pop()\n result += len(S)\n if j < len(L)-1: j += 1\n else: S.pop(); result += 1\nprint(result)\n\n\"\"\"\n쇠막대기의 시작('(')을 스택에 넣는다\n레이저가 등장하면 등장한 모든 쇠막대기는 잘려나가고\nlen(stack) 만큼의 잘린 부분이 생겨난다\n쇠막대기가 끝나면 바로 직전 레이저에서 잘려나갔기 때문에\n잘린 마지막 파편을 result++ 하고, 스택에서 꺼낸다\n위 과정을 반복하되\n레이저는 시작과 끝에 존재할 수 있으니 조심\n\n<코드 리팩토링>\n레이저인지 확인하는 방법으로\n쇠막대기 입력에서 레이저의 인덱스를 추출하고 저장하였다\n')'가 등장할 때의 인덱스가, 레이저인덱스(L)에 있으면 레이저고\n레이저인덱스에 없으면 레이저가 아닌 닫는 쇠막대기다\n이 레이저인덱스를 확인하는 과정에서 in 연산자를 사용했던\n처음 제출한 코드는 608ms의 시간이 걸렸고\n인덱스를 직접 잡아주는 방식으로 코드를 바꿔주니\n156ms가 걸리면서, 약 4배의 시간단축을 보여주었다\nin이 진짜 느리긴 느리구나.. 실버 상위 문제부터는\n사용하기가 망설여진다\n\"\"\"" }, { "alpha_fraction": 0.4560000002384186, "alphanum_fraction": 0.4959999918937683, "avg_line_length": 11.5, "blob_id": "5727dc3f42de44389f632e1e85f90cff7d95ce78", "content_id": "d1514f23d48eef9574fb83d3d0754f06bfc4be86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 125, "license_type": "no_license", "max_line_length": 27, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/01/0131/팩토리얼.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nresult = 1\n\nif N > 1:\n for i in range(2, N+1):\n result *= i\n print(result)\nelse:\n print(1)\n" }, { "alpha_fraction": 0.4845132827758789, "alphanum_fraction": 0.508849561214447, "avg_line_length": 20.571428298950195, "blob_id": "423923f14024b5c7867f1043b30e0623642c4379", "content_id": "9ae4bb6f259de670d6fa943e2326c4758f369745", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, "license_type": "no_license", "max_line_length": 44, "num_lines": 21, "path": "/알고리즘/온라인저지/2022/03/0307/N과 M (4).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def dfs(s, depth):\n global N, M\n\n if depth == M: # M개의 수를 담았을 때\n print(*result[1:]) # *로 언패킹해서 출력\n return # 재귀 탈출\n\n # 아직 M개의 수를 담지 
않았을 때\n depth += 1 # 하나 더 고르자\n\n for i in range(s, N+1): # 시작하는 수 s부터 N까지\n result[depth] = i # depth번째 수는 i\n dfs(s, depth) # 한 depth더 들어가기\n s += 1 # 들어갔다 나왔으면 s를 하나 올리기\n\n\nN, M = map(int, input().split())\n\nresult = [0] * (M+1) # 인덱스 맞추기 위한 M+1\n\ndfs(1, 0) # 시작은 1, 깊이는 0부터 시작" }, { "alpha_fraction": 0.38823530077934265, "alphanum_fraction": 0.4117647111415863, "avg_line_length": 11.285714149475098, "blob_id": "d8ddeac45dae25d116f633d63f94ec68a0720735", "content_id": "c1cfc5e192f87fb931205176dfa20a7a91d87dbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "no_license", "max_line_length": 20, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/05/0522/末尾の文字 (Last Letter).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "input()\nS = input()\nif S[-1] == 'G':\n S = S[:len(S)-1]\nelse:\n S += 'G'\nprint(S)" }, { "alpha_fraction": 0.51171875, "alphanum_fraction": 0.53515625, "avg_line_length": 22.363636016845703, "blob_id": "ff83b866bf74ee84e2c12dbbeb977a5f8cf0ead1", "content_id": "ba8b014e5726aa1edcef68da71ea28d342e1faa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 39, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/07/0728/베스트셀러.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr = dict()\nfor n in range(N):\n book = input()\n if book not in arr:\n arr[book] = 1\n else: # book in arr\n arr[book] += 1\nresult = [(a, arr[a]) for a in arr]\nresult.sort(key=lambda x:(-x[1], x[0]))\nprint(result[0][0])" }, { "alpha_fraction": 0.4858871102333069, "alphanum_fraction": 0.49193549156188965, "avg_line_length": 26.61111068725586, "blob_id": "ed588628c101b2a57f2d2053901bf053bf7f6355", "content_id": "09cda782898f459038bcc6bc11b09f57ffe21602", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 496, "license_type": "no_license", "max_line_length": 59, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/11/1126/기상캐스터.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\nINF = int(1e9)\nH, W = map(int, input().rstrip().split())\nresult = [[INF]*W for _ in range(H)]\narr = [input().rstrip() for _ in range(H)]\nfor i in range(H):\n for j in range(W):\n if arr[i][j] == 'c':\n for k in range(W):\n if j+k < W:\n result[i][j+k] = min(result[i][j+k], k)\nfor i in range(H):\n for j in range(W):\n if result[i][j] == INF:\n result[i][j] = -1\nfor r in result: print(*r)" }, { "alpha_fraction": 0.4585798680782318, "alphanum_fraction": 0.4822485148906708, "avg_line_length": 24.461538314819336, "blob_id": "b5d675023456534f33549813ddd29a11444266ef", "content_id": "bfc05ec1b2095455a00c26cfe0941665c0646ec7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/12/1227/3대 측정.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, K, L = map(int, input().split())\nresult = [0, []]\nfor n in range(N):\n A, B, C = map(int, input().split())\n summ = A + B + C\n minn = min(A, B, C)\n if summ >= K and minn >= L:\n result[0] += 1\n result[1].append(A)\n result[1].append(B)\n result[1].append(C)\nprint(result[0])\nprint(*result[1]) " }, { "alpha_fraction": 0.6518518328666687, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 26.200000762939453, "blob_id": "66c20f34d98d555707ddc2725996014ffcd2b13b", "content_id": "650c521bbded45a6afd9bbd5a19ccae9f84ea023", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/05/0519/N과 M (2).py", "repo_name": "sorrow4468/BAEKJOON", 
"src_encoding": "UTF-8", "text": "from itertools import combinations\n\nN, M = map(int, input().split())\nfor i in list(combinations(list(range(1, N+1)), M)):\n print(*i)" }, { "alpha_fraction": 0.47044631838798523, "alphanum_fraction": 0.5054282546043396, "avg_line_length": 23.41176414489746, "blob_id": "8b523ddc1783b78c958286e79de0230c4b6510d4", "content_id": "bbd064548cffe53384b53bfeb53d311ab709a496", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 899, "license_type": "no_license", "max_line_length": 63, "num_lines": 34, "path": "/알고리즘/온라인저지/2022/10/1002/아기 상어 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\ndy = [-1, -1, 0, 1, 1, 1, 0, -1] # 12시부터 시계방향\ndx = [0, 1, 1, 1, 0, -1, -1, -1]\n\ndef BFS(i, j):\n visited = [[0]*M for _ in [0]*N]\n Q = deque()\n Q.append((i, j, 0))\n visited[i][j] = 1\n while Q:\n y, x, dist = Q.popleft()\n if arr[y][x]: break\n for k in range(8):\n ny, nx = y+dy[k], x+dx[k]\n if 0<=ny<N and 0<=nx<M and not visited[ny][nx]:\n Q.append((ny, nx, dist+1))\n visited[ny][nx] = 1\n return dist\n\nN, M = map(int, input().rstrip().split())\narr = [list(map(int, input().rstrip().split())) for _ in [0]*N]\nresult = 0\nfor i in range(N):\n for j in range(M):\n result = max(result,BFS(i, j))\nprint(result)\n\n\"\"\"\n매 좌표마다 상어안전거리를 구한다 : brute force\n새 최소값을 매 회마다 갱신한다\n\"\"\"" }, { "alpha_fraction": 0.5189873576164246, "alphanum_fraction": 0.5189873576164246, "avg_line_length": 14.699999809265137, "blob_id": "b45794a29d8491747869683fe07a39901b7df620", "content_id": "5f16ec828277d86c002c39a058a0fe9777be760d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 158, "license_type": "no_license", "max_line_length": 43, "num_lines": 10, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/30차시 3. 
자료구조 – 셋, 딕셔너리 - 연습문제 5.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "fruit = [' apple ','banana',' melon']\n\nresult = {}\n\nfor f in fruit:\n f = f.lstrip()\n f = f.rstrip()\n result.update({f:len(f)})\n\nprint(result)\n\n" }, { "alpha_fraction": 0.45185184478759766, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 18.428571701049805, "blob_id": "cf5456cd9601d3bbe7ef8c4de2188bb85be45dbb", "content_id": "cf1126879f42b55b445e9acb94e00230a7ba17e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/08/0810/Site Score.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = list(map(int, input().split()))\nC = [56, 24, 14, 6]\nresult = 0\nfor i in range(4):\n result += S[i]*C[i]\n S *= 10\nprint(result)" }, { "alpha_fraction": 0.6004140973091125, "alphanum_fraction": 0.6169772148132324, "avg_line_length": 24.473684310913086, "blob_id": "5656ccc00fce2f2d2bff3e570a2e7087b77e4290", "content_id": "d6f844d98a7725b20bf052bfb1cba640cd15a191", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 483, "license_type": "no_license", "max_line_length": 69, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/10/1022/종이자르기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nW, H = map(int, input().rstrip().split())\nrows, cols = [0, H], [0, W]\nN = int(input().rstrip())\nfor n in range(N):\n D, C = map(int, input().rstrip().split()) # direction, coordinate\n if D: cols.append(C)\n else: rows.append(C)\nrows.sort()\ncols.sort()\nwidth, height = 0, 0\nfor i in range(len(rows)-1):\n width = max(width, rows[i+1]-rows[i])\nfor i in range(len(cols)-1):\n height = max(height, cols[i+1]-cols[i])\nprint(width*height)" }, { "alpha_fraction": 0.5625, 
"alphanum_fraction": 0.581250011920929, "avg_line_length": 19.125, "blob_id": "05fe42c5321e844645939c10c3d87bbc0b1dd8a6", "content_id": "d13cf8ae43341a7312cf700471c38dfb598f5a6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 55, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/07/0715/탄산 음료.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "E, F, C = map(int, input().split()) # empty find change\nresult = 0\nbottle = E+F\nwhile bottle >= C:\n result += 1\n bottle -= C\n bottle += 1\nprint(result)" }, { "alpha_fraction": 0.26377952098846436, "alphanum_fraction": 0.29527559876441956, "avg_line_length": 14, "blob_id": "b9fbb6d961f9a2d9566498a3a58185aa1aa149dc", "content_id": "bcb9f4f102d2c3429c11f5b93084a331d9e7bbee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 35, "num_lines": 17, "path": "/알고리즘/온라인저지/2023/02/0225/명장 남정훈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "L, R, A = map(int, input().split())\n\nwhile A:\n if L > R:\n R += 1\n A -= 1\n elif L < R:\n L += 1\n A -= 1\n else:\n if A == 1:\n break\n else:\n L += 1\n A -= 1\n\nprint(min(L, R)*2)" }, { "alpha_fraction": 0.43617022037506104, "alphanum_fraction": 0.46072012186050415, "avg_line_length": 21.236364364624023, "blob_id": "135b4507d32569caa05fc9bb53520c10d28bf6ef", "content_id": "f28a68652aab3e2f182025ee2b961333d9c9e5ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1232, "license_type": "no_license", "max_line_length": 61, "num_lines": 55, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/5. 
파이썬 SW문제해결 기본 Stack2/7차시 5일차 - 배열 최소 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def dfs(N, depth, tmp):\n global result\n if N == depth :\n if tmp < result:\n result = tmp\n \n if tmp > result:\n return\n else:\n for i in range(N):\n if visited[i] == 0:\n visited[i] = 1\n dfs(N, depth+1, tmp+arr[depth][i])\n visited[i] = 0\n \nT = int(input())\nfor t in range(1, T+1):\n N = int(input())\n arr = [list(map(int, input().split())) for _ in range(N)]\n visited = [0]*N\n \n depth = 0\n result = 10*N\n \n dfs(N, 0, 0)\n print('#{} {}'.format(t, result))\n\n\n\"\"\"\n2022-09-01에 다시 풀음\ndef dfs(level):\n global result, tmp\n if level == N:\n result = min(result, tmp)\n return\n if tmp > result: return\n for i in range(N): \n # print(level, i, arr[level][i])\n if not visited[i]:\n tmp += arr[level][i]\n visited[i] = 1\n dfs(level+1)\n tmp -= arr[level][i]\n visited[i] = 0\n\nfor t in range(1, int(input())+1):\n N = int(input())\n arr = [list(map(int, input().split())) for _ in range(N)]\n # for a in arr: print(a)\n visited = [0] * N\n result = int(1e9)\n tmp = 0\n dfs(0)\n print('#{} {}'.format(t, result))\n\"\"\"" }, { "alpha_fraction": 0.5841270089149475, "alphanum_fraction": 0.5841270089149475, "avg_line_length": 23.30769157409668, "blob_id": "d2e751895220c69fd8d09932b02e41aa6a698747", "content_id": "3bc76cdbe11369b994895155ce9069d4bf493cf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 40, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/07/0729/대칭 차집합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nA_list = list(map(int, input().split()))\nA_set = set(A_list)\nB_list = list(map(int, input().split()))\nB_set = set(B_list)\nresult = []\nfor a in A_list:\n if a not in B_set:\n result.append(a)\nfor b in B_list:\n if b not in A_set:\n result.append(b)\nprint(len(result))" 
}, { "alpha_fraction": 0.351005494594574, "alphanum_fraction": 0.35283362865448, "avg_line_length": 19.884614944458008, "blob_id": "5389ecc35c91ed4ac9ec61061dd4c17202329c9e", "content_id": "771d947536fe3653be95cb1c2e671bd9ffe471d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "no_license", "max_line_length": 60, "num_lines": 26, "path": "/알고리즘/온라인저지/2021/09/0905/균형잡힌 세상.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "bracket = {\n ')': '(',\n '}': '{',\n ']': '[',\n}\n\nwhile True:\n sen = input() # sys.stdin.readline() 쓰니까 종료조건을 회피하지 못하더라\n if sen == '.':\n break\n stack = []\n for s in sen:\n if s in '[{(':\n stack.append(s)\n elif s in ')}]':\n if stack:\n if bracket[s] == stack[-1]:\n stack.pop()\n else:\n stack.append(s)\n else:\n stack.append(s)\n if stack:\n print('no')\n else:\n print('yes')\n " }, { "alpha_fraction": 0.5792349576950073, "alphanum_fraction": 0.5846994519233704, "avg_line_length": 15.727272987365723, "blob_id": "9efababed0f87f34dd080672c7febe1e107d3a36", "content_id": "13e8c2b663b2a740a5e7cb509832fb6c73f58bea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 43, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/07/0726/귀찮음.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input())\nP = sorted(list(map(int, input().split())))\nresult, S = 0, sum(P)\nfor p in P:\n result += p * (S-p)\n S -= p\nprint(result)" }, { "alpha_fraction": 0.2741433084011078, "alphanum_fraction": 0.30841121077537537, "avg_line_length": 12.391304016113281, "blob_id": "d718d4c5ed37facaaf6461d05c82f8a03b6cf98c", "content_id": "2be348ccedfd89cfd99a5809b796d43203f42a81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, 
"license_type": "no_license", "max_line_length": 36, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/02/0201/하얀 칸.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "i = 1\n\nresult = 0\n\nfor _ in range(8):\n line = input()\n\n j = 1\n\n for l in line:\n if i%2:\n if j%2 and l == 'F':\n result += 1\n \n else:\n if not j%2 and l == 'F':\n result += 1\n \n j += 1\n \n i += 1\n\nprint(result)\n \n " }, { "alpha_fraction": 0.2732732594013214, "alphanum_fraction": 0.39039039611816406, "avg_line_length": 10.066666603088379, "blob_id": "28b268a28a1f446198d2c7d004f2585000a6f4b4", "content_id": "a09525f598db7b1c4733143cafd2b84781204e9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 33, "num_lines": 30, "path": "/알고리즘/온라인저지/2022/01/0104/2×n 타일링 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\ndp = [0 for _ in range(N+1)]\n\ndp[1] = 1\nif N >= 2:\n dp[2] = 3\n if N == 2:\n print(dp[2])\n exit()\nif N >= 3:\n dp[3] = 5\n if N == 3:\n print(dp[3])\n exit()\n\nfor i in range(4, N+1):\n dp[i] = dp[i-1] + dp[i-2] * 2\n\nprint(dp[N]%10007)\n\n# 0\n# 1\n# 3\n# 5\n# 11\n# 21\n# 43\n# 85\n# 171\n\n" }, { "alpha_fraction": 0.4626334607601166, "alphanum_fraction": 0.4839857518672943, "avg_line_length": 12.428571701049805, "blob_id": "f3b5808de2af6f929c3bb35a87784d8e27b38b35", "content_id": "ac388b59e6cb072b16fa3274a9263c384a9ca452", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 26, "num_lines": 21, "path": "/알고리즘/온라인저지/2022/10/1012/수 이어 쓰기 1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nD = 9\nsep = []\nwhile True:\n if N >= D:\n sep.append(D)\n N -= D\n D *= 10\n else:\n sep.append(N)\n 
break\nresult = 0\ni = 1\nfor s in sep: \n result += i*s\n i += 1\nprint(result)" }, { "alpha_fraction": 0.3025210201740265, "alphanum_fraction": 0.3235294222831726, "avg_line_length": 25.44444465637207, "blob_id": "e6277b30e4fc7e8a35d8a080d1cf51687447a491", "content_id": "99da6e60f7674adbd4da1fb615eb23342721dc6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "no_license", "max_line_length": 42, "num_lines": 36, "path": "/알고리즘/온라인저지/2021/09/0908/프린터 큐.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\nT = int(input())\nfor t in range(T):\n N, M = map(int, input().split())\n docs = list(map(int, input().split()))\n cnt = 0\n end = True\n while end:\n if docs[M] != max(docs):\n if docs[0] != max(docs):\n docs.append(docs.pop(0))\n M -= 1\n if M < 0:\n M += N\n else: # docs[0] == max(docs)\n docs.pop(0)\n cnt += 1\n N -= 1\n M -= 1 \n else: # docs[M] == max(docs)\n if M != 0:\n if docs[0] == docs[M]:\n docs.pop(0)\n cnt += 1\n M -= 1\n else: # docs[0] != docs[M]\n docs.pop(0)\n M -= 1\n else: # M == 0\n cnt += 1\n end = False\n break\n print(cnt)\n" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.5572916865348816, "avg_line_length": 18.299999237060547, "blob_id": "8c8bb12f2f8e8519e155f6d3d494ca0c2d372173", "content_id": "2c31c9c5b85a016342bb921fd52c8ac9a54f0878", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 32, "num_lines": 10, "path": "/알고리즘/온라인저지/2021/08/0802/쉽게 푸는 문제.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nnumbers = []\nfor i in range(1, B+2):\n for j in range(i):\n numbers.append(i)\nls = []\nfor i in range(A-1, B):\n ls.append(numbers[i])\n\nprint(sum(ls))" }, { "alpha_fraction": 0.4839743673801422, 
"alphanum_fraction": 0.5256410241127014, "avg_line_length": 21.35714340209961, "blob_id": "5de89e8473a4126b47d44360cbf370b1e4cb0d5d", "content_id": "1b00554f47b389f94b22363138a3d9f46d17b8da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 62, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/11/1118/좋은 자동차 번호판.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in range(int(input().rstrip())):\n number = input().rstrip()\n front = number[:3]\n rear = number[4:]\n tmp = 0\n j = 0\n for f in front[::-1]:\n tmp += 26**j*(ord(f)-65)\n j += 1\n print('nice' if abs(tmp-int(rear)) <= 100 else 'not nice')" }, { "alpha_fraction": 0.5081967115402222, "alphanum_fraction": 0.5245901346206665, "avg_line_length": 30, "blob_id": "4d41bf1041cea1f4c1830ea1eaed491db541b828", "content_id": "21b62b699e0561e07bc396aa1ee332473d61fea4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/알고리즘/온라인저지/2023/02/0209/WARBOY.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C = map(int, input().split())\nprint(eval('C*3*(B//A)'))" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.4615384638309479, "avg_line_length": 18.5625, "blob_id": "804c5a2af7ddb14784ae35df2162fbde344d4f2a", "content_id": "b85ad088ea9778ee02ebfa8a8bd7c9a540aaa38e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 49, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/10/1027/치킨 쿠폰.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nwhile True:\n try:\n N, K = map(int, input().rstrip().split())\n 
result = N\n stamp = N\n while stamp>=K:\n tmp = stamp//K\n stamp %= K\n result += tmp\n stamp += tmp\n print(result)\n except: break" }, { "alpha_fraction": 0.4871794879436493, "alphanum_fraction": 0.5192307829856873, "avg_line_length": 25.16666603088379, "blob_id": "b37c146e11723be6e0547c43baacd05f7da73177", "content_id": "a160a2216c7f4b8e5ed99aa6aba4bdb89d40a49f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 44, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/01/0120/삼각수의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n N = int(input())\n result = 0\n for k in range(1, N+1):\n result += k*sum(list(range(1, k+2)))\n print(result)" }, { "alpha_fraction": 0.46612149477005005, "alphanum_fraction": 0.4754672944545746, "avg_line_length": 21.526315689086914, "blob_id": "8b484550b4bfef6ee0be15cd610492e0fd364835", "content_id": "e05db2367aa6e0630341b096c10e0841c870863f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "no_license", "max_line_length": 54, "num_lines": 38, "path": "/알고리즘/온라인저지/2022/07/0703/test.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def dfs(start):\n global is_dfs, result, visited\n print(start)\n if start == 1:\n for j in arr[1]:\n result = []\n visited = [True] + [False] * N \n visited[start] = True\n result.append(start)\n dfs(j)\n tmp.append(result)\n if result == answer:\n is_dfs = True\n else:\n visited[start] = True\n result.append(start)\n for i in arr[start]:\n if not visited[i]:\n dfs(i)\n\nN = int(input())\narr = [[] for _ in range(N+1)]\nfor n in range(N-1):\n s, e = map(int, input().split())\n arr[s].append(e)\n arr[e].append(s)\nanswer = list(map(int, input().split()))[::-1]\nis_dfs = False\nresult = []\ntmp = []\nvisited = [True] + [False] * N\ntmp = 
[]\ndfs(1)\nprint(tmp)\nif is_dfs:\n print(1)\nelse:\n print(0)\n" }, { "alpha_fraction": 0.42702701687812805, "alphanum_fraction": 0.5027027130126953, "avg_line_length": 25.5, "blob_id": "e78d2f4475de576407e092e2eb17a8937ace5594", "content_id": "ba43116ef0143428dc9417eb823c109973c2c845", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "no_license", "max_line_length": 55, "num_lines": 14, "path": "/알고리즘/[템플릿]/Dynamic Programming/계단 오르기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nS = [0 for _ in range(301)] # stairs \ndp = [0 for _ in range(301)] # 미리 선언하여 N<=3 일 때 에러를 방지함\nfor i in range(N): S[i] = int(input())\ndp[0] = S[0]\ndp[1] = S[0] + S[1]\ndp[2] = max(S[1]+S[2], S[0]+S[2])\nfor i in range(3, N):\n A = dp[i-3] + S[i-1] + S[i]\n B = dp[i-2] + S[i]\n dp[i] = max(A, B)\nprint(dp[N-1])\n\n# https://www.acmicpc.net/problem/2579" }, { "alpha_fraction": 0.5869565010070801, "alphanum_fraction": 0.602173924446106, "avg_line_length": 34.46154022216797, "blob_id": "4c972b8f9953bfd2311707560877b896010ea685", "content_id": "a2f31c245bdf2da6cdcd9e44b2fd5050b884b2ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 644, "license_type": "no_license", "max_line_length": 81, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/09/0927/부분수열의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, S = map(int, input().rstrip().split())\narr = list(map(int, input().rstrip().split()))\nresult = 0\nfor i in range(1<<N): # N개의 원소를 포함한다, 안한다의 경우가 있으므로 총 경우의 수는 1<<N == 2**N\n seq = [] # 부분수열 초기화\n for j in range(N): # N개 원소들에 대하여 부분수열인지 확인\n if i&(1<<j): seq.append(arr[j]) # &연산을 거친 값이 True라면, j번째 원소는 부분수열에 존재한다\n if sum(seq) == S and len(seq) != 0: result += 1 # 공집합이 아니면서, 부분수열의 합이 S이면 카운트\nprint(result)" }, { "alpha_fraction": 
0.5051020383834839, "alphanum_fraction": 0.5204081535339355, "avg_line_length": 18.700000762939453, "blob_id": "f76fbaf731e5ac896de99dda409f31b888404c9d", "content_id": "6f9133bc61c9b364a942f2cee06ce58e9b7f7316", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 37, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/05/0529/수빈이와 수열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr = list(map(int, input().split()))\n# print(arr)\nfor i in range(N):\n arr[i] *= i+1\n# print(arr)\nif N > 0:\n for i in range(1, N):\n arr[i] -= sum(arr[:i])\nprint(*arr)" }, { "alpha_fraction": 0.40740740299224854, "alphanum_fraction": 0.4444444477558136, "avg_line_length": 15.399999618530273, "blob_id": "5a94c070f0c057635141a9fe7471796e9f0cfc09", "content_id": "78ad9fb612b4e9fbc22373d6439d9cbb5a1b918c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 23, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/05/0524/Gömda ord.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = input()\ni = 0\nwhile i < len(S):\n print(S[i], end='')\n i += ord(S[i])-64" }, { "alpha_fraction": 0.5045045018196106, "alphanum_fraction": 0.522522509098053, "avg_line_length": 22.421052932739258, "blob_id": "9c7da66eaba53648b2d64ff39456a63d652c048d", "content_id": "6a50bcfce6adc29477389f9d66068a14f5f01a10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 772, "license_type": "no_license", "max_line_length": 38, "num_lines": 19, "path": "/알고리즘/온라인저지/2021/10/1003/무한이진트리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a, b = map(int, input().split()) # 끝노드\n\n# 시작노드에서 왼쪽으로 내려가면 a가 커지고\n# 오른쪽으로 내려가면 b가 커진다\n# 주어진 (a, b)노드에서 a가 크면 왼쪽으로 내려온 노드\n# b가 크면 오른쪽으로 내려온 
노드임을 알 수 있다\n# 이를 이용해 (a, b)에서 (1, 1)까지 거꾸로 올라간다\n\nl = r = 0 # 올라가면서 체크할 왼쪽 오른쪽 이동 횟수\n\nwhile a+b != 2: # 루트노드에 도달할 때 까지\n if a > b: # 왼쪽으로 내려왔던 노드이면\n a -= b # 부모노드의 a\n l += 1 # 왼쪽 카운트 +1\n elif a < b: # 오른쪽으로 내려왔던 노드이면\n b -= a # 부모노드의 b\n r += 1 # 오른쪽 카운트 +1\n\nprint(l, r) # 출력" }, { "alpha_fraction": 0.4974271059036255, "alphanum_fraction": 0.5403087735176086, "avg_line_length": 21.461538314819336, "blob_id": "bedab36d73699ed8a4f33bfa5a3f553ee83f9267", "content_id": "7926594e4824120282c82228fc2019c639ea6429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 583, "license_type": "no_license", "max_line_length": 64, "num_lines": 26, "path": "/알고리즘/온라인저지/2023/01/0114/숨바꼭질 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from heapq import heappush, heappop\n\nN, K = map(int, input().split())\nQ = [(0, N)]\nvisited = [int(1e9)]*(int(1e6)+1)\nvisited[N] = 0\nwhile Q:\n time, now = heappop(Q)\n go, back, jump = max(0, now+1), max(0, now-1), max(0, now*2)\n\n if now == K:\n break\n\n if visited[go] > time+1:\n visited[go] = time+1\n heappush(Q, (time+1, go))\n\n if visited[back] > time+1:\n visited[back] = time+1\n heappush(Q, (time+1, back))\n \n if visited[jump] > time and jump<=100000:\n visited[jump] = time\n heappush(Q, (time, jump))\n\nprint(time)" }, { "alpha_fraction": 0.5367646813392639, "alphanum_fraction": 0.5808823704719543, "avg_line_length": 16.125, "blob_id": "ede47a184c3cbccd5c80ea47ac04168bf8199a75", "content_id": "858755e24bc6dd0c7f896c18205492fdced4985b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 20, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/04/0426/Covid-19.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 'Yellow'\np = int(input())\nq = int(input())\nif p<=50 and q<=10:\n result = 'White'\nelif q>30:\n result = 
'Red'\nprint(result)" }, { "alpha_fraction": 0.5107526779174805, "alphanum_fraction": 0.5215053558349609, "avg_line_length": 14.583333015441895, "blob_id": "9749ff6e6df404d7c8b48fafdccb0472ec503475", "content_id": "b35f547d9ff358abe44a928562996fae89c27d8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 27, "num_lines": 12, "path": "/알고리즘/온라인저지/2023/05/0507/골뱅이 찍기 - 돌아간 ㅍ.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def new_print(line):\n for _ in range(N):\n print(line)\n\nN = int(input())\nA = '@'*N + ' '*3*N + '@'*N\nB = '@'*N*5\nnew_print(A)\nnew_print(B)\nnew_print(A)\nnew_print(B)\nnew_print(A)" }, { "alpha_fraction": 0.5596330165863037, "alphanum_fraction": 0.5688073635101318, "avg_line_length": 21, "blob_id": "765e3806800e17b0909bb65c1b6eca9444ce7c29", "content_id": "1035a17b589c42898a8c2cbf2a59d52cb92ba224", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/01/0119/謎紛芥索紀 (Small).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nfor _ in range(int(input())):\n A, B = map(int, input().split())\n result += A*B\nprint(result)" }, { "alpha_fraction": 0.5743243098258972, "alphanum_fraction": 0.587837815284729, "avg_line_length": 20.285715103149414, "blob_id": "a99af847b7922f62c9249d6d80edcb7394500c44", "content_id": "a0abe24cfcd058f9b0b5871ea2d6423e9b665239", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 45, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/11/1122/쿠키애호가.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in range(int(input().rstrip())):\n 
N, C = map(int, input().rstrip().split())\n print((N-1)//C + 1)" }, { "alpha_fraction": 0.48322147130966187, "alphanum_fraction": 0.4899328947067261, "avg_line_length": 15.666666984558105, "blob_id": "394b3f5a09da0c587816dbd81dc3ce587daa1042", "content_id": "7a596ad3b0f8be6f14db10cf3ea4c19c139a95e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, "license_type": "no_license", "max_line_length": 31, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/03/0325/골뱅이 찍기 - 정사각형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr = []\nA, B = '@'*(N+2), '@'+' '*N+'@'\narr.append(A)\nfor _ in range(N):\n arr.append(B)\narr.append(A)\nfor a in arr:\n print(a)" }, { "alpha_fraction": 0.40514469146728516, "alphanum_fraction": 0.4244372844696045, "avg_line_length": 25, "blob_id": "6bcab918b29c0cc0e41a1d49dadc74d74a0dd5e6", "content_id": "65a60d7271a1e37d14e7e5db89002243889bb697", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 38, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/03/0329/J박스.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor t in range(T):\n N = int(input())\n jbox = [['J']*N for _ in range(N)]\n jbox[0] = jbox[-1] = ['#']*N\n for i in range(1, N-1):\n jbox[i][0] = jbox[i][-1] = '#'\n for i in range(N):\n for j in range(N):\n print(jbox[i][j], end='')\n print()\n print()" }, { "alpha_fraction": 0.5210526585578918, "alphanum_fraction": 0.5368421077728271, "avg_line_length": 26.285715103149414, "blob_id": "975d2257e76aac02f51262c9e04c0fd238284d5a", "content_id": "bbb0abd27461e4642ac3edaf54a0ba37c8b6b992", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 45, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/08/0804/럭키 
스트레이트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = input()\nM = len(N)//2 # middle\nF, B = N[:M], N[M:] # front, back\nF_int, B_int = 0, 0\nfor f in F: F_int += int(f)\nfor b in B: B_int += int(b)\nprint('LUCKY' if F_int == B_int else 'READY')" }, { "alpha_fraction": 0.4161849617958069, "alphanum_fraction": 0.4393063485622406, "avg_line_length": 18.22222137451172, "blob_id": "3c6f10a8c922f09d4c945d730531b48d43bbcff0", "content_id": "7329872dddc97da0a947eb7b74a0102a478f0c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 39, "num_lines": 9, "path": "/알고리즘/온라인저지/2021/12/1230/트리 만들기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "n, m = map(int, input().split())\n\nfor i in range(n-1):\n if i >= n-m+1:\n print('{} {}'.format(n-m, i+1))\n continue\n \n print(i, end=' ')\n print(i+1)\n" }, { "alpha_fraction": 0.5315126180648804, "alphanum_fraction": 0.5441176295280457, "avg_line_length": 20.68181800842285, "blob_id": "cb2b0db2bcba2a91c20932ce159717a4ca3f8fd2", "content_id": "552a0da153073f5a618b9bffd161f5cf864f87c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 740, "license_type": "no_license", "max_line_length": 57, "num_lines": 22, "path": "/알고리즘/온라인저지/2021/08/0807/링.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n8 4 2의 지름이 각각 같아야한다\n파이를 생략하고\n반지름 각각 A, B, C, ...\n각각의 바퀴수는 A / B\n이 바퀴수를 기약분수형태로 출력\nA/B A/C ... 
\n테스트케이스 개수 T\n\"\"\"\n\nT = int(input()) # 테스트케이스 개수, 쓰지는 않았다\nrs = list(map(int, input().split())) # 반지름 들 입력받아서 list()\n\ndef gcd(x, y): # 최대공약수 구하는 함수, 유클리드 호제법\n while y:\n x, y = y, x % y\n return x\n\nfor i in range(1, len(rs)): # 첫 링 제외하고 링의 반지름들 순회\n gcd_rs = gcd(rs[0], rs[i]) # 반지름들의 최대공약수 구해서\n print(f'{rs[0] // gcd_rs}/{rs[i] // gcd_rs}')\n # 각 링들을 기약분수 형태로 출력" }, { "alpha_fraction": 0.5203620195388794, "alphanum_fraction": 0.5429864525794983, "avg_line_length": 21.200000762939453, "blob_id": "fc1819857a0d41e83a146329b48f7cf1fc8bab9c", "content_id": "31e3a71f061e51cb5d8d5704cbfb31775dc2a5c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 36, "num_lines": 10, "path": "/알고리즘/온라인저지/2021/08/0816/카드1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ndeck = list(range(1, N+1))\nnew_deck = []\nfor n in range(N):\n if len(deck) == 1:\n new_deck += deck\n else:\n new_deck.append(deck.pop(0))\n deck.append(deck.pop(0))\nprint(*new_deck)" }, { "alpha_fraction": 0.5535714030265808, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 15.142857551574707, "blob_id": "c730d281ddd10286cc81c5aa565e29fc1656cbda", "content_id": "91dcb474f695881ba0a3aae1cfba440e2a6430d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 22, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/03/0319/팬들에게 둘러싸인 홍준.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "name = ':'+input()+':'\nfan = ':fan:'\nprint(fan*3)\nprint(fan, end='')\nprint(name, end='')\nprint(fan)\nprint(fan*3)" }, { "alpha_fraction": 0.4375655949115753, "alphanum_fraction": 0.45802727341651917, "avg_line_length": 25.48611068725586, "blob_id": "3f84f5f0c5f621805d16cb3efff7205baacf45d8", "content_id": 
"00f01995da28c777ccd05aedbbcf18d578b49c7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2014, "license_type": "no_license", "max_line_length": 67, "num_lines": 72, "path": "/알고리즘/SW역량테스트/2022.03.15 A형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n어딜 갈 수 있는가\n잡기 전\n1 2 3\n1을 잡은 후\n2 3 -1\n3을 잡은 후\n2 -1 -3\n\ndfs돌 필요도 없다 좌표찾고 더하기빼기로도 가능\n\n가지고 있는 최소값보다 커지면 가지치기\n\"\"\"\nimport tabnanny\n\n\ndef dfs(c, y, x, h, can_go):\n global monster_visited, customer_visited, minn\n # print(c)\n for mnc in monsters_and_customers:\n if mnc[0] == c:\n h += abs(y-mnc[1]) + abs(x-mnc[2])\n if h > minn:\n continue\n y = mnc[1]\n x = mnc[2]\n if c > 0:\n can_go.append((can_go.pop(can_go.index(c)) * (-1)))\n elif c < 0:\n can_go.pop(can_go.index(c))\n if not can_go:\n if h < minn:\n minn = h\n # print('renew minn!!', minn)\n\n # print(can_go)\n for g in can_go:\n if g > 0:\n if not monster_visited[g-1]:\n dfs(g, y, x, h, can_go)\n elif g < 0:\n if not customer_visited[g*(-1)-1]:\n dfs(g, y, x, h, can_go)\n\n\nT = int(input())\nfor t in range(1, T+1):\n N = int(input())\n arr = []\n how_many_customer = 0\n for n in range(N):\n tmp = list(map(int, input().split()))\n if max(tmp) > how_many_customer:\n how_many_customer = max(tmp)\n arr.append(tmp)\n # print(how_many_customer)\n monsters_and_customers = []\n for i in range(N):\n for j in range(N):\n if arr[i][j] != 0:\n monsters_and_customers.append([arr[i][j], i, j])\n # print(monsters_and_customers)\n monsters = list(range(1, how_many_customer+1))\n # print(monsters)\n minn = 10**6\n for m in monsters:\n monster_visited = [0] * how_many_customer\n customer_visited = [0] * how_many_customer\n h = 0\n x = y = 0 \n dfs(m, y, x, h, list(range(1, how_many_customer+1)))\n print('#{} {}'.format(t, minn))" }, { "alpha_fraction": 0.38434162735939026, "alphanum_fraction": 0.41281139850616455, "avg_line_length": 20.69230842590332, "blob_id": 
"2d011cc0192baa79d0d1de7095a3565f76391c28", "content_id": "b39abe6774776d1188633ec7f584be2364f7a02e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 37, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/09/0910/연속구간.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(3):\n num = input()\n prev = num[0]\n tmp = 1\n result = 1\n for i in range(1, 8):\n if prev == num[i]:\n tmp += 1\n else:\n result = max(result, tmp)\n tmp = 1\n prev = num[i]\n print(max(result, tmp))" }, { "alpha_fraction": 0.4141414165496826, "alphanum_fraction": 0.4343434274196625, "avg_line_length": 18.799999237060547, "blob_id": "b8545d1dafbaa50490cdd27c8cfbba171dd5ca5f", "content_id": "389eca97a08bda5a29435854df943b3f5228aa71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/02/0208/TV 크기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "D, H, W = map(int, input().split())\n\nC = ((H*H)+(W*W))**0.5\n\nprint(int(D * H / C), int(D * W / C))\n" }, { "alpha_fraction": 0.5050504803657532, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 15.666666984558105, "blob_id": "2851c4a56704245b21de115b2258d11a0c3aae10", "content_id": "95b18739a64a4fe55be542240351a8b1818eefc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 22, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/03/0316/Boiling Water.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "P = 5*int(input())-400\nresult = 0\nif P>100: result = -1\nif P<100: result = 1\nprint(P)\nprint(result)" }, { "alpha_fraction": 0.4259776473045349, "alphanum_fraction": 0.4427374303340912, "avg_line_length": 
24.60714340209961, "blob_id": "e4692880145dc3b5862b6b57c8397c286f77334c", "content_id": "fce679379195ee2938ec0367d9b12dd3facd697f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 716, "license_type": "no_license", "max_line_length": 44, "num_lines": 28, "path": "/알고리즘/온라인저지/2022/07/0714/체스판 다시 칠하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def check_tile(y, x):\n cnt = 0\n switch = False # Black:True, White:False\n for i in range(8):\n for j in range(8):\n tmp = arr[y+i][x+j]\n switch = not switch\n if j == 0:\n switch = not switch\n if switch: # Black\n if tmp == 'W':\n cnt += 1\n elif not switch: # White\n if tmp == 'B':\n cnt += 1\n result = min(cnt, 64-cnt)\n return result\n\n\nN, M = map(int, input().split())\narr = [list(input()) for _ in range(N)]\nresult = 1e9\nfor i in range(N-7):\n for j in range(M-7):\n tmp = check_tile(i, j)\n if tmp < result:\n result = tmp\nprint(result)" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 24.66666603088379, "blob_id": "f6cf29c3b02ff99496ea4be6ca648a5e60fab672", "content_id": "9e3e11cf907e9a3b818f82fca56d840e13501d0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/알고리즘/온라인저지/2023/02/0223/Time to Decompress.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n A, B = input().split()\n print(B*int(A))" }, { "alpha_fraction": 0.4464285671710968, "alphanum_fraction": 0.4553571343421936, "avg_line_length": 21.600000381469727, "blob_id": "ffa0acd52e59018c8b7f83ff0ee8f4a217055890", "content_id": "920b24f3f9d4839e52cbe0d5248fc9e6624145a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", 
"max_line_length": 36, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/04/0428/남욱이의 닭장.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for t in range(int(input())):\n N, M = map(int, input().split())\n U = M*2 - N\n T = M - U\n print(U, T)" }, { "alpha_fraction": 0.477761834859848, "alphanum_fraction": 0.4964132010936737, "avg_line_length": 23.928571701049805, "blob_id": "f564f852e9593ada3ab3f75a7400dd748b443f8a", "content_id": "c51cc1c877d3ed2d4d9431d71a1d545dfeb484f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 743, "license_type": "no_license", "max_line_length": 45, "num_lines": 28, "path": "/알고리즘/[템플릿]/이분탐색/기타 레슨.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def binary(start, end):\n global result\n if start > end: return\n Blu_ray = (start + end) // 2\n tmp = [0] * M\n i = 0\n for l in L:\n if tmp[i] + l <= Blu_ray:\n tmp[i] += l\n else:\n i += 1\n try: tmp[i] += l\n except: binary(Blu_ray+1, end)\n if i>=M: break\n if i >= M: # 블루레이 개수가 모자람\n binary(Blu_ray+1, end)\n else: # i < M : 블루레이 크기를 더 줄일 수 있음\n if Blu_ray < result: result = Blu_ray\n binary(start, Blu_ray-1)\n\nN, M = map(int, input().split())\nL = list(map(int, input().split())) # lecture\nS, E = max(L), sum(L)\nresult = int(1e9)\nbinary(S, E)\nprint(result)\n\n# https://www.acmicpc.net/problem/2343" }, { "alpha_fraction": 0.4312500059604645, "alphanum_fraction": 0.46875, "avg_line_length": 16.88888931274414, "blob_id": "944784d3c1b7b5138d19afaebcae7a603aa2782a", "content_id": "120308a0e85148adb60ba2243cf0f46229004dc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/05/0524/대회 or 인턴.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M, K = map(int, input().split())\nresult = 0\nwhile True:\n N -= 2\n M -= 
1\n if N < 0 or M < 0 or (N+M) < K:\n break\n result += 1\nprint(result)" }, { "alpha_fraction": 0.4752066135406494, "alphanum_fraction": 0.5206611752510071, "avg_line_length": 26, "blob_id": "89d8952aace376e4b1a63ed3abe0b192a960279b", "content_id": "c4f26dcab171151fcb9a027412c52bc5baf26002", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/05/0526/Hurra!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for i in range(1, int(input())+1):\n result = ''\n if not i%7 and i%11:\n result = 'Hurra!'\n if i%7 and not i%11:\n result = 'Super!'\n if not i%7 and not i%11:\n result = 'Wiwat!'\n print(result if result else i)" }, { "alpha_fraction": 0.5777778029441833, "alphanum_fraction": 0.5925925970077515, "avg_line_length": 33, "blob_id": "d91695adf060f4055ff573195301c8541df6470a", "content_id": "739c486f9b1aaa99037fe57b6afc4679efa485fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 58, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/07/0714/좌표 정렬하기 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr = [tuple(map(int, input().split())) for _ in range(N)]\narr.sort(key=lambda x:(x[1], x[0]))\nfor a in arr: print(*a)" }, { "alpha_fraction": 0.5462962985038757, "alphanum_fraction": 0.5648148059844971, "avg_line_length": 53.5, "blob_id": "b4f3ccef19ef6965e1caef22b97258033b1bb863", "content_id": "b229e2d18c02cab0880877c1e09ba771b09788ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 108, "license_type": "no_license", "max_line_length": 73, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/03/0323/주사위.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for t in range(1, 
int(input())+1):\n print('Case {}: {}'.format(t, sum(tuple(map(int, input().split())))))" }, { "alpha_fraction": 0.4819277226924896, "alphanum_fraction": 0.4819277226924896, "avg_line_length": 19.83333396911621, "blob_id": "65b4d3967ee67845517d14fa1fff3523fac7fa3e", "content_id": "3c6d7c104b55581c40f14a89e3c006ce0e8d93e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/06/0622/스캐너.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "R, C, ZR, ZC = map(int, input().split())\nresult = []\nfor r in range(R):\n tmp = input()\n a = ''\n for t in tmp:\n for zc in range(ZC):\n a = a + t\n for zr in range(ZR):\n result.append(a)\nfor r in result:\n print(r)" }, { "alpha_fraction": 0.6186440587043762, "alphanum_fraction": 0.6186440587043762, "avg_line_length": 22.799999237060547, "blob_id": "9e78bae6d849710cdd29da26e51a1d63771b3421", "content_id": "19df9d8f4d961f7b47d0d7ba5282e6280109b39a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/06/0609/정보섬의 대중교통.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, A, B = map(int, input().split())\nresult = 'Anything'\nif A>B: result = 'Subway'\nif A<B: result = 'Bus'\nprint(result)" }, { "alpha_fraction": 0.359281450510025, "alphanum_fraction": 0.359281450510025, "avg_line_length": 17.66666603088379, "blob_id": "fe79171095303e80d80e4fe9e9bd04c3549621d5", "content_id": "df71cbef3af4590c9fadfefaa7fbe69d1e9b3b39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 27, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/12/1225/연길이의 이상형.py", "repo_name": "sorrow4468/BAEKJOON", 
"src_encoding": "UTF-8", "text": "yeongil = input()\ndictt = {\n 'E': 'I', 'I':'E',\n 'S': 'N', 'N':'S',\n 'T': 'F', 'F':'T',\n 'J': 'P', 'P':'J',\n}\nfor y in yeongil:\n print(dictt[y], end='')" }, { "alpha_fraction": 0.4533762037754059, "alphanum_fraction": 0.4887459874153137, "avg_line_length": 16.33333396911621, "blob_id": "5ffacf7db43de070240c1390ae0e402be69fe67c", "content_id": "962039166d8c4452747d5c610a12e161cf33eb3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 33, "num_lines": 18, "path": "/알고리즘/온라인저지/2023/05/0512/Don’t pass me the ball!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def dfs(size, idx):\n global result\n\n if arr[2] != 0:\n result += 1\n return\n\n for player in range(size, N):\n if player not in arr:\n arr[idx] = player\n dfs(player+1, idx+1)\n arr[idx] = 0\n\nN = int(input())\narr = [0]*3\nresult = 0\ndfs(1, 0)\nprint(result)" }, { "alpha_fraction": 0.443452388048172, "alphanum_fraction": 0.449404776096344, "avg_line_length": 15.800000190734863, "blob_id": "45aab3a7faca59934bd86f2c08252c6b76e3e390", "content_id": "e4e2f308e21d3e4a2daeec2c912e648b58f13eae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 336, "license_type": "no_license", "max_line_length": 32, "num_lines": 20, "path": "/알고리즘/온라인저지/2022/02/0201/명령 프롬프트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\ninputs = [''] * N\n\nfor i in range(N):\n inputs[i] = input()\n\nfor i in range(len(inputs[0])):\n flag = True\n word = inputs[0][i]\n\n for j in range(N):\n if word != inputs[j][i]:\n flag = False\n break\n \n if flag:\n print(word, end='')\n else:\n print('?', end='')\n" }, { "alpha_fraction": 0.5428571701049805, "alphanum_fraction": 0.5857142806053162, "avg_line_length": 14.666666984558105, "blob_id": 
"3f550a2448527c5f254d1d25aeb3a8cb3e15660e", "content_id": "77b3679fbd4542793cfc3a846989b1ce587d2d27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 36, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/12/1205/피보나치 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nmod = int(1e9)+7\na, b = 1, 1\nfor _ in range(N-2): a, b = b, (a+b)\nprint(b)" }, { "alpha_fraction": 0.3098958432674408, "alphanum_fraction": 0.3151041567325592, "avg_line_length": 20.38888931274414, "blob_id": "f4d6dc40eb26aa7b795a83c9414bfa1e789524bf", "content_id": "b3a663e427f355581771035efa44be48aac45d77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 31, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/07/0725/부분 문자열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n try:\n S, T = input().split()\n j = 0\n tmp = ''\n result = 'No'\n for i in range(len(T)):\n if T[i] == S[j]:\n tmp += T[i]\n j += 1\n if j >= len(S):\n break\n # print(tmp)\n if tmp == S:\n result = 'Yes'\n print(result)\n except:\n exit()" }, { "alpha_fraction": 0.43877550959587097, "alphanum_fraction": 0.4413265287876129, "avg_line_length": 22.117647171020508, "blob_id": "0e47b41cbdc1747fa9ca97a1c3bc23693338d6ee", "content_id": "501ebbfc931611c33abbf0cd85ff017b9a782d5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 56, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/08/0816/괄호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n P = input() # parenthesis string : 괄호\n S = [] # stack\n for p in P:\n if p == '(': S.append(p)\n 
elif p == ')':\n if S: \n if S[-1] == '(': S.pop() \n else: S.append(p)\n print('NO' if S else 'YES')\n\n\"\"\"\n괄호를 하나씩 스택에 집어넣어서\n짝이 맞으면 pop해주고\n스택이 딱 맞아 떨어지면 괄호의 짝이 맞는 것\n스택이 남아있다면 괄호가 짝이 맞지 않는 것\n\"\"\"" }, { "alpha_fraction": 0.578125, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 20.44444465637207, "blob_id": "bedaf56a7a0d2fd341410431bf91cf8ccf568187", "content_id": "ce325de2f38eb1e736e85d258a686b9f7c368156", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 50, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/09/0927/아름다운 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# import sys\n\n# input = sys.stdin.readline\n\n# for _ in range(int(input().rstrip())):\n# num = set(input().rstrip())\n# print(len(num))\n\nfor _ in [0]*int(input()):print(len(set(input())))" }, { "alpha_fraction": 0.5954545736312866, "alphanum_fraction": 0.5977272987365723, "avg_line_length": 18.173913955688477, "blob_id": "520b5c7ce84a76b34eeef9ffd638cd9da367f39e", "content_id": "bd434113fff4777d2d2bc6472fb51a29521817b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "no_license", "max_line_length": 45, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/01/0122/N번째 큰 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import heapq # 우선순위 큐\n\nN = int(input())\n\nhq = heapq # 힙큐 메서드 사용하기 위함\n\nresult = [] # 힙큐로 담을 리스트\n\n# 일단 첫 줄을 힙큐로 넣는다\nfor i in list(map(int, input().split())):\n hq.heappush(result, i)\n\n# 두번째줄부터\nfor n in range(1, N):\n # 매 줄 마다 힙큐에 넣는 동시에 pop 해주면서\n # N개로 개수를 맞춘다\n for i in list(map(int, input().split())):\n # 개사기메서드 힙푸시팝\n hq.heappushpop(result, i)\n\n# 입력받는 동시에 힙푸시팝을 진행하고 남은 리스트의\n# 첫번째 값이 정답 \nprint(hq.heappop(result))" }, { "alpha_fraction": 0.5592592358589172, "alphanum_fraction": 0.5777778029441833, "avg_line_length": 
17.066667556762695, "blob_id": "76d0f07dedb2c51c15255e7a0b096af1a302fbe2", "content_id": "1d3767c7902c82671add213838bb6778c0a242bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 63, "num_lines": 15, "path": "/알고리즘/온라인저지/2023/02/0214/8진수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "tmp = 0\ni = 1\nfor j in input()[::-1]:\n tmp += i * int(j)\n i *= 2\n\nimport string\n\nN, B = tmp, 8\ntmp = string.digits + string.ascii_uppercase # 자릿수를 담은 문자열\nresult = ''\nwhile N:\n result = tmp[N%B] + result # B진법이므로 B로 나눈 나머지번째 문자를 계속 추가한다\n N //= B\nprint(result)" }, { "alpha_fraction": 0.4029850661754608, "alphanum_fraction": 0.46268656849861145, "avg_line_length": 33, "blob_id": "0978cdb1082167250694ae15542de18b31331681", "content_id": "05de05185a11935b1df8d9612b27753a6fa4df52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 67, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/08/0812/Rats.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C = map(int, input().split())\nprint(((A+1)*(B+1))//(C+1) - 1)" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.5, "avg_line_length": 19.5, "blob_id": "76e3bb9c246fc4948eb62815b8c00168ad0286af", "content_id": "0d88ca4d9094393e76f4df431b71fafc24b2c876", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/08/0822/Zadanie próbne 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nprint((N+1)*2, (N+1)*3)" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 18, "blob_id": 
"0728081c0a9a931d93b23b639d1c601b0fd25cb2", "content_id": "ba6d012a8b669a9ca3b490f99ae2cabb06b0c415", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/알고리즘/온라인저지/2021/07/0730/[기초-산술연산] 정수 2개 입력받아 나눈 몫 계산하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# 각각 입력받아서 split으로 각각 저장해주고\na, b = input().split()\n# 무조건 각각 int 적용해줘야 한다\n# 입력받는 과정에서 하려니까 에러가 나버리네\na = int(a)\nb = int(b)\nprint(a//b)" }, { "alpha_fraction": 0.6106870174407959, "alphanum_fraction": 0.6106870174407959, "avg_line_length": 65, "blob_id": "3ed0733bd904b124988b83d907ea8155bf5cf48f", "content_id": "6d0972d98752d3642a5b7d6184783e224b980bec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 114, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/07/0705/욱제는 도박쟁이야!!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nprint(sum(list(map(abs, list(map(int, input().split()))))) + sum(list(map(abs, list(map(int, input().split()))))))" }, { "alpha_fraction": 0.5736842155456543, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 30.83333396911621, "blob_id": "0a4f30936bc46cc946d78e1ebbc8091a21f60fd4", "content_id": "22b292a087022bf6418d527bdbe56f2fc7c5e02e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/09/0910/아이들은 사탕을 좋아해.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n N, K = map(int, input().split())\n candies = list(map(int, input().split()))\n result = 0\n for candy in candies: result += candy//K\n print(result)" }, { "alpha_fraction": 0.542553186416626, "alphanum_fraction": 
0.542553186416626, "avg_line_length": 14.833333015441895, "blob_id": "0f49f6a61721ff3f5d25a7c8093c9adebee50b72", "content_id": "a2a9b4b5f7564f73c450317a82ca1a690aed2241", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/03/0323/전체 계산 횟수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nresult = N\nwhile N:\n N //= M\n result += N\nprint(result)" }, { "alpha_fraction": 0.5791505575180054, "alphanum_fraction": 0.5965250730514526, "avg_line_length": 19.760000228881836, "blob_id": "93bf35b0eaa61fb2300a3519f385ddc1a1c04474", "content_id": "e845e5e0059e195aba781fb250a217dedac75fe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 518, "license_type": "no_license", "max_line_length": 50, "num_lines": 25, "path": "/알고리즘/[템플릿]/이분탐색/랜선 자르기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef cut(length):\n cutted = 0\n for cable in cables:\n cutted += cable//length\n if cutted>=N: return True\n return False\n\nK, N = map(int, input().rstrip().split())\ncables = [int(input().rstrip()) for _ in range(K)]\nresult = 1\nstart, end = result, sum(cables)//N\nwhile start<=end:\n mid = (start+end) // 2\n if cut(mid):\n start = mid+1\n result = max(result, mid)\n else:\n end = mid-1\nprint(result)\n\n# https://www.acmicpc.net/problem/1654" }, { "alpha_fraction": 0.5112359523773193, "alphanum_fraction": 0.5280898809432983, "avg_line_length": 18.88888931274414, "blob_id": "7da2d26ccaf16740989662214d791e34004d3e1a", "content_id": "d392be358a76cb8392372237bf1d99fb4f96d200", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, 
"path": "/알고리즘/온라인저지/2021/07/0730/[기초-산술연산] 정수 3개 입력받아 합과 평균 출력하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# 변수 세 개 받아서\n# 공백으로 쪼개서\n# int로 바꿔주고\n# map으로 각각 저장해준다\na, b, c = map(int, input().split())\nd = a + b + c\ne = round(d / 3, 3)\n# f\"{:.?f}\"로 소수점 아래 자리수 고정시킬 수 있다\nprint(f\"{d} {e:.2f}\")" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.49047619104385376, "avg_line_length": 14.071428298950195, "blob_id": "1e91eb399b26a92be8b5e65bcc8b04170ed0849f", "content_id": "b6d9e4da16bbe3648aa424a54c91934c68867d7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/02/0201/분산처리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n a, b = map(int, input().split())\n\n result = 1\n\n for i in range(b):\n result = (result*a)%10\n\n if result:\n print(result)\n else:\n print(10)" }, { "alpha_fraction": 0.38268792629241943, "alphanum_fraction": 0.4441913366317749, "avg_line_length": 26.5, "blob_id": "6e6e0f3727b34932bc5930d70e5496956105fe79", "content_id": "128ea8b4c3305b1de233d41a8cf46b7e47994ddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 439, "license_type": "no_license", "max_line_length": 104, "num_lines": 16, "path": "/알고리즘/온라인저지/2021/08/0828/한수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ndef arithmetic_sequence(N):\n if N < 100:\n return N\n else:\n cnt = 0\n for n in range(100, N+1):\n a, b, c = n//100, (n-(n//100)*100)//10, n%10\n for k in range(6):\n if (a+k == b and b+k == c and a+(k*2) == c) or (c+k == b and b+k == a and c+(k*2) == a):\n cnt += 1\n return 99 + cnt\n\nN = int(sys.stdin.readline())\nprint(arithmetic_sequence(N))" }, { "alpha_fraction": 0.4635658860206604, 
"alphanum_fraction": 0.48217055201530457, "avg_line_length": 21.275861740112305, "blob_id": "475808a6e4e5e7d9f058286f2f577adb08a61ee8", "content_id": "ddc47017f7e459dee1c03d151fa4057d87c2096b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 885, "license_type": "no_license", "max_line_length": 56, "num_lines": 29, "path": "/알고리즘/온라인저지/2022/01/0122/수들의 합 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\n\nseq = list(map(int, input().split())) # 수열 입력\n\ns = e = 0 # 두포인터\n\nnow = sum(seq[s:e+1]) # 초기값(seq[0])\n\nresult = 0 # 크기가 M과 같은 수열 부분합의 개수 초기화\n\nwhile True: # 무지성 while True\n # 종료조건\n # 1. e를 뒤로 밀어봤자 M보다 크거나 같아질 수 없음\n # 2. s를 뒤로 땡겨봤자 M보다 작거나 같아질 수 없음\n if (now < M and e == N-1) or (now > M and s == N-1):\n break\n\n if now == M: # 수열 부분합이 M과 같으면\n result += 1\n now -= seq[s]\n s += 1 # 앞에서 한 칸 땡기기\n elif now < M: # 부분합이 M보다 작으면\n e += 1 # 뒤로 한 칸 밀기\n now += seq[e]\n elif now > M: # 부분합이 M보다 크면\n now -= seq[s]\n s += 1 # 앞에서 한 칸 땡기기\n\nprint(result)" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.4047619104385376, "avg_line_length": 14.363636016845703, "blob_id": "966ab38e930649b80051442398272ea1b1fd3b45", "content_id": "3abe518c1ea369b0b33a6f86ccc476eb9d779781", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 22, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/09/0901/방 번호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = input()\ndp = [0]*9\nfor n in N:\n tmp = int(n)\n if tmp == 9:\n dp[6] += 1\n else:\n dp[tmp] += 1\nif dp[6]%2: dp[6] += 1\ndp[6] //= 2\nprint(max(dp))" }, { "alpha_fraction": 0.3842794895172119, "alphanum_fraction": 0.4410480260848999, "avg_line_length": 17.360000610351562, "blob_id": "1af1e8c971600bc02e3e69a246fe238cd3e8144b", "content_id": 
"5f26a1cec754f42ecee4e420bd21882d6e1046bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 46, "num_lines": 25, "path": "/알고리즘/온라인저지/2023/05/0507/너의 평점은.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "grade_to_score = {\n 'A+': 4.5,\n 'A0': 4.0,\n 'B+': 3.5,\n 'B0': 3.0,\n 'C+': 2.5,\n 'C0': 2.0,\n 'D+': 1.5,\n 'D0': 1.0,\n 'F': 0.0,\n}\n\ntime_sum = 0\nresult = 0\nwhile True:\n try:\n subject, time, grade = input().split()\n if grade == 'P':\n continue\n time = int(time[0])\n time_sum += time\n result += time*grade_to_score[grade]\n except EOFError:\n break\nprint(f'{result/time_sum:.6f}')" }, { "alpha_fraction": 0.39915966987609863, "alphanum_fraction": 0.4453781545162201, "avg_line_length": 13.9375, "blob_id": "ab1d6c66c529b88445baf50d6c1913acf2711d9f", "content_id": "5089422109a20652efc07b473d030c2acaab43e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 39, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/02/0212/핸드폰 요금.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\ncalls = list(map(int, input().split()))\n\nY = M = 0\n\nfor c in calls:\n Y += (c//30 + 1) * 10\n M += (c//60 + 1) * 15\n\nif Y > M:\n print('M', min(Y, M))\nelif M > Y:\n print('Y', min(Y, M))\nelse:\n print('Y M', Y)" }, { "alpha_fraction": 0.5779816508293152, "alphanum_fraction": 0.5871559381484985, "avg_line_length": 21, "blob_id": "4590535f7c9a97d407080e7be4377e35fb9d4c44", "content_id": "9c3447ab8a02b8806cc54970d03476d4ec4937d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "no_license", "max_line_length": 41, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/03/0303/특식 배부.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": 
"UTF-8", "text": "N = int(input())\nresult = 0\nfor i in list(map(int, input().split())):\n result += min((N, i))\nprint(result)" }, { "alpha_fraction": 0.4789719581604004, "alphanum_fraction": 0.5046728849411011, "avg_line_length": 22.83333396911621, "blob_id": "af55433231398a15d8c08433aaec5da0dc7a49f9", "content_id": "16727945fa36123f9fd1407a35457faea6f81e8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, "license_type": "no_license", "max_line_length": 49, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/09/0929/피카츄.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nword = list(input().rstrip())\ni = 0\nwhile i<len(word)-1:\n pikachu = word[i] + word[i+1]\n if pikachu in ['pi', 'ka']:\n for _ in [0]*2: word.pop(i); \n continue\n if i<len(word)-2:\n pikachu = word[i] + word[i+1] + word[i+2]\n if pikachu == 'chu':\n for _ in [0]*3: word.pop(i); \n continue\n i += 1\nprint('NO' if len(word) else 'YES')" }, { "alpha_fraction": 0.6047903895378113, "alphanum_fraction": 0.6407185792922974, "avg_line_length": 23, "blob_id": "445b84c5a07a7d4ef531c59388eac39d1805d75c", "content_id": "997e80b0c499d720a5864f86a1c51767697b2b09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 44, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/09/0914/숫자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nresult = list(range(min(A, B)+1, max(A, B)))\nprint(len(result))\nif result:\n print(*result)\n\n# https://www.acmicpc.net/problem/10093" }, { "alpha_fraction": 0.3644859790802002, "alphanum_fraction": 0.40186914801597595, "avg_line_length": 20.5, "blob_id": "441fe2552aa24f9c481c2fe2b1d8417bd32da5be", "content_id": "6fb1ac81ae6dd40877fd437ecaaddd80b2efd354", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 214, "license_type": "no_license", "max_line_length": 37, "num_lines": 10, "path": "/알고리즘/온라인저지/2023/01/0106/암호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = input()\nP = input()\ni = 0\nfor s in S:\n if s != ' ':\n tmp = ord(s) - (ord(P[i])-96)\n if not tmp >= 97: tmp += 26\n print(chr(tmp), end='')\n else: print(' ', end='')\n i = (i+1)%len(P)" }, { "alpha_fraction": 0.47783932089805603, "alphanum_fraction": 0.4819944500923157, "avg_line_length": 21.59375, "blob_id": "11f8a919ccf624eaf5a259ec464cf724d04eae05", "content_id": "12638af50a068f8a190e0fff9ee7277b5ce90301", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "no_license", "max_line_length": 51, "num_lines": 32, "path": "/알고리즘/온라인저지/2022/10/1002/바닥 장식.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef check(N, M, arr):\n board = 0\n for i in range(N):\n flag = False\n for j in range(M):\n if arr[i][j] == '-' and not flag:\n flag = True\n board += 1\n elif not arr[i][j] == '-': flag = False\n return board\n\ndef rotate(arr):\n rotated = []\n for j in range(M):\n tmp = ''\n for i in range(N): \n if arr[i][j] == '-': tmp += '|'\n else: tmp += '-'\n rotated.append(tmp)\n return rotated\n\nN, M = map(int, input().rstrip().split())\narr = [input().rstrip() for _ in range(N)]\nresult = 0\nresult += check(N, M, arr)\narr = rotate(arr)\nresult += check(M, N, arr)\nprint(result)" }, { "alpha_fraction": 0.5561797618865967, "alphanum_fraction": 0.5561797618865967, "avg_line_length": 21.375, "blob_id": "72d814e14f21f67d5eb95adeb5d1f0d15368d6e1", "content_id": "5bb88dc6844058bdbc92eaaa3b37163bf5a2c7ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 178, "license_type": "no_license", "max_line_length": 94, "num_lines": 8, 
"path": "/알고리즘/온라인저지/2022/02/0220/배수 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nwhile True:\n num = int(input())\n if not num:\n break\n\n print(f'{num} is a multiple of {N}.' if not num%N else f'{num} is NOT a multiple of {N}.')" }, { "alpha_fraction": 0.5537037253379822, "alphanum_fraction": 0.5657407641410828, "avg_line_length": 26, "blob_id": "8898086a59a10313fbf356996787082e72c03ecf", "content_id": "896be0e6a16031a34fd02c2468cdeb3aeaf49f18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1504, "license_type": "no_license", "max_line_length": 67, "num_lines": 40, "path": "/알고리즘/온라인저지/2021/10/1003/집합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\nM = int(input())\n\nS = set() # set자료형으로 생성\n# add과정에서 x가 이미 있는 경우 무시할 것이기 때문에 데이터들은 절대 중복될 수 없다\n# 따라서 순서를 지니지 않는 set자료형으로 만들어서 시간초과를 회피\n# 추가하고 제거하는 과정에서 list자료형의 append와 remove혹은 pop을 쓰는 것 보다\n# set자료형의 add와 discard를 쓰는 것이 빠르다\n\nfor m in range(M):\n orders = sys.stdin.readline().split()\n\n if len(orders) == 1: # all이 명령어로 들어왔을 때\n order = orders[0] # add remove check toggle all empty등, 명령어\n else: # all을 제외한 명령어들일 때\n order = orders[0] # 명령어\n num = int(orders[1]) # 목적 숫자 x\n\n if order == 'add':\n S.add(num) # list자료형의 append와 같음\n elif order == 'remove':\n S.discard(num) # list자료형의 remove혹은 pop와 같음\n elif order == 'check':\n if num in S:\n print(1)\n else:\n print(0)\n elif order == 'toggle':\n if num in S:\n S.discard(num)\n else:\n S.add(num)\n elif order == 'all':\n S = set([i for i in range(1, 21)]) # 1~20까지 전부 채워진 set로 리셋\n else:\n S = set() # set를 초기화\n\n# 추가적으로 해당 문제는 메모리도 많이 적은 편이라\n# python의 경우 평소처럼 pypy3로 제출하면 메모리부족으로 오답처리 된다\n" }, { "alpha_fraction": 0.5719298124313354, "alphanum_fraction": 0.582456111907959, "avg_line_length": 25, "blob_id": "a70c780f0cb8be9918390b30635239b1deb84d00", "content_id": "ab32a4a159be360f1f947ba42d7f6b7224b59e0b", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 42, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/09/0911/민균이의 비밀번호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ntext_list = [input() for _ in range(N)]\ntext_set = set(text_list)\nresult = ''\nfor text in text_list:\n if result: break\n if text == text[::-1]:\n result = text\n elif text[::-1] in text_set:\n result = text\nprint(len(result), result[len(result)//2])" }, { "alpha_fraction": 0.42607003450393677, "alphanum_fraction": 0.48249027132987976, "avg_line_length": 21.39130401611328, "blob_id": "9db1e9255814c1ab7d5a44a6d8fa63fac88aa300", "content_id": "98b4fa02f1ea9f9efe638ab2ee7d6047a322e5db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 45, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/10/1012/롤 케이크.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nresult = [[0, 0], [0, 0]]\nL = int(input().rstrip())\nN = int(input().rstrip())\ncakes = [1]*L\nfor i in range(1, N+1):\n P, K = map(int, input().rstrip().split())\n if K-P+1 > result[1][1]:\n result[1][1] = K-P+1\n result[1][0] = i\n cake = 0\n for j in range(P-1, K):\n if cakes[j]:\n cakes[j] = 0\n cake += 1\n if cake > result[0][1]:\n result[0][1] = cake\n result[0][0] = i\nprint(result[1][0])\nprint(result[0][0])" }, { "alpha_fraction": 0.4491978585720062, "alphanum_fraction": 0.4946524202823639, "avg_line_length": 14.625, "blob_id": "3aeb4e6ae390550ca410e54c3e7c7f0c79daa230", "content_id": "f56101f22ded30ce19544a0112e7cc4400ff0da2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "no_license", "max_line_length": 40, "num_lines": 24, "path": 
"/알고리즘/온라인저지/2022/02/0213/슈퍼 마리오.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "now = int(input())\nans_found = False\nresult = 0\n\nfor _ in range(9):\n num = int(input())\n tmp = now + num\n\n if ans_found:\n continue\n\n if tmp >= 100 and num < 100:\n if abs(tmp-100) <= abs(100-now):\n result = tmp\n else:\n result = now\n ans_found = True\n\n now += num\n\nif now <= 100:\n result = now\n\nprint(result)" }, { "alpha_fraction": 0.4876847267150879, "alphanum_fraction": 0.5024630427360535, "avg_line_length": 16, "blob_id": "70cef0592aaea48e2e110017dc8a174d5bedd16d", "content_id": "8322b1a564f368e87e2f3832202da0018beaa77e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 203, "license_type": "no_license", "max_line_length": 32, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/06/0601/암호제작.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "P, K = map(int, input().split())\nresult = 'GOOD'\nnum = 0\nfor k in range(2, K):\n if P%k == 0:\n result = 'BAD'\n num = k\n break\nif num:\n print(result, num)\nelse:\n print(result)" }, { "alpha_fraction": 0.5446808338165283, "alphanum_fraction": 0.5446808338165283, "avg_line_length": 12.11111068725586, "blob_id": "d4f51ef4aa3e0d6b2b42f6a52778dba03743a947", "content_id": "34edccf3d114e5aaa0ed4b4510ec9c32a91c6876", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 32, "num_lines": 18, "path": "/알고리즘/온라인저지/2021/10/1016/듣보잡.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\n\na = set()\n\nfor n in range(N):\n a.add(input())\n\nresult = []\n\nfor m in range(M):\n b = input()\n if b in a:\n result.append(b)\n\nprint(len(result))\nresult.sort()\nfor r in result:\n print(r)" }, { "alpha_fraction": 0.45039165019989014, "alphanum_fraction": 0.48433420062065125, 
"avg_line_length": 24.566667556762695, "blob_id": "172d8d7567868b150c20efa532dc9a7e1df897ab", "content_id": "ed33f66f910809f83be208e20f6ea2ba83f80c6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 766, "license_type": "no_license", "max_line_length": 57, "num_lines": 30, "path": "/알고리즘/[템플릿]/BFS/토마토.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\nM, N = map(int, input().split())\narr = [list(map(int, input().split())) for _ in range(N)]\nQ = deque()\nfor i in range(N):\n for j in range(M):\n if arr[i][j] == 1:\n Q.append((i, j, 0))\n arr[i][j] = 0\nresult = 0\nwhile Q:\n y, x, day = Q.popleft()\n if arr[y][x] == 1: continue\n arr[y][x] = 1\n result = max(result, day)\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<N and 0<=nx<M and arr[ny][nx] == 0:\n Q.append((ny, nx, day+1))\nfor i in range(N):\n if result == -1: break\n for j in range(M):\n if arr[i][j] == 0:\n result = -1\n break\nprint(result)\n\n# https://www.acmicpc.net/problem/7576" }, { "alpha_fraction": 0.5901639461517334, "alphanum_fraction": 0.5901639461517334, "avg_line_length": 30, "blob_id": "6bc47d3c2df3673c502e5828f8558fa4e69d0306", "content_id": "884bc73ed3fcbf78f04cae5bd9694cca0efa2264", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61, "license_type": "no_license", "max_line_length": 44, "num_lines": 2, "path": "/알고리즘/온라인저지/2023/04/0413/Mathematics.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nprint(sum([int(input()) for _ in range(N)]))" }, { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 22, "blob_id": "fe8d31df2c1e6b746fc7b2035ac4bb3406610995", "content_id": "8c4c75b645f53fcf629faf9cce60bfbb4cb82073", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 32, "num_lines": 3, "path": "/알고리즘/온라인저지/2023/04/0412/샤틀버스.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "x, y = map(int, input().split())\nif x>y: print(x+y)\nelse: print(y-x)" }, { "alpha_fraction": 0.525073766708374, "alphanum_fraction": 0.5545722842216492, "avg_line_length": 27.33333396911621, "blob_id": "2abbf5356b1b5fa36834bad8b97fce086551c9d9", "content_id": "abcc75e52ceed980ec180a4945731b9c561622a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 389, "license_type": "no_license", "max_line_length": 47, "num_lines": 12, "path": "/알고리즘/프로그래머스/Level1/로또의 최고 순위와 최저 순위.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def solution(lottos, win_nums):\n answer = []\n # print(lottos, win_nums)\n # 일치하는 번호의 개수만이, 오직 등수에 영향을 줌\n minn = 0\n for l in lottos:\n if l in win_nums:\n minn += 1\n result = [6, 6, 5, 4, 3, 2, 1] # 등수 테이블\n answer.append(result[lottos.count(0)+minn])\n answer.append(result[minn]) \n return answer" }, { "alpha_fraction": 0.38860103487968445, "alphanum_fraction": 0.41450777649879456, "avg_line_length": 18.399999618530273, "blob_id": "c2bafa392890ad09ee73a9bd0f3718818556e535", "content_id": "84961694ffb6f3e1989b7aef238a63bbe6ae5f68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "no_license", "max_line_length": 29, "num_lines": 10, "path": "/알고리즘/온라인저지/2023/05/0508/탁구 경기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "D, P = 0, 0\nfor i in range(int(input())):\n winner = input()\n if abs(D-P) == 2:\n continue\n if winner == 'D':\n D += 1\n if winner == 'P':\n P += 1\nprint(f'{D}:{P}')" }, { "alpha_fraction": 0.3870967626571655, "alphanum_fraction": 0.4516128897666931, "avg_line_length": 20.375, "blob_id": 
"c9bdb0a33128ceb90ede7a41c189981ae3e75802", "content_id": "ab0aab0acc44229549a72e9e0d9b861cd995c308", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 35, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/07/0731/파스칼 삼각형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "R, C, W = map(int, input().split())\nN = R+W-1\nP = [[1], [1, 1], [1, 2, 1]]\nfor n in range(N-3):\n tmp = [0] * (2+n)\n for i in range(len(P[-1])-1):\n tmp[i] = sum(P[-1][i:i+2])\n tmp = [1]+tmp+[1]\n P.append(tmp)\nresult = 0\nj = 0\nfor i in range(R-1, N):\n j += 1\n tmp = P[i][C-1:C+j-1]\n result += sum(tmp)\nprint(result)" }, { "alpha_fraction": 0.43581080436706543, "alphanum_fraction": 0.5033783912658691, "avg_line_length": 20.214284896850586, "blob_id": "d34ad401d79957409e2f9384ac3a6c2b50601d45", "content_id": "23ca1b22d73d24a849b947d78375cc4a95902b19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 44, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/09/0925/돌 게임 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\ndp = [0, 1, 0, 1, 1] + [0]*(N-4)\nif N>4:\n for i in range(5, N+1):\n if 0 in [dp[i-1], dp[i-3], dp[i-4]]:\n dp[i] = 1\n else: dp[i] = 0\nprint('SK' if dp[N] else 'CY')\n\n# https://www.acmicpc.net/problem/9657" }, { "alpha_fraction": 0.39919355511665344, "alphanum_fraction": 0.4193548262119293, "avg_line_length": 14.5625, "blob_id": "efbd15895c22a7437250e6a8eafe2b23e548af02", "content_id": "dd07afe1f4b9026444310f745d0b58ae8c0d13cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 248, "license_type": "no_license", "max_line_length": 42, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/02/0220/짝수를 
찾아라.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n nums = list(map(int, input().split()))\n\n minn = 100\n summ = 0\n\n for n in nums:\n if not n%2:\n summ += n\n\n if n < minn:\n minn = n\n \n print(summ, minn)" }, { "alpha_fraction": 0.5467422008514404, "alphanum_fraction": 0.5524079203605652, "avg_line_length": 22.600000381469727, "blob_id": "c4ac5f8d81323f084109d8e0e2ac0a7dc8e905ab", "content_id": "a54067bdd6c3a28c7e50cef89bb8648b0ba6c1fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "no_license", "max_line_length": 58, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/08/0802/자료구조는 정말 최고야.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\nresult = True\nfor _ in range(M):\n K = int(input())\n B = list(map(int, input().split()))\n for k in range(K-1):\n if B[k] < B[k+1]: # 한 권의 책이라도 오름차순이 아니면 정답이 될 수 없다\n result = False\n break\n if not result: break\nprint('Yes' if result else 'No')" }, { "alpha_fraction": 0.418552041053772, "alphanum_fraction": 0.4570135772228241, "avg_line_length": 16.038461685180664, "blob_id": "0b3dfddd6de5acc67b7869a1dafdd86d247f16d2", "content_id": "310402c0c2607f5acf7688e4af97e67a39fa1429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 442, "license_type": "no_license", "max_line_length": 38, "num_lines": 26, "path": "/알고리즘/온라인저지/2022/02/0203/소수 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "dp = [False, False] + [True] * (999)\nprimes = []\n\nfor i in range(2, 1001):\n if dp[i]:\n primes.append(i)\n for j in range(i*2, 1001, i):\n dp[j] = False\n\nN = int(input())\nnums = list(map(int, input().split()))\n\nmaxx = nums[-1]\nresult = 0\n\nfor p in primes:\n for n in nums:\n if n == p:\n if n == 1:\n 
continue\n result += 1\n \n if p == maxx:\n break\n\nprint(result)" }, { "alpha_fraction": 0.41428571939468384, "alphanum_fraction": 0.4285714328289032, "avg_line_length": 13.199999809265137, "blob_id": "993788a65893448161bc850bcac60ff23afac79d", "content_id": "4b574b750852b72afc2588711880588e64e9379a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 20, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/01/0131/별 찍기 - 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = M = int(input())\n\nfor i in range(N):\n print('*' * M)\n M -= 1" }, { "alpha_fraction": 0.6212121248245239, "alphanum_fraction": 0.6287878751754761, "avg_line_length": 25.600000381469727, "blob_id": "cf77ef42934391e16653138c272e94e4b245edea", "content_id": "66b8ce8a746125ab664c56802df919e6f6bc0b3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/06/0625/이항 계수 1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from math import factorial as fact\n\nN, K = map(int, input().split())\nresult = fact(N) / (fact(K) * fact(N-K))\nprint(f'{result:.0f}')" }, { "alpha_fraction": 0.40361446142196655, "alphanum_fraction": 0.42168673872947693, "avg_line_length": 10.928571701049805, "blob_id": "f002b8fc3503a1d4e74d0c0f5b29250e384a1869", "content_id": "f4d09a223421acb7db97d10c7ae83dd010353439", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/알고리즘/온라인저지/2021/12/1216/주사위 게임 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nA = B = 100\n\nfor n in range(N):\n a, b = map(int, input().split())\n\n if a > b:\n B -= a\n elif 
b > a:\n A -= b\n\nprint(A)\nprint(B)" }, { "alpha_fraction": 0.4909090995788574, "alphanum_fraction": 0.5272727012634277, "avg_line_length": 17.66666603088379, "blob_id": "17143390e1734347d223ecd4b828b1b0d21b3177", "content_id": "3ec6307de83f70a53933e5bd90ae5250f0e33040", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/알고리즘/온라인저지/2021/08/0822/기찍 N.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfor n in range(N, 0, -1):\n print(n)" }, { "alpha_fraction": 0.5445544719696045, "alphanum_fraction": 0.5742574334144592, "avg_line_length": 16, "blob_id": "c6a6bdba710f5fd9a949e0adb74d3505714ec50e", "content_id": "d4d356d755423560fb45b3bcdba82517e472e134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 21, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/05/0509/三方比較 (Three-Way Comparison).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nA = int(input())\nB = int(input())\nif A < B: result = -1\nif A > B: result = 1\nprint(result)" }, { "alpha_fraction": 0.4621676802635193, "alphanum_fraction": 0.5296523571014404, "avg_line_length": 26.22222137451172, "blob_id": "5ac9a8334f81fa4079428604e8bb436a4127eca3", "content_id": "7416bc3d3b07ee580bc527a30a664b7d45568178", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", "max_line_length": 54, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/10/1016/나는 학급회장이다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nresult = [[0]*5 for _ in range(3)]\nfor i in range(3): result[i][4] = i+1\nfor _ in range(int(input().rstrip())):\n A, B, C = map(int, 
input().rstrip().split())\n result[0][0] += A\n result[1][0] += B\n result[2][0] += C\n result[0][A] += 1\n result[1][B] += 1\n result[2][C] += 1\nresult.sort(key=lambda x:(-x[0], -x[3], -x[2], -x[1]))\nif result[0][:4] == result[1][:4]:\n print(0, result[0][0])\nelse: print(result[0][4], result[0][0])" }, { "alpha_fraction": 0.5081080794334412, "alphanum_fraction": 0.545945942401886, "avg_line_length": 22.25, "blob_id": "8e3f1ae768f3bf09c28efd483b8ac96d76d964d7", "content_id": "58ee747b1b6bae1d668fcc49b468f6802f49b1e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 37, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/06/0607/모비스.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = input()\nresult = 0\nif 'M' in S: result += 1\nif 'O' in S: result += 1\nif 'B' in S: result += 1\nif 'I' in S: result += 1\nif 'S' in S: result += 1\nprint('YES' if result == 5 else 'NO')" }, { "alpha_fraction": 0.277688592672348, "alphanum_fraction": 0.29293739795684814, "avg_line_length": 24.97916603088379, "blob_id": "bafc2ddff1439b35f011ee518a59a217e03cc984", "content_id": "6e7e31b0968b078ae81d32a5d3ac8a00c6ff175d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1256, "license_type": "no_license", "max_line_length": 54, "num_lines": 48, "path": "/알고리즘/온라인저지/2022/06/0611/양.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\nimport sys\n\ninput = sys.stdin.readline\n\nR, C = map(int, input().split())\narr = []\nfor _ in range(R):\n tmp = list(input())\n arr.append(tmp)\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\nq = deque()\nV = O = 0\n\nfor r in range(R): # 행\n for c in range(C): # 열\n if arr[r][c] in '.vo': # BFS\n q.append((r, c))\n v = o = 0\n while q:\n tmp = q.popleft()\n y, x = tmp[0], tmp[1]\n if arr[y][x] != '1':\n if arr[y][x] == 
'o':\n o += 1\n elif arr[y][x] == 'v':\n v += 1\n arr[y][x] = '1'\n for i in range(4):\n ny = y+dy[i]\n nx = x+dx[i]\n try:\n if arr[ny][nx] in '.vo':\n if arr[ny][nx] != '1':\n if (ny, nx) not in q:\n q.append((ny, nx))\n except:\n q = deque()\n v = o = 0\n # print(o, v) # 디버깅\n if o > v:\n O += o\n else:\n V += v\n\nprint(O, V)" }, { "alpha_fraction": 0.4951830506324768, "alphanum_fraction": 0.5202311873435974, "avg_line_length": 33.63333511352539, "blob_id": "f17e6b03692ec2f3ad72fed33bf42c063daa9128", "content_id": "0be5c3cda7c4887a139e317b7e36412d0ee6abc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1330, "license_type": "no_license", "max_line_length": 61, "num_lines": 30, "path": "/알고리즘/온라인저지/2022/09/0912/토마토.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque # BFS\n\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1] # delta 이동\nM, N = map(int, input().split()) # M부터 받는 거, 낚시 주의\narr = [list(map(int, input().split())) for _ in range(N)]\nQ = deque()\nfor i in range(N):\n for j in range(M):\n if arr[i][j] == 1: # 첫 날 토마토인 좌표들 추가\n Q.append((i, j, 0)) # y, x, day\n arr[i][j] = 0\nresult = 0\nwhile Q: # BFS\n y, x, day = Q.popleft() # day를 추가하면서 가장 마지막에 나온 day를 잡을 것\n if arr[y][x] == 1: continue # 다른 토마토를 통해서 이미 익은 토마토\n arr[y][x] = 1 # 잘 익은 토마토\n result = max(result, day) # 더 오랜시간이 걸려서 익은 토마토가 있으면 갱신\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i] # 전후좌우 토마토\n if 0<=ny<N and 0<=nx<M and arr[ny][nx] == 0:\n Q.append((ny, nx, day+1)) # 다음 날에 이 토마토는 익는다\nfor i in range(N):\n if result == -1: break # 모든 토마토가 익을 수 없었다? 
-> 종료\n for j in range(M):\n if arr[i][j] == 0: # 충분한 시간이 흘렀는데 안익은 토마토가 있다?\n result = -1 # 모든 토마토가 익을 수 없었다\n break\nprint(result)\n\n# https://www.acmicpc.net/problem/7576" }, { "alpha_fraction": 0.3545454442501068, "alphanum_fraction": 0.40303030610084534, "avg_line_length": 18.352941513061523, "blob_id": "9c63da119c55bc957be00ccfc650158f112da147", "content_id": "617f9913e5a8ae33d082b78caa663dd45d70d51f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "no_license", "max_line_length": 24, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/04/0417/공백 없는 A+B.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "num = input()\nA = B = 0\nif len(num) == 2:\n A = int(num[0])\n B = int(num[1])\nelif len(num) == 3:\n if num[1] == '0':\n A = int(num[:2])\n B = int(num[-1])\n elif num[-1] == '0':\n A = int(num[0])\n B = int(num[1:])\nelse: # len 4\n A = int(num[:2])\n B = int(num[2:])\n# print(A, B)\nprint(A+B)\n\n" }, { "alpha_fraction": 0.5306122303009033, "alphanum_fraction": 0.5510203838348389, "avg_line_length": 10, "blob_id": "97baab3aa42d5e126377dcf6a3ce81e63ce6bcf5", "content_id": "b43d26137f3c389b8232c4f3486e8ed2e2628841", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 20, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/02/0205/모음의 개수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\n\ntext = input()\n\nfor t in text:\n if t in 'aeiou':\n result += 1\n\nprint(result)" }, { "alpha_fraction": 0.43478259444236755, "alphanum_fraction": 0.4434782564640045, "avg_line_length": 15.571428298950195, "blob_id": "85e388e17493395c68fa2abaa35a5bf9f0f9bc77", "content_id": "8b63a298555ad7ee477415ec8b5ee13953082307", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": 
"no_license", "max_line_length": 32, "num_lines": 7, "path": "/알고리즘/온라인저지/2021/08/0825/최대공약수와 최소공배수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a, b = map(int, input().split())\nc, d = a, b\nwhile a % b != 0:\n a, b = b, a % b\n\nprint(b)\nprint(b*(c//b)*(d//b))" }, { "alpha_fraction": 0.5570470094680786, "alphanum_fraction": 0.5973154306411743, "avg_line_length": 20.428571701049805, "blob_id": "b76e6691b5004590fe6901e5ca007a58669ba7fe", "content_id": "362251c1e4598117dae8c927a5a94400a43d6b4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/02/0220/콘테스트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "W = [int(input()) for _ in range(10)]\nK = [int(input()) for _ in range(10)]\n\nW.sort(reverse=True)\nK.sort(reverse=True)\n\nprint(sum(W[:3]), sum(K[:3]))" }, { "alpha_fraction": 0.42105263471603394, "alphanum_fraction": 0.6736842393875122, "avg_line_length": 15, "blob_id": "77117f5b7f31de18c79563c19a8512036e56c7a4", "content_id": "36788e9fa08a24afc2ea3f0c0afdc92e8899f6d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 41, "num_lines": 6, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/25차시 2. 
자료구조 – 리스트, 튜플 - 연습문제 27.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "arr = [12,24,35,24,88,120,155,88,120,155]\n\nresult = list(set(arr))\n\nresult.sort()\nprint(result)" }, { "alpha_fraction": 0.53125, "alphanum_fraction": 0.5580357313156128, "avg_line_length": 19.409090042114258, "blob_id": "2f89ffa455e3e09160886712c6bc1a06f7a74238", "content_id": "a8cdbfac89b0e0f1cdd09ea909a539c73312911e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 510, "license_type": "no_license", "max_line_length": 38, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/04/0402/네 번째 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n까먹은 수의 위치\n1. 맨 앞 or 맨 뒤\n2. 중간\n\n맨 앞 or 맨 뒤 일 경우 둘 중 아무거나 편한 거 출력\n\"\"\"\nnums = list(map(int, input().split()))\nnums.sort()\ndiff = 1e9\nfor i in range(1, len(nums)):\n if nums[i] - nums[i-1] < diff:\n diff = nums[i] - nums[i-1]\nin_middle = False\nresult = 0\nfor i in range(1, len(nums)):\n if nums[i] != nums[i-1] + diff:\n in_middle = True\n result = nums[i-1] + diff\nif not in_middle:\n result = nums[-1] + diff\nprint(result)" }, { "alpha_fraction": 0.5075757503509521, "alphanum_fraction": 0.5227272510528564, "avg_line_length": 13.777777671813965, "blob_id": "df8187d3494e734176500ffe8819edcf2e410a68", "content_id": "d41dfe637a71b9781e89b03a921cbeac322cc91f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 20, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/03/0326/골뱅이 찍기 - ㄷ.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nA = '@'*N\nB = A*5\nfor i in range(N):\n print(B)\nfor i in range(N*3):\n print(A)\nfor i in range(N):\n print(B)" }, { "alpha_fraction": 0.5315315127372742, "alphanum_fraction": 0.5525525808334351, "avg_line_length": 18.58823585510254, "blob_id": 
"91e0c9b32f9686c8e6ec90e4d5ddf8199d4516d9", "content_id": "a27039bf792ebba55ced1dc68a1d746a01421861", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/알고리즘/온라인저지/2021/10/1016/구간 합 구하기 4 .py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\nN, M = map(int, sys.stdin.readline().split())\n\narr = tuple(map(int, sys.stdin.readline().split()))\n\ntmp = [arr[0]]\n\nfor n in range(1, N):\n tmp.append(arr[n]+tmp[-1])\n\nfor m in range(M):\n a, b = map(int, sys.stdin.readline().split())\n if a == 1:\n print(tmp[b-1])\n else:\n print(tmp[b-1]-tmp[a-2])\n" }, { "alpha_fraction": 0.451977401971817, "alphanum_fraction": 0.4858756959438324, "avg_line_length": 21.25, "blob_id": "5a47f42c6a214b56e4dfd5a53dbb0e220366c8a2", "content_id": "ad119848ff2abb1695c4767a5289384768ab1a65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "no_license", "max_line_length": 28, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/01/0114/문자가 몇갤까.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n tmp = input()\n if tmp == '#': break\n result = set()\n for t in tmp.lower():\n if 97<=ord(t)<97+26:\n result.add(t)\n print(len(result))" }, { "alpha_fraction": 0.4675324559211731, "alphanum_fraction": 0.523809552192688, "avg_line_length": 13.5, "blob_id": "4fa98cf5e46475e07f2165b76369d734d547e0b2", "content_id": "0052547a85b53c9637149484ce73d605bfeecbd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 31, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/02/0201/수 정렬하기 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ndef input():\n return sys.stdin.readline()\n\ndp = [0] * 
10001\n\nN = int(input())\n\nfor n in range(N):\n dp[int(input())] += 1\n\nfor i in range(1, 10001):\n if dp[i]:\n for j in range(dp[i]):\n print(i)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5169082283973694, "avg_line_length": 16.25, "blob_id": "03ddc92c1d87c4246b6110b11a30b348ecb5251d", "content_id": "65d5f04548de5f5a21507359d266348f51303281", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 35, "num_lines": 24, "path": "/알고리즘/온라인저지/2022/02/0205/백설 공주와 일곱 난쟁이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "dwarfs = []\n\nfor _ in range(9):\n dwarfs.append(int(input()))\n\nsum_dwarfs = sum(dwarfs)\n\nfakes_idx = []\n\nfor i in range(8):\n temp = sum_dwarfs - dwarfs[i]\n\n for j in range(i, 9):\n if temp - dwarfs[j] == 100:\n fakes_idx.append(i)\n fakes_idx.append(j)\n break\n \n if fakes_idx:\n break\n\nfor i in range(9):\n if i not in fakes_idx:\n print(dwarfs[i])\n" }, { "alpha_fraction": 0.3867403268814087, "alphanum_fraction": 0.4254143536090851, "avg_line_length": 19.22222137451172, "blob_id": "b3d962521170bc06ff4b3468ce4d95aa3a4f022c", "content_id": "4c9ceacd5d829ddc5664eaba5028ac2603d9a683", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 32, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/01/0101/희주의 수학시험.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\narr = [0]*(B+1)\nk = 0\nfor i in range(1, B+1):\n for j in range(i):\n k += 1\n if k <= B:\n arr[k] = i\nprint(sum(arr[A:B+1]))" }, { "alpha_fraction": 0.43455496430397034, "alphanum_fraction": 0.46422338485717773, "avg_line_length": 30.236364364624023, "blob_id": "160117f4df71d0d513cd36b37dda433797558530", "content_id": "8ff24c71fadcbec1ff404e45dda08170959dcd0d", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1727, "license_type": "no_license", "max_line_length": 86, "num_lines": 55, "path": "/알고리즘/온라인저지/2021/08/0829/경비원.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\nW, H = map(int, sys.stdin.readline().split())\nmarket_count = int(sys.stdin.readline())\nmarkets = []\nfor _ in range(market_count):\n markets.append(list(map(int, sys.stdin.readline().split())))\ndgnswe, dgdist = map(int, sys.stdin.readline().split())\n\nresult = 0\n\nfor i in range(market_count):\n if (markets[i][0] == 1 and dgnswe == 2) or (markets[i][0] == 2 and dgnswe == 1):\n result += H\n tmp1 = markets[i][1] + dgdist\n tmp2 = W - markets[i][1] + W - dgdist\n if tmp1 < tmp2:\n result += tmp1\n else:\n result += tmp2\n\n elif (markets[i][0] == 3 and dgnswe == 4) or (markets[i][0] == 4 and dgnswe == 3):\n result += W\n tmp1 = markets[i][1] + dgdist\n tmp2 = H - markets[i][1] + H - dgdist\n if tmp1 < tmp2:\n result += tmp1\n else:\n result += tmp2\n\n elif markets[i][0] == dgnswe:\n result += abs(markets[i][1] - dgdist)\n else: # 좌 우 일때\n if markets[i][0] == 1:\n if dgnswe == 3:\n result += markets[i][1] + dgdist\n else:\n result += W - markets[i][1] + dgdist\n if markets[i][0] == 2:\n if dgnswe == 3:\n result += markets[i][1] + H - dgdist\n else:\n result += W - markets[i][1] + H - dgdist\n if markets[i][0] == 3:\n if dgnswe == 1:\n result += markets[i][1] + dgdist\n else:\n result += H - markets[i][1] + dgdist\n if markets[i][0] == 4:\n if dgnswe == 1:\n result += markets[i][1] + W - dgdist\n else:\n result += H - markets[i][1] + W - dgdist\n\nprint(result)\n\n" }, { "alpha_fraction": 0.45012786984443665, "alphanum_fraction": 0.46547314524650574, "avg_line_length": 28.769229888916016, "blob_id": "446e7697ac7ac2096f66107f7341ef94dbccb03d", "content_id": "7c5a0d0fe753e9ed4d9219a7f75eb7b00fec577b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 525, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/알고리즘/온라인저지/2021/08/0810/캠핑.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\nL P V\nresult = (V//P) * L\"\"\"\ncount = 0\nwhile True:\n L, P, V = map(int, input().split()) # 각각 변수를 입력받고\n count += 1 # Case count: ? 출력해줄 count변수\n if L + P + V == 0: # 마지막항으로 0 0 0 들어오면\n break # 종료\n C = V - ((V//P) * P) # 다 나눠주고 나머지 더할 상수 C\n if C > L: # C가 L보다 크면\n C = L # L보다 클 때 모두 다 C로 치환\n print(f'Case {count}: {((V//P) * L) + C}') # 테스트케이스마다 결과값 출력\n " }, { "alpha_fraction": 0.5040983557701111, "alphanum_fraction": 0.5778688788414001, "avg_line_length": 29.625, "blob_id": "f002a95df185def78ebdffd781308f50f2a3ee37", "content_id": "52394af2094eb192267b99647699d67250dc441d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "no_license", "max_line_length": 117, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/08/0830/Speed fines are not fine!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "F = [100, 270, 500] # fine\nA = int(input())\nB = int(input())\ni = 0\nif B > A:\n if 21<=B-A<=30: i = 1\n elif 31<=B-A: i = 2\nprint(f'You are speeding and your fine is ${F[i]}.' 
if B > A else 'Congratulations, you are within the speed limit!')" }, { "alpha_fraction": 0.39175257086753845, "alphanum_fraction": 0.4192439913749695, "avg_line_length": 16.176469802856445, "blob_id": "ed2874f1dcf496dcc51c17fa419800b27e7c0b6d", "content_id": "446a9e3bef8d5b453b6d9520d4ba5c4d0bcf24c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 36, "num_lines": 17, "path": "/알고리즘/온라인저지/2023/04/0401/루트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n B, N = map(int, input().split())\n \n if B == 0 and N == 0: break\n\n minn = int(1e9)\n result = 0\n\n for A in range(1, int(1e9)):\n D = abs(B - A**N)\n\n if D < minn: \n minn = D\n result = A\n else: break\n \n print(result)" }, { "alpha_fraction": 0.6292682886123657, "alphanum_fraction": 0.6536585092544556, "avg_line_length": 16.16666603088379, "blob_id": "0622a76f978d8e7406aa818fb617ba1f12793709", "content_id": "2da5655f4a4edcb70340d2633538e1fe8a53b360", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 47, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/11/1125/거리의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ninput()\nnums = list(map(int, input().rstrip().split()))\nnums.sort()\nresult = 0\nfor num1 in nums:\n for num2 in nums:\n result += abs(num1-num2)\nprint(result)" }, { "alpha_fraction": 0.5130434632301331, "alphanum_fraction": 0.5565217137336731, "avg_line_length": 11.88888931274414, "blob_id": "095f712f4bdab121946e0ccf6b7f8f3758d1452f", "content_id": "666082decbae1fa6b676961515c6a8fa3a738e34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 29, "num_lines": 9, 
"path": "/알고리즘/온라인저지/2021/08/0828/벌집.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nN = int(sys.stdin.readline())\n\nlayer = 1\ni = 1\nwhile layer < N:\n i += 1\n layer += (i-1)*6\nprint(i)" }, { "alpha_fraction": 0.43478259444236755, "alphanum_fraction": 0.54347825050354, "avg_line_length": 46, "blob_id": "57a13229b5ab104a9c7e8a75bd8498aea806ec71", "content_id": "387239b75a8a35bab4d8dbe592b107a40f5c5773", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "no_license", "max_line_length": 46, "num_lines": 1, "path": "/알고리즘/온라인저지/2022/03/0312/17배.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print(str(bin(int('0b'+input(), 2) * 17))[2:])" }, { "alpha_fraction": 0.3802816867828369, "alphanum_fraction": 0.44014084339141846, "avg_line_length": 34.5, "blob_id": "2dd58aa7f1183cf3655b36e1fdeb81b82d1abecb", "content_id": "15227be07a035cf18de054f870a233abb5ad9b26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 78, "num_lines": 8, "path": "/알고리즘/[템플릿]/정렬/버블 정렬.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "arr = [55, 7, 42, 78, 12]\nfor i in range(len(arr)-1, 0, -1):\n for j in range(i):\n print(i, j)\n if arr[j] > arr[j+1]:\n print(f'{arr[j]}가 {arr[j+1]}보다 크므로, {arr[j]}와 {arr[j+1]}의 자리를 변경')\n arr[j], arr[j+1] = arr[j+1], arr[j]\n print(arr)\n" }, { "alpha_fraction": 0.4858657121658325, "alphanum_fraction": 0.5141342878341675, "avg_line_length": 19.25, "blob_id": "2bf6936e97abd2cf4806a845e9944f0e55fa0e92", "content_id": "be871ee39f58ec47c01000e610e51c7aa65c2ec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "no_license", "max_line_length": 73, "num_lines": 28, "path": "/알고리즘/온라인저지/2022/09/0917/못생긴 수.py", 
"repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom heapq import heappush, heappop\n\ninput = sys.stdin.readline\n\ndef ugly(num):\n result = 0\n HQ = []\n heappush(HQ, 1)\n cnt = 0\n while HQ:\n num = heappop(HQ)\n cnt += 1\n if cnt == N: \n result = num\n break\n for i in (2, 3, 5):\n next = num*i\n if next not in HQ:\n heappush(HQ, next)\n return result\n\nwhile True:\n N = int(input().rstrip())\n if N == 0: break\n print(ugly(N))\n\n# http://www.jungol.co.kr/bbs/board.php?bo_table=pbank&code=1318&sca=4030" }, { "alpha_fraction": 0.44291090965270996, "alphanum_fraction": 0.4667503237724304, "avg_line_length": 22.47058868408203, "blob_id": "39f1d9e4f85fb5217bce6039082cf1f6dac70d74", "content_id": "5a55d484521baf7231fbf186721e319274676f52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": "no_license", "max_line_length": 49, "num_lines": 34, "path": "/알고리즘/[템플릿]/문자열 탐색/KMP 알고리즘/찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef get_pi(pattern):\n j = 0\n for i in range(1, len(pattern)):\n while j > 0 and pattern[i] != pattern[j]:\n j = pi[j-1]\n if pattern[i] == pattern[j]:\n j += 1\n pi[i] = j\n\ndef KMP(string, pattern):\n get_pi(pattern)\n j = 0\n for i in range(len(string)):\n while j > 0 and string[i] != pattern[j]:\n j = pi[j-1]\n if string[i] == pattern[j]:\n if j == len(pattern)-1:\n result.append(i-j+1)\n result[0] += 1\n j = pi[j] # 다음 패턴 찾는 인덱싱 주의\n else:\n j += 1\n\nS, P = input().rstrip(), input().rstrip()\npi = [0] * len(P)\nresult = [0]\nKMP(S, P)\nfor r in result: print(r)\n\n# https://www.acmicpc.net/problem/1786" }, { "alpha_fraction": 0.4868420958518982, "alphanum_fraction": 0.5065789222717285, "avg_line_length": 14.947368621826172, "blob_id": "8ff6c20589f8934c9be0b7aadaf60440e9c38872", "content_id": "dc6371fd5843b3f992ba1f6c0ffb912bd97e3c2d", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "no_license", "max_line_length": 74, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/02/0201/덩치.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nwnhs = []\n\nresult = []\n\nfor n in range(N):\n wnhs.append(tuple(map(int, input().split())))\n\nfor i in range(N):\n cnt = 1\n\n for j in range(N):\n if i != j and wnhs[i][0] < wnhs[j][0] and wnhs[i][1] < wnhs[j][1]:\n cnt += 1\n\n result.append(cnt)\n\nprint(*result)\n\n" }, { "alpha_fraction": 0.47752809524536133, "alphanum_fraction": 0.5, "avg_line_length": 18.83333396911621, "blob_id": "7b250ef340653b42f12db4340c6720cd27c8e1a5", "content_id": "31fd26b84973f5933fabb7aca107357696aab2ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 356, "license_type": "no_license", "max_line_length": 41, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/10/1028/기숙사 바닥.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nR, B = map(int, input().rstrip().split())\ndivisors = []\nfor i in range(1, int((R+B)**0.5+1)):\n if not (R+B)%i:\n divisors.append((i, (R+B)//i))\nfor divisor in divisors:\n W, L = divisor\n red = 0\n if W > 1:\n red += (W+L-2)*2\n else: red = L\n if red == R:\n print(L, W)\n break" }, { "alpha_fraction": 0.5775076150894165, "alphanum_fraction": 0.6003039479255676, "avg_line_length": 22.535715103149414, "blob_id": "84cea56092292d36844c6845260b465ec6f6bf50", "content_id": "a4ffcf7764b8d5b1c3f6a76659bb0d5702aefdcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1008, "license_type": "no_license", "max_line_length": 53, "num_lines": 28, "path": "/알고리즘/온라인저지/2022/10/1023/태상이의 훈련소 생활.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = 
sys.stdin.readline\n\nN, M = map(int, input().rstrip().split())\narr = list(map(int, input().rstrip().split()))\nacc = [0]*(N+1)\nfor m in range(M):\n a, b, k = map(int, input().rstrip().split())\n acc[a-1] += k\n acc[b] -= k\ndp = [0]*(N+1)\ndp[0] = acc[0]\nfor i in range(1, N+1):\n dp[i] = dp[i-1] + acc[i]\n print(arr[i-1] + dp[i-1], end=' ')\n\n\"\"\"\n누적합 배열인 acc를 생성한다\nacc에는 누적합의 시작과 끝만 들어있으며\n누적합이 시작될 때 k만큼 더했다가 끝나면서 k만큼 빼준다\n현재 누적합은 변수로 가지고 있지는 않고\n전에 계산한 누적합(dp[i-1])을 불러와서 acc[i]와 더한다\nacc[i]가 0일 경우, 이전 누적합을 그대로 사용하며 그 값은 dp[i-1]에 같이 들어있다\n해당 누적합이 종료되는 경우 k만큼 빼주어 해당 누적합을 종료한다\ndp에 들어있는 값은 m줄 만큼 들어온 입력들의 누적합이며\n이를 원 배열과 합한 결과를 출력한다\n\"\"\"" }, { "alpha_fraction": 0.4058355391025543, "alphanum_fraction": 0.48275861144065857, "avg_line_length": 22.625, "blob_id": "abf13c9c74924b326531475eb19cb10d728c874d", "content_id": "aaddab8f944d7d10ef549937cf24166b4b49f8ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 49, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/10/1003/달나라 토끼를 위한 구매대금 지불 도우미.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\nINF = int(1e9)\n\nN = int(input().rstrip())\ndp = [INF]*(int(1e5)+10)\ndp[0], dp[1], dp[2], dp[5], dp[7] = 0, 1, 1, 1, 1\nfor i in range(1, int(1e5)+1):\n tmp = dp[i]+1\n if dp[i]:\n dp[i+1] = min(dp[i+1], tmp)\n dp[i+2] = min(dp[i+2], tmp) \n dp[i+5] = min(dp[i+5], tmp) \n dp[i+7] = min(dp[i+7], tmp)\nprint(dp[N])" }, { "alpha_fraction": 0.46932005882263184, "alphanum_fraction": 0.5107794404029846, "avg_line_length": 34.52941131591797, "blob_id": "610a8a13dd70da3a712496e2a80da51279831284", "content_id": "65180dab07673799bfc7fcca60b0c9f2e58a7576", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 775, "license_type": "no_license", "max_line_length": 80, "num_lines": 17, "path": 
"/알고리즘/온라인저지/2022/10/1020/개미.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nants1, ants2 = list(input()), list(input())\nT = int(input())\narr = []\nfor i in range(N): arr.append((ants1[::-1][i], 1)) # 오른쪽으로 가려고 함, 배열이 뒤집히는 점에 주의\nfor i in range(M): arr.append((ants2[i], -1)) # 왼쪽으로 가려고 함\ntime = 0\nwhile time < T: # T초동안 움직일 것\n time += 1\n i = 0\n while i < N+M-1:\n ant1, ant2 = arr[i], arr[i+1]\n if ant1[1] == 1 and ant2[1] == -1: # 진행하려는 방향이 교차하는 경우\n arr[i], arr[i+1] = arr[i+1], arr[i] # 서로 자리를 바꿔준다 == 두 개미중 하나가 점프\n i += 1 # 인덱스 추가로 하나 더 늘려주기\n i += 1 # 다음 개미 탐색\nfor a in arr: print(a[0], end='')" }, { "alpha_fraction": 0.5675675868988037, "alphanum_fraction": 0.5945945978164673, "avg_line_length": 74, "blob_id": "40d124aa2870a85ba5367356709e60c92989de1f", "content_id": "86918264e4c5bda9ca7d40c6bdf7dc64a47d3187", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 74, "num_lines": 1, "path": "/알고리즘/온라인저지/2022/08/0825/성택이의 은밀한 비밀번호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())): print('yes' if 6<=len(input())<=9 else 'no')" }, { "alpha_fraction": 0.4747474789619446, "alphanum_fraction": 0.49494948983192444, "avg_line_length": 18.899999618530273, "blob_id": "1c0a2e49b87e3d5c3f0785a2ce2a4c93e9fa697e", "content_id": "32578c8e1c7aa32ef2419038fb4d8ffe2ff6452c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 33, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/09/0909/지영 공주님의 마법 거울.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr = [input() for _ in range(N)]\nK = int(input())\nif K == 1:\n for a in arr: print(a)\nelif K == 2:\n for a in arr: print(a[::-1])\nelse:\n for a in arr[::-1]:\n 
print(a)" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.5625, "avg_line_length": 25.272727966308594, "blob_id": "2b053e33c44bc7540abf65a19ec323e070c2f6a4", "content_id": "7718290e102e87952bb763a9dbd9108e39859964", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 51, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/11/1105/해밍 거리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in range(int(input().rstrip())):\n nums = [input().rstrip() for i in range(2)]\n result = 0\n for i in range(len(nums[0])):\n if nums[0][i] != nums[1][i]:\n result += 1\n print('Hamming distance is {}.'.format(result))" }, { "alpha_fraction": 0.5492957830429077, "alphanum_fraction": 0.577464759349823, "avg_line_length": 13.300000190734863, "blob_id": "3b10d57aa2c43d315707e48a469db74632d0d66f", "content_id": "3d37083c698ecfb2c734b2d116847979d0008758", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 29, "num_lines": 10, "path": "/알고리즘/온라인저지/2023/02/0218/회문은 회문아니야!!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nS = input().strip()\nl = len(S)\n\nif S == S[0]*l: print(-1)\nelif S == S[::-1]: print(l-1)\nelse: print(l)" }, { "alpha_fraction": 0.47413793206214905, "alphanum_fraction": 0.47413793206214905, "avg_line_length": 28.08333396911621, "blob_id": "5e68bcf6566601e625145a08f22f84697f7cd385", "content_id": "d4d32e30008f31dbad65eea6317ea51f505c6075", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 60, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/12/1224/사칙연산.py", "repo_name": "sorrow4468/BAEKJOON", 
"src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n A, ope, B, equal, answer = input().split()\n A, B, answer = map(int, [A, B, answer])\n if ope == '+':\n result = A+B\n elif ope == '-':\n result = A-B\n elif ope == '*':\n result = A*B\n elif ope == '/':\n result = A//B\n print('correct' if result == answer else 'wrong answer')" }, { "alpha_fraction": 0.5234899520874023, "alphanum_fraction": 0.5335570573806763, "avg_line_length": 18.933332443237305, "blob_id": "521c7e12f97e690cafe762e9124f0230edd96e98", "content_id": "12a5ef32ee62cacd54ea0a33308d0f845a02c54f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "no_license", "max_line_length": 40, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/10/1026/유진수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = input().rstrip()\nflag = False\nfor i in range(1, len(N)):\n A, B = N[:i], N[i:]\n front, rear = 1, 1\n for a in A:\n front *= int(a)\n for b in B:\n rear *= int(b)\n if front == rear: flag = True; break\nprint('YES' if flag else 'NO')" }, { "alpha_fraction": 0.44718310236930847, "alphanum_fraction": 0.5211267471313477, "avg_line_length": 12.571428298950195, "blob_id": "de3c948ae43b86f88aa5127864984822b76bf77b", "content_id": "207f6e5660aa28ae440eaf3eb99103b066314f44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "no_license", "max_line_length": 35, "num_lines": 21, "path": "/알고리즘/[템플릿]/유클리드 호제법/유클리드 호제법.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import timeit\nstart = timeit.default_timer()\n\n\na, b = 12368711, 2678178\n\nwhile a % b != 0:\n a, b = b, a % b\n\nprint(b)\n\n\n\n\n# for i in range(max(a,b),0,-1):\n# if a % i == 0 and b % i == 0:\n# print(i)\n# break\n\nend = timeit.default_timer()\nprint(f'{end-start:.5f}')" }, { "alpha_fraction": 
0.5405405163764954, "alphanum_fraction": 0.5608108043670654, "avg_line_length": 20.214284896850586, "blob_id": "8ce7e43e5fa89d92e94d907b524eac0e9941a384", "content_id": "7534a2259b78d0bdad9cf5a2e000d46e6d5b4b10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 38, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/11/1109/지우개.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\narr = list(range(1, N+1))\nwhile len(arr) > 1:\n pop_idx_list = []\n for i in range(len(arr)):\n if not i%2:\n pop_idx_list.append(i)\n for pop_idx in pop_idx_list[::-1]:\n arr.pop(pop_idx)\nprint(arr[0])" }, { "alpha_fraction": 0.49528300762176514, "alphanum_fraction": 0.5141509175300598, "avg_line_length": 24, "blob_id": "f07290d36abc2eb61d77e8c4da725cd0903bc131", "content_id": "7199c117e430453677f2e7cc6f3ed5a9a19dae83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 424, "license_type": "no_license", "max_line_length": 41, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/10/1008/첼시를 도와줘!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in range(int(input().rstrip())):\n P = int(input().rstrip())\n cost, name = [], []\n for p in range(P):\n player = input().rstrip().split()\n cost.append(int(player[0]))\n name.append(player[1])\n result = [0, 0]\n for i in range(P):\n if cost[i] > result[0]:\n result[0] = cost[i]\n result[1] = i\n print(name[result[1]])" }, { "alpha_fraction": 0.5017921328544617, "alphanum_fraction": 0.5232974886894226, "avg_line_length": 22.33333396911621, "blob_id": "fd9b98cfcbb969b138d0826b553da5f2147db3ac", "content_id": "1571e63c2fb1ad23453a4aff1c8e7761ded850f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 50, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/10/1003/Maximum Subarray.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in [0]*int(input().rstrip()):\n N = int(input().rstrip())\n arr = list(map(int, input().rstrip().split()))\n dp = [0]*N\n dp[0] = arr[0]\n for i in range(1, N):\n dp[i] = max(dp[i-1]+arr[i], arr[i])\n print(max(dp))" }, { "alpha_fraction": 0.4647887349128723, "alphanum_fraction": 0.49295774102211, "avg_line_length": 34.75, "blob_id": "9dd2f49874fe37c9153133107a8207728691bfcb", "content_id": "54ebf63835bcf8c14bac6d6d71d7dca75bfeb429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 35, "num_lines": 4, "path": "/알고리즘/온라인저지/2023/03/0306/백발백준하는 명사수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C = map(int, input().split())\nD, E, F = map(int, input().split())\ntmp = ((A-D)**2 + (B-E)**2) ** 0.5\nprint('YES' if tmp < C+F else 'NO')" }, { "alpha_fraction": 0.5824742317199707, "alphanum_fraction": 0.5979381203651428, "avg_line_length": 23.375, "blob_id": "27fa5ee763e6ee8c32dc89cc1202d911b4dbe01b", "content_id": "a8d2ab868644aebcf2962bf18f6b33b95e0cadee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 55, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/09/0924/연속부분최대곱.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\ndp = [float(input().rstrip()) for _ in range(N)]\nfor i in range(1, N): dp[i] = max(dp[i], dp[i]*dp[i-1])\nprint(f'{max(dp):.3f}')" }, { "alpha_fraction": 0.5766870975494385, "alphanum_fraction": 0.5981594920158386, "avg_line_length": 
20.799999237060547, "blob_id": "2e9ea56b06fab675e78195b0bc6c94e3d5701dde", "content_id": "2dbee2c71ac3d2500e195daaa0b5946279196ba4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 54, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/10/1003/두 수의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\narr = sorted(list(map(int, input().rstrip().split())))\nK = int(input().rstrip())\nresult = 0\nstart, end = 0, N-1\nwhile start<end:\n tmp = arr[start]+arr[end]\n if tmp > K: end -= 1\n elif tmp < K: start += 1\n else: result += 1; start += 1\nprint(result)" }, { "alpha_fraction": 0.5569105744361877, "alphanum_fraction": 0.577235758304596, "avg_line_length": 26.44444465637207, "blob_id": "99d07dccd5f757a19e860473aa22ab761bf74e70", "content_id": "0c933460f10d0fa7e079da08281aa7c545b5a807", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 59, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/11/1107/한다 안한다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in range(int(input().rstrip())):\n word = input().rstrip()\n prev, rear = word[:len(word)//2], word[len(word)//2:]\n rear = rear[::-1]\n print('Do-it' if prev[-1] == rear[-1] else 'Do-it-Not')" }, { "alpha_fraction": 0.4350649416446686, "alphanum_fraction": 0.46320345997810364, "avg_line_length": 26.235294342041016, "blob_id": "21c3f39199275cf4a031a5d15098d3ea68032a4b", "content_id": "8fd0d968d99f14f94ca0ce73edcc0a15f41a15ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 558, "license_type": "no_license", "max_line_length": 62, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/08/0817/순열 사이클.py", 
"repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n N = int(input())\n P = [list(range(1, N+1)), list(map(int, input().split()))]\n used, result = [0] * N, 0\n for i in range(N):\n if not used[i]:\n used[i], start, now = 1, P[0][i], P[1][i]\n while now != start:\n used[now-1] = 1\n now = P[1][now-1]\n result += 1\n print(result)\n\n\"\"\"\n문제 조건이, 1부터 N까지의 자연수로 이루어졌다고 \n주어졌기 때문에, 한 사이클을 돌 때 인덱스를 고민하지 않아도 되었다\n\"\"\"" }, { "alpha_fraction": 0.5480769276618958, "alphanum_fraction": 0.6282051205635071, "avg_line_length": 27.454545974731445, "blob_id": "1485f432d58b7213f6c81b97ae80a75cc00a2940", "content_id": "e61498fcab4ef789082b8b431aa62d3148cad1aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 40, "num_lines": 11, "path": "/알고리즘/온라인저지/2023/01/0115/나의 학점은.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "scores = list(map(int, input().split()))\nhongik = int(input())\nrank = scores.index(hongik)+1\nif 1<=rank<6: result = 'A+'\nelif 6<=rank<16: result = 'A0'\nelif 16<=rank<31: result = 'B+'\nelif 31<=rank<36: result = 'B0'\nelif 36<=rank<46: result = 'C+'\nelif 46<=rank<49: result = 'C0'\nelse: result = 'F'\nprint(result)" }, { "alpha_fraction": 0.43801653385162354, "alphanum_fraction": 0.43801653385162354, "avg_line_length": 15.199999809265137, "blob_id": "f387025a3fb539b5d8507eaecea275d7639558ce", "content_id": "b95b7b427073e1c2c3e2eab53c452ea200e72e55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "no_license", "max_line_length": 35, "num_lines": 15, "path": "/알고리즘/온라인저지/2023/05/0517/Andando no tempo.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C = map(int, input().split())\nresult = 'N'\nif A == B: \n result = 'S'\nif C == B: \n result = 'S'\nif A == C: \n 
result = 'S'\nif A+C == B: \n result = 'S'\nif A == B+C: \n result = 'S'\nif A+B == C: \n result = 'S'\nprint(result)" }, { "alpha_fraction": 0.40856030583381653, "alphanum_fraction": 0.4591439664363861, "avg_line_length": 18.846153259277344, "blob_id": "c150362ddca8e9e21887bcafcd44cc48859de273", "content_id": "862003390eb7a0ba29bb02a1899ba17182f64689", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 32, "num_lines": 13, "path": "/알고리즘/온라인저지/2023/05/0527/Electric Bill.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nN = int(input())\nfor i in range(N):\n C = int(input())\n print(C, end=' ')\n result = 0\n if C > 1000:\n result += A*1000\n C -= 1000\n result += B*C\n else:\n result = A*C\n print(result)" }, { "alpha_fraction": 0.5373134613037109, "alphanum_fraction": 0.6268656849861145, "avg_line_length": 16, "blob_id": "be411eec98f587d7ef761fd8f415f9214e018343", "content_id": "aaf9ba812bca29e30ed71f9ae0b42a3574b51789", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 67, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/12/1231/Even or Odd.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nif N%2:print(0)\nelif N//2%2:print(1)\nelse:print(2)" }, { "alpha_fraction": 0.6019417643547058, "alphanum_fraction": 0.6019417643547058, "avg_line_length": 25, "blob_id": "b57ba08f0ccecd4c0ac5f00568d2ac72b5074b73", "content_id": "480eabf627f685057794bfcf64e27afdc70b9a4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/02/0205/시험 점수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "mk = 
list(map(int, input().split()))\nms = list(map(int, input().split()))\n\nprint(max(sum(mk), sum(ms)))" }, { "alpha_fraction": 0.4843304753303528, "alphanum_fraction": 0.5327635407447815, "avg_line_length": 22.46666717529297, "blob_id": "edbb4049033f0319e1c814f230bbaf55f40e9499", "content_id": "cd595490d148a0e79a92d95f3dec6a881bd8d2d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 56, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/09/0911/RGB거리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ndp = [list(map(int, input().split())) for _ in range(N)]\nfor i in range(1, len(dp)):\n dp[i][0] += min(dp[i-1][1], dp[i-1][2])\n dp[i][1] += min(dp[i-1][0], dp[i-1][2])\n dp[i][2] += min(dp[i-1][0], dp[i-1][1])\nprint(min(dp[N-1]))\n\n\"\"\"\nDP문제 진짜 현타오네\n코드가 이렇게 간단할 수가 있나\n배열 그대로 들고 이동하면서\n해당 집이 해당 색으로 집을 칠할 때의 \n최소값들을 전부 구하면서 이동한다\n\"\"\"" }, { "alpha_fraction": 0.5537634491920471, "alphanum_fraction": 0.5645161271095276, "avg_line_length": 30.16666603088379, "blob_id": "d08dd5cdd8fad69d413bd7e570bdccc10d6af51b", "content_id": "382be1ec83a5e76a1f39a4d004a9b3659035c269", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 48, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/01/0107/암기왕.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n input()\n notes = set(map(int, input().split()))\n input()\n for note in list(map(int, input().split())):\n print(1 if note in notes else 0)" }, { "alpha_fraction": 0.4583333432674408, "alphanum_fraction": 0.46296295523643494, "avg_line_length": 18.727272033691406, "blob_id": "a1cc1aea855633f1eb7b7e3449bb76e36a83f038", "content_id": "4f2faf58669f42eac5d3f516615e416b3c99ca4a", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 216, "license_type": "no_license", "max_line_length": 40, "num_lines": 11, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/40차시 4. 문자열 - 연습문제 4.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# while True:\n# tmp = input()\n# print('>> {}'.format(tmp.upper()))\n# if not tmp:\n# exit()\n\nfor i in range(3):\n tmp = input()\n print('>> {}'.format(tmp.upper()))\n if not tmp:\n break" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.5054945349693298, "avg_line_length": 29.5, "blob_id": "3fe44d4a3ac3ceae0e69bb902d1f05e070d49e92", "content_id": "79374eea1e74a122f91bfa38ec1d89197f842164", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 47, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/08/0826/Football Scoring.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "C = [6, 3, 2, 1, 2] # change\nfor _ in range(2):\n S = list(map(int, input().split())) # score\n result = 0\n for i in range(5): result += C[i] * S[i]\n print(result, end=' ')" }, { "alpha_fraction": 0.5773195624351501, "alphanum_fraction": 0.5979381203651428, "avg_line_length": 16.727272033691406, "blob_id": "cb7e8599953499fb8eb60355540fd6a5cef010fc", "content_id": "dcc19782001d138bb7a80bb3640f34d79669c01c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 31, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/10/1002/내 선물을 받아줘 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\narr = input().rstrip()\nresult = 0\nfor i in range(N-1):\n tmp = arr[i] + arr[i+1]\n if tmp == 'EW': result += 1\nprint(result)" }, { "alpha_fraction": 0.5378151535987854, "alphanum_fraction": 
0.5462185144424438, "avg_line_length": 11, "blob_id": "e0c9b7601595cc1aa76f73f594f1d174db3a652b", "content_id": "edfe5a70b9e7e7fb5db21302baca0b88a9487c5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 36, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/02/0205/사과.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nresult = 0\n\nfor n in range(N):\n s, a = map(int, input().split())\n\n result += a%s\n\nprint(result)" }, { "alpha_fraction": 0.6412213444709778, "alphanum_fraction": 0.6412213444709778, "avg_line_length": 25.399999618530273, "blob_id": "3f54bb447531d5ba55947d9f493ff8f696ffd578", "content_id": "c195c344aeb47dc5bca5908bbeff9d083593ebc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/알고리즘/온라인저지/2021/08/0822/직사각형에서 탈출.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "x, y, w, h = map(int, input().split())\ndistance = []\ndistance.append(min(x, w-x))\ndistance.append(min(y, h-y))\nprint(min(distance))" }, { "alpha_fraction": 0.6243094205856323, "alphanum_fraction": 0.6243094205856323, "avg_line_length": 35, "blob_id": "63a0c7356245db393cdcaaeca142cf66c8291356", "content_id": "08256a863a5058f128b1548992acf9a7f342aec6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 116, "num_lines": 5, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/4차시 2. 자료구조 – 리스트, 튜플 - 연습문제 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "s = 'Python is powerful... 
and fast; plays well with others; runs everywhere; is friendly & easy to learn; is Open.'\n\nfor i in s:\n if i not in 'aeiou':\n print(i, end='')\n\n" }, { "alpha_fraction": 0.3636363744735718, "alphanum_fraction": 0.38181817531585693, "avg_line_length": 17.5, "blob_id": "32b1b3caec9718e0d9b91be7dabb8f7d1617c653", "content_id": "c464d215f6d79ec64626296ac2d079b1ca4f224a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 110, "license_type": "no_license", "max_line_length": 23, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/05/0526/별 찍기 - 20.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfor n in range(N):\n if n%2 == 0:\n print('* ' * N)\n else:\n print(' *' * N)" }, { "alpha_fraction": 0.5142624378204346, "alphanum_fraction": 0.5509372353553772, "avg_line_length": 34.08571243286133, "blob_id": "042dbbb25cd79bd3eba22a38f8da120c83eaff27", "content_id": "30078684c569aae908149f1c6bf23d09d27c5f86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1567, "license_type": "no_license", "max_line_length": 100, "num_lines": 35, "path": "/알고리즘/온라인저지/2022/09/0916/공주님을 구해라!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\nN, M, T = map(int, input().rstrip().split())\narr = [list(map(int, input().rstrip().split())) for _ in range(N)]\nvisited = [[0]*M for _ in range(N)]\ndistance = int(1e9) # 임의의 거리 최대값\nQ = deque()\nif arr[0][0] == 2: distance = N+M-2 # 시작부터 그람인 경우, 무지성 택시거리 이동\nelse: # 시작이 그람은 아닌 경우\n Q.append((0, 0, 0)) # move, y, x\n visited[0][0] = 1\nwhile Q:\n move, y, x = Q.popleft()\n if y == N-1 and x == M-1: distance = min(distance, move) # 공주님한테 도착했을 때 걸린 시간 갱신\n for i in range(4): # 델타 이동\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<N and 0<=nx<M and not visited[ny][nx] and 
arr[ny][nx] != 1: # 방문한 적이 없으며, 마법벽이 아닐 때\n Q.append((move+1, ny, nx)) # 빈 방이든 그람이든 일단 탐색\n visited[ny][nx] = 1\n if arr[ny][nx] == 2: # 그람인 경우\n gram = move+1 + (N+M-2-ny-nx) # 무지성 택시이동, 왔던 길 무시 가능\n distance = min(distance, gram) # 돌고 돌아 그람을 줍는 경우는 배제된다, BFS이니까!\nprint(distance if distance<=T else 'Fail') # T시간 이내에 공주님을 구할 수 있습니까?\n\n\"\"\"\n그람을 주운 자리에서 무지성 택시거리로 이동 가능\n택시거리 : 두 좌표 (x1, y1), (x2, y2)의 가로+세로\n abs(x1-x2) + abs(y1-y2)\n\"\"\"\n\n# https://www.acmicpc.net/problem/17836" }, { "alpha_fraction": 0.4115842580795288, "alphanum_fraction": 0.47708407044410706, "avg_line_length": 24.781394958496094, "blob_id": "9956cd3b26e271a99c53ef00bdb8a36dc307571d", "content_id": "cd2949dd5072ba3030dc351fe49eaf42388d2afb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6852, "license_type": "no_license", "max_line_length": 101, "num_lines": 215, "path": "/알고리즘/온라인저지/2022/08/0812/말이 되고픈 원숭이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\ndy = [-1, 1, 0, 0] # delta y\ndx = [0, 0, -1, 1] # delta x\nky = [-2, -1, 1, 2, 2, 1, -1, -2] # knight y\nkx = [1, 2, 2, 1, -1, -2, -2, -1] # knight x\n\ndef bfs():\n Q = deque()\n Q.append((0, 0, K, 0)) # y, x, k, move\n # 3차원 방문배열\n # 만약 K가 2일 때, 해당 좌표까지 나이트 이동을 0번 혹은 1번 혹은 2번 사용하여\n # 해당 좌표에 도착할 수 있는지 체크하기 위함\n visited = [[[False]*(K+1) for _ in range(W)] for _ in range(H)] \n # 아래 코드에서 방문 좌표를 추가하면서 방문처리 할 것이므로\n # 시작 점 방문 처리\n visited[0][0][K] = True \n while Q: # 탐색할 좌표가 있을 때\n y, x, k, move = Q.popleft() # 세로, 가로, 남은 나이트 이동 횟수, 이동한 횟수\n if y == H-1 and x == W-1: # 도착했다면\n return move # 그 때의 이동 횟수\n if k > 0: # 나이트 이동 횟수가 남아있을 때\n for i in range(8):\n ny = y+ky[i] # 나이트 이동 y\n nx = x+kx[i] # 나이트 이동 x\n # 범위 안에 있고, 장애물이 아니면서, 나이트이동으로 탐색할 수 있다면\n if 0<=ny<H and 0<=nx<W and not arr[ny][nx] and not visited[ny][nx][k-1]:\n # 해당 좌표를 탐색할 것이며\n # 아직 나이트 이동을 k-1번 할 수 있습니다\n 
visited[ny][nx][k-1] = True \n Q.append((ny, nx, k-1, move+1))\n for i in range(4):\n ny = y+dy[i] # 델타 이동 y\n nx = x+dx[i] # 델타 이동 x\n # 범위 안에 있고, 장애물이 아니면서\n # 현재 사용가능한 나이트이동의 횟수로 해당 좌표를 방문한 적이 없을 때\n if 0<=ny<H and 0<=nx<W and not arr[ny][nx] and not visited[ny][nx][k]:\n # 해당 좌표를 탐색할 것이며\n # 나이트 이동을 사용하지 않고 탐색합니다\n # 여전히 k번의 나이트 이동을 사용할 수 있습니다\n visited[ny][nx][k] = True\n Q.append((ny, nx, k, move+1))\n return -1 # 도착할 수 없음\n\nK = int(input())\nW, H = map(int, input().split())\narr = tuple(tuple(map(int, input().split())) for _ in range(H)) # 시간 단축을 위한 tuple (list보다 빠름, 아마도..?)\nprint(bfs())\n\n\n\"\"\"\n시간 : 120분, 미해결, 정답 풀이 확인 \n\n풀이\n 나이트이동이 가능하다고 해서 아무때나 그리디하게 적용하게 되면 오답이다\n 세로 가로 에서 하나 더 추가한 3차원 방문배열이 필요하다\n 방문 배열의 해당 좌표에 몇번의 나이트 이동으로 도달하였는지 기록하며 진행한다\n 나이트이동과 그냥 이동을 통한 탐색을 동시에 진행한다\n\n반례 모음\n\n1\n6 6\n0 0 1 1 0 0\n1 1 1 0 0 0\n0 0 1 1 0 0\n1 0 1 1 1 1\n1 0 0 1 1 1\n0 0 0 0 0 0\nanswer : 8\n\n2\n6 6\n0 0 1 1 0 0\n1 1 1 0 0 0\n0 0 1 1 0 0\n1 0 1 1 1 1\n1 0 0 1 1 1\n0 0 0 0 0 0\nanswer : 6\n\n1\n2 3\n0 1\n1 1\n1 0\nanswer : 1\n\n4\n6 6\n0 0 1 1 0 0\n1 1 1 0 0 0\n0 0 1 1 0 0\n1 0 1 1 1 1\n1 0 0 1 1 1\n0 0 0 0 0 0\nanswer : 4\n\n3\n4 5\n0 1 1 1\n1 1 0 1\n1 1 1 1\n1 1 1 0\n1 1 1 0\nanswer : 3\n\n1\n5 5\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 0 0\n0 0 0 1 1\n0 0 0 1 0\nanswer : 6\n\n1\n4 4\n0 0 1 1\n0 0 1 1\n0 0 1 1\n1 1 1 0\nanswer : 4\n\"\"\"\n\n\n# 쾅\n# from collections import deque\n\n# K = int(input())\n# W, H = map(int, input().split())\n# arr = [list(map(int, input().split())) for _ in range(H)]\n# result = -1\n# K_dy = [-2, -1, 1, 2, 2, 1, -1, -2] # Knight dy\n# K_dx = [1, 2, 2, 1, -1, -2, -2, -1] # Knight dx\n# dy = [-1, 1, 0, 0] # 나이트 이동 이후 델타 dy\n# dx = [0, 0, -1, 1] # 나이트 이동 이후 델타 dx\n# Q = deque()\n# Q.append((H-1, W-1, 0, 0)) # (y, x, K_cnt, move)\n# visited = [[0]*W for _ in range(H)] # 방문배열\n# K_visited = [[0]*W for _ in range(H)] # 나이트 이동 방문배열\n# visited[H-1][W-1] = 1\n# K_visited[H-1][W-1] = 1\n# tmp = int(1e9) # 가상의 최소값\n# while Q:\n# y, x, K_cnt, move 
= Q.popleft()\n# if y == 0 and x == 0: # 도착했을 경우\n# if move < tmp: tmp = move\n# continue\n# if visited[y][x]: continue\n# if K_cnt < K: # 나이트 이동을 할 기회가 남아있을 때\n# for i in range(8):\n# ny = y + K_dy[i]\n# nx = x + K_dx[i]\n# # 이동하려는 좌표가 범위안에 있고, 방문한 적이 없고, 장애물이 아닐 때\n# if 0<=ny<H and 0<=nx<W and not K_visited[ny][nx] and not arr[ny][nx]:\n# Q.append((ny, nx, K_cnt+1, move+1))\n# K_visited[ny][nx] = 1\n# for i in range(4):\n# ny = y + dy[i]\n# nx = x + dx[i]\n# # 이동하려는 좌표가 범위안에 있고, 방문한 적이 없고, 장애물이 아닐 때\n# if 0<=ny<H and 0<=nx<W and not visited[ny][nx] and not arr[ny][nx]:\n# Q.append((ny, nx, K_cnt, move+1))\n# visited[ny][nx] = 1\n# for v in visited: print(v)\n# print(tmp if tmp != int(1e9) else result)\n\n\n# 쾅쾅\n# from collections import deque\n\n# K = int(input())\n# W, H = map(int, input().split())\n# arr = [list(map(int, input().split())) for _ in range(H)]\n# result = -1\n# K_dy = [-2, -1, 1, 2, 2, 1, -1, -2] # Knight dy\n# K_dx = [1, 2, 2, 1, -1, -2, -2, -1] # Knight dx\n# dy = [-1, 1, 0, 0] # 나이트 이동 이후 델타 dy\n# dx = [0, 0, -1, 1] # 나이트 이동 이후 델타 dx\n# Q = deque()\n# Q.append((H-1, W-1, 0, 0)) # (y, x, K_cnt, move)\n# visited = [[0]*W for _ in range(H)] # 방문배열\n# tmp = int(1e9) # 가상의 최소값\n# while Q:\n# y, x, K_cnt, move = Q.popleft()\n# if y == 0 and x == 0: # 도착했을 경우\n# tmp = move\n# # print(K_cnt, K)\n# if K_cnt < K: tmp -= (K-K_cnt)*2\n# break\n# if visited[y][x]: continue\n# visited[y][x] = 1\n# blocked = True\n# for i in range(4):\n# ny = y + dy[i]\n# nx = x + dx[i]\n# # 이동하려는 좌표가 범위안에 있고, 방문한 적이 없고, 장애물이 아닐 때\n# if 0<=ny<H and 0<=nx<W and not visited[ny][nx] and not arr[ny][nx]:\n# Q.append((ny, nx, K_cnt, move+1))\n# blocked = False\n# for i in range(8):\n# ny = y + K_dy[i]\n# nx = x + K_dx[i]\n# if ny == 0 and nx == 0:\n# Q.append((ny, nx, K_cnt+1, move+1))\n# elif blocked and K_cnt < K:\n# # 이동하려는 좌표가 범위안에 있고, 방문한 적이 없고, 장애물이 아닐 때\n# if 0<=ny<H and 0<=nx<W and not visited[ny][nx] and not arr[ny][nx]:\n# Q.append((ny, nx, K_cnt+1, move+1))\n# # for v 
in visited: print(v)\n# print(tmp if tmp != int(1e9) else result)" }, { "alpha_fraction": 0.5086206793785095, "alphanum_fraction": 0.5258620977401733, "avg_line_length": 13.625, "blob_id": "11a29653a73b79585508e44040e8bc92445b4942", "content_id": "41e535c930a05a2dfc83558fb403bdc7e59ce00d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 20, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/05/0503/줄 세기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nwhile True:\n try:\n input()\n result += 1\n except EOFError:\n break\nprint(result)" }, { "alpha_fraction": 0.34090909361839294, "alphanum_fraction": 0.3636363744735718, "avg_line_length": 19.384614944458008, "blob_id": "3120419b70924cf2f36cba3f4c93dce807e0f0cc", "content_id": "02577ddb77a5f6470844d4600fc62d90abbea5ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/07/0701/피로도.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C, M = map(int, input().split())\nresult = 0\nF = 0 # fatigue : 피로도\nif not A > M:\n for i in range(24):\n if F+A <= M:\n F += A\n result += B\n else:\n F -= C\n if F < 0:\n F = 0\nprint(result)" }, { "alpha_fraction": 0.5199999809265137, "alphanum_fraction": 0.5299999713897705, "avg_line_length": 19.066667556762695, "blob_id": "6bab7dfd68e31ce14c5677d36a208b81fcfc71af", "content_id": "fff09b0ec6d02517d3145f3888e1b27445f444c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 300, "license_type": "no_license", "max_line_length": 40, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/07/0729/나는야 포켓몬 마스터 이다솜.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ndef input():\n return 
sys.stdin.readline().rstrip()\n\nN, M = map(int, input().split())\nP = ['' for _ in range(N+1)] # pokemon\nfor n in range(1, N+1):\n P[n] = input()\nfor m in range(M):\n Q = input()\n try: # int\n print(P[int(Q)])\n except: # str\n print(P.index(Q))" }, { "alpha_fraction": 0.41903170943260193, "alphanum_fraction": 0.4407345652580261, "avg_line_length": 30.552631378173828, "blob_id": "13df03152c058a237f88e9d4b61c3b486a64c0f3", "content_id": "9eee1eaef83d9566c9767fb33e674cbd048e6991", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1486, "license_type": "no_license", "max_line_length": 95, "num_lines": 38, "path": "/알고리즘/온라인저지/2022/04/0420/영역 구하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\nN, M, K = map(int, input().split())\n# 모눈종이를 전부 1로 채우고, 직사각형으로 채워지는 곳을 0으로 바꿈\narr = [[1]*M for _ in range(N)]\nfor k in range(K):\n area = list(map(int, input().split()))\n for i in range(area[1], area[3]):\n for j in range(area[0], area[2]):\n arr[i][j] = 0\nresult = [] # 넓이를 담을 리스트\n# 델타이동\ndx = [0, 1, 0, -1]\ndy = [1, 0, -1, 0]\nq = deque() # BFS\ncnt = 0 # 넓이(영역)의 개수\n# 모눈종이를 돌다가\nfor n in range(N):\n for m in range(M):\n if arr[n][m]: # 분리된 영역이 있으면\n q.append((m, n)) # 출발\n square = 0 # 넓이값 초기화\n while q: # BFS\n tmp = q.popleft()\n x, y = tmp[0], tmp[1]\n if arr[y][x]:\n arr[y][x] = 0 # 방문 처리\n square += 1 # 넓이 계산\n for i in range(4): # 델타이동 하였을 때\n # 모눈종이를 벗어나지 않으며, 이웃한 분리된영역일 경우\n if (0 <= x+dx[i] < M) and (0 <= y+dy[i] < N) and arr[y+dy[i]][x+dx[i]]:\n q.append((x+dx[i], y+dy[i])) # 탐색점 추가\n result.append(square) # 영역의 넓이 리스트에 저장\n cnt += 1 # 영역 한 개 확인\nresult.sort() # 넓이 오름차순으로 정렬\n# 출력\nprint(cnt)\nprint(*result)" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.4523809552192688, "avg_line_length": 16.58333396911621, "blob_id": "b747a114050e1890192fd0efb31e1f9ea47d826d", "content_id": "ba5b2da593487a4176cb8ad4db8dfa6e1cdb2639", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 43, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/03/0320/감정이입.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def dec_to_bin(X):\n m = 1\n output = 0\n for x in X[::-1]:\n if int(x):\n output += m\n m *= 2\n return output\n\nA = input()\nB = input()\nprint(bin(dec_to_bin(A)*dec_to_bin(B))[2:])" }, { "alpha_fraction": 0.4429530203342438, "alphanum_fraction": 0.45413869619369507, "avg_line_length": 39.727272033691406, "blob_id": "88d0c0c0dd85a0dca3322528dbdb06d962333cd1", "content_id": "4381b38cefd92c98c9d99a4c28a9598f27eb5484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 589, "license_type": "no_license", "max_line_length": 72, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/04/0429/숫자 정사각형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\narr = [list(map(int, input())) for _ in range(N)] # 숫자 배열 입력 받기\nfor i in range(N, -1, -1): # 가장 큰 정사각형부터 확인\n # 정사각형의 길이가 i+1 일 때\n for j in range(N-i):\n for k in range(M-i):\n # arr[j][k] 는 정사각형 가장 왼쪽 위\n # 보고있는 정사각형의 꼭지점 숫자들이 전부 같을 때\n if arr[j][k] == arr[j][k+i] == arr[j+i][k+i] == arr[j+i][k]:\n print((i+1)**2) # 넓이 구해서 출력\n exit() # 코드 종료" }, { "alpha_fraction": 0.5344827771186829, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 18.44444465637207, "blob_id": "007fe795296c04c0818c0c1f67fef61f077cc86e", "content_id": "761140506af3dc4a9ad355329530938ea5c2522c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 39, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/03/0327/서버.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, T = map(int, input().split())\nworks = list(map(int, 
input().split()))\nresult = 0\nfor work in works:\n T -= work\n if T < 0:\n break\n result += 1\nprint(result)" }, { "alpha_fraction": 0.5281456708908081, "alphanum_fraction": 0.5645695328712463, "avg_line_length": 22.269229888916016, "blob_id": "8d1828b06acc1f1d40a55e352bf246d574600fb8", "content_id": "67a7c1323419224dfcf75fad4feda8f4d97ac467", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 604, "license_type": "no_license", "max_line_length": 49, "num_lines": 26, "path": "/알고리즘/온라인저지/2022/09/0913/참외밭.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K = int(input())\nbig = small = 1\nlines = []\nsmall_idx = []\nfor _ in range(6):\n lines.append(list(map(int, input().split())))\nresult = [[] for _ in range(4)]\nwhile lines[2][0] != lines[4][0]:\n lines.append(lines.pop(0))\n lines.append(lines.pop(0))\nfor i in range(6):\n ewsn, length = lines[i][0], lines[i][1]\n ewsn -= 1\n if result[ewsn]:\n small_idx.append(ewsn)\n result[ewsn].append(length)\nfor r in result: \n if len(r) == 1:\n big *= r[0]\ni = 0\nfor s in small_idx[::-1]:\n small *= result[s][i]\n i += 1\nprint((big-small)*K)\n\n# https://www.acmicpc.net/problem/2477" }, { "alpha_fraction": 0.5952380895614624, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 20.25, "blob_id": "1035d45dae73e20ded71863a6a431362589341ea", "content_id": "1462fcfa17324a03b5cffd4a1a39a4dda820c539", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 41, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/08/0805/Bottle Return.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nfor i in list(map(int, input().split())):\n result += i*5\nprint(result)" }, { "alpha_fraction": 0.47806355357170105, "alphanum_fraction": 0.5128592848777771, "avg_line_length": 29.090909957885742, "blob_id": 
"1548df1aeed4c9fa748cf6948a3e6b47fdf84f2e", "content_id": "9cf1ca9977314a2ba9d115b0336e9376da7f4d9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 997, "license_type": "no_license", "max_line_length": 58, "num_lines": 22, "path": "/알고리즘/온라인저지/2021/08/0821/직사각형 네개의 합집합의 면적 구하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n좌표평면배열을 만들어서\n직사각형들을 1로 저장하고\n저장하는 과정에서 겹치면 continue\n저장된 1들을 전부 출력\"\"\"\n\narr = [[0]*101 for _ in range(101)] # 0번째칸 추가해서 인덱스와 숫자 일치\nfor tc in range(4): # 모든 케이스는 4개의 직사각형\n x1, y1, x2, y2 = map(int, input().split()) # 좌표들\n for i in range(y1, y2): # y좌표 for문 먼저\n for j in range(x1, x2): # x좌표\n if arr[i][j]: # 값이 들어있으면\n continue # 컨티뉴\n else: # 아직 저장된 값이 없으면\n arr[i][j] = 1 # 1을 저장\n # 합집합은 or연산\n # True or로 넘어가기 때문에 값이 있으면 저장이 되지 않고\n # 합해지는 부분을 연산할 필요가 없어짐\narea = 0 # 면적\nfor ar in arr: # 저장된 배열을 돌면서\n area += sum(ar) # 각 줄을 더해서 면적에 저장\nprint(area) # 출력" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5348837375640869, "avg_line_length": 20.75, "blob_id": "cd49df5bd2dcd5318ea22a7aa9bb6230a1c5178b", "content_id": "8cf4d7f7af75810ca162549a862456b6e976058c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 49, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/02/0209/쿠폰.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nfor n in range(N):\n print('${:.2f}'.format(float(input()) * 0.8))" }, { "alpha_fraction": 0.46979865431785583, "alphanum_fraction": 0.48322147130966187, "avg_line_length": 29, "blob_id": "655f226db7d4262ee5eefca28cd0d686b8f8d0bc", "content_id": "827de99c8d418dae8d758cd36ba071dd69398581", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, "license_type": "no_license", "max_line_length": 46, "num_lines": 5, "path": 
"/알고리즘/온라인저지/2022/09/0904/수 뒤집기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n num = input()\n A, B = int(num), int(num[::-1])\n tmp = str(A+B)\n print('YES' if tmp == tmp[::-1] else 'NO')" }, { "alpha_fraction": 0.4054054021835327, "alphanum_fraction": 0.4864864945411682, "avg_line_length": 11.666666984558105, "blob_id": "df02734a1decd16cb4f930582d8b87974d1a12d5", "content_id": "c81fc81d02fea6cefb76dc897d999e948ac7dc83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/02/0213/조별과제를 하려는데 조장이 사라졌다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "L = int(input()) - 1\n\nprint(L//5 + 1)" }, { "alpha_fraction": 0.47093021869659424, "alphanum_fraction": 0.4883720874786377, "avg_line_length": 13.416666984558105, "blob_id": "96f20e699840e5de2aff0ada4036ab18fb3cdd95", "content_id": "873e782408626c7225957daca916c4bc4876d0cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 20, "num_lines": 12, "path": "/알고리즘/온라인저지/2023/05/0519/골뱅이 찍기 - 돌아간 ㄹ.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nA = '@'*N\nB = ' '*N\nS = A*3+B+A\nM = A+B+A+B+A\nE = A+B+A*3\nfor i in range(N):\n print(S)\nfor i in range(N*3):\n print(M)\nfor i in range(N):\n print(E)" }, { "alpha_fraction": 0.5115875005722046, "alphanum_fraction": 0.5428736805915833, "avg_line_length": 27.31147575378418, "blob_id": "3315b5923cc493c203477967692db618954b7837", "content_id": "5a8ce88572db0a31507e8223037fc782a7d7aba4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2338, "license_type": "no_license", "max_line_length": 84, "num_lines": 61, "path": 
"/알고리즘/[템플릿]/BFS/채굴.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\nfrom copy import deepcopy\n\ninput = sys.stdin.readline\n\ndef delta_move(y, x, D, mine, Q, visited):\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<N and 0<=nx<M and not visited[ny][nx]:\n if mine[ny][nx]<=D: # 공기or캘 수 있는 광물\n visited[ny][nx] = 1\n Q.append((ny, nx))\n else: # 캘 수 없는 광물\n visited[ny][nx] = 1 # 방문하였으나 캘 수 없었다\n\ndef BFS(D): # 채굴기의 성능 D\n mine = deepcopy(arr) # 광__산\n mineral = 0 # 채굴기 성능이 D일때 캘 수 있는 광물의 수 초기화\n visited = [[0]*M for _ in range(N)] # 광산 방문배열\n Q = deque()\n Q.append((0, 0)) # 공기층을 추가하였으니, 0,0부터 시작\n visited[0][0] = 1\n while Q: # 캘 수 있는 광물 체크\n y, x = Q.popleft()\n if mine[y][x]: # 광물이면\n mine[y][x] = 0 # 캐고\n mineral += 1 # 저장\n delta_move(y, x, D, mine, Q, visited)\n return mineral # 채굴기 성능이 D일때 캘 수 있는 광물 수\n\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1] # 4방향 델타이동\nN, M, K = map(int, input().rstrip().split())\nM += 2\n# 배열 테두리에 공기층 생성\narr = [[0]*M] + [[0]+list(map(int, input().rstrip().split()))+[0] for _ in range(N)]\nN += 1\n# 1<=N,M<=1000, 1<=K<=1e6\n# 채굴기의 성능을 1부터 +1하면서 전수조사 할 수 없다\n# 채굴기의 성능 이분탐색 필요\nresult = int(1e7)\nstart, end = 1, int(1e7)\nwhile start<=end:\n mid = (start+end) // 2\n mineral = BFS(mid) # 현재 채굴기의 성능으로 광물을 채굴해보자\n if mineral>=K: # K개의 광물을 충분히 채굴할 수 있음, 성능 D를 좀 더 높여도 됌\n end = mid-1\n result = min(result, mid)\n else: # 현재 성능으로는 K개의 광물을 채굴할 수 없음\n start = mid+1\nprint(result)\n\n\"\"\"\n이분탐색과 BFS의 혼합문제다\nBFS를 함수화시켜서 이분탐색안에 넣는 편이 좋다\n고난이도 문제로 오면서 \"코드 구조화\"가 중요해지는 것 같다\n탭 3번정도 단위로 끊어서 함수화 시키자\n브론즈 실버등, 간단한 문제도 함수화(구조화)연습을 많이 하자\n\"\"\"\n\n# https://www.acmicpc.net/problem/15573" }, { "alpha_fraction": 0.4810126721858978, "alphanum_fraction": 0.5316455960273743, "avg_line_length": 39, "blob_id": "722940d6e14ac6e3f2a13f9ffcd6fccede3691f9", "content_id": "47abeb54b1a36dfc85b8c3e4e2948cb9c4f189b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 79, "license_type": "no_license", "max_line_length": 40, "num_lines": 2, "path": "/알고리즘/온라인저지/2023/03/0302/Laptop Sticker.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C, D = map(int, input().split())\nprint(1 if ((A>C+1) and (B>D+1)) else 0)" }, { "alpha_fraction": 0.4702194333076477, "alphanum_fraction": 0.5023511052131653, "avg_line_length": 23.09433937072754, "blob_id": "e78c9ad5ad6bb1a454aa717ee5e9caa52bf7d28e", "content_id": "26e07503338452c10745e817f95823ba2cffce06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1276, "license_type": "no_license", "max_line_length": 52, "num_lines": 53, "path": "/알고리즘/온라인저지/2022/09/0930/빙고.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\ncross = [\n (0, 0), (1, 1), (3, 3), (4, 4),\n (2, 2), \n (4, 0), (3, 1), (1, 3), (0, 4),\n ]\n\ndef check(call):\n bingo = 0\n y, x = arr_dict[call]\n arr[y][x] = 1\n for i in range(5): # ga-ro\n is_bingo = True\n for j in range(5):\n if not arr[i][j]: is_bingo = False\n if is_bingo: bingo += 1\n\n for j in range(5): # se-ro\n is_bingo = True\n for i in range(5):\n if not arr[i][j]: is_bingo = False\n if is_bingo: bingo += 1\n\n is_bingo = True # dae-gak-seon 1\n for c in cross[:5]:\n i, j = c\n if not arr[i][j]: is_bingo = False\n if is_bingo: bingo += 1\n\n is_bingo = True # dae-gak-seon 2\n for c in cross[4:]:\n i, j = c\n if not arr[i][j]: is_bingo = False\n if is_bingo: bingo += 1\n if bingo>=3: return True\n\narr_dict = dict()\nfor i in range(5):\n nums = list(map(int, input().rstrip().split()))\n for j in range(5): arr_dict[nums[j]] = (i, j)\nresult = 0\narr = [[0]*5 for _ in range(5)]\nflag = False\nfor i in range(5):\n calls = list(map(int, input().rstrip().split()))\n if flag: continue\n for call in calls:\n if flag: continue\n result += 1\n if check(call): flag = True\nprint(result)" 
}, { "alpha_fraction": 0.5492957830429077, "alphanum_fraction": 0.6056337952613831, "avg_line_length": 22.33333396911621, "blob_id": "b00e7a89b9c487a1b4dc5afdc2571214cfec6529", "content_id": "6d0ceb89c12bd97606505acc02c6881821e78187", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 54, "num_lines": 3, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/22차시 2. 자료구조 – 리스트, 튜플 - 연습문제 24.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = [[[0]*4 for i in range(3)] for j in range(2)]\n\nprint(result)\n\n" }, { "alpha_fraction": 0.37569060921669006, "alphanum_fraction": 0.469613254070282, "avg_line_length": 17.149999618530273, "blob_id": "1a8a1ed3c30406a2fd164b29d1a178010bb59862", "content_id": "fb74fce9ed63ced4256141431fca86fbabbe1d7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 37, "num_lines": 20, "path": "/알고리즘/온라인저지/2022/09/0911/수학은 비대면강의입니다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from copy import deepcopy\n\nnum = list(map(int, input().split()))\nX, Y = deepcopy(num), deepcopy(num)\nx, y = 0, 0\nX[0] *= num[4]\nX[1] *= num[4]\nX[2] *= num[4]\nX[3] *= num[1]\nX[4] *= num[1]\nX[5] *= num[1]\nx = (X[2]-X[5])//(X[0]-X[3])\nY[0] *= num[3]\nY[1] *= num[3]\nY[2] *= num[3]\nY[3] *= num[0]\nY[4] *= num[0]\nY[5] *= num[0]\ny = (Y[2]-Y[5])//(Y[1]-Y[4])\nprint(x, y)" }, { "alpha_fraction": 0.41355931758880615, "alphanum_fraction": 0.4440678060054779, "avg_line_length": 21.769229888916016, "blob_id": "eb0488ba01812142344f4cb0b1eae7d4534a51c6", "content_id": "b19ebe4bb903a740103e7a0c1b05d9e0cea54ed3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, "license_type": "no_license", "max_line_length": 44, 
"num_lines": 13, "path": "/알고리즘/온라인저지/2022/06/0623/소가 길을 건너간 이유 1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\ncow = ['Zero'] + [0]*10 # 0:미발견, 1:왼쪽, 2:오른쪽\nresult = 0\nfor n in range(N):\n c, lr = map(int, input().split())\n lr += 1\n if cow[c]:\n if cow[c] != lr: # 길을 건넜음\n result += 1\n cow[c] = lr\n else: # not cow[c]\n cow[c] = lr\nprint(result)" }, { "alpha_fraction": 0.45856353640556335, "alphanum_fraction": 0.4806629717350006, "avg_line_length": 10.375, "blob_id": "0f0ed25d7e6977676013aaee9f0d3fd9a00b1623", "content_id": "ff83e0eecfad42e6a18167e546a6fea6be381b55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/02/0205/점수계산.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nanswers = list(map(int, input().split()))\n\np = 0\nresult = 0\n\nfor a in answers:\n if a:\n p += 1\n else:\n p = 0\n \n result += p\n\nprint(result)" }, { "alpha_fraction": 0.35537189245224, "alphanum_fraction": 0.39669421315193176, "avg_line_length": 19.33333396911621, "blob_id": "b1e8c6103b243ac63f4f78d07b777fc44302399c", "content_id": "5d348fe264c832816e5bea29e3a2e52dd8f1e42f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 121, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/11/1110/지각.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n n = int(input())\n t = 0\n while (t+1) + (t+1)**2 <= n:\n t += 1\n print(t)" }, { "alpha_fraction": 0.4692671298980713, "alphanum_fraction": 0.49527186155319214, "avg_line_length": 29.25, "blob_id": "a4bbcac589b85097d62bce8f5a324f69e4e69670", "content_id": "d7a70242157d9e159a42d317d23a7373c1bdedbc", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 1090, "license_type": "no_license", "max_line_length": 72, "num_lines": 28, "path": "/알고리즘/온라인저지/2021/07/0731/설탕과자 뽑기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "h, w = map(int, input().split()) # 뽑기 상자의 가로 세로\nn = int(input()) # 막대 개수\npoints = [] # 막대길이, 방향, 좌표 리스트\nfor i in range(n):\n l, d, x, y = map(int, input().split()) \n # 막대 개수만큼 for문 돌리면서 정보 입력\n points.append([l, d, x, y])\n # 막대 길이, 막대 놓는 방향(0:가로, 1:세로), 막대 좌표x, 막대 좌표y(x, y = 1, 1 에서 시작)\nboard = [] # 빈 보드\nfor i in range(h):# 세로줄 만들면서\n board.append([])\n for j in range(w): # 가로에 전부 0 채우기\n board[i].append(0)\n\nfor point in points:\n if point[1] == 0: # 가로일 때\n for i in range(point[0]): # 막대 길이만큼\n board[point[2]-1][point[3]-1 + i] = 1\n # 오른쪽으로 1 넣어주기\n else: # 세로일 때\n for j in range(point[0]): # 막대 길이만큼\n board[point[2]-1 + j][point[3]-1] = 1\n # 아래로 1 넣어주기\n\nfor i in range(h):\n for j in range(w): \n print(board[i][j], end=' ')\n print() # 출력문" }, { "alpha_fraction": 0.4333333373069763, "alphanum_fraction": 0.699999988079071, "avg_line_length": 30, "blob_id": "1b191339e0a6ab6307ef0fd7de6ede1897f98750", "content_id": "89911ee092dc7fbac2a5cf579b486f8357de9fee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "no_license", "max_line_length": 30, "num_lines": 1, "path": "/알고리즘/온라인저지/2022/02/0216/큰 수(BIG).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print((int(input()))%20000303)" }, { "alpha_fraction": 0.5206185579299927, "alphanum_fraction": 0.5979381203651428, "avg_line_length": 26.85714340209961, "blob_id": "08c5b151b2da07dea2b0219a1c55534d7b779543", "content_id": "8c2ed12f30a4044d2582e8d23fc5380ea938cd0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": 
"/알고리즘/온라인저지/2021/08/0816/remove pop.py.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "my_list = [1, 5, 4, 2, 5, 2, 3, 8, 9, 4, 2, 3, 2, 1]\nprint(len(my_list))\nprint(my_list)\n# my_list.pop() # 해당 인덱스의 요소를 제거\nmy_list.remove(5) # 가장 앞에 있는 해당 요소를 제거\nprint(len(my_list))\nprint(my_list)" }, { "alpha_fraction": 0.5315487384796143, "alphanum_fraction": 0.5353728532791138, "avg_line_length": 23.952381134033203, "blob_id": "562caf35ede0d15c1aea12a59ba5bdcb2ad10a3d", "content_id": "10b4c0ed5edd5e7bff322316a9354b8c329443bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 539, "license_type": "no_license", "max_line_length": 52, "num_lines": 21, "path": "/알고리즘/온라인저지/2021/07/0731/정수 1개 입력받아 분류하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a = int(input())\ndef odd_even(n): # True: 홀수, False: 짝수\n if n % 2 == True:\n return True\n else:\n return False\n\ndef plus_minus(n): # True: 양수, False: 짝수\n if n >= 0:\n return True\n else:\n return False\n\nif odd_even(a) == False and plus_minus(a) == False:\n print(\"A\") # A: - and even\nelif plus_minus(a) == False and odd_even(a) == True:\n print(\"B\") # B: - and odd\nelif plus_minus(a) == True and odd_even(a) == False:\n print(\"C\") # C: + and even\nelse:\n print(\"D\") # D: + and odd" }, { "alpha_fraction": 0.4942166209220886, "alphanum_fraction": 0.5373291373252869, "avg_line_length": 23.41025733947754, "blob_id": "a644123420792555f4471a34305c9b9e83aaf94b", "content_id": "a352b4e9c6d742bfc81c6dbe1defc867c9e76ccd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1293, "license_type": "no_license", "max_line_length": 63, "num_lines": 39, "path": "/알고리즘/온라인저지/2021/10/1015/숨바꼭질.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque # deque의 메서드인 popleft()를 사용하기 위함\n\nN, M = map(int, input().split())\n\nresult 
= 0\n\nq = deque()\nq.append((N, result)) # N부터 출발합니다\n\ncheck = set() # +1 -1 *2 를 진행하면서 중복되는 숫자가 발견될 경우 \n# q에 추가하는 것을 막기 위해 중복검사를 해주기 위한 set()\n\nwhile q:\n if not N and not M or N == M: # 수빈이가 동생과 같은 위치에 있을 때\n break\n \n tmp = q.popleft() # BFS\n \n a = tmp[0] + 1\n b = tmp[0] - 1\n c = tmp[0] * 2\n cnt = tmp[1] + 1 # 수빈이가 이동하였고 1초가 지났습니다\n \n if M in (a, b, c): # 이동중 수빈이가 동생을 발견하였을 때\n result = cnt # 그 때의 시간을 저장\n break\n\n # 동생은 0부터 100000 사이에 있습니다, 범위를 벗어나면 동생을 찾을 수 없습니다\n if a not in check and 0<=a<=100000: # 간 적이 없고, 범위를 벗어나지 않으면\n q.append((a, cnt)) # 다음 가볼 곳에 추가\n check.add(a) # 가본 곳에 추가\n if b not in check and 0<=b<=100000:\n q.append((b, cnt))\n check.add(b) \n if c not in check and 0<=c<=100000:\n q.append((c, cnt))\n check.add(c)\n\nprint(result)" }, { "alpha_fraction": 0.4542483687400818, "alphanum_fraction": 0.5065359473228455, "avg_line_length": 26.909090042114258, "blob_id": "3ba45e575dfb3f2645c083b4403e3d654bcbacd8", "content_id": "3fcb400911d47399b69c278136fa492d8f33bf5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 306, "license_type": "no_license", "max_line_length": 42, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/11/1102/Doubles.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n nums = list(map(int, input().split()))\n if nums[0] == -1: break\n result = 0\n arr = [0]*101\n for num in nums[:len(nums)-1]:\n if not arr[num]: arr[num] = 1\n for num in nums[:len(nums)-1]:\n if num*2 <= 100 and arr[num*2]: \n result += 1\n print(result)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5246913433074951, "avg_line_length": 15.100000381469727, "blob_id": "2b538b140f825cf32719241484a43d30e31a3e90", "content_id": "5be8026accd9a43e3098fe627e359643ba3096f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 162, "license_type": "no_license", "max_line_length": 36, "num_lines": 10, "path": 
"/알고리즘/온라인저지/2022/02/0205/지능형 기차 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "maxx = 0\nin_train = 0\n\nfor _ in range(10):\n l, b = map(int, input().split())\n in_train += b-l\n if in_train > maxx:\n maxx = in_train\n\nprint(maxx)\n\n" }, { "alpha_fraction": 0.4041095972061157, "alphanum_fraction": 0.48630136251449585, "avg_line_length": 20, "blob_id": "50d4ac02cc332704c25d1aa06a073b20f27b5c2b", "content_id": "bdb820d257d11dac78828ee1181d101da13561e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/05/0518/완전 제곱수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = 0\nfor b in range(1, 501):\n for a in range(1, 501):\n if a**2 == b**2 + N:\n result += 1\nprint(result)" }, { "alpha_fraction": 0.5081967115402222, "alphanum_fraction": 0.5081967115402222, "avg_line_length": 19.66666603088379, "blob_id": "f34121bf32314ea2c691d946ab37b107acece598", "content_id": "6e7a0bec8a3e8d6e26254a0e33eae04501c1a408", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/알고리즘/온라인저지/2023/04/0406/Copier.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n N = input()\n print(N, N)" }, { "alpha_fraction": 0.33282673358917236, "alphanum_fraction": 0.36018237471580505, "avg_line_length": 18.969696044921875, "blob_id": "b0dc75955d7bd099e235e7653fa42fb53eaec471", "content_id": "1ec072252516183f2eb0ed0e7eb076c1ee057952", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 658, "license_type": "no_license", "max_line_length": 59, "num_lines": 33, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/8. 
파이썬 SW문제해결 기본 Tree/8차시 8일차 - 이진 힙.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "TC = int(input())\n\nfor tc in range(1, TC+1):\n N = int(input())\n nodes = list(map(int, input().split()))\n arr = [0]\n\n for n in nodes:\n arr.append(n)\n\n for i in range(1, N//2+1):\n try:\n if arr[i] > arr[i*2]:\n arr[i], arr[i*2] = arr[i*2], arr[i] \n\n if arr[i] > arr[i*2+1]:\n arr[i], arr[i*2+1] = arr[i*2+1], arr[i]\n \n except:\n pass\n\n n = len(arr)\n result = 0\n \n while True:\n N //= 2\n\n result += arr[N]\n\n if N == 1:\n break\n \n print('#{} {}'.format(tc, result))" }, { "alpha_fraction": 0.5388127565383911, "alphanum_fraction": 0.5616438388824463, "avg_line_length": 14.714285850524902, "blob_id": "5f27c780f1c7e72917adadedae4b0b3206130e92", "content_id": "f0ad61fb19f7b422e0b3af84c118fc1257e092e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 34, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/09/0927/3의 배수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nX = input().rstrip()\nresult = 0\nwhile len(X)>1:\n tmp = 0\n for x in X:\n tmp += int(x)\n X = str(tmp)\n result += 1\nprint(result)\nprint('NO' if int(X)%3 else 'YES')" }, { "alpha_fraction": 0.42032331228256226, "alphanum_fraction": 0.46766743063926697, "avg_line_length": 31.11111068725586, "blob_id": "58e661d2ab7df3998a889c99923c8ea46357e35b", "content_id": "7c7c9f56b5c9486426b97dacf694bfed51c26506", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 866, "license_type": "no_license", "max_line_length": 64, "num_lines": 27, "path": "/알고리즘/온라인저지/2022/09/0913/정사각형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n dots = [tuple(map(int, input().split())) for __ in range(4)]\n x1, y1 = dots[0][0], 
dots[0][1]\n distance_pows = []\n for dot in dots[1:]:\n x2, y2 = dot[0], dot[1]\n distance_pow = abs(x1-x2)**2 + abs(y1-y2)**2\n distance_pows.append(distance_pow)\n flag = 1\n distance_pows.sort()\n if distance_pows[0] != distance_pows[1]:\n print(0)\n else:\n for self in dots:\n if not flag: break\n x1, y1 = self[0], self[1]\n tmp = []\n for other in dots:\n if self != other:\n x2, y2 = other[0], other[1]\n pow = abs(x1-x2)**2 + abs(y1-y2)**2\n tmp.append(pow)\n tmp.sort()\n if tmp != distance_pows: flag = 0\n print(flag)\n\n# https://www.acmicpc.net/problem/1485" }, { "alpha_fraction": 0.4154929518699646, "alphanum_fraction": 0.44366195797920227, "avg_line_length": 19.314285278320312, "blob_id": "27390bc83979c9a27536a33599189f70a718ad67", "content_id": "d99a00e3f01bb74f4ea12d47da0b78786d1929a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 710, "license_type": "no_license", "max_line_length": 49, "num_lines": 35, "path": "/알고리즘/온라인저지/2021/08/0830/DFS와 BFS.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ndef dfs(v):\n visited[v] = 1\n print(v, end=' ')\n for w in range(1, N+1):\n if G[v][w] == 1 and not visited[w]:\n dfs(w)\n\ndef bfs(v, n):\n q = []\n visited = [0] * (N+1)\n q.append(v)\n visited[v] = 1\n while q:\n t = q.pop(0)\n print(t, end=' ')\n for i in range(1, N+1):\n if G[t][i] == 1 and not visited[i]:\n q.append(i)\n visited[i] = visited[t] + 1\n\nN, M, V = map(int, sys.stdin.readline().split())\n\nG = [[0] * (N+1) for _ in range(N+1)]\nfor _ in range(M):\n u, v = map(int, sys.stdin.readline().split())\n G[u][v] = G[v][u] = 1\n\nvisited = [0] * (N+1)\ndfs(V)\nprint()\n\nvisited = [0] * (N+1)\nbfs(V, N)" }, { "alpha_fraction": 0.4791666567325592, "alphanum_fraction": 0.4962121248245239, "avg_line_length": 20.571428298950195, "blob_id": "95990ebe98345f6d0b1f318b722434e733197b1b", "content_id": "22c16e1d9a3285f162073566de95c0e7ede0a7b5", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1410, "license_type": "no_license", "max_line_length": 46, "num_lines": 49, "path": "/알고리즘/온라인저지/2022/08/0817/탑.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input())\nT = list(map(int, input().split())) # tower\nS, result = [], [0] * N # stack\nfor i in range(N):\n t = T[i]\n while S and T[S[-1]] < t: S.pop()\n if S: result[i] = S[-1] + 1\n S.append(i)\nprint(*result)\n\n\"\"\"\n스택에 들어있는, 현재 탑보다 낮은 탑의 인덱스를 전부 제거한다\n스택이 남아있을 때, stack[-1]의 인덱스는\n앞에서 등장했던 탑들 중 현재 탑 다음으로 높은 탑이 되고\n레이저 신호는 여기서 수신될 것이다\n현재 탑의 신호 수신탑에, stack[-1]+1번째 탑을 저장한다\n그리고 해당 탑의 인덱스를 스택에 추가한다\n\n늘 코드의 시작부터 적는 습관이 있었는데\n위의 코드는 진행 중간만 깔끔하게 적혀있다\n위 코드처럼 짤 수 있도록 더 열심히 해야겠다... ㅜㅜ\n\"\"\"\n\n# 쾅\n# import sys\n\n# input = sys.stdin.readline\n\n# N = int(input())\n# T = list(map(int, input().split())) # tower\n# S = [] # stack\n# result = [0] * N\n# for i in range(N-1, -1, -1): # 진행은 우->좌 가 맞다\n# t = T[i]\n# if not S: \n# S.append((t, i))\n# else:\n# S.append((t, i))\n# for j in range(len(S)-1, -1, -1):\n# if j < len(S)-1:\n# if t > S[j][0]:\n# result[S[j][1]] = i+1\n# S.pop()\n# else: pass\n# print(*result)" }, { "alpha_fraction": 0.4103773534297943, "alphanum_fraction": 0.4245283007621765, "avg_line_length": 20.299999237060547, "blob_id": "0f62e005527198dc37b8127eecec4f274dc7e27d", "content_id": "4b8951832eba7d583131baab381e4dea27cf8452", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 32, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/04/0407/Can you add this.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor t in range(T):\n result = 0\n tmp = input().split()\n for i in tmp:\n if i[0] == '-':\n result -= int(i[1:])\n else:\n result += int(i)\n print(result)" }, { 
"alpha_fraction": 0.5512048006057739, "alphanum_fraction": 0.5783132314682007, "avg_line_length": 22.785715103149414, "blob_id": "ba05fb838517cf92c1090ebe4ae7d0fc287cf2fd", "content_id": "208ca4cf273e67676fdc3da4ec23dd114b2c95f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 63, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/10/1002/경로 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\nINF = int(1e9)\n\nN = int(input().rstrip())\narr = [list(map(int, input().rstrip().split())) for _ in [0]*N]\nfor k in range(N):\n for a in range(N):\n for b in range(N):\n if arr[a][k] and arr[k][b]: arr[a][b] = 1\nfor a in arr: print(*a)\n\n# https://www.acmicpc.net/problem/11403" }, { "alpha_fraction": 0.5639810562133789, "alphanum_fraction": 0.6113743782043457, "avg_line_length": 22.55555534362793, "blob_id": "7482ad47eb32b41d957f396f56b49f4830765523", "content_id": "d290b15b864464a7011c6c26727e06ad3a5a249c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 38, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/07/0731/수 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr1 = set(map(int, input().split()))\nM = int(input())\narr2 = list(map(int, input().split()))\nfor a in arr2:\n if a in arr1: print(1)\n else: print(0)\n\n# https://www.acmicpc.net/problem/1920" }, { "alpha_fraction": 0.4373672902584076, "alphanum_fraction": 0.46284499764442444, "avg_line_length": 22.600000381469727, "blob_id": "66f54d43505513792fb478482d5f158c42064dd5", "content_id": "022ba88eddb71522afe5587380cc80cd3f33a9de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 54, 
"num_lines": 20, "path": "/알고리즘/온라인저지/2022/09/0927/별 찍기 - 23.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nmid = 1+2*(N-2)\nresult = ['*'*N+' '*mid+'*'*N]\nstart = 0\nsize = N-2\nfor i in range(2, N+1):\n start, mid = start+1, mid-2\n tmp = ' '*start\n if mid>0: \n tmp += '*' + ' '*size + '*'\n tmp += ' '*mid\n tmp += '*' + ' '*size + '*'\n else: tmp += '*' + ' '*size + '*' + ' '*size + '*'\n result.append(tmp)\nfor r in result[:len(result)-1]: print(r)\nfor r in result[::-1]: print(r)" }, { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.6571428775787354, "avg_line_length": 35, "blob_id": "47ddbcbb3a1f19f158da307ddb8a569cb7c7bc73", "content_id": "a52722a4efcfeacd29591cee33b7287245153837", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35, "license_type": "no_license", "max_line_length": 35, "num_lines": 1, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/12차시 2. 
자료구조 – 리스트, 튜플 - 연습문제 13.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print(sum(list(map(int, input()))))" }, { "alpha_fraction": 0.5011037588119507, "alphanum_fraction": 0.5253863334655762, "avg_line_length": 18.7391300201416, "blob_id": "dfe84252545d30e4be97bce1a5b4594236eae14e", "content_id": "959135c92441edf416979513b33a792bae6656d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 493, "license_type": "no_license", "max_line_length": 51, "num_lines": 23, "path": "/알고리즘/온라인저지/2021/08/0829/수열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nN = int(sys.stdin.readline())\nnums = list(map(int, sys.stdin.readline().split()))\ncnt = 1\nresult = 1\n# 증가할 때 따로, 감소할 때 따로, 두번 돌려주는 코드\nfor i in range(len(nums)-1):\n if nums[i] >= nums[i+1]:\n cnt += 1\n else:\n cnt = 1\n if result < cnt:\n result = cnt\n\ncnt = 1\nfor i in range(len(nums)-1):\n if nums[i] <= nums[i+1]:\n cnt += 1\n else:\n cnt = 1\n if result < cnt:\n result = cnt\nprint(result)" }, { "alpha_fraction": 0.6521739363670349, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 18.85714340209961, "blob_id": "8fa4bce5a1f44e31615864402136bcbf041db539", "content_id": "76bd35bc2e590b97b4b1a7bcbc24b135358f0e6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/알고리즘/온라인저지/2021/08/0822/수 정렬하기 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nnumbers = []\nfor tc in range(T):\n numbers.append(int(input()))\nnumbers.sort()\nfor number in numbers:\n print(number)" }, { "alpha_fraction": 0.5740740895271301, "alphanum_fraction": 0.5820105671882629, "avg_line_length": 18.947368621826172, "blob_id": "7191e446c4a4164453e9763a9835c01492b68a66", "content_id": "77be887efdafc5c47dee91640b63b85da83f70ce", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 35, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/03/0316/카드 뽑기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from pprint import pprint\n\nN, M, K = map(int, input().split())\narr = [[] for _ in range(N)]\nfor m in range(M):\n arr[m].append(True)\nfor m in range(M, N):\n arr[m].append(False)\n# print(arr)\nfor k in range(K):\n arr[k].append(True)\nfor k in range(K, N):\n arr[k].append(False)\n# pprint(arr)\nresult = N\nfor a in arr:\n if a[0] != a[1]:\n result -= 1\nprint(result)" }, { "alpha_fraction": 0.4307228922843933, "alphanum_fraction": 0.46084338426589966, "avg_line_length": 14.7619047164917, "blob_id": "a7c24bdfe850d476027dc9598a0466c568c88f8d", "content_id": "2da38b0e8e2e448feb24250b87110f15a12810e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/8. 
파이썬 SW문제해결 기본 Tree/7차시 8일차 - 이진탐색.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def makeTree(n):\n global count\n\n if n <= N:\n makeTree(n*2)\n\n tree[n] = count\n count += 1\n\n makeTree(n*2 + 1)\n\nTC = int(input())\nfor tc in range(TC):\n N = int(input())\n \n tree = [0 for _ in range(N+1)]\n count = 1\n\n makeTree(1)\n\n print('#{} {} {}'.format(tc, tree[1], tree[N//2]))\n " }, { "alpha_fraction": 0.4254473149776459, "alphanum_fraction": 0.46222662925720215, "avg_line_length": 29.515151977539062, "blob_id": "e6e8f2e5753be2a2861388f3bd3c0cb86e68a24c", "content_id": "a4305429cc023cd0371e249bf46a018c2a09e98b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1038, "license_type": "no_license", "max_line_length": 77, "num_lines": 33, "path": "/알고리즘/[템플릿]/BFS/토마토 3차원.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\n# 상 하 전 후 좌 우 델타이동\ndz, dy, dx = [-1, 1, 0, 0, 0, 0], [0, 0, -1, 1, 0, 0], [0, 0, 0, 0, -1, 1]\nM, N, H = map(int, input().split())\narr = [[list(map(int, input().split())) for _ in range(N)] for _ in range(H)]\nQ = deque()\nfor k in range(H):\n for i in range(N):\n for j in range(M):\n if arr[k][i][j] == 1:\n Q.append((k, i, j, 0)) # z, y, x, day\n arr[k][i][j] = 0\nresult = 0\nwhile Q:\n z, y, x, day = Q.popleft()\n if arr[z][y][x] == 1: continue\n arr[z][y][x] = 1\n result = max(result, day)\n for i in range(6): # 상 하 전 후 좌 우\n nz, ny, nx = z+dz[i], y+dy[i], x+dx[i]\n if 0<=nz<H and 0<=ny<N and 0<=nx<M and arr[nz][ny][nx] == 0:\n Q.append((nz, ny, nx, day+1))\nfor k in range(H):\n if result == -1: break\n for i in range(N):\n for j in range(M):\n if arr[k][i][j] == 0:\n result = -1\n break\nprint(result)\n\n# https://www.acmicpc.net/problem/7569" }, { "alpha_fraction": 0.48466256260871887, "alphanum_fraction": 0.5030674934387207, "avg_line_length": 16.210525512695312, "blob_id": 
"3df82339556a451a01526cddb98ceb5fdd713aeb", "content_id": "7fad2c804bae41a95c125f75fe082417fd5d2fac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 36, "num_lines": 19, "path": "/알고리즘/온라인저지/2023/02/0226/팰린드롬 만들기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = input()\nL, S_rev = len(S), S[::-1]\nresult = 0\n\nfor i in range(L, 0, -1):\n if S_rev[:i] == S_rev[:i][::-1]:\n result = i+(L-i)*2\n break\n\nprint(result)\n\n\"\"\"\nex) abczzzz -> abc + zzzz + cba\n\nL = 문자열 전체의 길이(abczzzz)\ni = 뒤에서 가장 긴 팰린드롬의 길이(zzzz)\nL-i = 문자열 전체에서 가장 긴 팰린드롬을 뺀 길이(abc)\nresult = (L-i) + i + (L-i)\n\"\"\"" }, { "alpha_fraction": 0.4526315927505493, "alphanum_fraction": 0.5052631497383118, "avg_line_length": 12.714285850524902, "blob_id": "a42f0f25848b5ffe281fd4d60629c87b6bf44486", "content_id": "097e778b6c597ca330ab185f22eda60b7b0dfcc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 22, "num_lines": 7, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/11차시 2. 
자료구조 – 리스트, 튜플 - 연습문제 12.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "s = set()\n\nfor i in range(1, 21):\n if i%3 or i%5:\n s.add(i*i)\n\nprint(sorted(list(s)))" }, { "alpha_fraction": 0.420634925365448, "alphanum_fraction": 0.43386244773864746, "avg_line_length": 26.071428298950195, "blob_id": "3907629e6c8256a07e4480e33b85f861816a1b7a", "content_id": "760aa93cbb579555c38c9f7a66dff07f11c161ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/07/0727/김인천씨의 식료품가게 (Large).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor t in range(1, T+1):\n N = int(input())\n A = list(map(int, input().split())) # all prices\n result = []\n for i in range(N):\n tmp = A.pop(0)\n O = (tmp//3) * 4 # original\n for j in range(len(A)):\n if A[j] == O:\n A.pop(j)\n break\n result.append(tmp)\n print(f'Case #{t}:', *result)" }, { "alpha_fraction": 0.3589743673801422, "alphanum_fraction": 0.43589743971824646, "avg_line_length": 8.375, "blob_id": "b3a1762d1b90c8f9c9ca34a680dc6bbf5720f75c", "content_id": "ceaa32df0a36eeac6f6e5900f31e993ba8df0f7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 22, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/01/0131/알파벳 개수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "dp = [0] * 26\n\nS = input()\n\nfor s in S:\n dp[ord(s)-97] += 1\n\nprint(*dp) " }, { "alpha_fraction": 0.550939679145813, "alphanum_fraction": 0.5934718251228333, "avg_line_length": 21, "blob_id": "e50bc5527d0bf28dce2f3a59022703f7ffc05d94", "content_id": "b1b60999feb3aae2a58c5d0586569a69918233ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1311, "license_type": 
"no_license", "max_line_length": 52, "num_lines": 46, "path": "/알고리즘/[템플릿]/분할정복/피보나치 수 6.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\nmod = int(1e9)+7\n\ndef multiply(matrix1, matrix2): # 행렬 곱셈\n output = []\n for i in range(2):\n output.append([])\n for j in range(2):\n tmp = 0\n for k in range(2):\n tmp += matrix1[i][k] * matrix2[k][j]\n output[i].append(tmp%mod)\n return output\n\ndef power(matrix, n): # 거듭제곱 분할정복\n if n == 1: return matrix\n tmp = power(matrix, n//2)\n if n%2 == 0:\n return multiply(tmp, tmp)\n else:\n return multiply(multiply(tmp, tmp), matrix)\n\nN = int(input().rstrip())\nmatrix = [[1, 1], [1, 0]] # 피보나치 행렬 거듭제곱 일반항\nprint(power(matrix, N)[0][1])\n\n\"\"\"\n피보나치 일반항을, 행렬의 거듭제곱으로 변환할 줄 알아야 하고\n행렬의 거듭제곱을 할 줄 알아야 하고\n거듭제곱을 분할정복으로 할 줄 알아야 풀 수 있는 문제이다\n2022-10-08기준, 가장 빠른 속도로 피보나치를 구할 수 있는 코드이다\n\n<참고한 링크>\n참고한 코드 원문\nhttps://ca.ramel.be/50\n\n백준 피보나치 블로그글\nhttps://www.acmicpc.net/blog/view/28\n\n피보나치가 [1, 1],[1, 0]의 거듭제곱이 되는 과정\nhttps://www.youtube.com/watch?v=uX2IsIykLJc\n\"\"\"\n\n# https://www.acmicpc.net/problem/11444" }, { "alpha_fraction": 0.5959885120391846, "alphanum_fraction": 0.6060171723365784, "avg_line_length": 18.97142791748047, "blob_id": "c37d208ed72e37ae26204315abe9bf4d1a3f3c32", "content_id": "0b8c44931d1a0bc860f62cdefb003c2d6af29a2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "no_license", "max_line_length": 54, "num_lines": 35, "path": "/알고리즘/온라인저지/2022/10/1004/N과 M (5).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef solve(depth, N, M):\n if depth == M:\n print(*result); return\n for i in range(N):\n if not visited[i]:\n visited[i] = True; result.append(arr[i])\n solve(depth+1, N, M)\n result.pop(); visited[i] = False\n\nN, M = map(int, input().rstrip().split())\narr = sorted(list(map(int, 
input().rstrip().split())))\nvisited = [False] * N\nresult = []\nsolve(0, N, M)\n\n\"\"\"\nBFS 돌 때 \n방문하지 않았으면\n방문처리, Q에 추가 해주듯이\nDFS도 똑같이\n함수 자체 메모리를 Q처럼 활용하면서\n방문하지 않았으면\n방문처리, 다음 탐색\n나오면서 방문 풀어주고, 탐색점 취소해주고\n그러니까\nBFS DFS 차이는\n다른건 다 같고, 방문지점을 당장갈거냐(DFS), 나중에 갈거냐(BFS) 인 것 같다\n진짜 근데 DFS는 구현이 왤케 감이 안잡히냐 어렵네\n\"\"\"\n\n# https://www.acmicpc.net/problem/15654" }, { "alpha_fraction": 0.5014662742614746, "alphanum_fraction": 0.5190615653991699, "avg_line_length": 19.117647171020508, "blob_id": "180fbbcc7e64da3a44ba8f43929d24b31d2eaf5c", "content_id": "9f6247bd5668bb3612ae42497f942fd7ef6fa622", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 33, "num_lines": 17, "path": "/알고리즘/온라인저지/2021/08/0825/요세푸스 문제.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, K = map(int, input().split())\nq = list(range(1, N+1))\npointer = K-1\nresult = []\nwhile q:\n result.append(q.pop(pointer))\n pointer += K - 1\n if not len(q):\n break\n else:\n pointer %= len(q)\n \nprint('<', end='')\nprint(result[0], end='')\nfor i in range(1, len(result)):\n print(',', result[i], end='')\nprint('>')" }, { "alpha_fraction": 0.4923076927661896, "alphanum_fraction": 0.5153846144676208, "avg_line_length": 20.83333396911621, "blob_id": "55407429dd335b586d619e417195bd491252b097", "content_id": "1c1f91116f638f71cfc0e068b5c1df45ef3d5675", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/05/0510/POT.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nfor i in range(int(input())):\n N = input()\n N, P = int(N[:len(N)-1]), int(N[-1])\n result += N**P\nprint(result)" }, { "alpha_fraction": 0.5361445546150208, "alphanum_fraction": 0.5481927990913391, "avg_line_length": 18.58823585510254, 
"blob_id": "fae8757ca2261ac20a2ef53e8af0bbab84ee530a", "content_id": "cd4fb7b3c4b22ecfb736d011742825a2fd22e444", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 32, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/06/0618/뚊.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nchar = []\nis_same = []\nfor n in range(N):\n char.append(input())\nfor n in range(N):\n is_same.append(input())\nresult = 'Not Eyfa'\ntmp = []\nfor i in range(N):\n tmp2 = ''\n for c in char[i]:\n tmp2 = tmp2 + c + c\n tmp.append(tmp2)\nif tmp == is_same:\n result = 'Eyfa'\nprint(result)" }, { "alpha_fraction": 0.5392376780509949, "alphanum_fraction": 0.5526905655860901, "avg_line_length": 27.80645179748535, "blob_id": "28921cb3d9f496e8d7c132824af343c27f7efffe", "content_id": "638d288a2be34894e7117f60705d9eee0a6fd805", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1094, "license_type": "no_license", "max_line_length": 62, "num_lines": 31, "path": "/알고리즘/온라인저지/2022/08/0828/특정 거리의 도시 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys, heapq\n\ninput = sys.stdin.readline\nINF = int(1e9) # 가상의 최대 거리\n\ndef dijkstra(start):\n distance[start] = 0\n heapq.heappush(Q, (0, start))\n while Q:\n dist, now = heapq.heappop(Q)\n for next, weight in data[now]: # weight = 1, 출력용으로만 사용\n cost = dist + 1\n if cost < distance[next]:\n distance[next] = cost\n heapq.heappush(Q, (cost, next))\n\nV, E, K, X = map(int, input().split())\ndata = [[] for _ in range(V+1)]\nfor _ in range(E):\n A, B = map(int, input().split())\n data[A].append((B, 1)) # 가중치 = 1\ndistance = [INF] * (V+1) # 가상의 최대 거리 배열\nQ = []\ndijkstra(X) # X부터 시작하여 다른 도시까지 이동하는 최소 거리 배열 값 구하기\nflag = False # K거리인 도시가 있는지 체크하는 플래그\nfor i in range(1, len(distance)):\n if distance[i] == K:\n print(i) 
# 도시 번호 출력\n flag = True # 최소 거리가 K에 해당하는 도시가 있음\nif not flag: # 해당하는 도시가 없을 경우\n print(-1)" }, { "alpha_fraction": 0.394812673330307, "alphanum_fraction": 0.5100864768028259, "avg_line_length": 17.3157901763916, "blob_id": "a62951e5e3dd69903716d6251c5e0b6b9e2d30bc", "content_id": "8a549b34b13a94dd4da11f29403edec5d02b3cee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "no_license", "max_line_length": 39, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/07/0716/트럭 주차.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "fees = list(map(int, input().split()))\nresult = 0\nparking = [0] * 101\nfor i in range(3):\n IN, OUT = map(int, input().split())\n for j in range(IN, OUT):\n parking[j] += 1\nfor p in parking:\n result += p*fees[p-1]\nprint(result)\n\n\"\"\"\n1 2 3 4 5 \n 3 4 \n 2 3 4 5 6 7 \n5 6 3 3 6 5 5\n17 20 36\n\"\"\"\n# print(sum([5, 6, 6, 6, 5, 5]))" }, { "alpha_fraction": 0.5721153616905212, "alphanum_fraction": 0.5849359035491943, "avg_line_length": 33.66666793823242, "blob_id": "11d64cb999ecb91b9b6eff90bb52d0a2ded8be87", "content_id": "631c8b41ef131f289cc8f9a6fed02af85dd14ef9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1066, "license_type": "no_license", "max_line_length": 68, "num_lines": 18, "path": "/알고리즘/온라인저지/2021/09/0928/소트인사이드.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = input()\na = [] # N을 쪼개어 담을 리스트\nfor n in N: # 입력받은 숫자는 아직 str문자형\n a.append(int(n)) # int로 형변환 하여 담기\n\na_sorted = [] # a에 담긴 숫자들을 정렬하여 담을 리스트\na_count = [0] * (max(a)+1) # N을 쪼개어 담은 a라는 리스트에 각 숫자들이 몇개씩 들어있는지 셀 것\n# max(a)은, N의 각 자리수중 가장 큰 수를 의미하며\n# +1은 각 숫자들의 개수를 세어 담을 때 리스트 맨 앞에 0을 의미함 -> 숫자와 인덱스가 일치함\n\nfor i in range(len(a)):\n a_count[a[i]] += 1 # 숫자와 인덱스가 일치, 해당 숫자의 개수를 세어줌\n\nfor i in range(len(a_count)-1, -1, -1): # 담겨있는 숫자의 개수들을 반복하면서\n # 문제에서는 역정렬을 요구하였음\n if a_count[i]: # 숫자의 
개수가 하나라도 있으면\n for j in range(a_count[i]): # 카운트 한 개수만큼의 횟수를 반복하며\n print(i, end='') # 공백없이 출력하면 문제에서 요구하는 정답이 됨\n" }, { "alpha_fraction": 0.5655737519264221, "alphanum_fraction": 0.5860655903816223, "avg_line_length": 19.41666603088379, "blob_id": "d194b07ecd3007f63704b90354fe5b4817ba40f5", "content_id": "03d6f2af0853cf00cd3ba61c3dc8363f965deb34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/09/0924/크림 파스타.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\ndp = list(map(int, input().rstrip().split()))\nmin_num = dp[0]\ndp[0] = 0\nfor i in range(1, N):\n min_num = min(min_num, dp[i])\n dp[i] = max(dp[i]-min_num, dp[i-1])\nprint(*dp)" }, { "alpha_fraction": 0.6578947305679321, "alphanum_fraction": 0.6578947305679321, "avg_line_length": 18.16666603088379, "blob_id": "237a21e1d5137e1928d5aebcffd15e20ec2c727b", "content_id": "ee50502bb8e14c386819acdf91c10bf7e1b2bbff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/10/1015/입실 관리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in range(int(input().rstrip())):\n print(input().rstrip().lower())" }, { "alpha_fraction": 0.3955555558204651, "alphanum_fraction": 0.4399999976158142, "avg_line_length": 21.600000381469727, "blob_id": "ae3fb0f11489d52fad9a3d383b41f3c6471bb70d", "content_id": "2324375107a3179f164e90bbbccfc131d23b76be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 31, "num_lines": 10, "path": 
"/알고리즘/온라인저지/2022/09/0901/2진수 8진수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = input()\ntmp = 3-len(N)%3\nif tmp != 3: N = '0'*tmp + N\nfor i in range(0, len(N), 3):\n S = N[i:i+3] # section\n S = S[::-1]\n tmp = 0\n for j in range(len(S)):\n tmp += int(S[j])*(2**j)\n print(tmp, end='')" }, { "alpha_fraction": 0.5894736647605896, "alphanum_fraction": 0.6105263233184814, "avg_line_length": 18.100000381469727, "blob_id": "e85dd190d21a23b6516846f82c645c23c58accbd", "content_id": "c27ca6140ba0893cae4a8d7e3d10dbd4b27593af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/10/1030/거꾸로 구구단.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, K = map(int, input().rstrip().split())\nresult = 0\nfor k in range(1, K+1):\n tmp = int(str(N*k)[::-1])\n result = max(result, tmp)\nprint(result)" }, { "alpha_fraction": 0.4361445903778076, "alphanum_fraction": 0.4746987819671631, "avg_line_length": 26.66666603088379, "blob_id": "2e593644f9e296affd7d6f9b5cb6ba36cd372e70", "content_id": "88eb88d1795c2abf46d5640a9c58f0d17b422ff9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "no_license", "max_line_length": 63, "num_lines": 15, "path": "/알고리즘/온라인저지/2021/08/0807/직각삼각형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n입력받는 변 3개 A, B, C\n0 0 0 입력받으면 실행 종료\n피타고라스 정리 <if a**2 + b**2 == c**2: 직각삼각형>\n\"\"\"\n\nwhile True: # 계속 반복하는 while True\n A, B, C = map(int, input().split()) # 각 변을 입력받고\n if A + B + C == 0: # 마지막 줄인 0 0 0 이면\n break # 종료\n if A**2 + B**2 + C**2 == ((max(A, B, C))**2) * 2: # 직각삼각형이면\n # 가장 큰 수만 찾아서 구하는 공식을 위와같이 구성하였음\n print('right') # 출력\n else: # 직각삼각형 아니면 \n print('wrong') # 출력\n" }, { 
"alpha_fraction": 0.38725489377975464, "alphanum_fraction": 0.41911765933036804, "avg_line_length": 17.590909957885742, "blob_id": "2ac4bce7eab5f5974fe7d05030b5bece16877703", "content_id": "26453d6a6ea5a31d3244c696065c89deb8dfffff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 52, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/02/0201/GCD 곱.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "mod = 1000000007\n\nN, M = map(int, input().split())\n\nmaxn = max(N, M)\n\ndp = [False, False] + [True] * maxn\n\nresult = 1\n\nfor i in range(2, maxn):\n if dp[i]:\n j = i\n while j <= maxn:\n cnt = 0\n for k in range(i, maxn, i):\n dp[k] = False\n cnt += (N//j) * (M//j)\n result = (result * ((i**cnt)%mod)) % mod\n j *= i\n\nprint(result)" }, { "alpha_fraction": 0.4910941421985626, "alphanum_fraction": 0.49618321657180786, "avg_line_length": 25.266666412353516, "blob_id": "ca62971baa6beb144b93dbd2dd644cc2495aa3d2", "content_id": "3487a1cc24854cace4cafe41f9959ecef86f1563", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 407, "license_type": "no_license", "max_line_length": 55, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/06/0616/유니크.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nplayer = []\nfor n in range(N):\n player.append(list(map(int, input().split())))\nresult = [0] * N\nfor i in range(N): # 플레이어\n for j in range(3): # 점수들\n switch = True\n for k in range(N):\n if player[i][j] == player[k][j] and i != k:\n switch = False\n if switch:\n result[i] += player[i][j]\nfor r in result:\n print(r)" }, { "alpha_fraction": 0.3924914598464966, "alphanum_fraction": 0.4778156876564026, "avg_line_length": 12.363636016845703, "blob_id": "c2de15d44eb840b9dc1e11f8f6cad62397785eab", "content_id": "acc6b02a53248985f49a1535e8108ba04f53b78a", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 35, "num_lines": 22, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/34차시 3. 자료구조 – 셋, 딕셔너리 - 연습문제 9.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "beer = {\n '하이트': 2000, \n '카스': 2100, \n '칭따오': 2500, \n '하이네켄': 4000, \n '버드와이저': 500,\n}\n\n# print(beer, ' # 인상 전')\n\n# for b in beer:\n# beer.update({b:beer[b]*1.05})\n\n# print(beer, ' # 인상 후')\n\n\nprint(beer)\n\nfor b in beer:\n beer.update({b:beer[b]*1.05})\n\nprint(beer)" }, { "alpha_fraction": 0.4861878454685211, "alphanum_fraction": 0.4917127192020416, "avg_line_length": 13, "blob_id": "458cbdac97a6ad5f39f4b38aebb6914c0cc09a2f", "content_id": "7490b58fcc96d2460ff20489f4eec117df444889", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 24, "num_lines": 13, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/41차시 4. 
문자열 - 연습문제 5.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "tmp = input().split()\n\nresult = list(set(tmp))\n\nresult.sort()\n\nfor r in result:\n if r == result[-1]:\n print(r, end='')\n print()\n break\n\n print(r, end=',')" }, { "alpha_fraction": 0.30078125, "alphanum_fraction": 0.36328125, "avg_line_length": 15.0625, "blob_id": "7a89e92357f8f48d383e5ede10e609a92300f04f", "content_id": "4ded52f8a97d41af12fe614e3d8e95d30a31cfa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 54, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/02/0228/타임 카드.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(3):\n AH, AM, AS, BH, BM, BS = map(int, input().split())\n\n BH -= 1 + AH\n BM += 59 - AM\n BS += 60 - AS\n \n if BS >= 60:\n BM += 1\n BS -= 60\n\n if BM >= 60:\n BH += 1\n BM -= 60\n\n print(BH, BM, BS)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 9.600000381469727, "blob_id": "2d6fbc8d986da801f6f61e2d98c253fa1437d5aa", "content_id": "aa5ae0fa3c33c619639c67881ef60d75f6cdba7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 19, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/01/0116/오리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "sen = list(input())\n\nresult = 0\n\nquack = [False] * 5\n\n\n\n" }, { "alpha_fraction": 0.4591194987297058, "alphanum_fraction": 0.4800838530063629, "avg_line_length": 21.761905670166016, "blob_id": "c5200dab7515077680b19a97db77c2bd2ef635d3", "content_id": "297960e17c5f05408c0bc1ef8b9910b943ef2694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 747, "license_type": "no_license", "max_line_length": 46, "num_lines": 21, "path": 
"/알고리즘/온라인저지/2021/08/0807/ACM 호텔.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\nH = 호텔 높이\nW = 호텔 길이\n그림은 정면에서 봤을 때 호텔 모습\nYYXX = Y층 X호\n들어오는 순서대로 각 층 1호실부터 방 배정\nT = 테스트케이스 개수\nN = 입장 순서\nYY = N % H\nXX = N // H + 1\"\"\"\n\nT = int(input()) # 테스트케이스 개수\nfor i in range(T): # 테스트케이스 개수만큼 반복\n H, W, N = map(int, input().split()) # 입력받고\n if N % H == 0: # 손님들을 차례로 방 배정할 때 나머지가 0이면\n Y = H # 호실의 층수는 높이\n X = N // H # 호수는 몫\n else: # 나머지가 0이 아니면\n Y = N % H # 층수는 나누고 난 나머지\n X = (N // H) + 1 # 호수는 몫에 +1\n print(100 * Y + X) # 방 번호 출력" }, { "alpha_fraction": 0.26037734746932983, "alphanum_fraction": 0.5169811248779297, "avg_line_length": 6, "blob_id": "f9b9db6f4489b7bc50661bd064c8690231e1a656", "content_id": "f1888e4047f443052cf4d5e26c2e822c72492f4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 265, "license_type": "no_license", "max_line_length": 29, "num_lines": 38, "path": "/알고리즘/온라인저지/2022/01/0108/이친수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n1 1\n1\n\n2 1\n10\n\n3 2\n100\n101\n\n4 3\n1000\n1001\n1010\n\n5 5\n10000\n10001\n10010\n10100\n10101\n\"\"\"\n\nN = int(input())\n\ndp = [0 for _ in range(91)]\n\ndp[1], dp[2] = 1, 1\n\nif N < 3:\n print(dp[N])\n exit()\n\nfor i in range(3, N+1):\n dp[i] = dp[i-1] + dp[i-2]\n\nprint(dp[N])" }, { "alpha_fraction": 0.5094066858291626, "alphanum_fraction": 0.5267727971076965, "avg_line_length": 21.322580337524414, "blob_id": "11ca0eb7103cde73d0cf3732c668a4889ddf9b59", "content_id": "9109d65d6b49c7339689aa25e539d3eac85433bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 903, "license_type": "no_license", "max_line_length": 67, "num_lines": 31, "path": "/알고리즘/온라인저지/2021/10/1016/N과 M (1).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# 외부모됼\n# from itertools import permutations\n# N, M = map(int, 
input().split())\n# arr = list(range(1, N+1))\n# print(list(permutations(arr, M)))\n\n# dfs 재귀로 구현\ndef dfs(x): # x는 숫자 고른 개수\n if x == M: # M개만큼 골랐으면\n print(*result) # 들어있는 순열 언패킹\n return # 해당 재귀 종료\n \n for i in range(N): # 재귀를 통해, N개중 하나 -> N-1개중 하나 -> N-2개중 하나 ...\n if visited[i]: # 이미 고른 수이면\n continue\n\n result.append(arr[i]) # 하나 고르고\n visited[i] = 1 # 방문처리 하고\n dfs(x+1) # 다음 숫자 고르기\n result.pop() # 돌아와서 골랐던 숫자 빼고\n visited[i] = 0 # 방문 취소하고\n\nN, M = map(int, input().split())\n\narr = list(range(1, N+1))\n\nvisited = [0] * N\n\nresult = []\n\ndfs(0) # 고른 숫자 0개부터 시작" }, { "alpha_fraction": 0.5365853905677795, "alphanum_fraction": 0.5365853905677795, "avg_line_length": 12.833333015441895, "blob_id": "8ea0e51cd9745090ac21149d1646cca14c08e807", "content_id": "5f305afd10484b2fed5e51706e453f5549abade3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/02/0205/와이버스 부릉부릉.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, K = map(int, input().split())\n\nfor n in range(N):\n a = input()\n\nprint('비와이')" }, { "alpha_fraction": 0.5511363744735718, "alphanum_fraction": 0.5625, "avg_line_length": 26.842105865478516, "blob_id": "75dd18fbe39a44723b70ec0b4217c24414a508c0", "content_id": "ab8ebcf02b2bb3b9a72a4d9b257c6a0cf1cf0f49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 98, "num_lines": 19, "path": "/알고리즘/온라인저지/2023/01/0124/회문인 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import string\n\ndef base_change(number, base):\n tmp = string.digits + string.ascii_uppercase + string.ascii_lowercase + '!@#$%^' # 자릿수를 담은 문자열\n result = ''\n while number:\n result = tmp[number%base] + result # B진법이므로 B로 나눈 나머지번째 문자를 계속 추가한다\n number //= base\n return 
result\n\nfor _ in range(int(input())):\n T = int(input())\n result = 0\n for base in range(2, 65):\n new_num = base_change(T, base)\n if new_num == new_num[::-1]:\n result = 1\n break\n print(result)" }, { "alpha_fraction": 0.5670102834701538, "alphanum_fraction": 0.6020618677139282, "avg_line_length": 24.578947067260742, "blob_id": "814e772dfd4e80fc123fa3a945c7ff0d4203d233", "content_id": "0188397c69b30b192fb67a270a6297bd71d9998f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 485, "license_type": "no_license", "max_line_length": 53, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/10/1007/유레카 이론.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ntriangle = [1]\nfor i in range(2, 1000):\n tmp = triangle[-1]+i\n if tmp<=1000: triangle.append(tmp)\n else: break\nresult = set()\nlen_tri = len(triangle)\nfor i in range(len_tri):\n for j in range(len_tri):\n for k in range(len_tri):\n tmp = triangle[i]+triangle[j]+triangle[k]\n if tmp<=1000: result.add(tmp)\nfor _ in range(int(input().rstrip())):\n K = int(input().rstrip())\n print(1 if K in result else 0)" }, { "alpha_fraction": 0.5032680034637451, "alphanum_fraction": 0.529411792755127, "avg_line_length": 17.058822631835938, "blob_id": "aa005383fe7cb3256b5a9e520220845ee40df8f1", "content_id": "bcc9931577852a294d7d3f9230e9f2f9b51668c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 306, "license_type": "no_license", "max_line_length": 39, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/03/0310/베시와 데이지.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "B = tuple(map(int, input().split()))\nD = tuple(map(int, input().split()))\nJ = tuple(map(int, input().split()))\n\nd = abs(D[0]-J[0]) + abs(D[1]-J[1])\nb = max(abs(B[0]-J[0]), abs(B[1]-J[1]))\n\nresult = ''\n\nif d < b:\n result = 'daisy'\nelif b < d:\n result = 
'bessie'\nelse:\n result = 'tie'\n\nprint(result)" }, { "alpha_fraction": 0.4935064911842346, "alphanum_fraction": 0.5324675440788269, "avg_line_length": 38, "blob_id": "31a834b1d9bf930dab2cf1ef92f73ec7e5ef35ab", "content_id": "7ca41aefa9a586938f816f1a3b5ef69294f8aef1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 58, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/08/0822/Equality.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "Q = input() # quiz\nprint('YES' if int(Q[0])+int(Q[4]) == int(Q[8]) else 'NO')" }, { "alpha_fraction": 0.4591549336910248, "alphanum_fraction": 0.5042253732681274, "avg_line_length": 24.428571701049805, "blob_id": "436baf1a9b9ec92217f541ae9b7f3fc74af6db63", "content_id": "a651318562020a1400757a931ba3400c15e1bd01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 69, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/10/1017/소수 단어.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "word = input()\nnum = 0\nfor w in word:\n if w.isupper(): # upper\n num += ord(w)-65+26+1\n else: # lower\n num += ord(w)-97+1\nn = int(1e4)\na = [False, True] + [True]*(n-1)\nfor i in range(2, n+1):\n if a[i]:\n for j in range(2*i, n+1, i):\n a[j] = False\nprint('It is a prime word.' 
if a[num] else 'It is not a prime word.')" }, { "alpha_fraction": 0.6142857074737549, "alphanum_fraction": 0.6571428775787354, "avg_line_length": 16.75, "blob_id": "099a02d05501c8f3025396ffd09719bb666b941f", "content_id": "db16edb0bbea6e1e1dfaee1edf38582cafb283b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/12/1226/너의 이름은 몇 점이니.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "input()\nresult = 0\nfor i in input(): result += ord(i)-64\nprint(result)" }, { "alpha_fraction": 0.47680413722991943, "alphanum_fraction": 0.5051546096801758, "avg_line_length": 28.923076629638672, "blob_id": "60010eb3bdd8af01bd5b6eddac6fb3c5d5fdfc52", "content_id": "24a0d7a3a9ef67e3441a626fd58d937f69a140d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 42, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/10/1012/가위 바위 보.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "player_1_win = ['P R', 'R S', 'S P']\nplayer_2_win = ['R P', 'S R', 'P S']\nfor t in range(int(input())):\n RSP = 0\n result = ''\n for n in range(int(input())):\n tmp = input()\n if tmp in player_1_win: RSP += 1\n elif tmp in player_2_win: RSP -= 1\n if RSP > 0: result = 'Player 1'\n elif RSP < 0: result = 'Player 2'\n else: result = 'TIE'\n print(result)" }, { "alpha_fraction": 0.5976095795631409, "alphanum_fraction": 0.6215139627456665, "avg_line_length": 20, "blob_id": "f0cb06faad63eee39a5850e3454f8954aa2898f2", "content_id": "87e37cb007829ba62932568ddce7fd57030cbb3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 251, "license_type": "no_license", "max_line_length": 54, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/10/1003/수리공 항승.py", "repo_name": 
"sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, L = map(int, input().rstrip().split())\narr = sorted(list(map(int, input().rstrip().split())))\nresult = 0\nwhile arr:\n tmp = arr[0]+L-1\n while arr and arr[0]<=tmp: arr.pop(0)\n result += 1\nprint(result)" }, { "alpha_fraction": 0.3227176368236542, "alphanum_fraction": 0.40976646542549133, "avg_line_length": 21.4761905670166, "blob_id": "e37272de4483ee87142df3d774a1b52786ce1f20", "content_id": "3f68a06eaea56d3944f1bbb152ee210a4fcb7e71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 37, "num_lines": 21, "path": "/알고리즘/온라인저지/2023/03/0318/받아올림.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n A, B = input().split()\n if A == '0' and B == '0': break\n A = '00000000000000'+A\n B = '00000000000000'+B\n arr = []\n A = list(A)[::-1]\n B = list(B)[::-1]\n for i in range(10):\n a, b = map(int, (A[i], B[i]))\n C = a+b\n if C:\n arr.append(C)\n arr.append(0)\n result = 0\n for i in range(len(arr)):\n a = arr[i]\n if a>=10:\n result += 1\n arr[i+1] += 1\n print(result)" }, { "alpha_fraction": 0.41603052616119385, "alphanum_fraction": 0.4312977194786072, "avg_line_length": 12.8421049118042, "blob_id": "aa4ef13da2107a93024e8d35cdb95cc00296fc60", "content_id": "865b24e0d546f2dcf3225b2e1633974aef63733f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 262, "license_type": "no_license", "max_line_length": 35, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/01/0131/시험 감독.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nresult = 0\n\nA = list(map(int, input().split()))\n\nB, C = map(int, input().split())\n\nfor a in A:\n a -= B\n result += 1\n\n if a > 0:\n result += a//C\n tmp = C * (a//C)\n if a != tmp:\n result += 
1\n\nprint(result)" }, { "alpha_fraction": 0.4332129955291748, "alphanum_fraction": 0.5577617287635803, "avg_line_length": 19.55555534362793, "blob_id": "b54183e331a3cfdebb865971c8f7ed3655d2bed0", "content_id": "69bce0481659e57f376eb4e78bf3deea794234f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 896, "license_type": "no_license", "max_line_length": 52, "num_lines": 27, "path": "/알고리즘/[템플릿]/[SAMPLE]/비트연산 부분집합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "arr = [3, 6, 8]\nn = len(arr)\n\nfor i in range(1<<n):\n for j in range(n):\n if i&(1<<j):\n print(arr[j], end=' ')\n print()\n \n\"\"\"\n0 0 0 : 0\n0 0 1 : 3\n0 1 0 : 6\n0 1 1 : 3 6\n1 0 0 : 8\n1 0 1 : 3 8\n1 1 0 : 6 8\n1 1 1 : 3 6 8\n각각의 숫자는, 각각의 원소들이 있고, 없고를 의미함\ni는 십진수이지만 연산상에서는 2진수로 작용하고\n1<<j는 range에서 0 1 2 ... 일 때, 1 10 100 1000... 으로 증가함\ni&(1<<j)라는건 결국 이진수로 표현된 i가 해당 자리수가 1인지\n즉, 리스트에서 그 원소를 포함하는 부분집합인지를 의미함\nj는 원소 자체를 의미하는게 아니라 원소의 위치, 순서를 의미하고\n3번째 원소(인덱스 상으로 2번째)인 8을 확인하는건 1<<3, 즉 100(이진수)\n이는 돌고있는 i가 1 X X면 8을 포함하는 부분집합이라는 것을 알 수 있다\n\"\"\"" }, { "alpha_fraction": 0.5684455037117004, "alphanum_fraction": 0.5870069861412048, "avg_line_length": 24.41176414489746, "blob_id": "de41654ecfce008da98074dbf597bd3a86d20ed3", "content_id": "478a429ad8173502949c471a1c25761448ece7fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 607, "license_type": "no_license", "max_line_length": 68, "num_lines": 17, "path": "/알고리즘/온라인저지/2021/10/1016/회의실 배정.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\narr = []\n\nfor n in range(N):\n arr.append(tuple(map(int, input().split()))) # 속도 향상을 위한 tuple()\n\narr.sort() # arr[x][0]을 기준으로 정렬\narr.sort(key=lambda x: x[1]) # 다시 한번 x[1]을 기준으로 정렬\n\nresult = [arr[0]] # 첫번째 회의\n\nfor i in range(1, len(arr)): # 두번째 회의부터 회의실 사용 가능한지 확인\n if result[-1][1] <= arr[i][0]: # 이전 회의의 종료시간보다 다음 회의의 시작시간이 
늦으면\n result.append(arr[i]) # 회의실 사용가능\n \nprint(len(result)) # 회의실 사용팀 수 출력" }, { "alpha_fraction": 0.5836910009384155, "alphanum_fraction": 0.5836910009384155, "avg_line_length": 20.272727966308594, "blob_id": "a1fc9f43a73ede6d07661c29781e76dea9da6d1f", "content_id": "da72491349c5377040c2ae28a04b56a074bb9471", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 33, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/07/0729/듣보잡.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nL = set() # listened\nfor n in range(N): L.add(input())\nresult = []\nfor m in range(M):\n s = input()\n if s in L:\n result.append(s)\nresult.sort()\nprint(len(result))\nfor r in result: print(r)" }, { "alpha_fraction": 0.5286624431610107, "alphanum_fraction": 0.5414012670516968, "avg_line_length": 30.600000381469727, "blob_id": "6571a36dc3d87ff9d1ead769cb364ada30fae46d", "content_id": "a2efb9f05a12de3fbd40902fc460fc7dd48188e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 60, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/12/1219/주식 투자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n result = 0\n for n in range(int(input())):\n result += max(list(map(int, input().split())) + [0])\n print(result)" }, { "alpha_fraction": 0.46686747670173645, "alphanum_fraction": 0.4909638464450836, "avg_line_length": 19.8125, "blob_id": "fed60ae259d0a0e3cff519d3c42a19c1705e116c", "content_id": "c7a8c12cdce9597a69b4492ae133f20c7910dfb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 47, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/08/0807/꿀벌.py", "repo_name": 
"sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nT = ['Re', 'Pt', 'Cc', 'Ea', 'Tb', 'Cm', 'Ex']\nB = {t:0 for t in T}\ncnt = 0\nwhile True:\n I = input().rstrip().split()\n if not I: break\n for i in I:\n cnt += 1\n try: B[i] += 1\n except: pass\nfor t in T: print(f'{t} {B[t]} {B[t]/cnt:.2f}')\nprint(f'Total {cnt} 1.00')" }, { "alpha_fraction": 0.46268656849861145, "alphanum_fraction": 0.49253731966018677, "avg_line_length": 18.285715103149414, "blob_id": "9cb3f799ef7336e04f77f239324c46215ce8ab0a", "content_id": "5877e1f38deb1c4c2da7c625fc8346ad2c4bb8c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/03/0313/Pyramids.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n N = int(input())\n if N == 0: break\n result = 0\n for i in range(1, N+1):\n result += i\n print(result)" }, { "alpha_fraction": 0.36206895112991333, "alphanum_fraction": 0.4367816150188446, "avg_line_length": 12.384614944458008, "blob_id": "aa498041dbb0c132d5a3fa0edcfea6c707359a2d", "content_id": "8d262ca2dd8a4e52e6ddafab4a2a2a007f579f9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 27, "num_lines": 13, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/33차시 3. 
자료구조 – 셋, 딕셔너리 - 연습문제 8.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "sen = input()\n\nu = 0\nl = 0\n\nfor s in sen:\n if 65 <= ord(s) <= 90:\n u += 1\n if 97 <= ord(s) <= 122:\n l += 1\n\nprint('UPPER CASE', u)\nprint('LOWER CASE', l)\n" }, { "alpha_fraction": 0.5600000023841858, "alphanum_fraction": 0.568648636341095, "avg_line_length": 23.36842155456543, "blob_id": "f01ed89b93d4b0d78574e6899e1e4f21d68041d7", "content_id": "62b8aaf986f8bc0ff02bbc329bafd63dbbfd5b1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1213, "license_type": "no_license", "max_line_length": 47, "num_lines": 38, "path": "/알고리즘/[템플릿]/정렬/퀵정렬.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n첫 피봇은 arr[0]\narr의 앞에서 시작하는 left, 뒤에서 시작하는 right\n피봇을 기준으로 이 두 값이 각각 작고 큰 값을 찾다가\n찾으면 두 값을 서로 바꿔주고\n못찾고 left와 right가 교차해버리면\nright와 피봇값을 바꿔주어, 정렬중인 배열의 left와 right 중간에\n그러니까, 피봇이 있어야 할 자리에 피봇을 넣어준다\npartition 함수를 통해 피봇값을 얻고\nquick_sort 함수를 통해 계속해서 분할하여 정복하는 식으로 배열을 정렬해나간다\n\"\"\"\n\ndef partition(arr, left, right):\n pivot = arr[left]\n i = left + 1\n j = right\n\n while i <= j:\n while i <= j and arr[i] <= pivot:\n i += 1\n while i <= j and arr[j] >= pivot:\n j -= 1\n\n if i <= j:\n arr[i], arr[j] = arr[j], arr[i]\n arr[left], arr[j] = arr[j], arr[left]\n return j\n\ndef quick_sort(arr, left, right):\n if left < right:\n center = partition(arr, left, right)\n quick_sort(arr, left, center-1)\n quick_sort(arr, center+1, right)\n\nfor t in range(int(input())):\n arr = list(map(int, input().split()))\n quick_sort(arr, 0, len(arr)-1)\n print(*arr)" }, { "alpha_fraction": 0.42506811022758484, "alphanum_fraction": 0.4386920928955078, "avg_line_length": 20.647058486938477, "blob_id": "e8f8953833c7f4342343f6f45896b80e75a535a3", "content_id": "1ccc19dfff3751d3059560aafd02dda1d03cc661", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, 
"license_type": "no_license", "max_line_length": 38, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/02/0220/방 배정하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C, N = map(int, input().split())\n\nfound_ans = False\n\nfor a in range(N//A + 1):\n if found_ans:\n break\n for b in range(N//B + 1):\n if found_ans:\n break\n for c in range(N//C + 1):\n tmp = N-(a*A + b*B + c*C)\n if not tmp:\n found_ans = True\n break\n\nprint(1 if found_ans else 0)" }, { "alpha_fraction": 0.49462366104125977, "alphanum_fraction": 0.5161290168762207, "avg_line_length": 14.666666984558105, "blob_id": "7589e79fe8586a60479f7d2d86f2aa3faf329c0d", "content_id": "6d15475a2227f91236745081681271bd6866a5c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/02/0220/문자열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n text = input()\n\n print(text[0], text[-1], sep='')" }, { "alpha_fraction": 0.3807106614112854, "alphanum_fraction": 0.4568527936935425, "avg_line_length": 18.799999237060547, "blob_id": "5cf765420a2089ae3db51cc7b155764851ec5842", "content_id": "65a37239321e96f0ee39e9c6927c9a2636107ce0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/06/0611/적어도 대부분의 배수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "nums = list(map(int, input().split()))\nmaxx = 10000000000\nfor i in range(1, maxx):\n a = 0\n for n in nums:\n if not i%n:\n a += 1\n if a >= 3:\n print(i)\n exit()" }, { "alpha_fraction": 0.29611650109291077, "alphanum_fraction": 0.46116504073143005, "avg_line_length": 16.95652198791504, "blob_id": "a8a210f337347b4f5ec49da930f4370731b1795d", 
"content_id": "cac4bea1247b5d3a7cdc550c60fb7859782f4817", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 35, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/02/0203/저항.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "r = {\n 'black': ('0', 1),\n 'brown': ('1', 10),\n 'red': ('2', 100),\n 'orange': ('3', 1000),\n 'yellow': ('4', 10000),\n 'green': ('5', 100000),\n 'blue': ('6', 1000000),\n 'violet': ('7', 10000000),\n 'grey': ('8', 100000000),\n 'white': ('9', 1000000000),\n}\n\nresult = ''\n\nfor _ in range(2):\n result = result + r[input()][0]\n\nresult = int(result)\n\nresult *= r[input()][1]\n\nprint(result)" }, { "alpha_fraction": 0.42105263471603394, "alphanum_fraction": 0.4417862892150879, "avg_line_length": 23.153846740722656, "blob_id": "0ec4ae41156f014d01d047f5794e2ddc62e8c1e7", "content_id": "00306e1cbf921fafc5d5009246bc079eab8e7a2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "no_license", "max_line_length": 49, "num_lines": 26, "path": "/알고리즘/온라인저지/2022/09/0904/부분 문자열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def get_pi(pattern):\n j = 0\n for i in range(1, len(pattern)):\n while j > 0 and pattern[i] != pattern[j]:\n j = pi[j - 1]\n if pattern[i] == pattern[j]:\n j += 1\n pi[i] = j\n\ndef KMP(string, pattern):\n get_pi(pattern)\n j = 0\n for i in range(len(string)):\n while j > 0 and string[i] != pattern[j]:\n j = pi[j-1]\n if string[i] == pattern[j]:\n if j == len(pattern)-1:\n return True\n else:\n j += 1\n return False\n\nS, P = input(), input()\npi = [0] * len(P)\nif KMP(S, P): print('1')\nelse: print('0')" }, { "alpha_fraction": 0.4172042906284332, "alphanum_fraction": 0.43225806951522827, "avg_line_length": 19.217391967773438, "blob_id": "82ce95e3602fa122847521808a5f55034dd96eca", "content_id": 
"a38de496a80c399a6d27ac7bc9b262d0515f0f1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 40, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/02/0225/삼각형과 세 변.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n A, B, C = map(int, input().split())\n if A+B+C == 0:\n break\n\n result = 'Scalene'\n\n if A+B+C == max(A, B, C) * 2:\n result = 'Invalid'\n \n nums = set([A, B, C])\n\n if len(nums) == 2:\n temp = [A, B, C]\n temp.sort()\n if temp[2] - temp[0] >= temp[1]:\n result = 'Invalid'\n else:\n result = 'Isosceles'\n elif len(nums) == 1:\n result = 'Equilateral'\n\n print(result)\n" }, { "alpha_fraction": 0.4945652186870575, "alphanum_fraction": 0.5, "avg_line_length": 9.277777671813965, "blob_id": "e0ad5d7f011785cd33f0daabb68e77bb5454756b", "content_id": "e275252de5a46cae786cb9211050acdb03add30a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 184, "license_type": "no_license", "max_line_length": 20, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/02/0207/수도요금.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A = int(input())\nB = int(input())\nC = int(input())\nD = int(input())\nP = int(input())\n\nresult = 0\n\nXP = A * P\n\nYP = B\n\nif P > C:\n YP += (P-C) * D\n\nresult = min(XP, YP)\n\nprint(result)" }, { "alpha_fraction": 0.6086956262588501, "alphanum_fraction": 0.695652186870575, "avg_line_length": 11, "blob_id": "4b1ba1bcb7dcbad9b5b13406806d7558ea7867a1", "content_id": "dde44ec7ab3a33bb89892b9f89fb2c4fcfc46992", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/알고리즘/온라인저지/2023/05/0506/소수가 아닌 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "input()\nprint(int(1e9))" }, { 
"alpha_fraction": 0.5422885417938232, "alphanum_fraction": 0.572139322757721, "avg_line_length": 21.44444465637207, "blob_id": "9d2cb6716f3411ee8e6be871208fa84f81772626", "content_id": "52aa7eab5c6b43578290d89e5f4cda7866ff3a75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 201, "license_type": "no_license", "max_line_length": 39, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/02/0218/알바생 강호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nN = int(input())\ntips = [int(input()) for _ in range(N)]\ntips.sort(reverse=True)\nfor i in range(1, N+1):\n tip = tips[i-1]\n tip = tip - (i-1)\n if tip > 0: result += tip\nprint(result)" }, { "alpha_fraction": 0.46783626079559326, "alphanum_fraction": 0.5087719559669495, "avg_line_length": 13.333333015441895, "blob_id": "c9830558f3082d842a11bbbfeca0c84dd0e60e2c", "content_id": "133a8ef2ba010a04d4000ac72132ed93198f21b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 31, "num_lines": 12, "path": "/알고리즘/온라인저지/2021/08/0818/팩토리얼 DP.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def fact(n):\n table [0] = 1\n for i in range(1, n+1):\n table[i] = i*table[i-1]\n\n\n return table[n]\n\nn = int(input())\ntable = [0] * (n+1)\nfact(n)\nprint(table)" }, { "alpha_fraction": 0.369047611951828, "alphanum_fraction": 0.3928571343421936, "avg_line_length": 17.77777862548828, "blob_id": "8542ce2a7eaaa4603a26dc2b1a2d1b587d497de4", "content_id": "b1b08cbbd0f92efa0e666e1849fef9510e9aafe9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 25, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/12/1211/숫자 놀이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n N = int(input())\n if N == 
0: break\n while N >= 10:\n tmp = 0\n for n in str(N):\n tmp += int(n)\n N = tmp\n print(N)" }, { "alpha_fraction": 0.4337349534034729, "alphanum_fraction": 0.4518072307109833, "avg_line_length": 21.200000762939453, "blob_id": "ec25611fa3973661b012eb2a81b12babc4eec058", "content_id": "f3eed1e5829a57d697f32fa9390b14ddd599a4fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 39, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/07/0703/Pen Pineapple Apple Pen.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nword = input()\nresult = 0\nidxs = []\nfor i in range(N-3):\n tmp = word[i:i+4]\n if tmp == 'pPAp':\n is_used = False\n for j in range(i, i+4):\n if j in idxs:\n is_used = True\n if not is_used:\n result += 1\n idxs += list(range(i, i+4))\nprint(result)" }, { "alpha_fraction": 0.43089431524276733, "alphanum_fraction": 0.577235758304596, "avg_line_length": 14.5, "blob_id": "ddaf8fe5774594d1aa02eefc5ff2d8def1205d9e", "content_id": "3141a245ea92760f14de34b50cd81d3a070d06fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 36, "num_lines": 8, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/21차시 2. 
자료구조 – 리스트, 튜플 - 연습문제 23.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "arr = [12, 24, 35, 70, 88, 120, 155]\n\nresult = []\n\nfor i in range(1, len(arr), 2):\n result.append(arr[i])\n\nprint(result)" }, { "alpha_fraction": 0.3997906744480133, "alphanum_fraction": 0.42804813385009766, "avg_line_length": 28.8125, "blob_id": "bc9912e2cffd75f68dcb3793040f03495d5dfce2", "content_id": "b90dcba4d539c1e31c3b3813d3fc1db7a69046e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1965, "license_type": "no_license", "max_line_length": 87, "num_lines": 64, "path": "/알고리즘/SW역량테스트/2022.04.12 A형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def dfs(w, h, depth, tmp):\n global visited, maxx\n print(h, w, depth, tmp)\n # print(visited)\n if depth == 4:\n # print('depth 4 here', w, h, depth, tmp)\n if tmp > maxx:\n maxx = tmp\n print('THIS IS MAXX😊😉😊😊', maxx)\n return\n for i in range(6):\n if (0 <= w+dx[i] < W) and (0 <= h+dy[i] < H) and not visited[h+dy[i]][w+dx[i]]:\n visited[h+dy[i]][w+dx[i]] = 1\n dfs(w+dx[i], h+dy[i], depth+1, tmp+arr[h+dy[i]][w+dx[i]])\n visited[h+dy[i]][w+dx[i]] = 0\n\n# 12시 방향부터 시계방향으로 델타이동\ndx = [0, 1, 1, 0, -1, -1]\ndy = [-1, -1, 0, 1, 0, -1]\nT = int(input())\nfor t in range(1, T+1):\n W, H = map(int, input().split())\n arr = [list(map(int, input().split())) for _ in range(H)]\n # print(arr)\n maxx = 0\n for h in range(H):\n for w in range(W):\n visited = [[0]*W for _ in range(H)]\n visited[h][w] = 1\n dfs(w, h, 1, arr[h][w])\n print('------------------')\n print(maxx ** 2)\n\n\n# def dfs(i, depth, tmp):\n# global visited, maxx\n# print(i, depth, tmp)\n# if depth == 4:\n# # print('depth 4 here', w, h, depth, tmp)\n# if tmp > maxx:\n# maxx = tmp\n# print('THIS IS MAXX😊😉😊😊', maxx)\n# return\n# for j in range(6):\n# if (1 <= i+dx[j] < W*H+1) and not visited[i+dx[j]]:\n# visited[i+dx[j]] = 1\n# dfs(i+dx[j], depth+1, tmp+arr[i+dx[j]])\n# 
visited[i+dx[j]] = 0\n\n# dx = [-5, -4, 1, 5, -1, -6]\n# T = int(input())\n# for t in range(1, T+1):\n# W, H = map(int, input().split())\n# arr = [0]\n# for h in range(H):\n# arr += list(map(int, input().split()))\n# maxx = 0\n# for i in range(1, W*H+1):\n# visited = [0] * len(arr)\n# visited[i] = 1\n# dfs(i, 1, arr[i])\n# print('------------------')\n# print(maxx ** 2)\n# print(arr[9]) " }, { "alpha_fraction": 0.5130434632301331, "alphanum_fraction": 0.530434787273407, "avg_line_length": 13.5, "blob_id": "60468c1a785829c9871f68fedb7d353e2000f96d", "content_id": "d537011f60bfaeef70220c148a5e4451d5014b9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/알고리즘/온라인저지/2021/12/1213/과자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a, b, c = map(int, input().split())\n\nresult = c - (a * b)\n\nif result < 0:\n print(abs(result))\nelse:\n print(0)" }, { "alpha_fraction": 0.4268292784690857, "alphanum_fraction": 0.48373982310295105, "avg_line_length": 19.58333396911621, "blob_id": "f0f7757e20e223a16eb06261ba79b3f014f0b45c", "content_id": "edc903b12a421f78ab76e136bb42586ef6e78837", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 36, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/05/0530/삼각 김밥.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a, b = map(int, input().split())\ncheap = b/a\ne, f = a, b\nN = int(input())\nfor n in range(N):\n c, d = map(int, input().split())\n if d/c > cheap:\n cheap = d/c\n e, f = c, d\n# print(e, f)\nprint(f'{e*1000/f:.2f}')\n# 37:532 = x:1000" }, { "alpha_fraction": 0.54076087474823, "alphanum_fraction": 0.5625, "avg_line_length": 20.705883026123047, "blob_id": "85500c74c0eb0b48691208e94cc131b6e9cd46d9", "content_id": 
"3d5ebd8ea76c6abf1bd83ba2fa737f39b902d185", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 368, "license_type": "no_license", "max_line_length": 49, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/10/1012/동전 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\nINF = int(1e9)\n\nN, K = map(int, input().rstrip().split())\ncoins = set()\nfor i in range(N):\n coins.add(int(input().rstrip()))\ncoins = list(coins)\ndp = [INF]*(K+1)\ndp[0] = 0\nfor i in range(K+1):\n for coin in coins:\n if i+coin <= K:\n dp[i+coin] = min(dp[i]+1, dp[i+coin])\nprint(dp[K] if dp[K] != INF else -1)" }, { "alpha_fraction": 0.4523809552192688, "alphanum_fraction": 0.5, "avg_line_length": 27.16666603088379, "blob_id": "c0ea716f3da6c5e8ec063f292e0837e7f1a48d94", "content_id": "28952e7638e1785803668385e3e8decbcc73547b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 47, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/08/0821/Winning Score.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = 0, 0\nfor i in range(3, 0, -1): A += int(input()) * i\nfor i in range(3, 0, -1): B += int(input()) * i\nif A > B: print('A')\nelif B > A: print('B')\nelse: print('T')" }, { "alpha_fraction": 0.4456404745578766, "alphanum_fraction": 0.46824541687965393, "avg_line_length": 29, "blob_id": "9b025bc0228d3634c729c1295ca26f6b515527f6", "content_id": "b2f67723ee223e95235030e3d67dad23470e9afa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1451, "license_type": "no_license", "max_line_length": 72, "num_lines": 31, "path": "/알고리즘/온라인저지/2021/08/0814/전자레인지.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n버튼 A, B, C 각각 300초, 60초, 10초\n버튼을 누른 총 횟수가 최소횟수 일 때\n각 버튼을 누른 횟수를 버튼별로 
출력\nT만큼 정확히 전자레인지를 동작시킬 수 없으면 -1\n\"\"\"\n\"\"\"\n해결법 탐색\n초기 접근법 독스트링 작성\n코드 작성\n백준 제출\n코드주석 작성\n백준에는 주석은 달지말고\n백준 통과하면 그 다음에 코드파일에만 주석 달자\n\"\"\"\n\nT = int(input()) # 시간초 입력\nbuttons = [300, 60, 10] # 버튼 A, B, C를 초로 환산\ncounts = [0] * 3 # 버튼 누른 횟수 리스트 초기화\nfor i in range(len(buttons)): # 버튼의 개수만큼 순회\n if buttons[i] <= T: # 순회중인 버튼(A->B->C)으로 작동 시켜도 시간을 초과하지 않으면\n while True: # 버튼 누르기를 반복\n counts[i] += 1 # 버튼을 누르고\n T -= buttons[i] # 버튼이 동작하는 시간만큼 빼주고\n if buttons[i] > T: # 버튼시간이 더 크면\n break # while문 종료\nif T == 0: # 깔끔하게 T초만큼 동작했으면\n for count in counts: # 버튼 카운트들 순회하면서\n print(count, end=' ') # 카운트 출력\nelse: # 버튼들로 T초를 완벽하게 동작시키지 못했으면\n print(-1) # -1 출력" }, { "alpha_fraction": 0.35930734872817993, "alphanum_fraction": 0.43290042877197266, "avg_line_length": 24.77777862548828, "blob_id": "4b45b62b32bdeeb19efb96b10e0d9379142626b6", "content_id": "51fc876d0e9f05470950e728b22dec226965264b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/07/0731/파스칼의 삼각형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, K = map(int, input().split())\nP = [[1], [1, 1], [1, 2, 1]]\nfor n in range(N-3):\n tmp = [0] * (2+n)\n for i in range(len(P[-1])-1):\n tmp[i] = sum(P[-1][i:i+2])\n tmp = [1]+tmp+[1]\n P.append(tmp)\nprint(P[-1][K-1])" }, { "alpha_fraction": 0.5058823823928833, "alphanum_fraction": 0.5215686559677124, "avg_line_length": 17.285715103149414, "blob_id": "b9f28aa32082e882ad7b6c3fb4edf1e4fa2f5723", "content_id": "8425547ce50a744fc851bfddf9ef23b1b8553d40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 30, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/11/1108/도비의 영어 공부.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = 
sys.stdin.readline\n\nwhile True:\n tmp = input().rstrip()\n check = tmp[0]\n if check == '#': break\n sen = tmp[2:]\n result = 0\n for s in sen:\n if s.lower() == check:\n result += 1\n print(check, result)" }, { "alpha_fraction": 0.5030674934387207, "alphanum_fraction": 0.5153374075889587, "avg_line_length": 19.375, "blob_id": "33ef02e8d4ea7cadcbd78e044c894c8d1fc73c3e", "content_id": "90abd7c62fb0ec98970dc898d24b3b778961ace8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 34, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/06/0603/Judging Moose.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "L, R = map(int, input().split())\nresult = 'Not a moose'\nif L == R:\n if L:\n result = f'Even {L*2}'\nelse:\n result = f'Odd {max((L,R))*2}'\nprint(result)\n" }, { "alpha_fraction": 0.6071428656578064, "alphanum_fraction": 0.6071428656578064, "avg_line_length": 16.625, "blob_id": "2c46261a38fcbd67da442e7156a7413c23fed7ea", "content_id": "14f6a0a667db5396199c32e6861b6d0f22ef5fc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 56, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/07/0701/접미사 배열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n접미사를 모아서, 정렬 후 출력\n\"\"\"\nword = input()\narr = []\nfor i in range(len(word)): arr.append(word[i:len(word)])\narr.sort()\nfor a in arr: print(a)" }, { "alpha_fraction": 0.4838709533214569, "alphanum_fraction": 0.5307917594909668, "avg_line_length": 27.5, "blob_id": "9bdfc1f225ca2816c47656e83377f71e7567a063", "content_id": "e5edb6361d2d60e30b09ceb4e645a79bd5a867fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 72, "num_lines": 12, "path": 
"/알고리즘/온라인저지/2021/08/0818/삼각형 외우기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "data = []\nfor _ in range(3):\n data.append(int(input()))\nif sum(data) == 180:\n if data[0] == data[1] and data[1] == data[2] and data[2] == data[0]:\n print('Equilateral')\n elif data[0] == data[1] or data[1] == data[2] or data[2] == data[0]:\n print('Isosceles')\n else:\n print('Scalene')\nelse:\n print('Error')" }, { "alpha_fraction": 0.4481408894062042, "alphanum_fraction": 0.4618395268917084, "avg_line_length": 22.272727966308594, "blob_id": "ff2573218e2beb5fca0986a005aa3a2c15285f44", "content_id": "86415d71d7ad228b4a6789d1b4fd92a91a145c77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 741, "license_type": "no_license", "max_line_length": 35, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/03/0323/A와 B.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = list(input())\nT = list(input())\nresult = 0\n# S를 T로 만들지 않고, T를 S로 만든다\n# 이유는 두 가지\n# 1.pop()에서 인덱스 없이 사용하여 속도 향상\n# 2.추가, 뒤집고 추가 -> 제거, 제거 후 뒤집기\n# 후자는 둘 다 일단 제거하고 시작하므로 더 직관적임\nwhile True:\n if S == T:\n result = 1 # S를 T로 만들 수 있다!\n break # 종료\n if T[-1] == 'A':\n T.pop() # A를 빼주고\n if not T: # 마지막 문자를 뺐다면\n break # 종료\n elif T[-1] == 'B':\n T.pop() # B를 빼주고\n if not T: # 마지막 문자를 뺐다면\n break # 종료\n T = T[::-1] # 뒤집고\nprint(result) # 결과 출력" }, { "alpha_fraction": 0.5269230604171753, "alphanum_fraction": 0.5423076748847961, "avg_line_length": 20.75, "blob_id": "341c3fce55c73ffef13d5ee4255424e07a75e5d4", "content_id": "0ada239f67af0a17f60f8d852360f69bdc2ea2e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 39, "num_lines": 12, "path": "/알고리즘/온라인저지/2023/01/0108/고급 여관.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A_atk, A_hp = map(int, input().split())\nB_atk, B_hp = map(int, 
input().split())\nwhile A_hp > 0 and B_hp > 0:\n A_hp -= B_atk\n B_hp -= A_atk\nif A_hp > 0:\n result = 'PLAYER A'\nelif B_hp > 0:\n result = 'PLAYER B'\nelse:\n result = 'DRAW'\nprint(result)" }, { "alpha_fraction": 0.4957983195781708, "alphanum_fraction": 0.5462185144424438, "avg_line_length": 19, "blob_id": "c530d9f3fb9b641fe47aa9bae4e3c4e5c8c024bb", "content_id": "e0c2834302ec44a4eee72ac31b67fbaf5d8d4c98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 50, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/09/0906/アイスクリーム (Ice Cream).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S, A, B = int(input()), int(input()), int(input())\nresult = 250\nwhile A < S:\n A += B\n result += 100\nprint(result)" }, { "alpha_fraction": 0.5072463750839233, "alphanum_fraction": 0.5507246255874634, "avg_line_length": 26.799999237060547, "blob_id": "e2a6170203004231145789d3e95112b4be71927d", "content_id": "1bf9366add35ecc10481f340e203b4ad66f7db7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 49, "num_lines": 5, "path": "/알고리즘/온라인저지/2023/02/0208/Patyki.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C = sorted(list(map(int, input().split())))\nresult = 0\nif A == B == C: result = 2\nelif C**2 == A**2 + B**2: result = 1\nprint(result)" }, { "alpha_fraction": 0.4878048896789551, "alphanum_fraction": 0.5365853905677795, "avg_line_length": 20, "blob_id": "ae4121ca33a00e7cafae6093fdfdf7feb3bdd090", "content_id": "6b9f8f60309f5ea937420a5bb44fcea757514f3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/05/0520/접시 안의 원.py", "repo_name": 
"sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nprint(round((T/2) ** 2))" }, { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.6274510025978088, "avg_line_length": 19.600000381469727, "blob_id": "21f5688b03600b1fdc3aab959b3e34a6a9c77c82", "content_id": "273b9247cf8931846ff651d5559045ea1792d180", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/알고리즘/온라인저지/2021/08/0822/검증수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "data = list(map(int, input().split()))\nresult = 0\nfor dt in data:\n result += dt**2\nprint(result%10)" }, { "alpha_fraction": 0.5511810779571533, "alphanum_fraction": 0.5669291615486145, "avg_line_length": 13.222222328186035, "blob_id": "ccd862d4c533f21d56ba0b2f1b3d649c3c68ae81", "content_id": "4c92aa24095aee2573d391ea2e56ac78c2f018a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/알고리즘/온라인저지/2021/12/1230/트리 순회.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\ntree = dict()\n\nfor n in range(N):\n temp = input().split()\n tree.update({temp[0]:temp[1:]})\n\nprint(tree)" }, { "alpha_fraction": 0.30434781312942505, "alphanum_fraction": 0.49275362491607666, "avg_line_length": 16.5, "blob_id": "975333771309e83239097a6fd6a08bd9ea6a3fe2", "content_id": "5084e59fb323d465c857878932c1d49c820bb523", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 26, "num_lines": 4, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/19차시 2. 
자료구조 – 리스트, 튜플 - 연습문제 21.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "x = (1,2,3,4,5,6,7,8,9,10)\n\nprint(x[:len(x)//2])\nprint(x[len(x)//2:])" }, { "alpha_fraction": 0.5405405163764954, "alphanum_fraction": 0.5675675868988037, "avg_line_length": 14, "blob_id": "f110fcbdbd347580a7c3199890c084393ee1e233", "content_id": "05fb42ba8025166ead673f64d4e8694069039497", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/07/0714/5의 수난.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = input()\nresult = 0\nfor n in N:\n result += int(n) ** 5\nprint(result)" }, { "alpha_fraction": 0.47422680258750916, "alphanum_fraction": 0.49484536051750183, "avg_line_length": 10.470588684082031, "blob_id": "cf6645afaf8fe6f76032956bd127eb5fd94001dc", "content_id": "a768d4a2c00fed585d0b18ad1cbf3d2c9a40e32a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 31, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/03/0302/막대기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nsticks = []\n\nfor n in range(N):\n sticks.append(int(input()))\n\nTH = 0 # Top Height\n\ncnt = 0\n\nfor s in sticks[::-1]:\n if s > TH:\n TH = s\n cnt += 1\n\nprint(cnt)" }, { "alpha_fraction": 0.38085541129112244, "alphanum_fraction": 0.395112007856369, "avg_line_length": 14.870967864990234, "blob_id": "aea18e9578905e352a590d0a8601c8114c92d2ab", "content_id": "f1667807a7c6af88cf7aed4c583d487c4115b44b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 36, "num_lines": 31, "path": "/알고리즘/온라인저지/2021/09/0909/Fly me to the Alpha Centauri.py", "repo_name": 
"sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\ndef how_many_times(x):\n a = 1\n i = 1\n if a == x:\n return i\n b = 1\n a = a+b\n i += 1\n if a == x:\n return i\n while True:\n b += 1\n a = a+b\n i += 1\n if a >= x:\n break\n a = a+b\n i += 1\n if a >= x:\n break\n return i\n\nT = int(input())\nfor t in range(T):\n x, y = map(int, input().split())\n d = y-x\n print(how_many_times(d))" }, { "alpha_fraction": 0.4335154891014099, "alphanum_fraction": 0.47358834743499756, "avg_line_length": 31.352941513061523, "blob_id": "397751710974cc1a035a75880fd41afa22b71776", "content_id": "3d34bf0f4ecb1fc3ad860cab2be87c63be96e74f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 739, "license_type": "no_license", "max_line_length": 52, "num_lines": 17, "path": "/알고리즘/온라인저지/2021/08/0822/색종이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\narr = [[0]*101 for _ in range(101)]\nfor tc in range(T):\n x1, y1 = map(int, input().split())\n for i in range(y1, y1+10): # 색종이 크기는 10\n for j in range(x1, x1+10): \n if arr[i][j]: # 값이 들어있으면\n continue # 컨티뉴\n else: # 아직 저장된 값이 없으면\n arr[i][j] = 1 # 1을 저장\n # 합집합은 or연산\n # True or로 넘어가기 때문에 값이 있으면 저장이 되지 않고\n # 합해지는 부분을 연산할 필요가 없어짐\narea = 0 # 면적\nfor ar in arr: # 저장된 배열을 돌면서\n area += sum(ar) # 각 줄을 더해서 면적에 저장\nprint(area) # 출력" }, { "alpha_fraction": 0.5136986374855042, "alphanum_fraction": 0.5410959124565125, "avg_line_length": 12.363636016845703, "blob_id": "ef1d321b927c51ee0dc7b30995ae4f71b7325630", "content_id": "1c11a7e0bd4e2383ecfe5390cbe6dcbdfad0cc48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/03/0314/조합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, 
input().split())\n\nresult = 1\n\nfor n in range(N, N-M, -1):\n result *= n\n\nfor m in range(1, M+1):\n result //= m\n\nprint(result)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5148147940635681, "avg_line_length": 17.066667556762695, "blob_id": "2f145f5581c9b7a1b73059cab2f2b3412db626f3", "content_id": "59ed8cb888a97a936ce7dc7a1fe0a28ab9a2d904", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 29, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/05/0519/카드.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "card = dict()\nfor n in range(int(input())):\n tmp = int(input())\n try:\n card[tmp] += 1\n except:\n card[tmp] = 1\nnum = sorted(list(card))\nmaxx = 0\nresult = 0\nfor n in num:\n if card[n] > maxx:\n maxx = card[n]\n result = n\nprint(result)" }, { "alpha_fraction": 0.569553792476654, "alphanum_fraction": 0.5774278044700623, "avg_line_length": 18.100000381469727, "blob_id": "248ba5581730d62acece3d2deb71420699e1603f", "content_id": "0e191f45b800d3949d0db6250dacb4f22f90af51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 51, "num_lines": 20, "path": "/알고리즘/온라인저지/2022/11/1127/블로그.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, X = map(int, input().rstrip().split())\nvisitors = list(map(int, input().rstrip().split()))\n\ntmp = sum(visitors[:X])\nmaxx = tmp\ncnt = 1\n\nfor i in range(X, N):\n tmp -= visitors[i-X]\n tmp += visitors[i]\n if maxx < tmp:\n maxx, cnt = tmp, 1\n elif maxx == tmp:\n cnt += 1\n\nprint('SAD') if not maxx else print(maxx, cnt)" }, { "alpha_fraction": 0.4729064106941223, "alphanum_fraction": 0.5049260854721069, "avg_line_length": 17.5, "blob_id": "a5d77773fcfb2a0df9be510881a2b7bd3896b6bd", "content_id": 
"038020313039b0ebaa3c9af6ff069b19a31dab3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 406, "license_type": "no_license", "max_line_length": 49, "num_lines": 22, "path": "/알고리즘/온라인저지/2021/08/0830/바이러스.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ndef dfs(v):\n visited[v] = 1\n global cnt\n cnt += 1\n for w in range(1, V+1):\n if G[v][w] == 1 and not visited[w]:\n dfs(w)\n\nV = int(sys.stdin.readline())\nE = int(sys.stdin.readline())\n\nG = [[0] * (V+1) for _ in range(V+1)]\nfor _ in range(E):\n u, v = map(int, sys.stdin.readline().split())\n G[u][v] = G[v][u] = 1\n\nvisited = [0] * (V+1)\ncnt = -1\ndfs(1)\nprint(cnt)" }, { "alpha_fraction": 0.4962962865829468, "alphanum_fraction": 0.5135802626609802, "avg_line_length": 21.55555534362793, "blob_id": "aa184790deb9b4bca496bbb1a2ac7c1a84f64a46", "content_id": "5d02e94eabab7b19a291b06c03caa754d4fa16ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 39, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/07/0726/카약과 강풍.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, S, R = map(int, input().split())\ncrack = list(map(int, input().split()))\nspare = list(map(int, input().split()))\nK = [True] * N # kayak\nfor c in crack:\n K[c-1] = False\nfor s in spare:\n try:\n if not K[s-1]: K[s-1] = True\n elif not K[s-2]: K[s-2] = True\n elif not K[s]: K[s] = True\n except:\n pass\nresult = 0\nfor k in K:\n if not k:\n result += 1\nprint(result)" }, { "alpha_fraction": 0.5233160853385925, "alphanum_fraction": 0.5492228269577026, "avg_line_length": 23.25, "blob_id": "0405622770cca534e75e4565e5793810a645ecb2", "content_id": "cacdc4d478ed2d82da0bd4ac5e409418f06a6399", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": 
"no_license", "max_line_length": 36, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/04/0407/친구 친구.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nfriends = [0]*(N+1)\nfor m in range(M):\n A, B = map(int, input().split())\n friends[A] += 1\n friends[B] += 1\nfor friend in friends[1:]:\n print(friend)" }, { "alpha_fraction": 0.6222222447395325, "alphanum_fraction": 0.6222222447395325, "avg_line_length": 21.75, "blob_id": "95bc9a368540f8dbfdf69f4d0cd780927787deed", "content_id": "059d3176a16002d48e839d17cbb2254ecd850f2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/03/0308/計算 (Calculation).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nresult = [A+B, A-B]\nprint(max(result))\nprint(min(result))" }, { "alpha_fraction": 0.4357541799545288, "alphanum_fraction": 0.46927374601364136, "avg_line_length": 19, "blob_id": "da118e15c58a35e688249db65f79b7503af501c0", "content_id": "dfe52a50185ea1dd87e3cb19d65da228420e1272", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 36, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/03/0326/경기 결과.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = [0, 0]\nfor n in range(N):\n A, B = map(int, input().split())\n if A > B:\n result[0] += 1\n elif B > A:\n result[1] += 1\nprint(*result)" }, { "alpha_fraction": 0.3196721374988556, "alphanum_fraction": 0.33811476826667786, "avg_line_length": 24.05128288269043, "blob_id": "df2adfc59d6279c57db076d78cbb8c134a8d52b5", "content_id": "7e085a366050e2dbd6ab58e3c61a8a0440f8c2a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 976, "license_type": "no_license", "max_line_length": 73, "num_lines": 39, "path": "/알고리즘/온라인저지/2021/12/1216/유기농 배추.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\nT = int(input())\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\nfor tc in range(T):\n M, N, K = map(int, input().split())\n\n field = [[0] * M for _ in range(N)]\n\n for k in range(K):\n x, y = map(int, input().split())\n field[y][x] = 1\n \n worm = deque()\n cnt = 0\n\n for i in range(N):\n for j in range(M):\n if field[i][j]:\n worm.append((i, j))\n cnt += 1\n while worm:\n tmp = worm.popleft()\n y = tmp[0]\n x = tmp[1]\n if field[y][x]:\n field[y][x] = 0\n else:\n continue\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if 0 <= nx < M and 0 <= ny < N and field[ny][nx]:\n worm.append((ny, nx))\n\n print(cnt)" }, { "alpha_fraction": 0.3085106313228607, "alphanum_fraction": 0.3297872245311737, "avg_line_length": 17.799999237060547, "blob_id": "4ff5d3dba6c60429d2c2323ec5be6b705a2432d2", "content_id": "ab1b6f613269ad0d268a17de920e8ef6520cd742", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 30, "num_lines": 15, "path": "/알고리즘/온라인저지/2021/12/1213/화성 수학.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor tc in range(T):\n a = input().split()\n b = float(a[0])\n\n for i in range(1, len(a)):\n if a[i] == '@':\n b *= 3\n elif a[i] == '%':\n b += 5\n elif a[i] == '#':\n b -= 7\n \n print('{:.2f}'.format(b))\n" }, { "alpha_fraction": 0.41516244411468506, "alphanum_fraction": 0.4332129955291748, "avg_line_length": 22.16666603088379, "blob_id": "251080aecbb4122b51c51c8418e11f1e887b0fb0", "content_id": "fdab24c692535f786919a4c7cc4f53816dfdef9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": 
"no_license", "max_line_length": 37, "num_lines": 12, "path": "/알고리즘/온라인저지/2023/01/0113/중복을 없애자.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n tmp = input()\n if tmp == '0': break\n tmp = list(map(int, tmp.split()))\n N, arr = tmp[0], tmp[1:]\n last = arr[0]\n print(last, end=' ')\n for a in arr[1:]:\n if a != last:\n last = a\n print(last, end=' ')\n print('$')" }, { "alpha_fraction": 0.5337837934494019, "alphanum_fraction": 0.6036036014556885, "avg_line_length": 20.14285659790039, "blob_id": "2b6965ac4e179e9e9303f4034351e1d2b6da808c", "content_id": "3e693c441e72d8b894f1d01f7fa1b4c18a476895", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "no_license", "max_line_length": 45, "num_lines": 21, "path": "/알고리즘/온라인저지/2021/12/1213/주사위 세개.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a = list(map(int, input().split()))\n\ndice_num_count = [0 for _ in range(6)]\n\nfor i in a:\n dice_num_count[i-1] += 1\n\nsame_or_max = 0\nresult = 0\n\nif 3 in dice_num_count:\n same_or_max = dice_num_count.index(3) + 1\n result = 10000 + same_or_max * 1000\nelif 2 in dice_num_count:\n same_or_max = dice_num_count.index(2) + 1\n result = 1000 + same_or_max * 100\nelse:\n same_or_max = max(a)\n result = same_or_max * 100\n\nprint(result)\n" }, { "alpha_fraction": 0.5522388219833374, "alphanum_fraction": 0.5671641826629639, "avg_line_length": 18.285715103149414, "blob_id": "4fbf180120aed0adb4f422f6ca1e9b774cb487dc", "content_id": "bab83934a4ed66fcd62cc3a56a40e16067f43293", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 38, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/03/0320/Identifying tea.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nteas = list(map(int, input().split()))\nresult = 
0\nfor tea in teas:\n if T == tea:\n result += 1\nprint(result)" }, { "alpha_fraction": 0.3494318127632141, "alphanum_fraction": 0.3920454680919647, "avg_line_length": 31.090909957885742, "blob_id": "ccd8ef9cedfae2ac5436e5805ca534d1e973415c", "content_id": "7ca0f0131c32920e94ef35766056dc67218b9726", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 44, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/05/0509/수학적 호기심.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for t in range(int(input())):\n N, M = map(int, input().split())\n result = 0\n for i in range(1, N-1):\n for j in range(i+1, N):\n # print((i**2 + j**2 + M)/(i*j))\n case1 = (i**2 + j**2 + M)/(i*j)\n case2 = (i**2 + j**2 + M)//(i*j)\n if case1 == case2:\n result += 1\n print(result)" }, { "alpha_fraction": 0.6030769348144531, "alphanum_fraction": 0.6123076677322388, "avg_line_length": 22.285715103149414, "blob_id": "759fb04e5626169f42c59e41c20fb3dd7661eb3c", "content_id": "2275b27bf4143e1a2eb6fa797b6e4800f365c77a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 325, "license_type": "no_license", "max_line_length": 50, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/09/0927/숫자 카드 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ninput().rstrip()\ncards = list(map(int, input().rstrip().split()))\ncount = dict()\nfor card in cards:\n try: count[card] += 1\n except: count[card] = 1\ninput().rstrip()\nfor i in list(map(int, input().rstrip().split())):\n try: print(count[i], end=' ')\n except: print(0, end=' ')" }, { "alpha_fraction": 0.4481012523174286, "alphanum_fraction": 0.4911392331123352, "avg_line_length": 22.294116973876953, "blob_id": "91b0453cf990e3e3f2b1fe56ebfbb62484cd6929", "content_id": "e94f931a39991219afe02f4e5837dfb7472d77ea", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 403, "license_type": "no_license", "max_line_length": 45, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/12/1223/추론.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def plus_or_multiply(A, B):\n if A == B: # 등차\n Q = nums[1]-nums[0]\n result = nums[-1]+Q\n else: # 등비\n Q = nums[1]//nums[0]\n result = nums[-1]*Q\n return result\n\nN = int(input())\nnums = [int(input()) for _ in range(N)]\nA, B = 0, 0\nif N == 3:\n A, B = nums[0]+nums[-1], nums[1]*2\nelse:\n A, B = nums[0]+nums[-1], nums[1]+nums[-2]\nprint(plus_or_multiply(A, B))" }, { "alpha_fraction": 0.4821428656578064, "alphanum_fraction": 0.5, "avg_line_length": 27.5, "blob_id": "5c8bd0d07688a36f8ecb58abe8e6a776e95cd13f", "content_id": "ac82ca538c24ebbf6292e8741ff37b0402f35508", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/04/0424/폰 노이만과 파리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S, T, D = map(int, input().split())\nprint((D//S//2) * T)" }, { "alpha_fraction": 0.4094955623149872, "alphanum_fraction": 0.4540059268474579, "avg_line_length": 16.789474487304688, "blob_id": "9c5d06eb0c1943cd39e4e18b4f70b58959cb597a", "content_id": "f2ec5b66bf341f4649c2d791ebbfbcd484fd84ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 337, "license_type": "no_license", "max_line_length": 40, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/03/0303/알고리즘 수업 - 알고리즘의 수행 시간 6.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "n = int(input())\n\n# result = 0\n# dim = 3\n\n# for i in range(1, n-1):\n# for j in range(i+1, n):\n# for k in range(j+1, n+1): \n# result += 1\n# print(i, j, k)\n\nresult = 0\ndim = 3\n\nfor i in range(1, n-1):\n # 
result += sum(list(range(1, i+1)))\n result += i * (n-i-1)\n\nprint(result, f'\\n{dim}', sep='')" }, { "alpha_fraction": 0.4348958432674408, "alphanum_fraction": 0.4479166567325592, "avg_line_length": 13.259259223937988, "blob_id": "8b7b4b18f4032998f59adf6c1e580f452fc3f498", "content_id": "f658042b74e1a8f255b8ed6e533455fbc945b3c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 37, "num_lines": 27, "path": "/알고리즘/온라인저지/2022/02/0207/오르막길.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\narr = list(map(int, input().split()))\n\nstart = arr[0]\n\nup = True\n\nmaxx = 0\n\nfor i in range(1, N):\n if arr[i] <= arr[i-1]:\n up = False\n tmp = arr[i-1] - start\n start = arr[i]\n \n if tmp > maxx:\n maxx = tmp\n\n else:\n up = True\n\nif up:\n if arr[i]-start > maxx:\n maxx = arr[i]-start\n \nprint(maxx)" }, { "alpha_fraction": 0.49643704295158386, "alphanum_fraction": 0.5344418287277222, "avg_line_length": 24.545454025268555, "blob_id": "134846b54d424bd59d3f64a5536af70262f2ebc3", "content_id": "b149304ffa5aef723d5f145c149da59900d4f02a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1178, "license_type": "no_license", "max_line_length": 70, "num_lines": 33, "path": "/알고리즘/온라인저지/2022/02/0225/마인크래프트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys # 주어진 시간이 적은 문제\n# sys.stdin.readline()과 PyPy3을 사용\n\nN, M, B = map(int, sys.stdin.readline().split())\n\narr = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]\n\ntime, height = 9223372036854775807, 0 # 가상의 최대시간, 높이\n\nfor h in range(257): # 브루트포스, 모든 높이에 대한 소요시간과 높이 계산\n # 동일 시간인 경우 더 높은 높이를 출력할 것이므로 \n # 높이는 0에서 256으로 진행\n\n bot = top = 0 # 변수 초기화\n\n # 매 높이 h에 대하여, 모든 배열 순회\n for i in range(N):\n for j in range(M):\n if arr[i][j] < h: # 블럭을 채워야하면\n bot += 
h-arr[i][j] # bot에 추가\n else: # 블럭을 깎아야하면\n top += arr[i][j]-h # top에 추가\n\n if bot > top + B: # 가진블럭 + 깎은블럭으로도 다 채우지 못하면\n continue # 지나가고\n\n t = bot + top*2 # 걸리는 시간은 채우기1초 깎기2초를 총 더한 시간\n\n if t <= time:\n time = t # 최소시간 갱신\n height = h # 최대높이 갱신\n\nprint(time, height) # 출력" }, { "alpha_fraction": 0.5333333611488342, "alphanum_fraction": 0.5696969628334045, "avg_line_length": 26.66666603088379, "blob_id": "e8ee647a1a00164d47558a516edd1dec4e7ea06d", "content_id": "1d775d2d67e335ffa60c0be74027590b1fa9ac68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/06/0606/Fence Painting.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nC, D = map(int, input().split())\nwall = [0]*101\nfor i in range(A, B): wall[i] = 1\nfor i in range(C, D): wall[i] = 1\nprint(sum(wall))" }, { "alpha_fraction": 0.29209622740745544, "alphanum_fraction": 0.3161512017250061, "avg_line_length": 19.85714340209961, "blob_id": "475d7aff513dc9ddfd9772683a4febd0d1ce7427", "content_id": "e3eab673f3146b5ac2746c855c2c24f281e7201f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 27, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/03/0329/집 주소.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n N = list(input())\n if N == ['0']:\n break\n else:\n result = 1\n for n in N:\n if n == '1':\n result += 3\n elif n == '0':\n result += 5\n else:\n result += 4\n print(result)" }, { "alpha_fraction": 0.5138888955116272, "alphanum_fraction": 0.5416666865348816, "avg_line_length": 17.25, "blob_id": "8792bac9a564e78f090d21bac5de4cc6e676d3d6", "content_id": "822eb4358aa1c85c2a915318706ee3b67efb5ef9", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 31, "num_lines": 4, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/42차시 4. 문자열 - 연습문제 7.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "tmp = input()\n\nfor i in range(0, len(tmp), 2):\n print(tmp[i], end='')" }, { "alpha_fraction": 0.4774305522441864, "alphanum_fraction": 0.49884259700775146, "avg_line_length": 25.196969985961914, "blob_id": "3f5af3f94d5139142baf97e9d01655e9d2294b60", "content_id": "c4df36605e424a20b4ca08134b93bef5a9ed3abd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2024, "license_type": "no_license", "max_line_length": 66, "num_lines": 66, "path": "/알고리즘/[템플릿]/BFS/치즈 업그레이드.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\ndef melt_check():\n global time\n melt = True\n for i in range(N):\n for j in range(M):\n if arr[i][j]:\n melt = False\n if not melt: time += 1\n return melt\n\ndef delta_move(y, x):\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<N and 0<=nx<M and not visited[ny][nx]:\n if arr[ny][nx]:\n cheese_Q.append((ny, nx))\n visited[ny][nx] = 2 # 방문 배열에 2는 치즈\n # 치즈를 녹일 때 주위에 1(공기)이 2개 이상 있으면 녹일 것\n else: \n Q.append((ny, nx))\n visited[ny][nx] = 1 # 방문 배열에 1은 공기\n\ndef melt_cheese(y, x):\n tmp = 0\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if visited[ny][nx] == 1: tmp += 1\n if tmp >= 2:\n melt_Q.append((y, x))\n else: visited[y][x] = 0\n # 치즈 배열의 특성상 반드시 두 면 이상 공기와 접촉하는 치즈가 있고\n # 녹은 치즈, 즉 새로운 탐색점은, 매 while문마다 반드시 나온다\n\nN, M = map(int, input().rstrip().split())\narr = [list(map(int, input().rstrip().split())) for _ in range(N)]\nQ, cheese_Q, melt_Q = deque(), deque(), deque()\nvisited = [[0]*M for _ in range(N)]\nQ.append((0, 0))\nvisited[0][0] = 1\ntime = 0\nwhile 
True:\n if melt_check(): print(time); break\n while Q: # 공기를 탐색하면서\n y, x = Q.popleft()\n delta_move(y, x) # 치즈를 찾자\n while cheese_Q: # 치즈들 중에서\n y, x = cheese_Q.popleft()\n melt_cheese(y, x) # 녹일 수 있는 치즈를 찾자\n while melt_Q: # 녹일 수 있는 치즈들을\n y, x = melt_Q.popleft()\n arr[y][x] = 0 # 녹이자\n visited[y][x] = 1\n Q.append((y, x))\n \n\"\"\"\n치즈가 녹은 자리는 공기가 된다\n녹이고 Q에 추가하기\n\"\"\"\n\n# https://www.acmicpc.net/problem/2638" }, { "alpha_fraction": 0.6451612710952759, "alphanum_fraction": 0.6451612710952759, "avg_line_length": 23.22222137451172, "blob_id": "dff9cf1226079b0c6fd26d73b7450656c02d5192", "content_id": "4c134be02501a7be8383fc9dd8b36565998b3b1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 63, "num_lines": 9, "path": "/알고리즘/[템플릿]/진법 변환/진법 변환 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import string\n\nN, B = map(int, input().split())\ntmp = string.digits + string.ascii_uppercase # 자릿수를 담은 문자열\nresult = ''\nwhile N:\n result = tmp[N%B] + result # B진법이므로 B로 나눈 나머지번째 문자를 계속 추가한다\n N //= B\nprint(result)" }, { "alpha_fraction": 0.41310539841651917, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 17.421052932739258, "blob_id": "b4b42aa6602a741971957249b50e815fb0eaaf93", "content_id": "8f8d39dc6115fdcbed96e262bb7447d74cbb1f97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 561, "license_type": "no_license", "max_line_length": 41, "num_lines": 19, "path": "/알고리즘/온라인저지/2021/08/0806/알람 시계.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n구하고자 하는 건 45분 전\n분에 15분을 더하고\n시간에 한시간을 빼고\n60분을 넘기면 60을 빼주고\n24시간을 넘기면 24를 빼주고\n\"\"\"\n\nH, M = map(int, input().split()) # 입력 받아서\nM += 15 # 15분 더하고\nH -= 1 # 1시간 빼고\nif M >= 60: # 60분을 넘기면\n M -= 60 # 60빼주고 0분부터\n H += 1 # 1시간 더하고\nif H >= 24: # 24시간 넘기면 0시부터\n H -= 24\nif H == -1: # 계산을 
마친 시간이 23시면\n H = 23 # 계산값인 -1을 23으로 덮어 씌우기\nprint(H, M) # 출력\n\n" }, { "alpha_fraction": 0.4157303273677826, "alphanum_fraction": 0.483146071434021, "avg_line_length": 29, "blob_id": "b9e666a91d43b43f0a5bd75f65704f7fb84ea42f", "content_id": "9e0cecd9cad8f0eb13dfa12b175911b4f9acff8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 40, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/05/0502/꼬리를 무는 숫자 나열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nC, D = A-1, B-1\nprint(abs(C%4 - D%4) + abs(C//4 - D//4))" }, { "alpha_fraction": 0.5511363744735718, "alphanum_fraction": 0.5994318127632141, "avg_line_length": 28.375, "blob_id": "2627af9b9bc41cb439e32069057b8b8f9c2ccb95", "content_id": "32c9ba503606df4f8788e4e309c64e522640511c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1074, "license_type": "no_license", "max_line_length": 67, "num_lines": 24, "path": "/알고리즘/온라인저지/2022/10/1004/동전 1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, K = map(int, input().rstrip().split())\ncoins = [int(input().rstrip()) for _ in range(N)]\ndp = [0]*(K+1)\ndp[0] = 1 # 0원을 지불하는 방법은, 아무것도 내지 않는 경우 딱 하나\nfor coin in coins: # 각각 동전 종류에 대해\n for i in range(1, K+1): # 1원을 내는 경우부터 출발\n if i-coin>=0: # 음수의 금액을 낸 경우는 존재하지 않으므로, 0원을 낸 경우 이상부터 확인\n dp[i] += dp[i-coin] # 해당 금액의 동전을 내기 전의 경우의 수 만큼을 더해준다\nprint(dp[K])\n\n\"\"\"\n예제는 동전의 종류가 1 2 5 이므로 1원동전 일 때, dp가 1, 1, 1, ... 이 되기 때문에 헷갈릴 수 있으나\n만약 2 5 7과 같은 종류였다면\nfor coin in coins 1회차에 dp는 \n1, 0, 1, 0, 1, 0, 1 ... 
과 같이 저장되었을 것이다\n계산된 값(dp[i])를 실시간으로 반영하면서 line 10의 for문을 돌기 때문에\ndp[i] += dp[i-coin] 만으로도 정답코드가 될 수 있다\n\"\"\"\n\n# https://www.acmicpc.net/problem/2293" }, { "alpha_fraction": 0.5011600852012634, "alphanum_fraction": 0.5127609968185425, "avg_line_length": 29.821428298950195, "blob_id": "cbf3a204b5a3f9bf35bc1a275a5ec9e947d3e22b", "content_id": "8aa1af75b83181076c166cafdc62fb666c1cbb10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1122, "license_type": "no_license", "max_line_length": 57, "num_lines": 28, "path": "/알고리즘/온라인저지/2022/04/0430/프린터 큐.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ndef input():\n return sys.stdin.readline()\n\nfor t in range(int(input())):\n result = 1 # 인쇄 순서 초기화\n N, M = map(int, input().split())\n tmp = list(map(int, input().split())) # 문서 중요도 입력\n docs = deque() # popleft() 사용할 것\n i = 0 # 문서의 순서\n for tm in tmp:\n docs.append((tm, i)) # 현재 문서의 (중요도, 순서) 를 담은 리스트\n i += 1\n # print(docs) # 디버깅\n while docs: # \n tmp = [] # 문서의 중요도들만을 담을 리스트\n for i in range(len(docs)):\n tmp.append(docs[i][0])\n if docs[0][0] != max(tmp): # 가장 중요도가 높은 문서가 아니라면\n docs.append(docs.popleft()) # 해당 문서를 가장 뒤로 이동\n else: # 가장 중요도가 높은 문서라면\n if M == docs[0][1]: # 구하고자 하는 인쇄순서의 문서라면\n break # 종료하고\n docs.popleft() # 인쇄\n result += 1 # 인쇄순서 +1\n print(result) # 현재 인쇄순서 번호를 출력" }, { "alpha_fraction": 0.41818180680274963, "alphanum_fraction": 0.4561983346939087, "avg_line_length": 16.257143020629883, "blob_id": "508d18ffc92b103f069b90ad4d80b2c31603390c", "content_id": "a78d07f40fc64425b5c0d96e38579f3739e8a212", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "no_license", "max_line_length": 56, "num_lines": 35, "path": "/알고리즘/온라인저지/2021/12/1216/미로 탐색.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import 
deque\n\nN, M = map(int, input().split())\nmaze = [list(map(int, input())) for _ in range(N)]\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\nroutes = deque()\n\nroutes.append((0, 0, 0))\n\nwhile routes:\n tmp = routes.popleft()\n y = tmp[0]\n x = tmp[1]\n cnt = tmp[2]\n \n if y == N-1 and x == M-1:\n print(cnt + 1)\n break\n\n if maze[y][x] == 0:\n continue\n\n maze[y][x] = 0\n\n cnt += 1\n\n for i in range(4):\n ny = y + dy[i]\n nx = x + dx[i]\n\n if 0 <= ny < N and 0 <= nx < M and maze[ny][nx]:\n routes.append((ny, nx, cnt))\n\n" }, { "alpha_fraction": 0.45255473256111145, "alphanum_fraction": 0.48175182938575745, "avg_line_length": 14.333333015441895, "blob_id": "f007621dd739effddcf425885143f52566fd86ab", "content_id": "c8e8246694df9a3e8bbf4bd3ffa08b73d536bafb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 21, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/05/0517/지속.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = input()\nresult = 0\nwhile len(N) > 1:\n tmp = 1\n for n in N:\n tmp *= int(n)\n N = str(tmp)\n result += 1\nprint(result)" }, { "alpha_fraction": 0.417391300201416, "alphanum_fraction": 0.426086962223053, "avg_line_length": 18.33333396911621, "blob_id": "76820257f4dfa2cbf3850f3e8c84abaf0884dfd7", "content_id": "ab80de87cf6144c80ea7de0a93e23f42b78e5afd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/08/0827/Shares.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n try:\n N, S = map(int, input().split())\n N += 1\n print(S//N)\n except: exit()" }, { "alpha_fraction": 0.36915886402130127, "alphanum_fraction": 0.4579439163208008, "avg_line_length": 27.566667556762695, "blob_id": "cba2c47b9b09ca8fb1dc5e5d78700695801ee29c", 
"content_id": "01cd0a77d818568e1f0410012619e3f1abe738ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1242, "license_type": "no_license", "max_line_length": 59, "num_lines": 30, "path": "/알고리즘/온라인저지/2021/08/0813/ATM.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input()) # 사람 수\nusers = list(map(int, input().split())) # 각자 ATM 이용에 걸리는 시간\n\"\"\"\n1 2 3 4 5 번째 사람이\n3 1 4 3 2 분이 걸린다 하면\n\n1 2 3 4 5 순으로 가면\n1번은 3분 걸리고\n2번은 3분걸린 1번에 얹어서 1분걸려서 총 4분\n3번은 앞에서 걸린 4분 + 본인 4분 = 8분\n4번은 앞사람 8분 + 본인 3분\n결국 앞에서 걸린 시간들이 길수록 뒤에도 늦어진다는거\n용무가 빨리 끝나는 사람 먼저 하는게 핵심\n걸리는 시간이 짧은순으로 정렬하고 볼일 보게 하는 것\n1 = 1\n1+2 = 3\n1+2+3 = 6\n1+2+3+3 = 9\n1+2+3+3+4 = 13\n1+3+6+9+13 = 32\n1 (1+2) (1+2+3) (1+2+3+3) (1+2+3+3+4)\n\"\"\"\nusers.sort() # 걸리는 시간들을 정렬\nresult = 0 # 결과값을 초기화\nfor n in range(N): # 사람수만큼 반복\n wait_time = 0 # 본인의 대기시간\n for j in range(n+1): # 앞사람들+본인 시간만큼 반복\n wait_time += users[j] # 대기+사용시간 구해서\n result += wait_time # 다 집어넣고\nprint(result) # 출력" }, { "alpha_fraction": 0.42975205183029175, "alphanum_fraction": 0.4793388545513153, "avg_line_length": 23.399999618530273, "blob_id": "065c00350c9b4cb1db77bd209e2f9beb09307995", "content_id": "9c465cc04a6ff091fdc5bfd38bbb5001a7dd8c9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 121, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/12/1208/A나누기B - 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = map(int, input().split())\nprint(f'{A//B}.', end='')\nfor _ in range(1001):\n A = (A%B)*10\n print(A//B, end='')" }, { "alpha_fraction": 0.5166666507720947, "alphanum_fraction": 0.5366666913032532, "avg_line_length": 29.100000381469727, "blob_id": "c8430d9db43435a971d86ffb2b18d07773e6c57e", "content_id": "e39b10aeae30802284a37019b6b26d233003c667", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 300, "license_type": "no_license", "max_line_length": 49, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/07/0731/기타줄.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nS_min = P_min = 1e9 # set min, piece min\nfor m in range(M):\n S, P = map(int, input().split()) # set, piece\n if S < S_min: S_min = S\n if P < P_min: P_min = P\n if P*6 < S_min: S_min = P*6\nresult = N//6 * S_min\nresult += min(N%6 * P_min, S_min)\nprint(result)" }, { "alpha_fraction": 0.4893617033958435, "alphanum_fraction": 0.5319148898124695, "avg_line_length": 22.66666603088379, "blob_id": "3b50934dea7449c34c89b0d094d260ab1a462aef", "content_id": "541df3e0a30eed520471d3529c261ed2df129a56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 141, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/04/0411/ZOAC 4.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "H, W, N, M = map(int, input().split())\nresult = 0\nfor i in range(0, H, 1+N):\n for j in range(0, W, 1+M):\n result += 1\nprint(result)" }, { "alpha_fraction": 0.5049999952316284, "alphanum_fraction": 0.5400000214576721, "avg_line_length": 25.733333587646484, "blob_id": "c79d50d3e6ac5c7eaa7352f748699c1069cc093a", "content_id": "39c6e3e10549a9dc8e1fad269e4aea21109a0a94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "no_license", "max_line_length": 46, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/07/0731/수 찾기(재귀함수).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def binary(t, s, e): # target, start, end\n if s > e: return 0\n i = (s+e) // 2 # index\n if t == arr1[i]: return 1\n elif t < arr1[i]: return binary(t, s, i-1)\n elif t > arr1[i]: return binary(t, i+1, e)\n\nN = int(input())\narr1 = list(map(int, 
input().split()))\narr1.sort()\nM = int(input())\narr2 = list(map(int, input().split()))\nfor a in arr2:\n s, e = 0, N-1\n print(binary(a, s, e))" }, { "alpha_fraction": 0.40532544255256653, "alphanum_fraction": 0.4289940893650055, "avg_line_length": 20.1875, "blob_id": "0e17b301b3d8c3a2d0cdeb9b878ccfa20d33216f", "content_id": "2aead024c4e8a4eab8f908f2cd3fff1180f09680", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/07/0704/반복.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = input()\nalphabet = [chr(i+97) for i in range(26)]\n# print(alphabet)\nresult = 0\nword = ''\ns = 0 # S index\nwhile True:\n result += 1\n for a in alphabet:\n if a == S[s]:\n word += a\n if word == S:\n print(result)\n exit()\n s += 1\n # print(result, word, s)" }, { "alpha_fraction": 0.3913043439388275, "alphanum_fraction": 0.52173912525177, "avg_line_length": 22.33333396911621, "blob_id": "e4e29cb33bbfdd75c644b62ff6fc30f8b551ee6b", "content_id": "431dfd8828cc7c33898ef962f514d28a5a79e3f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 33, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/04/0418/수학은 체육과목 입니다 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfinger = [2, 1, 2, 3, 4, 5, 4, 3]\nprint(finger[N%8])" }, { "alpha_fraction": 0.5177664756774902, "alphanum_fraction": 0.6142131686210632, "avg_line_length": 16.954545974731445, "blob_id": "fcd61c19e56c5cfef43e1661a35d062b4416afb9", "content_id": "7cb84463704f7d999efb4317c887bda85caae2ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "no_license", "max_line_length": 45, "num_lines": 22, "path": "/알고리즘/온라인저지/2022/10/1003/피보나치 
수의 확장.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\nmod = int(1e9)\n\nN = int(input().rstrip())\na, b = 1, 1\nfor _ in range(abs(N)-2): a, b = b, (a+b)%mod\nif abs(N)%2 == 0 and N < 0: print(-1)\nelif N == 0: print(0)\nelse: print(1)\nprint(b if N != 0 else 0)\n\n\"\"\"\n문제를 잘 보자\n1,000,000,007로 나누는 문제가 많았다고 해서\n모든 문제들이 그런 것은 아니다\n1,000,000,000으로 나누면 되는걸로 \n문제 제대로 안읽고 몇분을 날린거냐..\n\"\"\"\n\n# https://www.acmicpc.net/problem/1788" }, { "alpha_fraction": 0.37172773480415344, "alphanum_fraction": 0.39921465516090393, "avg_line_length": 17.658536911010742, "blob_id": "e340b6f4c49aeea2871780825f7af9074cef35c7", "content_id": "fed5d49d327fe124e15ad8be28c4c1643b408be6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 764, "license_type": "no_license", "max_line_length": 31, "num_lines": 41, "path": "/알고리즘/온라인저지/2021/09/0908/큐 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\n\n\nN = int(input())\nq = [0] * N\npnt = 0\ncnt = 0\nback = 0\nfor n in range(N):\n order = input().split()\n if order[0] == 'push':\n q[back] = order[1]\n cnt += 1\n back += 1\n elif order[0] == 'pop':\n if cnt:\n print(q[pnt])\n pnt += 1\n cnt -= 1\n else:\n print(-1)\n elif order[0] == 'size':\n print(cnt)\n elif order[0] == 'empty':\n if cnt:\n print(0)\n else:\n print(1)\n elif order[0] == 'front':\n if cnt:\n print(q[pnt])\n else:\n print(-1)\n elif order[0] == 'back':\n if cnt:\n print(q[back-1])\n else:\n print(-1)" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.4444444477558136, "avg_line_length": 18, "blob_id": "cafc1e157af92c1dfa09543d90da7dd236ab9fe4", "content_id": "cd06dd07209eb7039f52d94eb25d08722ea5524d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", 
"max_line_length": 37, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/02/0207/이진수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n N = int(input())\n\n binary = (str(bin(N))[::-1])[:-2]\n\n for i in range(len(binary)):\n if binary[i] == '1':\n print(i, end=' ')" }, { "alpha_fraction": 0.5568862557411194, "alphanum_fraction": 0.5868263244628906, "avg_line_length": 32.599998474121094, "blob_id": "ea688bd50e5b123fa479f656e198dbb9129ef58f", "content_id": "c49fc1d3f2971dc6dad6541ce95840b93d96734b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 43, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/09/0905/빵.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = int(1e9)\nfor _ in range(int(input())):\n A, B = map(int, input().split())\n if A <= B: result = min(result, B)\nprint(-1 if result == int(1e9) else result)" }, { "alpha_fraction": 0.46084338426589966, "alphanum_fraction": 0.5391566157341003, "avg_line_length": 25.600000381469727, "blob_id": "ce1ce4897f4ab2d1f79e8a958dda10859bdbc402", "content_id": "f62084d06d58623937723983c29c98cbc9a6d6ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 664, "license_type": "no_license", "max_line_length": 52, "num_lines": 25, "path": "/알고리즘/온라인저지/2022/09/0922/주사위 네개.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nresult = 0\nfor _ in range(int(input().rstrip())):\n dices = list(map(int, input().rstrip().split()))\n count_arr = [0]*11\n for dice in dices: count_arr[dice] += 1\n tmp = 0\n if 4 in count_arr:\n tmp += 50000 + count_arr.index(4)*5000\n elif 3 in count_arr:\n tmp += 10000 + count_arr.index(3)*1000\n elif count_arr.count(2) == 2:\n tmp += 2000\n for i in range(11):\n if count_arr[i] == 2: \n tmp 
+= i*500\n elif 2 in count_arr:\n tmp += 1000 + count_arr.index(2)*100\n else: \n tmp += max(dices)*100\n result = max(result, tmp)\nprint(result)" }, { "alpha_fraction": 0.4392419159412384, "alphanum_fraction": 0.47380155324935913, "avg_line_length": 27.90322494506836, "blob_id": "911187260355f4ee89b7ee384980b118d7fc96eb", "content_id": "46add3df2b8269ea76ee904ac864095dec6072ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1257, "license_type": "no_license", "max_line_length": 70, "num_lines": 31, "path": "/알고리즘/온라인저지/2021/08/0816/소수&팰린드롬.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\nN보다 큰 수를 하나씩 올려가면서\n소수들로 나눠서 나머지가 있으면서 소수본인과 같아지면 소수\n2~N까지의 소수들을 먼저 리스트에 추가\nN+1이 소수인지 보고 소수이면 리스트에 추가\n소수일 때 팰린드롬인지 보고\n팰린드롬이면 출력\n\"\"\"\nimport sys\nN = int(sys.stdin.readline())\n\n # 1003001까지 에라토스테네스의 체로 소수 전부 골라내기\nn = 1003003 # 조금 편법이긴 한데\n # 최대값이 1003001이라는 걸 계산해서 먼저 알아냄\na = [False,False] + [True]*(n-1)\nprimes=[]\nfor i in range(2,n+1):\n if a[i]:\n primes.append(i)\n for j in range(2*i, n+1, i):\n a[j] = False\n\nprime_palindrome = 0 # 소수팰린드롬 초기값\nwhile True:\n if str(N) == str(N)[::-1]: # 팰린드롬이고\n if N in primes: # 소수이면\n prime_palindrome = N # 소수팰린드롬\n if prime_palindrome: # 값을 찾았다면\n print(prime_palindrome) # 출력하고\n break # while문 종료\n N += 1 # 다음 숫자 탐색\n\n" }, { "alpha_fraction": 0.6336206793785095, "alphanum_fraction": 0.6336206793785095, "avg_line_length": 22.299999237060547, "blob_id": "edd95ff0cbaec0c87f19f1db698602338d2d9085", "content_id": "e58b68684eb8a8014addfd54d13b2a4f2291b5ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 47, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/10/1001/비밀번호 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().rstrip().split())\ndct 
= dict()\nfor n in range(N):\n site, password = input().rstrip().split()\n dct[site] = password\nfor m in range(M): print(dct[input().rstrip()])" }, { "alpha_fraction": 0.4627249240875244, "alphanum_fraction": 0.519280195236206, "avg_line_length": 15.956521987915039, "blob_id": "ead0b7748c82b654bfd944226466a9213ff92c62", "content_id": "d9d50c1624f805552b25f2227184c45e102a9327", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 389, "license_type": "no_license", "max_line_length": 41, "num_lines": 23, "path": "/알고리즘/온라인저지/2023/02/0219/8진수, 10진수, 16진수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def base_change(num, base):\n base_str = '0123456789abcdef'\n answer = 0\n power = 1\n\n for n in num[::-1]:\n answer += power*base_str.index(n)\n power *= base\n\n return answer\n\nX = input()\nresult = 0\n\nif X[0] == '0':\n if X[1] == 'x':\n result = base_change(X[2:], 16)\n else:\n result = base_change(X[1:], 8)\nelse:\n result = int(X)\n\nprint(result)" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.5413534045219421, "avg_line_length": 12.399999618530273, "blob_id": "ef58bb5673ede891532858bcfb1d6a7c7ad68168", "content_id": "1fc003976a6a36c30f10d9988ef4ab27d8e8b564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/02/0201/10부제.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "D = int(input())\ncars = list(map(int, input().split()))\n\nresult = 0\n\nfor c in cars:\n if c == D:\n result += 1\n\nprint(result)" }, { "alpha_fraction": 0.34730538725852966, "alphanum_fraction": 0.371257483959198, "avg_line_length": 14.272727012634277, "blob_id": "fa1f12abc76025fb3a0ed5bf32cedfa84201022e", "content_id": "74bf51922582febe29cb9bcbe4d7cf6f3c45bb02", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 39, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/02/0201/별 찍기 - 12.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\ni = 0\n\nfor n in range(N):\n i += 1\n print(' ' * (N-i), '*' * i, sep='')\n\nfor n in range(N-1):\n i -= 1\n print(' ' * (N-i), '*' * i, sep='')" }, { "alpha_fraction": 0.3888888955116272, "alphanum_fraction": 0.41777777671813965, "avg_line_length": 12.636363983154297, "blob_id": "97790eb978067567390ed0b644fce974bda74e98", "content_id": "b10183d82a7868d41708f850016ec836892b272f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 49, "num_lines": 33, "path": "/알고리즘/온라인저지/2022/02/0221/DNA.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\n\nDNAs = [input() for _ in range(N)]\n\ndictt = {\n 'A': 0,\n 'C': 1,\n 'G': 2,\n 'T': 3,\n 0: 'A',\n 1: 'C',\n 2: 'G',\n 3: 'T',\n}\n\nHD = 0\n\nfor i in range(M):\n\n table = [0] * 4\n\n for d in DNAs:\n table[dictt[d[i]]] += 1\n \n for j in range(4):\n if j != table.index(max(table)):\n HD += table[j]\n\n print(dictt[table.index(max(table))], end='')\n\nprint()\n\nprint(HD)\n" }, { "alpha_fraction": 0.4709418714046478, "alphanum_fraction": 0.4809619188308716, "avg_line_length": 30.25, "blob_id": "78f02a057dbe3aa4527c60b65603a00c70e262ea", "content_id": "b748efd8c15369276385a8033a42915cb3265835", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 703, "license_type": "no_license", "max_line_length": 48, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/04/0417/그룹 단어 체커.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = 0 # 그룹단어개수 초기화\nfor n in range(N):\n word = input()\n char = [] # 사용한 글자\n for i in 
range(len(word)): # 단어를 순회하면서\n if word[i] not in char: # 사용한 적 없는 글자라면\n char.append(word[i]) # 사용한 글자에 추가\n else: # 사용한 적 있는 글자라면\n if word[i] != word[i-1]: # 앞글자와 다르다면\n result -= 1 # 그룹단어가 아닙니다\n # 매 케이스 그룹단어라고 가정하고\n # 그룹단어가 아닐 경우 -1 하였음\n break\n result += 1 # 그룹단어입니다!\nprint(result) # 출력" }, { "alpha_fraction": 0.5698529481887817, "alphanum_fraction": 0.5698529481887817, "avg_line_length": 23.81818199157715, "blob_id": "876d6bf33832bd19f360b42f30f25c95e290c899", "content_id": "b189945214f5559c580b9a37361816a49aa12c80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 89, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/12/1211/한국이 그리울 땐 서버에 접속하지.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nstart, end = input().split('*')\nfor n in range(N):\n file = input()\n if len(start) + len(end) > len(file): print('NE')\n else: print('DA' if start == file[:len(start)] and end == file[-len(end):] else 'NE')\n\n\"\"\"\nabc*def\n와 같은 경우가 테스트케이스에 포함되어 있음\n\"\"\"" }, { "alpha_fraction": 0.5026881694793701, "alphanum_fraction": 0.5188171863555908, "avg_line_length": 15, "blob_id": "a58fe6e007d81533895bf19a6e6781413c0fcce6", "content_id": "f0ffff53cae92d33fede0762006c960a38867b88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 478, "license_type": "no_license", "max_line_length": 38, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/01/0122/꿀 아르바이트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\n\n# 급여 정보 입력\npays = list(map(int, input().split()))\n\ns = 0 # 창문의 시작과\ne = M-1 # 끝\n\n# 결과값과 윈도우값 초기화\nresult = window = sum(pays[s:e+1])\n\nwhile e < N-1: # 윈도우 이동을 종료하는 조건\n # 윈도우 한 칸 이동~~\n window -= pays[s]\n s += 1\n e += 1\n window += pays[e]\n\n # 결과값을 갱신할 지 여부\n if window > result:\n result = 
window\n\nprint(result)\n " }, { "alpha_fraction": 0.4035087823867798, "alphanum_fraction": 0.46929824352264404, "avg_line_length": 14.266666412353516, "blob_id": "731e667936da3095cd52640ed7179775635e5944", "content_id": "7d06fc54a223537fc4fad4b9b2f1f203f5cd14bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 31, "num_lines": 15, "path": "/알고리즘/온라인저지/2021/09/0906/막대기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\nX = int(input())\nbars = [64, 32, 16, 8, 4, 2, 1]\ni = 0\ncnt = 0\nwhile X != 0:\n if X >= bars[i]:\n X -= bars[i]\n cnt += 1\n else:\n i += 1\nprint(cnt)" }, { "alpha_fraction": 0.4975247383117676, "alphanum_fraction": 0.5272276997566223, "avg_line_length": 14.576923370361328, "blob_id": "dfc24364b20040990949308aa5852bae807e5b3f", "content_id": "6c52305f42e3332124184c50bb3f70abbe46c86e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 36, "num_lines": 26, "path": "/알고리즘/온라인저지/2021/12/1230/트리의 부모 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nsys.setrecursionlimit(99999)\n\ndef dfs(i):\n for n in nodes[i]:\n if not parent[n]:\n parent[n] = i\n dfs(n)\n\nN = int(input())\n\nnodes = [[] for _ in range(N+1)]\n\nparent = [0 for _ in range(N+1)]\nparent[1] = 'root'\n\nfor n in range(N-1):\n a, b = map(int, input().split())\n \n nodes[a].append(b)\n nodes[b].append(a)\n\ndfs(1)\n\nfor p in parent[2:]:\n print(p)" }, { "alpha_fraction": 0.5570470094680786, "alphanum_fraction": 0.5682326555252075, "avg_line_length": 23.88888931274414, "blob_id": "a276d39976f25f1f7cb350cfb30af811b9325068", "content_id": "f7d805244a527024269a754658618f2f8ea33651", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 447, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/10/1019/풍선 터뜨리기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\nN = int(input().rstrip())\narr = list(map(int, input().rstrip().split()))\nQ = deque()\nfor i in range(1, N+1): Q.append((i, arr[i-1]))\nwhile True:\n idx, move = Q.popleft()\n print(idx, end=' ')\n if not Q: break\n if move > 0:\n for i in range(move-1):\n Q.append(Q.popleft())\n else:\n for i in range(abs(move)):\n Q.appendleft(Q.pop())" }, { "alpha_fraction": 0.31734317541122437, "alphanum_fraction": 0.361623615026474, "avg_line_length": 37.85714340209961, "blob_id": "f5495119abf703f7eeee40c46ac4ad3355f0cd7f", "content_id": "752ef90d9e2a0c5b37c24f7ab61da0c6f0e552f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 78, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/12/1217/완전 세제곱.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for a in range(1, 101):\n for b in range(2, a+1):\n for c in range(b, a+1):\n for d in range(c, a+1):\n tmp = b**3 + c**3 + d**3\n if a**3 == tmp:\n print('Cube = {}, Triple = ({},{},{})'.format(a, b, c, d))" }, { "alpha_fraction": 0.443113774061203, "alphanum_fraction": 0.455089807510376, "avg_line_length": 17.66666603088379, "blob_id": "8df5848801d4997d502a09fcfbe63e978e101da0", "content_id": "e5133575edc97696fb8e657da5e632335f7e3f2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 29, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/07/0711/모음의 개수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n sen = input()\n if sen == '#':\n break\n result = 0\n for s in sen:\n 
if s in 'aeiouAEIOU':\n result += 1\n print(result)" }, { "alpha_fraction": 0.5526315569877625, "alphanum_fraction": 0.5614035129547119, "avg_line_length": 37.33333206176758, "blob_id": "cf0097bc7b5b4c37e402f2f46887d6aaa5d959d7", "content_id": "a20b7ae9ad16b39980cec2fcbcb4594378be7714", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 48, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/10/1012/팰린드롬.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for i in range(int(input().rstrip())):\n word = input().lower()\n print('Yes' if word == word[::-1] else 'No')" }, { "alpha_fraction": 0.38063910603523254, "alphanum_fraction": 0.4530075192451477, "avg_line_length": 18.72222137451172, "blob_id": "0e25e4efe8363e8e2295cce1c75e8b2dfc442a42", "content_id": "e32a895d2dd5d59808111c94f030a3faf7b8c1a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1064, "license_type": "no_license", "max_line_length": 48, "num_lines": 54, "path": "/알고리즘/온라인저지/2022/03/0314/구간 합 구하기 5.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\nN, M = map(int, input().split())\n\narr = []\n\ntmp = list(map(int, input().split()))\nnum = 0\nline = []\nfor t in tmp:\n num += t\n line.append(num)\narr.append(line)\n\nfor n in range(1, N):\n tmp = list(map(int, input().split()))\n num = 0\n line = []\n for t in tmp:\n num += t\n line.append(num)\n for i in range(len(line)):\n line[i] += arr[-1][i]\n arr.append(line)\n \n\nfor m in range(M):\n y1, x1, y2, x2 = (map(int, input().split()))\n e = v = h = s = 0\n if x1 > 1 and y1 > 1 and x2 > 1 and y2 > 1:\n e = arr[y2-1][x2-1]\n v = arr[y2-1][x1-2]\n h = arr[y1-2][x2-1]\n s = arr[y1-2][x1-2]\n elif x1 == 1 and y1 > 1:\n e = arr[y2-1][x2-1]\n h = arr[y1-2][x2-1]\n elif y1 == 1 and x1 > 1:\n e = 
arr[y2-1][x2-1]\n v = arr[y2-1][x1-2]\n elif x1 == 1 and y1 == 1:\n e = arr[y2-1][x2-1]\n\n result = e - v - h + s\n print(result)\n\n# for a in arr:\n# print(*a)\n\n\"\"\"\n2 2 3 4 = [3][4]-[3][1]-[1][4]+[1][1]\n\"\"\"" }, { "alpha_fraction": 0.5176848769187927, "alphanum_fraction": 0.5562701225280762, "avg_line_length": 25, "blob_id": "e65f38fbd9690cfda2b93a5c0291c17f365b97e4", "content_id": "6787ada15675778039e888a163929f50f3488770", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 44, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/09/0921/Strfry.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in range(int(input().rstrip())):\n A, B = input().rstrip().split()\n A_arr, B_arr = [0]*26, [0]*26\n for a in A: A_arr[ord(a)-97] += 1\n for b in B: B_arr[ord(b)-97] += 1\n result = 'Possible'\n if A_arr != B_arr: result = 'Impossible'\n print(result)" }, { "alpha_fraction": 0.53665691614151, "alphanum_fraction": 0.5601173043251038, "avg_line_length": 37, "blob_id": "77d11479a7e502a243fdf9787b7cbb5e533f8ad6", "content_id": "cc266ae202472bc8d2cbe517a808d6ff8752833b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 55, "num_lines": 9, "path": "/알고리즘/온라인저지/2021/08/0803/알파벳 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "S = input() # 단어를 입력받아서\nresult = [-1] * 26 # 결과값을 -1 26개로 초기화해주고 \nfor s in S: # 알파벳을 순회하면서 \n for i in range(len(result)): # 그리고 result를 순회하면서\n if s == chr(97+i): # 단어의 해당 알파벳을 확인해서\n result[i] = S.index(s) # 알파벳의 index값의 등장위치를\n # result에 추가하고\nfor res in result: # result를 순회하면서\n print(res, end=' ') # 출력" }, { "alpha_fraction": 0.5102040767669678, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 11.25, "blob_id": 
"ee5bed4c5eb165e57971ec392f5a481eb3a411d6", "content_id": "04750b58f2dbd4988bedfd1475b1146ad8047f90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 40, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/02/0220/피시방 알바.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nguests = list(map(int, input().split()))\n\npcs = [False] * 101\n\nreject = 0\n\nfor g in guests:\n if pcs[g]:\n reject += 1\n continue\n\n pcs[g] = True\n\nprint(reject)\n" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5737977027893066, "avg_line_length": 21.370370864868164, "blob_id": "3aa0b157feba509d13593ec442a3f0a1120708b7", "content_id": "14dcec6eb299a8565b7ffedbb04050c1a52aec5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 47, "num_lines": 27, "path": "/알고리즘/온라인저지/2022/09/0917/용돈 관리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef enough(money):\n money_in_hand, draw = money, 1\n for a in arr:\n if money_in_hand>=a: money_in_hand -= a\n else:\n draw += 1\n money_in_hand = money-a\n if draw<=M: return True\n return False\n\nN, M = map(int, input().rstrip().split())\narr = [int(input()) for _ in range(N)]\nresult = int(1e9)\nstart, end = max(arr), result\nwhile start<=end:\n mid = (start+end) // 2\n if enough(mid):\n result = min(result, mid)\n end = mid-1\n else: start = mid+1\nprint(result)\n\n# https://www.acmicpc.net/problem/6236" }, { "alpha_fraction": 0.6372548937797546, "alphanum_fraction": 0.656862735748291, "avg_line_length": 16.16666603088379, "blob_id": "ad9d2e348d26e99c2964776d3a330317e07fa066", "content_id": "60fcdfff9c999eeac678da7683d124d08494e230", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 42, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/02/0211/오늘의 날짜는.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from datetime import datetime\n\ndate = str(datetime.now())[:10].split('-')\n\nfor d in date:\n print(d)" }, { "alpha_fraction": 0.3861386179924011, "alphanum_fraction": 0.4297029674053192, "avg_line_length": 17.740739822387695, "blob_id": "237cd94ecbfcdb7acba5e8c22074618f024f8c98", "content_id": "8b9b6782af1a759626ac40a8d4784bc4dab41ec3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 28, "num_lines": 27, "path": "/알고리즘/온라인저지/2022/04/0404/아이 러브 크로아티아.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K = int(input()) - 1\nN = int(input())\nquizs = []\nfor n in range(N):\n tmp = input().split()\n tmp[0] = int(tmp[0])\n quizs.append(tmp)\n# print(quizs)\ntime = 60*3 + 30\n# print(time)\ni = 0\nwhile True:\n now = quizs[i]\n # print(now, time, K, i)\n if time < now[0]:\n break\n if now[1] == 'T':\n time -= now[0]\n K = (K+1)%8\n i += 1\n elif now[1] == 'N':\n time -= now[0]\n i += 1 \n elif now[1] == 'P':\n time -= now[0]\n i += 1\nprint(K+1)" }, { "alpha_fraction": 0.5545454621315002, "alphanum_fraction": 0.5545454621315002, "avg_line_length": 30.571428298950195, "blob_id": "211d86e78f4e1f1bc92a5868b29adb3dcb583806", "content_id": "51f6c1aa78f30a672a5be3798c8b5c4ef5609e18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "no_license", "max_line_length": 42, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/03/0327/괴짜 교수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n D, N, S, P = map(int, input().split())\n A, B = D+N*P, N*S\n result = 'does not matter'\n if A<B: result = 'parallelize'\n if 
A>B: result = 'do not parallelize'\n print(result)" }, { "alpha_fraction": 0.5151515007019043, "alphanum_fraction": 0.5151515007019043, "avg_line_length": 15.75, "blob_id": "8a8da07e571193661d5dcea8ed38ba04400cbdf8", "content_id": "02035e789f62d80421f18ce75f29b2edb1af61aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/알고리즘/온라인저지/2022/02/0209/히스토그램.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nfor n in range(N):\n print('=' * int(input()))" }, { "alpha_fraction": 0.36320754885673523, "alphanum_fraction": 0.4292452931404114, "avg_line_length": 13.199999809265137, "blob_id": "ef278bff20b1a99e35c9abbaa5765fb6fe82c181", "content_id": "192675d650ffbeae1bb90c1f6229e64b7f5d76f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 29, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/02/0201/피보나치 함수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\ndp = [0] * 42\ndp[1] = 1\n\nfor i in range(2, 42):\n dp[i] = dp[i-1] + dp[i-2]\n\nfor t in range(T):\n N = int(input())\n\n if N == 0:\n print(1, 0)\n else:\n print(dp[N-1], dp[N])" }, { "alpha_fraction": 0.5956678986549377, "alphanum_fraction": 0.6028881072998047, "avg_line_length": 20.384614944458008, "blob_id": "c6d4e0b4a532ade93e734ebee0e2a197a5e5135c", "content_id": "a72d35502117d19686fc67c5b94014ccbd763244", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "no_license", "max_line_length": 47, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/11/1111/배수들의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().rstrip().split())\nnums 
= list(map(int, input().rstrip().split()))\nsett = set()\nresult = 0\nfor num in nums:\n for i in range(num, N+1, num):\n if i not in sett: result += i\n sett.add(i)\nprint(result)" }, { "alpha_fraction": 0.313296914100647, "alphanum_fraction": 0.32058286666870117, "avg_line_length": 21.91666603088379, "blob_id": "6e1be4c8813a001daadec17f8355817ee49423f6", "content_id": "05a617a39923b2a4d6310a7b273db2a71a63217d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 549, "license_type": "no_license", "max_line_length": 37, "num_lines": 24, "path": "/알고리즘/온라인저지/2021/08/0822/괄호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor tc in range(T):\n data = input()\n stack = []\n top = -1\n for dt in data:\n if dt == '(':\n stack.append(dt)\n top += 1\n else: # dt == ')'\n if top == -1:\n stack.append(dt)\n break\n else:\n if stack[top] == '(':\n stack.pop()\n top -= 1\n else:\n stack.append(dt)\n break\n if stack:\n print('NO')\n else:\n print('YES')" }, { "alpha_fraction": 0.5096153616905212, "alphanum_fraction": 0.5480769276618958, "avg_line_length": 16.38888931274414, "blob_id": "d42a9915354997ce3c8d9c648b3ddb8ca79d2b7b", "content_id": "8f59ecb6a68b6a8c02160211d2f3420ed86a7576", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 354, "license_type": "no_license", "max_line_length": 42, "num_lines": 18, "path": "/알고리즘/온라인저지/2023/03/0326/금민수의 개수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\nA,B = map(int,input().split())\nQ = deque()\n\nQ.append(4); Q.append(7)\nresult = 0\n\nwhile Q:\n GMS = Q.popleft()\n\n if GMS<=B:\n if A<=GMS: \n result += 1 # A이상 B이하 만족하는 금민수\n Q.append(GMS*10+4) # 다음 금민수 1\n Q.append(GMS*10+7) # 다음 금민수 2\n\nprint(result)" }, { "alpha_fraction": 0.39759036898612976, "alphanum_fraction": 0.4337349534034729, "avg_line_length": 14.625, 
"blob_id": "b1ac4fb8c1c3e635a1615fcd77a3d6d72d7466e7", "content_id": "230aabb1b98e1773972cbf8d079bca7dd31cc490", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "no_license", "max_line_length": 31, "num_lines": 16, "path": "/알고리즘/온라인저지/2021/09/0906/거스름돈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\ndef input():\n return sys.stdin.readline()\n\nN = int(input())\ncnt = 0\nif N == 1 or N == 3:\n print(-1)\nelse:\n while N != 0:\n if not N%5:\n cnt += N//5\n break\n N -= 2\n cnt += 1\n print(cnt)" }, { "alpha_fraction": 0.4655172526836395, "alphanum_fraction": 0.49425286054611206, "avg_line_length": 16.5, "blob_id": "ef44730a6e825965bf6a70a994445d638a33d973", "content_id": "c7c22e7e17a745e975864adf2b6f7e480abcf027", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/04/0401/우유 축제.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nmilk = list(map(int, input().split()))\nnow = 0\nresult = 0\nfor m in milk:\n if now == m:\n result += 1\n now += 1\n now %= 3\nprint(result)" }, { "alpha_fraction": 0.4915824830532074, "alphanum_fraction": 0.5101010203361511, "avg_line_length": 19.517240524291992, "blob_id": "49b59ca4b16429f84722a1f535455d94df3095ef", "content_id": "00389b8c30972f0811c7a7aa92344f451d6db60a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 594, "license_type": "no_license", "max_line_length": 39, "num_lines": 29, "path": "/알고리즘/온라인저지/2023/01/0122/2진수 뒤집기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def dec_to_bin(number):\n # number : str\n dividor = 1\n number = int(number)\n while dividor*2 <= number:\n dividor *= 2\n answer = ''\n while dividor >= 1:\n if 
number >= dividor:\n answer += '1'\n number -= dividor\n else:\n answer += '0'\n dividor //= 2\n return answer\n\ndef bin_to_dec(number):\n # number : str\n multiplier = 1\n answer = 0\n for num in number:\n answer += int(num) * multiplier\n multiplier *= 2\n return answer\n\nN = input()\nN = dec_to_bin(N).rstrip('0')\nN = bin_to_dec(N)\nprint(N)" }, { "alpha_fraction": 0.5082873106002808, "alphanum_fraction": 0.5368323922157288, "avg_line_length": 26.174999237060547, "blob_id": "06e3eaa0a8dc6d945419d2236d82add3a6697d12", "content_id": "c636a37faf7d85eb9c896a7ed762a393d8cab5de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1456, "license_type": "no_license", "max_line_length": 73, "num_lines": 40, "path": "/알고리즘/온라인저지/2022/04/0410/수강변경.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# N = int(input())\n# student = []\n# for i in list(map(int, input().split())):\n# student.append([i])\n# j = 0\n# for i in list(map(int, input().split())):\n# student[j].append(i)\n# j += 1\n# for i in range(N):\n# for j in range(i, N):\n# if student[i][1] == student[j][0]:\n# student[i][0], student[j][0] = student[j][0], student[i][0]\n# elif student[j][1] == student[i][0]:\n# student[i][0], student[j][0] = student[j][0], student[i][0]\n# result = 0\n# for s in student:\n# if s[0] != s[1]:\n# result += 1\n# print(result)\n\n\"\"\"\n원하는 학생들끼리 일대일로 교환시켜주는 코드를 짰는데\n시간초과가 나왔다\n문제는 \"그래서 원하는 수업을 못 듣는 학생수는 몇명인가\" 였고\n아래 코드를 통하여 해결하였다\n\"\"\"\n\n# 핵심은 수업 교환횟수는 제한이 없고\n# 학생들끼리 어떻게든 돌리고 돌리다 남는 학생의 수만 구하면 됨\nN = int(input())\ndp = [0] * 1000001 # 수업들 목록\nfor i in list(map(int, input().split())):\n dp[i] += 1 # 해당 수업을 신청하였음\nresult = 0 # 원하는 수업을 듣지 못하는 학생 수\nfor i in list(map(int, input().split())):\n if dp[i] >= 1: # 그 수업을 들고 있는 사람이 있으면\n dp[i] -= 1 # 교환\n else: # 그 수업을 들고 있는 사람 자체가 없으면\n result += 1 # 원하는 수업을 듣지 못하는 사람\nprint(result)" }, { "alpha_fraction": 0.4796747863292694, "alphanum_fraction": 
0.4918699264526367, "avg_line_length": 16.64285659790039, "blob_id": "aca21764521c939603241a122fe599f33e756abf", "content_id": "8772a3df5e1782533b44e07273bb323c585f0d44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 31, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/04/0405/디지털 루트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def main(n):\n tmp = 0\n for i in str(n):\n tmp += int(i)\n return tmp\n\nwhile True:\n N = int(input())\n if N == 0:\n break\n result = main(N)\n while len(str(result)) > 1:\n result = main(result)\n print(result)" }, { "alpha_fraction": 0.4733542203903198, "alphanum_fraction": 0.48275861144065857, "avg_line_length": 15.736842155456543, "blob_id": "87fd7b5a35973a3fe7b6ae42598abd85cf4949fc", "content_id": "e3e915871481a660fbd27b3c6c716d08433936e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 319, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/02/0202/2차원 배열의 합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\n\narr = []\n\nfor n in range(N):\n arr.append(list(map(int, input().split())))\n\nK = int(input())\n\nfor k in range(K):\n i, j, x, y = map(int, input().split())\n\n summ = 0\n\n for a in range(i-1, x):\n for b in range(j-1, y):\n summ += arr[a][b]\n \n print(summ)\n\n" }, { "alpha_fraction": 0.4453125, "alphanum_fraction": 0.4453125, "avg_line_length": 17.428571701049805, "blob_id": "159e1caf305a6e1e7ac1d804207a2159c9dbb0a6", "content_id": "8c0356e0f7129ee6d905cd4de47e22ea626faf80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 29, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/03/0314/2루수 이름이 뭐야.py", "repo_name": 
"sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print('뭐야', end='')\nfor _ in range(int(input())):\n if input() == 'anj':\n print(';')\n break\nelse:\n print('?')" }, { "alpha_fraction": 0.5604395866394043, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 25.14285659790039, "blob_id": "a41ccafb304aab9b23d0f28f1cb7ad82b6025807", "content_id": "9fb9fc4f8c9cc60618a6779fb14a664ee5e311dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/08/0827/Counting Antibodies.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nfor _ in range(2):\n tmp = 1\n for i in list(map(int, input().split())): tmp *= i\n result += tmp\nfor i in list(map(int, input().split())): result *= i\nprint(result)" }, { "alpha_fraction": 0.5730336904525757, "alphanum_fraction": 0.584269642829895, "avg_line_length": 13.833333015441895, "blob_id": "6eeac743e95bedb043d25a81531cc03fe37d9357", "content_id": "1fb43e2f270405a535fc7bd47548eff6e9397c95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/37차시 4. 
문자열 - 연습문제 1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "tmp = input()\n\nprint(tmp)\n\nif tmp == tmp[::-1]:\n print('입력하신 단어는 회문(Palindrome)입니다.')\n" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.4458874464035034, "avg_line_length": 15.571428298950195, "blob_id": "5f08d2ce6443cdc7ec8ed0548c94cdf42a93c946", "content_id": "72080190fac9469bb52dd39aab646f9314f20edd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/알고리즘/온라인저지/2021/12/1216/배수와 약수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n a, b = map(int, input().split())\n\n if a == 0 and b == 0:\n break\n\n result = 'neither'\n\n if a % b == 0:\n result = 'multiple'\n elif b % a == 0:\n result = 'factor'\n \n print(result)" }, { "alpha_fraction": 0.5227272510528564, "alphanum_fraction": 0.5378788113594055, "avg_line_length": 32.125, "blob_id": "8d636b333b4561ab3c71d11e6fa2cd76af4c1a8b", "content_id": "3638ac3b7af46514765d80362da5e0ce95bb6e16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 344, "license_type": "no_license", "max_line_length": 62, "num_lines": 8, "path": "/알고리즘/온라인저지/2021/08/0803/상수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a, b = input().split() # a와 b를 공백을 기준으로 input 받아서\ndef aigosangsuya(a, b): \n if int(a[::-1]) >= int(b[::-1]): # 각각 뒤에서 읽은 값의 int값을 비교해서\n result = a[::-1]\n else:\n result = b[::-1]\n return result # 큰 쪽을 저장해서 반환\nprint(aigosangsuya(a, b)) # 값을 출력" }, { "alpha_fraction": 0.5515695214271545, "alphanum_fraction": 0.5784753561019897, "avg_line_length": 22.526315689086914, "blob_id": "f868f553d9e4dbc74475d6d8f46116cdf0bfd8bc", "content_id": "1e8dc5411b002f4a7b15898d06cd90b8903559f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 446, "license_type": "no_license", "max_line_length": 48, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/10/1006/점프 점프.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\narr = list(map(int, input().rstrip().split()))\nQ = deque()\nvisited = [0]*N\nQ.append((0, 0))\nvisited[0] = 1\nwhile Q:\n now, move = Q.popleft()\n if now == N-1: print(move); break\n for i in range(now+1, now+1+arr[now]):\n if 0<=i<N and arr[i] and not visited[i]:\n Q.append((i, move+1))\n visited[i] = 1\nelse: print(-1)" }, { "alpha_fraction": 0.44186046719551086, "alphanum_fraction": 0.45348837971687317, "avg_line_length": 13.5, "blob_id": "851a561528e5023b3d15b2b95fbeed1ae0625306", "content_id": "cc957f801b7f5eb7f1b4cf0d14f147a7ff37b22b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 20, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/02/0206/!밀비 급일.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n sen = input()\n if sen == 'END':\n break\n\n print(sen[::-1])" }, { "alpha_fraction": 0.3992537260055542, "alphanum_fraction": 0.41044774651527405, "avg_line_length": 13.833333015441895, "blob_id": "fb66f3bff28f3fb5bf51c470d8a09be53a53426d", "content_id": "34f70adc1681ec856a282fad2fe4e56405e71064", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 45, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/02/0213/내 학점을 구해줘.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n N = int(input())\n\n sumC = 0\n\n tmp = 0\n\n for n in range(N):\n C, G = map(float, input().split())\n C = int(C)\n\n sumC += C\n \n tmp += C * G\n\n print('{} 
{:.1f}'.format(sumC, tmp/sumC))\n\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 27.33333396911621, "blob_id": "febc505e53e99d642622fb932b829af8edadf636", "content_id": "50412a65d30006796a35d0f30b57792ffbc33861", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 42, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/03/0303/알고리즘 수업 - 알고리즘의 수행 시간 4.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = sum(list(range(1, int(input()))))\ndim = 2\nprint(result, f'\\n{dim}', sep='')" }, { "alpha_fraction": 0.5887850522994995, "alphanum_fraction": 0.5981308221817017, "avg_line_length": 22.88888931274414, "blob_id": "3d948194e389fa119ba80efdaf019adab6e193e0", "content_id": "25c950c060d96543eca997e1f06242586fc3e008", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 214, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/11/1120/요다.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nfor _ in range(int(input().rstrip())):\n words = input().split()\n for word in words[2:]: print(word, end=' ')\n for word in words[:2]: print(word, end=' ')\n print()" }, { "alpha_fraction": 0.3683373034000397, "alphanum_fraction": 0.38663485646247864, "avg_line_length": 20.32203483581543, "blob_id": "3c81233a9a5c0b649374fe37901c65948c28fe1b", "content_id": "a7d4267f8c14b187acb3872df97e1f80f4b182b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1281, "license_type": "no_license", "max_line_length": 75, "num_lines": 59, "path": "/알고리즘/온라인저지/2021/12/1216/안전 영역.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import copy\nfrom collections import deque\n\nN = 
int(input())\n\ndx = (-1, 1, 0, 0)\ndy = (0, 0, -1, 1)\n\nheight_map = [list(map(int, input().split())) for _ in range(N)]\n\nrain = 0\n\nbfs = deque()\n\nresult = 0\n\nwhile True:\n tmp_map = copy.deepcopy(height_map)\n\n flooded_check = 0\n for i in range(N):\n for j in range(N):\n if tmp_map[i][j] <= rain:\n tmp_map[i][j] = 0\n flooded_check += sum(tmp_map[i])\n \n if flooded_check == 0:\n break\n \n safe = 0 # 현재 rain에서 나온 안전영역의 수\n\n for i in range(N):\n for j in range(N):\n if tmp_map[i][j]:\n bfs.append((i, j))\n safe += 1\n\n while bfs:\n tmp = bfs.popleft()\n y = tmp[0]\n x = tmp[1]\n\n if tmp_map[y][x] == 0:\n continue\n \n tmp_map[y][x] = 0\n for k in range(4):\n ny = y + dy[k]\n nx = x + dx[k]\n\n if 0 <= ny < N and 0 <= nx < N and tmp_map[ny][nx]:\n bfs.append((ny, nx))\n\n rain += 1\n \n if safe > result:\n result = safe\n\nprint(result)" }, { "alpha_fraction": 0.596187174320221, "alphanum_fraction": 0.6048526763916016, "avg_line_length": 29.421052932739258, "blob_id": "ba008688e5385eb03b480e864b59dc2fa7dbc2d6", "content_id": "463c28ea2e3c51280e04c821260db708adce0ace", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 725, "license_type": "no_license", "max_line_length": 53, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/09/0908/점프 점프.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\nN = int(input())\narr = list(map(int, input().split()))\nstart = int(input()) - 1 # 인덱스 맞추기\nvisited = [0] * N # 점들의 방문 배열\nQ = deque()\nQ.append(start) # 시작점 넣고 출발\nwhile Q:\n now = Q.popleft()\n if visited[now]: continue # 방문한 적 있는 점이면 continue\n visited[now] = 1 # 방문 처리\n tmp = arr[now] # 현재 지점에서 점프할 수 있는 거리\n front, back = now+tmp, now-tmp # 점프 시 인덱스\n if 0<=front<N and not visited[front]:\n Q.append(front) # 앞으로 점프\n if 0<=back<N and not visited[back]:\n Q.append(back) # 뒤로 점프\nprint(sum(visited)) # 방문한 적 있는 점들의 수 출력" }, { "alpha_fraction": 
0.5549132823944092, "alphanum_fraction": 0.589595377445221, "avg_line_length": 14.818181991577148, "blob_id": "9c8d3f58ea68835399b5a852b4ac3de85bce390f", "content_id": "dba8bc49f077dacdf3670d5e0ca0378f096f157a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 34, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/06/0614/다각형의 대각선.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from math import factorial as fact\n\nN = int(input())\nresult = 0\nif N == 3:\n result = 0\nelse:\n result = fact(N)\n result //= 24\n result //= fact(N-4)\nprint(result)" }, { "alpha_fraction": 0.567685604095459, "alphanum_fraction": 0.5764192342758179, "avg_line_length": 19.909090042114258, "blob_id": "ae37922eaacb52619edc0d2db806ac51871bb36e", "content_id": "8f73c95da4bef5a47add7cdb889fbe3e4641c058", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "no_license", "max_line_length": 48, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/10/1031/공 넣기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().rstrip().split())\nresult = [0]*N\nfor m in range(M):\n a, b, c = map(int, input().rstrip().split())\n for i in range(a-1, b):\n result[i] = c\nprint(*result)" }, { "alpha_fraction": 0.49754902720451355, "alphanum_fraction": 0.5098039507865906, "avg_line_length": 23.727272033691406, "blob_id": "7b2675f169228b1065bf6f56ac606c0cc8a59446", "content_id": "f9cba670894799d5cb642fe1729824389668ac1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 816, "license_type": "no_license", "max_line_length": 60, "num_lines": 33, "path": "/알고리즘/온라인저지/2021/08/0828/스위치 켜고 끄기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ndef 
change(num):\n if switch[num]:\n switch[num] = 0\n else:\n switch[num] = 1\n return\n\nswitch_count = int(sys.stdin.readline())\nswitch = [-1] + list(map(int, sys.stdin.readline().split()))\nstudents = int(sys.stdin.readline())\n\nfor i in range(students):\n sex, num = map(int, sys.stdin.readline().split())\n if sex == 1:\n for j in range(num, switch_count+1, num):\n change(j)\n else:\n change(num)\n for k in range(switch_count//2):\n if num+k > switch_count or num - k < 1:\n break\n if switch[num+k] == switch[num-k]:\n change(num+k)\n change(num-k)\n else:\n break\n\nfor i in range(1, len(switch)):\n print(switch[i], end=' ')\n if not i%20:\n print()\n" }, { "alpha_fraction": 0.5022830963134766, "alphanum_fraction": 0.534246563911438, "avg_line_length": 14.642857551574707, "blob_id": "07c3a2236f2a434a728847aea658822aa65ca3f1", "content_id": "f5339ff4db8c02424f9f43b167785a46c0e6ae33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 33, "num_lines": 14, "path": "/알고리즘/온라인저지/2021/12/1216/그릇.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "dishes = input()\n\nheight = 10\n\nprev_dish = dishes[0]\n\nfor i in range(1, len(dishes)): \n if dishes[i] == prev_dish:\n height += 5\n else:\n height += 10\n prev_dish = dishes[i]\n \nprint(height)\n" }, { "alpha_fraction": 0.5562701225280762, "alphanum_fraction": 0.5691318511962891, "avg_line_length": 23, "blob_id": "b572e244d087762a67cafcb0f8396a1d1eb148a5", "content_id": "151f3e69787f1328249e071dc55eeb9eb4fb82cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/09/0911/진짜 공간.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfiles = list(map(int, input().split()))\ncluster = 
int(input())\nresult = 0\nfor file in files:\n if not file: continue\n if file > cluster:\n tmp = file//cluster + 1\n if file%cluster == 0: tmp -= 1\n result += cluster * tmp\n else:\n result += cluster\nprint(result)" }, { "alpha_fraction": 0.5961538553237915, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 52, "blob_id": "40f58b147df1210de1beed3089ca32470f1af037", "content_id": "2edaf399c8852c60b17f623ab92321b6e85c57fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 52, "num_lines": 1, "path": "/알고리즘/온라인저지/2022/07/0731/돌 게임 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "print('SK') if not int(input()) % 2 else print('CY')" }, { "alpha_fraction": 0.582524299621582, "alphanum_fraction": 0.6155340075492859, "avg_line_length": 23.571428298950195, "blob_id": "980725440c2bc5ec04a8e1dc6be5d4aa50ceb2c2", "content_id": "9e441cc14038f1cb1a57dbf8c7de5c284a6f6f0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 719, "license_type": "no_license", "max_line_length": 59, "num_lines": 21, "path": "/알고리즘/온라인저지/2022/02/0225/로마 숫자 만들기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from itertools import combinations_with_replacement as comb\n# itertools를 코테에서도 쓸 수 있다고 하니\n# 실력향상을 위한 빡구현 vs 일단 라이브러리 사용\n# 고민해볼 가치가 있을 듯 (아래는 참고 블로그 링크)\n# https://uni2237.tistory.com/56\n\n\nN = int(input())\nresult = [] # 경우의 수 들이 담길 리스트\nrome = [1, 5, 10, 50] # 로마숫자 IVXL\n\nfor temp in comb(range(4), N):\n summ = 0 # 매 경우의 수\n # N==2 일 때, temp = (0, 0) ...\n for i in temp:\n summ += rome[i] # i는 rome의 인덱스값으로 사용\n\n result.append(summ) # 경우의 수를 완성해서 \n # result에 추가\n\nprint(len(set(result))) # set으로 중복제거" }, { "alpha_fraction": 0.40869563817977905, "alphanum_fraction": 0.417391300201416, "avg_line_length": 22.200000762939453, "blob_id": 
"075ea83a22110fa3beb79f66689ea6107acc2f7e", "content_id": "d2c31ef14b1eb09a75de57cee4c5a348f23b79c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/05/0506/분수좋아해.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n N, M = map(int, input().split())\n if N == M == 0:\n break\n print(f'{N//M} {N%M} / {M}')" }, { "alpha_fraction": 0.5279187560081482, "alphanum_fraction": 0.5380710363388062, "avg_line_length": 15.5, "blob_id": "76a01b7d6343833b00b89700b04735a2e6fdb3f2", "content_id": "f1a5604b8c1591a4934f2e6e1421802b981114e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/35차시 3. 자료구조 – 셋, 딕셔너리 - 연습문제 10.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "alphabet = {}\n\ntmp = input()\n\nfor t in tmp:\n if t not in alphabet:\n alphabet.update({t:1})\n else:\n alphabet[t] += 1\n\nfor a in alphabet:\n print('{},{}'.format(a, alphabet[a]))" }, { "alpha_fraction": 0.4621409773826599, "alphanum_fraction": 0.5483028888702393, "avg_line_length": 26.428571701049805, "blob_id": "01a0c599142bad682bb3b56c9ff8e505f3f9bb0e", "content_id": "7333b1f8de368e8fe007c05d83deb7a52cddcfce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "no_license", "max_line_length": 56, "num_lines": 14, "path": "/알고리즘/[템플릿]/Dynamic Programming/포도주 시식.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nwines = [0] * 10002\ndp = [0] * 10002\nfor n in range(N): wines[n] = int(input())\ndp[0] = wines[0]\ndp[1] = max(dp[0], wines[0]+wines[1])\ndp[2] = max(dp[1], 
wines[1]+wines[2], wines[0]+wines[2])\nfor i in range(3, N):\n A = dp[i-3] + wines[i-1] + wines[i]\n B = dp[i-2] + wines[i]\n dp[i] = max(A, B, dp[i-1])\nprint(max(dp))\n\n# https://www.acmicpc.net/problem/2156" }, { "alpha_fraction": 0.5573248267173767, "alphanum_fraction": 0.5668789744377136, "avg_line_length": 23.230770111083984, "blob_id": "6db09059b3288cb0f63a8e5b00ed48bba01fd718", "content_id": "2e03a071287aaf2f1d09a948047e4c3fdf9099af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 368, "license_type": "no_license", "max_line_length": 57, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/08/0815/좋은 단어.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input())\nresult = 0\nfor n in range(N):\n W, S = input().rstrip(), [] # word, stack\n for w in W:\n if S and w == S[-1]: S.pop() # 스택 마지막과 같으면 pop\n else: S.append(w) # 다르면 append\n if not S: result += 1 # 다 짝이 맞아서 스택에 남은게 없으면 result++\nprint(result)" }, { "alpha_fraction": 0.44936707615852356, "alphanum_fraction": 0.4810126721858978, "avg_line_length": 12.782608985900879, "blob_id": "5acaba1aa831ac0fec08ba84ce5fa391de7f6c81", "content_id": "d9d2bdd8e426123dde9b9b04d1e6f03b8bf79fd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "no_license", "max_line_length": 34, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/01/0108/계단 오르기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nstairs = []\n\nfor n in range(N):\n stairs.append(int(input()))\n\nN -= 1\n\nresult = stairs[N]\n\nwhile N >= 2:\n if stairs[N-1] >= stairs[N-2]:\n result += stairs[N-1]\n N -= 1\n else:\n result += stairs[N-2]\n N -= 2\n \nif N == 1:\n result += stairs[N-1]\n\nprint(result)" }, { "alpha_fraction": 0.4912280738353729, "alphanum_fraction": 0.5087719559669495, "avg_line_length": 
12.470588684082031, "blob_id": "ba2cedd7cf98ebab0bcfb199a494517640322491", "content_id": "8067ae73fb5a0b9861600819a7249a5e824e8483", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 32, "num_lines": 17, "path": "/알고리즘/온라인저지/2021/12/1213/0 = not cute, 1 = cute.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\ncute = 0\ncut = 0\n\nfor tc in range(T):\n is_cute = int(input())\n\n if is_cute:\n cute += 1\n else:\n cut += 1\n\nif cute < cut:\n print('Junhee is not cute!')\nelse:\n print('Junhee is cute!')" }, { "alpha_fraction": 0.468822181224823, "alphanum_fraction": 0.494226336479187, "avg_line_length": 27.933332443237305, "blob_id": "1502ee4aa8f6b1d68c6ffbbe9e25c2da1f02e631", "content_id": "00d56cfe7dc1ddb43f3051a66200185f32a6e1e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": "no_license", "max_line_length": 66, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/11/1103/이동하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\ndy, dx = [1, 0, 1], [0, 1, 1]\n\nN, M = map(int, input().rstrip().split())\narr = [list(map(int, input().rstrip().split())) for _ in range(N)]\ndp = [a[:] for a in arr]\nfor i in range(N):\n for j in range(M):\n for k in range(3):\n ny, nx = i+dy[k], j+dx[k]\n if 0<=ny<N and 0<=nx<M:\n dp[ny][nx] = max(dp[ny][nx], arr[ny][nx]+dp[i][j])\nprint(dp[N-1][M-1])" }, { "alpha_fraction": 0.6231883764266968, "alphanum_fraction": 0.6376811861991882, "avg_line_length": 34, "blob_id": "4e2516de7d98e9177e93bca847a6c69b3ce778c8", "content_id": "e385c0c5d72340e3061ef37a2b801c7031ef84a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 38, "num_lines": 2, 
"path": "/알고리즘/온라인저지/2022/03/0306/金平糖 (Konpeito).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "nums = list(map(int, input().split()))\nprint(max(nums)*3 - sum(nums))" }, { "alpha_fraction": 0.4350649416446686, "alphanum_fraction": 0.5064935088157654, "avg_line_length": 21.14285659790039, "blob_id": "cbebbd888bd8ef9fd9e6d7b0125038527fe6f39c", "content_id": "b38a7606ceb1498e597534c2d30d1b8aba5ae6f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 44, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/04/0415/Every Second Counts.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "times = []\nfor _ in range(2):\n h, m, s = map(int, input().split(' : '))\n times.append((h*60+m)*60+s)\na, b = times\nif b < a: b += 24*60*60\nprint(b-a)" }, { "alpha_fraction": 0.33421751856803894, "alphanum_fraction": 0.4615384638309479, "avg_line_length": 12.034482955932617, "blob_id": "ea9c02f39632b51b2c601199ec4742a87875c242", "content_id": "d80df80b8ac4421b56dbe7818ffad3a6bff34e4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "no_license", "max_line_length": 31, "num_lines": 29, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/29차시 3. 
자료구조 – 셋, 딕셔너리 - 연습문제 4.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a = {\n '아메리카노': 1900, \n '카페모카': 3300, \n '에스프레소': 1900, \n '카페라떼': 2500, \n '카푸치노': 2500, \n '바닐라라떼': 2900\n }\n\nb = {\n '헤이즐럿라떼': 2900, \n '카페모카': 3300, \n '밀크커피': 3300, \n '아메리카노': 1900, \n '샷크린티라떼': 3300\n }\n\ntmp = {}\n\ntmp.update(b)\ntmp.update(a)\n\nresult = set()\n\nfor t in tmp:\n if tmp[t] >= 3000:\n result.add((t, tmp[t]))\n\nprint(result)" }, { "alpha_fraction": 0.4507211446762085, "alphanum_fraction": 0.4951923191547394, "avg_line_length": 26.600000381469727, "blob_id": "b590b330131224abbce25f6b6e9f0168a66b97a4", "content_id": "e439f456ac51e28234df65662f962528b5f2e944", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1196, "license_type": "no_license", "max_line_length": 53, "num_lines": 30, "path": "/알고리즘/온라인저지/2021/08/0812/세탁소 사장 동혁.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n미국동전\n1: 페니 = 10원\n5: 니켈 = 50원\n10: 다임 = 100원\n25: 쿼터 = 250\n\n아래 두개는 문제에서 안 씀\n# 50: 하프달러 = 500원\n# 100: 달러 = 1000원\n\"\"\"\nT = int(input()) # 테스트케이스\ncoins = [25, 10, 5, 1] # 동전들\nfor t in range(T): # 테스트케이스 개수만큼\n change = int(input()) # 거스름돈\n changed_coins = [] # 사용된 모든 동전들\n for coin in coins: # 동전들을 순회하면서\n if coin <= change: # 거슬러줄 수 있으면\n while True: # while문 반복하면서 거슬러준다\n change -= coin # 해당 동전을 하나 거슬러주고\n changed_coins.append(coin) # 그 동전을\n # 거슬러준 동전 리스트에 추가\n if coin > change: # 순회중인 동전이 금액보다 커져서\n # 그 동전으로 거슬러 줄 수 없게되면\n break # while문 종료\n for i in range(4): # 동전은 총 4개\n # 사용된 동전의 개수의 종류는 4개\n print(changed_coins.count(coins[i]), end=' ')\n # 동전개수를 세서 출력\n print() # 줄바꿈\n " }, { "alpha_fraction": 0.550632894039154, "alphanum_fraction": 0.6392405033111572, "avg_line_length": 21.428571701049805, "blob_id": "0a76605c86a62d9a9362bcfe0fbed0d7d1c6e112", "content_id": "873ebf7b7061fa227582099a328f18af894628f5", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 158, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/02/0206/정육각형과 삼각형.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import math\n\nL = int(input())\ncos_30 = math.cos(math.radians(30))\ncos_60 = math.cos(math.radians(60))\nresult = L**2 * cos_30 * cos_60\nprint(f'{result:.9f}') " }, { "alpha_fraction": 0.40336135029792786, "alphanum_fraction": 0.4075630307197571, "avg_line_length": 22.899999618530273, "blob_id": "330d164ec17d33adedb03ddf1c9c1b9a187eae19", "content_id": "7007dee796672c42f1573b2961bf9650e268f563", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 39, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/05/0503/다음수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "while True:\n A, B, C = map(int, input().split())\n if A == B == C == 0:\n break\n result = 'AP'\n if B-A == C-B: # AP\n print(result, C + (C-B))\n else: # GP\n result = 'GP'\n print(result, (C * (B//A)))" }, { "alpha_fraction": 0.5176470875740051, "alphanum_fraction": 0.529411792755127, "avg_line_length": 20.5, "blob_id": "762842be446e110e892cf3e53f6f797ab098d11a", "content_id": "2a7d054610a7e8608bc0f70d0794dd2e30001aee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "no_license", "max_line_length": 30, "num_lines": 4, "path": "/알고리즘/온라인저지/2023/04/0428/스키테일 암호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K = int(input())\nPW = input()\nfor i in range(0, len(PW), K):\n print(PW[i], end='')" }, { "alpha_fraction": 0.5176211595535278, "alphanum_fraction": 0.5616739988327026, "avg_line_length": 33.92307662963867, "blob_id": "5fe865008bdce35e2d0fc712eec44e63e10ec214", "content_id": "023118db16e631716bda5901abea1b8f9298d388", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 582, "license_type": "no_license", "max_line_length": 96, "num_lines": 13, "path": "/알고리즘/온라인저지/2021/08/0816/주사위.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nnumbers = list(map(int, input().split()))\nnumber = [min(numbers[0], numbers[5]), min(numbers[1], numbers[4]), min(numbers[2], numbers[3])]\n # 최소값을 도출해낼 수 있는 면 찾기\nnumber.sort() # 사용할 숫자들 크기순으로 정리\nA = number[0]\nB = number[1]\nC = number[2]\nif N == 1: # 1*1일때는 그 주사위 자체이니까\n print(sum(numbers)-max(numbers)) # 가장 큰 수를 아랫면으로 놓은 값을 출력\nelse: # 1이 아니면\n print((((((N-1)*A)+B)*N)*4) + C*4 + (((N-2)*B)*4) + A*(N-2)*(N-2))\n # 최소값 출력\n" }, { "alpha_fraction": 0.4436090290546417, "alphanum_fraction": 0.49624061584472656, "avg_line_length": 18.14285659790039, "blob_id": "a6d9f4568fd6b212f19bbdc71957f15914f5a6f5", "content_id": "7c81fadb186cdf8a9c1a8c7295b6b27524e5cf95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 23, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/05/0507/369.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = 0\nfor n in range(1, N+1):\n for i in str(n):\n if i in '369':\n result += 1\nprint(result)" }, { "alpha_fraction": 0.45026177167892456, "alphanum_fraction": 0.4921466112136841, "avg_line_length": 15, "blob_id": "a50cf074b95cfe4a18128166bc9d0ebe1d0c4158", "content_id": "511230e575c4c90f98bec7a40bd6e6c086b11f06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 191, "license_type": "no_license", "max_line_length": 34, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/07/0731/알고리즘 수업 - 피보나치 수 1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def fib(n):\n global result\n if n == 1 or n == 2:\n result += 1\n return 1\n else:\n return 
fib(n-1) + fib(n-2)\n\nN = int(input())\nresult = 0\nfib(N)\nprint(result, N-2)" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.46236559748649597, "avg_line_length": 9.44444465637207, "blob_id": "9d377968726ff4d0c1c9c9955351ed8f56c1769f", "content_id": "a840027d6880af73de75bc156004a8c9c6bf3788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 29, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/02/0225/운동장 한 바퀴.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "pi = 3.141592\n\nA = 13\nB = 8\n\nA = int(input())\nB = int(input())\n\nprint((A * 2) + (2 * pi * B))" }, { "alpha_fraction": 0.4301075339317322, "alphanum_fraction": 0.5376344323158264, "avg_line_length": 25.714284896850586, "blob_id": "6c3486bea5bf6a493f568a0e025153c97c5508cc", "content_id": "1b761553461679d90ff8b963d16d66c93c9cbf03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 38, "num_lines": 7, "path": "/알고리즘/[템플릿]/Dynamic Programming/파도반 수열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "dp = [1, 1, 1, 2, 2, 3] + [0]*97\nfor i in range(5, 103): \n dp[i] = dp[i-5]+dp[i-1]\nfor _ in range(int(input())): \n print(dp[int(input())-1])\n\n# https://www.acmicpc.net/problem/9461" }, { "alpha_fraction": 0.4232558012008667, "alphanum_fraction": 0.4558139443397522, "avg_line_length": 14.428571701049805, "blob_id": "7dd864055711d3cf66b55d930ff3a27ea8c75b53", "content_id": "04c7366cfd69a02e293943a592321560a7a8491a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 35, "num_lines": 14, "path": "/알고리즘/온라인저지/2022/05/0505/ソーシャルゲーム (Social Game).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C 
= map(int, input().split())\nresult = 0\nday = 0\ncoin = 0\nwhile True:\n coin += A\n day += 1\n result += 1\n if day == 7:\n day = 0\n coin += B\n if coin >= C:\n break\nprint(result)" }, { "alpha_fraction": 0.5569620132446289, "alphanum_fraction": 0.594936728477478, "avg_line_length": 39, "blob_id": "d2bb54d1843c0f8867805aed2047ed6ab52ca0dd", "content_id": "d6a2bd278cf69aeb17450e24f24810724a3599af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79, "license_type": "no_license", "max_line_length": 49, "num_lines": 2, "path": "/알고리즘/온라인저지/2022/05/0516/Sum of Odd Sequence.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for t in range(int(input())):\n print(sum(list(range(1, int(input())+1, 2))))" }, { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.6129032373428345, "avg_line_length": 17.799999237060547, "blob_id": "62799550449c404c8fc14b0f3601928ecb618c96", "content_id": "5df930954e9eb2ee46d6e8c78c6f2a1884227cdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/알고리즘/온라인저지/2021/08/0822/윤년.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = 0\nif (not N%4 and N%100) or not N%400:\n result = 1\nprint(result)" }, { "alpha_fraction": 0.5375722646713257, "alphanum_fraction": 0.5664739608764648, "avg_line_length": 12.384614944458008, "blob_id": "a85e80b0a57b0c2d33cc08bacdf0cc66aa0e32fe", "content_id": "6eb7062b6fb412a70b93cd085808f6aaca531392", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 26, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/10/1011/새.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = 
int(input().rstrip())\nresult = 0\nnum = 1\nwhile N:\n result += 1\n if num > N: num = 1\n N -= num\n num += 1\nprint(result)" }, { "alpha_fraction": 0.5277777910232544, "alphanum_fraction": 0.5347222089767456, "avg_line_length": 13.5, "blob_id": "d967f38e0cb9e77b7f17d6953750b953e797ff51", "content_id": "db667f7a8b2d4f9990e0be99bf0a3fcf2b04d556", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 32, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/02/0224/뉴비의 기준은 뭘까.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\n\nresult = 'NEWBIE!'\n\nif M <= N and M > 2:\n result = 'OLDBIE!'\nelif M > N:\n result = 'TLE!'\n\nprint(result)" }, { "alpha_fraction": 0.3737373650074005, "alphanum_fraction": 0.4040403962135315, "avg_line_length": 18.799999237060547, "blob_id": "cc3112aeee5edf1984a4ecb4adcbd4d546f161a7", "content_id": "c1d83e9ce0a682adb477e5925dedeaf22c4c7899", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/알고리즘/온라인저지/2021/08/0816/for break.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for i in range(5):\n for j in range(5):\n if j == 3:\n break\n print(i, j)\n" }, { "alpha_fraction": 0.4229764938354492, "alphanum_fraction": 0.44778066873550415, "avg_line_length": 20.30555534362793, "blob_id": "5e22b2dddb8ef33504dddb31d388a2b2e3a16134", "content_id": "53ed0e39933546bf42b2b7fe8940954f4e687533", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "no_license", "max_line_length": 65, "num_lines": 36, "path": "/알고리즘/온라인저지/2022/12/1204/갤러리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def find(height, width):\n global result\n for i in 
range(height-1):\n j = 0\n while j < width-1:\n j += 1\n if arr[i][j:j+2] == 'XX' and arr[i+1][j:j+2] == '..':\n result += 1\n j += 1\n if arr[i][j:j+2] == '..' and arr[i+1][j:j+2] == 'XX':\n result += 1\n j += 1\n\ndef rotate(arr):\n new_arr = []\n for j in range(M):\n tmp = ''\n for i in range(N):\n tmp += arr[i][j]\n new_arr.append(tmp)\n return new_arr\n\nN, M = map(int, input().split())\narr = [input() for _ in range(N)]\nresult = 0\nfind(N, M)\narr = rotate(arr)\nfind(M, N)\nprint(result)\n\n\"\"\"\n그림을 걸 수 있는지 가로로만 스캔할 것\n그리고 배열을 90도 돌려주고 다시 스캔\n주의할 점은\n90도만 배열을 돌리기 때문에 N과 M이 반전됨\n\"\"\"" }, { "alpha_fraction": 0.43579766154289246, "alphanum_fraction": 0.4630350172519684, "avg_line_length": 18.846153259277344, "blob_id": "795fd9194e395140893fe86ef609053cc5a797bf", "content_id": "8eda7c5ff287e47ad46132b86150be19276b45e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 42, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/07/0726/국회의원 선거.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = 0\nif N == 1:\n print(result)\nelse:\n D = int(input()) # dasom\n C = [int(input()) for _ in range(N-1)]\n while D <= max(C):\n C.sort(reverse=True)\n D += 1\n result += 1\n C[0] -= 1\n print(result)" }, { "alpha_fraction": 0.508083164691925, "alphanum_fraction": 0.5265588760375977, "avg_line_length": 26.0625, "blob_id": "94482ad6e55605edb11211878983d0c4220704fd", "content_id": "50b762f7cff82b266ce4da7354bedffe1d3b3a31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": "no_license", "max_line_length": 92, "num_lines": 16, "path": "/알고리즘/온라인저지/2023/01/0129/숫자놀이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "M, N = map(int, input().split())\narr = []\nnum_to_en = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 
'seven', 'eight', 'nine']\nnum_list = list(range(M, N+1))\nfor num in num_list:\n tmp = ''\n for i in str(num):\n tmp += num_to_en[int(i)]\n translated = [tmp, num]\n arr.append(translated)\narr.sort(key=lambda x:x[0])\ncnt = 0\nfor a in arr: \n print(a[1], end=' ')\n cnt = (cnt+1)%10\n if cnt == 0: print()\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5074626803398132, "avg_line_length": 26, "blob_id": "161baa71a0a801971651800bc412c6a0e49d68fd", "content_id": "b267dfd38f82b808ee6de791fa6f320f6a2002a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/09/0910/닉네임에 갓 붙이기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n nickname = input().split()\n for i in ['god'] + nickname[1:]:\n print(i, end='')\n print()" }, { "alpha_fraction": 0.391259104013443, "alphanum_fraction": 0.40894901752471924, "avg_line_length": 21.372093200683594, "blob_id": "2477fa5ef1f248ee0f95e42baeb9a93dd6897809", "content_id": "f49e1a82df9f206abc06f2446b12c12b16630553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1233, "license_type": "no_license", "max_line_length": 80, "num_lines": 43, "path": "/알고리즘/온라인저지/2022/01/0122/소수의 연속합.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nif N == 1:\n # 놀랍게도 1은 소수가 아니었다!!\n cnt = 0\nelse:\n # 에라토스테네스의 체로 N까지의 소수 구하기\n a = [False,False] + [True]*(N-1)\n primes = []\n for i in range(2,N+1):\n if a[i]:\n primes.append(i)\n for j in range(2*i, N+1, i):\n if a[j]:\n a[j] = False\n\n # 투포인터에 사용할 변수 s, e와\n # 연속합이 N일때 카운트할 cnt\n s = e = cnt = 0\n\n # 가장 작은 소수부터 시작\n now = 2\n\n while True:\n # 투포인터 탐색 종료조건\n if (now < N and e == len(primes)-1) or (now > N and s == len(primes)-1):\n break\n\n if now == N: # 구간합이 N과 같으면\n cnt += 1 # 카운트 증가\n 
now -= primes[s] # 포인터 이동\n s += 1\n elif now < N: # 구간합이 N보다 작을 때\n e += 1 # 한 칸 밀어주기\n now += primes[e]\n elif now > N: # 구간합이 N보다 클 때\n now -= primes[s]\n s += 1 # 한 칸 땡겨주기\n\n if now == N: # 다 돌고 마지막 본인이 소수일 때\n cnt += 1 # 카운트 증가\n\nprint(cnt)" }, { "alpha_fraction": 0.5679012537002563, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 26.33333396911621, "blob_id": "058d9bb0d27ca5d0a006c1f504a5bb4e23a05061", "content_id": "b245c4e797708f56f4ec280f8a5aefe95a4bd3a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 40, "num_lines": 3, "path": "/알고리즘/온라인저지/2023/02/0215/간지(干支).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "year = int(input())\ntmp = 'IJKLABCDEFGH'\nprint(tmp[year%12], (year+6)%10, sep='')" }, { "alpha_fraction": 0.45254236459732056, "alphanum_fraction": 0.5169491767883301, "avg_line_length": 16.909090042114258, "blob_id": "d20212a1fb806d5cf77900901143d92fdefc25f3", "content_id": "1d02dfc8b84d2c56f391006144684a22cc1865a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 49, "num_lines": 33, "path": "/알고리즘/온라인저지/2021/12/1216/숨바꼭질.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\nN, K = map(int, input().split())\n\nbfs = deque()\nbfs.append((N, 0))\n\nvisited = [0 for _ in range(100001)]\n\nwhile bfs:\n tmp = bfs.popleft()\n n = tmp[0]\n t = tmp[1]\n\n if visited[n]:\n continue\n\n if n == K:\n print(t)\n break\n\n visited[n] = 1\n\n go = n+1\n back = n-1\n jump = n*2\n\n if 0 <= go <= 100000 and not visited[go]:\n bfs.append((go, t+1))\n if 0 <= back <= 100000 and not visited[back]:\n bfs.append((back, t+1))\n if 0 <= jump <= 100000 and not visited[jump]:\n bfs.append((jump, t+1))" }, { "alpha_fraction": 0.5080440044403076, 
"alphanum_fraction": 0.5309060215950012, "avg_line_length": 29.947368621826172, "blob_id": "21dccf9f15012be838282d0e163e354ebd066f7d", "content_id": "4030e62ba11cca3746c0689d54a60333aab74119", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1735, "license_type": "no_license", "max_line_length": 59, "num_lines": 38, "path": "/알고리즘/온라인저지/2022/01/0116/달력.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input()) # 일정의 개수\n\ndp = [0] * 365 # 일년치 달력\n\nfor n in range(N):\n S, E = map(int, input().split()) # 일정 입력\n\n for i in range(S-1, E): # 리스트를 365로 하였기 때문에 1일은 dp[0]\n # 시작도 S-1부터\n dp[i] += 1 # 일정이 있는 날 달력의 높이 하나 추가\n\n# 연속된 일정단위로 만들어지는 직사각형의 넓이들의 합을 구해야 함\nstart = end = width = height = result = 0 # result는 출력값\ncounting = False # 연속된 일정인지 확인하기 위함\n\nfor i in range(365): # 일년동안\n if dp[i]: # 일정이 있는 날이면\n if not counting: # 연속된지 체크중이지 않으면\n counting = True # 체크중으로 변경\n start = i # 이 날 부터 연속된 일정이 시작\n height = dp[i] # 일정들의 높이의 최대값 초기화\n elif counting: # 연속된지 체크중이면\n if dp[i] > height: # 지금 보고 있는 일정의 높이가 더 높으면\n height = dp[i] # 높이 갱신\n else: # 일정이 없는 날이면\n if counting: # 체크중이면\n counting = False # 체크를 종료하고\n end = i # 전날까지가 연속된 일정\n width = end - start # 연속된 일정들의 직사각형의 가로는 (끝-시작)\n result += width * height # 결과값에 직사각형의 높이 저장\n width = height = 0 # 가로 세로 초기화\n\nif dp[364] and counting: # 달력 마지막날이고, 체크중이라면\n end = 365 # 끝은 12월31일\n width = end - start # 가로\n result += width * height # 넓이를 결과값에 더해서\n\nprint(result) # 출력\n \n" }, { "alpha_fraction": 0.448587566614151, "alphanum_fraction": 0.4689265489578247, "avg_line_length": 24.285715103149414, "blob_id": "038151a60487cee5ccd57f7a79b0367c24f29d07", "content_id": "2acd08aa238e48ada0d7b85a71af8178acb56d50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1073, "license_type": "no_license", "max_line_length": 57, "num_lines": 35, "path": "/알고리즘/온라인저지/2022/07/0709/그림.py", 
"repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\n\n\ndef bfs(y, x):\n Q.append((y, x))\n global maxx # 가장 넓이가 넓은 그림의 넓이 전역변수 선언\n tmp = 0\n while Q:\n y, x = Q.popleft()\n if arr[y][x] == 0: # 이전에 이미 탐색한 지점이면\n continue # 컨티뉴\n arr[y][x] = 0\n tmp += 1 # 해당 그림의 넓이 ++\n for i in range(4):\n ny = y+dy[i]\n nx = x+dx[i]\n if 0<=ny<N and 0<=nx<M and arr[ny][nx]:\n Q.append((ny, nx))\n if tmp > maxx: # 가장 넓이가 넓은 그림을 새로 발견하였으면\n maxx = tmp # 갱신\n\n\ndy = [-1, 1, 0, 0] # 상하좌우 4방향 델타이동\ndx = [0, 0, -1, 1]\nN, M = map(int, input().split())\narr = [list(map(int, input().split())) for _ in range(N)]\nresult = maxx = 0 # 그림의 수, 최대 넓이 초기화\nQ = deque()\nfor n in range(N):\n for m in range(M):\n if arr[n][m]: # 그림을 발견하면\n bfs(n, m) # BFS 탐색\n result += 1 # 그림의 수 ++\nprint(result)\nprint(maxx) " }, { "alpha_fraction": 0.5760456323623657, "alphanum_fraction": 0.5969581604003906, "avg_line_length": 20.079999923706055, "blob_id": "3a3e93abeffc14bc2c7f3986239389b37d8b4e99", "content_id": "960abd72c5c368955be19857e9f84cc37fcacd60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 526, "license_type": "no_license", "max_line_length": 48, "num_lines": 25, "path": "/알고리즘/온라인저지/2022/09/0916/나무 자르기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef cut(height):\n cutted = 0\n for tree in trees:\n if tree>height:\n cutted += tree-height\n if cutted>=M: return True\n return False\n\nN, M = map(int, input().rstrip().split())\ntrees = list(map(int, input().rstrip().split()))\nresult = 0\nstart, end = result, int(1e9)\nwhile start<=end:\n mid = (start+end) // 2\n if cut(mid):\n start = mid+1\n result = max(result, mid)\n else: end = mid-1\nprint(result)\n\n# https://www.acmicpc.net/problem/2805" }, { "alpha_fraction": 0.44978782534599304, "alphanum_fraction": 0.4653465449810028, "avg_line_length": 29.782608032226562, 
"blob_id": "4b02b72da6efad7d2bc6b8d62884c29a3a2c7901", "content_id": "becd0d4e30c0757154d0b539745fe6048f091518", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 707, "license_type": "no_license", "max_line_length": 57, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/10/1002/친구.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\narr = [input().rstrip() for _ in range(N)]\nfriend_2 = [[] for _ in range(N)]\nfor i in range(N):\n for j in range(N):\n if arr[i][j] == 'Y':\n if j not in friend_2[i]:\n friend_2[i].append(j)\n if i not in friend_2[j]:\n friend_2[j].append(i)\n for k in range(N):\n if arr[j][k] == 'Y' and i != k:\n if k not in friend_2[i]:\n friend_2[i].append(k)\n if i not in friend_2[k]:\n friend_2[k].append(i)\nresult = 0\nfor friend in friend_2: result = max(result, len(friend))\nprint(result)" }, { "alpha_fraction": 0.6010362505912781, "alphanum_fraction": 0.6424870491027832, "avg_line_length": 13.923076629638672, "blob_id": "4918a77527c10f744a07eecfae124b838a7700de", "content_id": "72f850a6e5c5840b26e15f5e6916ac09a139152e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 203, "license_type": "no_license", "max_line_length": 41, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/10/1010/NN.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().rstrip().split())\nprint((str(N)*N)[:M])\n\n\"\"\"\n<참고한 링크>\nhttps://hongku.tistory.com/268\n\"\"\"\n\n# https://www.acmicpc.net/problem/11944" }, { "alpha_fraction": 0.5444234609603882, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 21.08333396911621, "blob_id": "a18ac7384da7f8d3b74122d1d4362ce032fb49e6", "content_id": "c5cb3185974f5c2aa02e6b853484a47e2793e7ed", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 529, "license_type": "no_license", "max_line_length": 58, "num_lines": 24, "path": "/알고리즘/온라인저지/2022/10/1012/공유기 설치.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef install(distance):\n prev = houses[0]\n cnt = 1\n for house in houses[1:]:\n if house>=prev+distance:\n cnt += 1\n prev = house\n return cnt\n\nN, C = map(int, input().rstrip().split())\nhouses = sorted([int(input().rstrip()) for _ in range(N)])\nstart, end = 0, houses[-1]-houses[0]\nresult = 1\nwhile start<=end:\n mid = (start+end) // 2\n if install(mid)>=C:\n result = max(result, mid)\n start = mid+1\n else: end = mid-1\nprint(result)" }, { "alpha_fraction": 0.4357798099517822, "alphanum_fraction": 0.4403669834136963, "avg_line_length": 17.25, "blob_id": "8d38edaec428d40691cc989cf6ed61cbb5fadacd", "content_id": "4db2cbdd01f2c17507037cf7e87ef7663103a317", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 42, "num_lines": 12, "path": "/알고리즘/온라인저지/2022/02/0220/첫 글자를 대문자로.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nfor n in range(N):\n text = input()\n\n for i in range(len(text)):\n if i == 0:\n print(text[i].upper(), end='')\n continue\n print(text[i], end='')\n \n print()" }, { "alpha_fraction": 0.6515151262283325, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 21.33333396911621, "blob_id": "00877637a2e3b4506b90076de048e4d982e6495c", "content_id": "617f1d91833b575e542f8def22b9f2b830802766", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "no_license", "max_line_length": 41, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/07/0731/피자 (Small).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = 0\nfor i 
in range(int(input())): result += i\nprint(result)" }, { "alpha_fraction": 0.5478261113166809, "alphanum_fraction": 0.573913037776947, "avg_line_length": 16.769229888916016, "blob_id": "76bb87a5912a81999381510246cd5eba9e955c7f", "content_id": "cf8b5c777bb7c12a6fb5fb885c7d7d6957d9eaf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 41, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/10/1018/싸이클.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, P = map(int, input().rstrip().split())\narr = [0]*(P+1)\nflag = False\nnow = N\nwhile not flag:\n now = (now*N)%P\n arr[now] += 1\n if arr[now] == 3: flag = True\nprint(arr.count(2) + 1)" }, { "alpha_fraction": 0.5064935088157654, "alphanum_fraction": 0.5227272510528564, "avg_line_length": 16.882352828979492, "blob_id": "9449e0396e6c8fc2d8684b559bb20c1803264dc2", "content_id": "5cbc4ec1c7c56f150dee0c11a944afbe3b236727", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "no_license", "max_line_length": 26, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/11/1112/보너스 점수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nanswer = input().rstrip()\nresult = 0\nbonus = 0\nfor i in range(N):\n if answer[i] == 'X':\n bonus = 0\n elif answer[i] == 'O':\n result += i+1\n result += bonus\n bonus += 1\n # print(bonus)\nprint(result)\n " }, { "alpha_fraction": 0.4422110617160797, "alphanum_fraction": 0.4673366844654083, "avg_line_length": 17.18181800842285, "blob_id": "7b63d79d91bb155ffa1defbcad958aaf6bd23220", "content_id": "120a4cd802598ef4876facdee0d7fb59f637a969", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": 
"no_license", "max_line_length": 26, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/06/0620/가장 큰 금민수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = 0\nfor n in range(N, -1, -1):\n gms = True\n for s in str(n):\n if s not in '47':\n gms = False\n if gms:\n result = n\n break\nprint(result)" }, { "alpha_fraction": 0.3533916771411896, "alphanum_fraction": 0.37527352571487427, "avg_line_length": 49.83333206176758, "blob_id": "911a03edbeb1e0b018f1b5c171f100da51c97895", "content_id": "6574bab9eff9851dec609b6a858b8937090b50fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1236, "license_type": "no_license", "max_line_length": 70, "num_lines": 18, "path": "/알고리즘/온라인저지/2021/08/0814/ZOAC 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n시계모양에서 알파벳간에 거리는 ASCII 코드로 |A-B|\n13을 초과하면 26-|A-B|\n\"\"\"\nword = input() # 입력받을 단어\nword = 'A' + word # 단어의 맨 앞에 시작점인 A 추가\ndef zoac(A, B): # 글자간 거리를 구하는 함수 zoac()\n if abs(ord(A)-ord(B)) > 13: # 두 글자의 거리가 13을 초과하면\n result = 26 - abs(ord(A)-ord(B)) # 26에서 거리만큼을 빼줌\n # 한바퀴(26)에서 거리만큼 뒤로 오겠다는 뜻\n else: # 거리가 13아래면\n result = abs(ord(A)-ord(B)) # 거리 = 절대값\n return result # 결과 반환\nsum_word = 0 # 글자간 거리를 더할 변수\nfor i in range(1, len(word)): # 2번째 글자부터 마지막 글자까지\n # 두 글자씩 비교할 것이기 때문\n sum_word += zoac(word[i-1], word[i]) # 글자간 거리들을 변수에 더하고\nprint(sum_word) # 거리들을 더한 값을 출력" }, { "alpha_fraction": 0.4845360815525055, "alphanum_fraction": 0.5154638886451721, "avg_line_length": 15.222222328186035, "blob_id": "db968c031f912cd613aba4006f551dca03e93d18", "content_id": "253d1d645cb39911c0345829cf5c83d6e3d9f97e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 33, "num_lines": 18, "path": "/알고리즘/온라인저지/2022/02/0203/약수 구하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, K 
= map(int, input().split())\n\nmeasures = []\n\nfor i in range(1, int(N**0.5)+1):\n if not N%i:\n measures.append(i)\n \ntmp = measures[::-1]\n\nfor t in tmp:\n if N//t != N**0.5:\n measures.append(N//t)\n\nif len(measures) >= K:\n print(measures[K-1])\nelse:\n print(0)" }, { "alpha_fraction": 0.5076923370361328, "alphanum_fraction": 0.5230769515037537, "avg_line_length": 11.600000381469727, "blob_id": "ab606376ffdc7d2e4e383e89e98710c91c49647c", "content_id": "387bb3870e44cf4ea5cf7bf3f976b158c7a1c08d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 26, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/02/0205/플러그.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nresult = 0\n\nfor n in range(N):\n if result:\n result -= 1\n result += int(input())\n\nprint(result)\n " }, { "alpha_fraction": 0.49295774102211, "alphanum_fraction": 0.49295774102211, "avg_line_length": 19.428571701049805, "blob_id": "9eaebedf8599d6348bab3f7c540db90d8f2a41df", "content_id": "4b5bb8aee58a6cdbc58e45bc82172d3200af3c02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 29, "num_lines": 7, "path": "/알고리즘/온라인저지/2021/08/0814/문자열반복.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor t in range(T):\n R, word = input().split()\n R = int(R)\n for wor in word:\n print(wor*R, end='')\n print()" }, { "alpha_fraction": 0.5189003348350525, "alphanum_fraction": 0.5257731676101685, "avg_line_length": 23.33333396911621, "blob_id": "1703bdb343ec8af2476b537a8ec9b6d55fd2b84d", "content_id": "8f7a4b05798177ca5a4b5840c6e46d1f2934479f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 57, "num_lines": 
12, "path": "/알고리즘/온라인저지/2022/04/0412/질투진서.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, A, B = map(int, input().split())\narr = [list(map(int, input().split())) for _ in range(N)]\n# print(arr)\nA, B = A-1, B-1\nresult = 'HAPPY'\nfor a in arr[A]:\n if a > arr[A][B]:\n result = 'ANGRY'\nfor i in range(N):\n if arr[i][B] > arr[A][B]:\n result = 'ANGRY'\nprint(result)" }, { "alpha_fraction": 0.5368421077728271, "alphanum_fraction": 0.5368421077728271, "avg_line_length": 16.363636016845703, "blob_id": "41105922ce4a87432bc94c8f3ace7e710b55b4c6", "content_id": "ade66cae6231cac0d60201001c70cdf12b8131f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 44, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/09/0927/백대열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ndef gcd(a, b):\n while a%b: a, b = b, a%b\n return b\n\nN, M = map(int, input().rstrip().split(':'))\nG = gcd(N, M)\nprint('{}:{}'.format(N//G, M//G))" }, { "alpha_fraction": 0.37987011671066284, "alphanum_fraction": 0.3874458968639374, "avg_line_length": 21.0238094329834, "blob_id": "769f02edc8b38b2c4980a4a772f32c0348e5aef6", "content_id": "4ce566aa035a8155f29f31c58286a392c2561a5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 924, "license_type": "no_license", "max_line_length": 56, "num_lines": 42, "path": "/알고리즘/온라인저지/2021/10/1016/AC.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from collections import deque\nimport sys\n\nT = int(sys.stdin.readline())\n\nfor t in range(T):\n order = sys.stdin.readline().rstrip()\n order.replace('RR', '')\n N = int(sys.stdin.readline())\n arr = sys.stdin.readline().rstrip()[1:-1].split(',')\n q = deque(arr)\n\n rev, left, right = False, 0, len(q)-1\n flag = False\n\n if N == 0:\n q 
= []\n right = 0\n\n for o in order:\n if o == 'R':\n if rev:\n rev = False\n else:\n rev = True\n elif o == 'D':\n if len(q) < 1:\n flag = True\n print('error')\n break\n else:\n if rev:\n q.pop()\n else:\n q.popleft()\n \n if not flag:\n if rev:\n q.reverse()\n print('['+','.join(q)+']')\n else:\n print('['+','.join(q)+']')" }, { "alpha_fraction": 0.4639175236225128, "alphanum_fraction": 0.5103092789649963, "avg_line_length": 16.727272033691406, "blob_id": "ec9d3bad09e8647d5b5847c420b6243e65fe7e18", "content_id": "e3c28ba3d6a2218b345e48b486549e6990ff52ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 26, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/06/0604/신기한 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "# print(list(str(392)))\nN = int(input())\ni = 1\nresult = 0\nfor n in range(1, N+1):\n tmp = 0\n for j in list(str(n)):\n tmp += int(j)\n if not n%tmp:\n result += 1\nprint(result)" }, { "alpha_fraction": 0.4649122953414917, "alphanum_fraction": 0.5175438523292542, "avg_line_length": 15.428571701049805, "blob_id": "78006e23c92c804515c288f022cf001bb238f1d5", "content_id": "b5142fe3d6f95811beeeda813722d1a7776dce6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 42, "num_lines": 7, "path": "/알고리즘/온라인저지/2023/03/0331/Máquina de café.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A = int(input())\nB = int(input())\nC = int(input())\n\nresult = [A*4 + B*2, A*2 + C*2, B*2 + C*4]\n\nprint(min(result))" }, { "alpha_fraction": 0.6237623691558838, "alphanum_fraction": 0.6237623691558838, "avg_line_length": 13.571428298950195, "blob_id": "99742ea043268f0a915fb625b4ac8ab22db8fab2", "content_id": "d94d37bbbc84c5dea789b5b5ae551ae6490c876d", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/11/1121/CAPS.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nS = input().rstrip()\nfor s in S:\n print(s.upper(), end='')" }, { "alpha_fraction": 0.42718446254730225, "alphanum_fraction": 0.45436891913414, "avg_line_length": 21.434782028198242, "blob_id": "08fb6100f1a847f5129a0eabe4427e3861143123", "content_id": "44c22493c8bde902c54c7164af3a042d46361f43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 35, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/03/0330/카드놀이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A = list(map(int, input().split()))\nB = list(map(int, input().split()))\npoint = [0, 0]\nwinner = ''\nall_draw = True\nfor i in range(len(A)):\n if A[i] > B[i]:\n point[0] += 3\n all_draw = False\n if point[0] >= point[1]:\n winner = 'A'\n elif A[i] < B[i]:\n point[1] += 3\n all_draw = False\n if point[0] <= point[1]:\n winner = 'B'\n elif A[i] == B[i]:\n point[0] += 1\n point[1] += 1\nif all_draw:\n winner = 'D'\nprint(*point)\nprint(winner)" }, { "alpha_fraction": 0.5875576138496399, "alphanum_fraction": 0.6059907674789429, "avg_line_length": 38.54545593261719, "blob_id": "3038b28ece597c614a7e819b1ffd65a1fccbe94f", "content_id": "d007ea2b5b54003d84ffdfc84443fe7abb96ab19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "no_license", "max_line_length": 65, "num_lines": 11, "path": "/알고리즘/온라인저지/2021/08/0803/곱셈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a = int(input()) # a를 정수로 입력받고\nb = input() # b를 str 그대로 입력받아서\nc = 1 * int(b) # c에 b를 백업해두고\nb = list(b) # b는 리스트로 ['3', '8', '5']로 만들어 준 
다음\ncalc_list = [] # 각각의 계산값들을 더해서 리스트에 [0], [1], [2]에 저장할 리스트를 만들어주고\nfor i in range(3): # 리스트를 순회하면서 \n calc_list.append(a * int(b[i])) # 각각 자릿수를 곱한 값들을 구해서 저장\ncalc_list.reverse() # 결과값들을 뒤집어서\nfor calc in calc_list: # 결과값 리스트를 순회하면서\n print(calc) # 출력\nprint(a * c) # 마지막에 백업해놓은 원래b값과 a값을 곱해서 출력" }, { "alpha_fraction": 0.4900990128517151, "alphanum_fraction": 0.5198019742965698, "avg_line_length": 21.55555534362793, "blob_id": "126441bc363e14c180360d481e423f84dfadeb94", "content_id": "5e225fc797a5de15c56d34b167a9c4221030e3fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 202, "license_type": "no_license", "max_line_length": 43, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/08/0827/Square Pasture.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "X = []\nY = []\nfor _ in range(2):\n tmp = list(map(int, input().split()))\n X.append(tmp[0])\n Y.append(tmp[1])\n X.append(tmp[2])\n Y.append(tmp[3])\nprint(max(max(X)-min(X), max(Y)-min(Y))**2)" }, { "alpha_fraction": 0.40077072381973267, "alphanum_fraction": 0.42581889033317566, "avg_line_length": 19, "blob_id": "64aae3888b6474eb53faa4abcb924191ac01c86a", "content_id": "aef03e4b65529dc5bcd80381cb248fab3113bb73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 45, "num_lines": 26, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/1. 
파이썬 SW문제해결 기본 List1/8차시 1일차 - 숫자 카드.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(1, T+1):\n N = int(input())\n\n counts = [0] * (10)\n\n num = input()\n \n for n in num:\n counts[int(n)] += 1\n \n cnt = 0\n result = 0\n\n if counts.count(max(counts)) == 1:\n result = counts.index(max(counts))\n cnt = max(counts)\n else: # != 1\n for i in range(9, -1, -1):\n if counts[i] == max(counts):\n result = i\n cnt = counts[i]\n break\n\n print('#{} {} {}'.format(t, result, cnt))" }, { "alpha_fraction": 0.47037702798843384, "alphanum_fraction": 0.4955116808414459, "avg_line_length": 20.461538314819336, "blob_id": "aed8a86468c1912102969352622e717e97f5836e", "content_id": "ee94a2b99e64f93f42fbcbf44a6b9f5a091bb32d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 557, "license_type": "no_license", "max_line_length": 51, "num_lines": 26, "path": "/알고리즘/온라인저지/2023/02/0226/인간-컴퓨터 상호작용.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nS = input().strip()\nprefix_sum = [[0]*26 for _ in range(len(S))]\n\nfor i in range(len(S)):\n for j in range(26):\n idx = ord(S[i])-97\n \n if idx == j:\n prefix_sum[i][j] = prefix_sum[i-1][j]+1\n else:\n prefix_sum[i][j] = prefix_sum[i-1][j]\n\nfor _ in range(int(input().strip())):\n A, L, R = input().strip().split()\n L, R = map(int, (L, R))\n\n idx = ord(A)-97\n result = prefix_sum[R][idx]\n if L != 0:\n result -= prefix_sum[L-1][idx]\n\n print(result)" }, { "alpha_fraction": 0.41981130838394165, "alphanum_fraction": 0.4811320900917053, "avg_line_length": 16.75, "blob_id": "a371b57874b1fcd826e79f939671ab8fc7d7fe80", "content_id": "b8d8d5245f6269533ccf2d2c0693a915f98b5cfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 24, "num_lines": 12, "path": 
"/알고리즘/온라인저지/2023/03/0315/Tournament Selection.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = -1\ncnt = 0\nfor _ in range(6):\n if input() == 'W':\n cnt += 1\nif cnt == 1 or cnt == 2:\n result = 3\nif cnt == 3 or cnt == 4:\n result = 2\nif cnt == 5 or cnt == 6:\n result = 1\nprint(result)" }, { "alpha_fraction": 0.6127819418907166, "alphanum_fraction": 0.6278195381164551, "avg_line_length": 25.700000762939453, "blob_id": "b20d00cba5299247d7a20f8389e0a10e06db7355", "content_id": "6be19f47a64a3858d760879f0f1baa0aee2c1731", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 89, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/12/1228/2009년.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from datetime import datetime, date\n\n\ndef what_day_is_it(date):\n days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n day = date.weekday()\n print(days[day])\n\nD, M = map(int, input().split())\nwhat_day_is_it(date(2009, M, D))" }, { "alpha_fraction": 0.5090909004211426, "alphanum_fraction": 0.5181818008422852, "avg_line_length": 26.75, "blob_id": "61dc20bc2f7d6512cac558dfa6a5ded2202e5eca", "content_id": "2f76d270a6241e27e07102e6e888feda31ed6b39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 110, "license_type": "no_license", "max_line_length": 34, "num_lines": 4, "path": "/알고리즘/온라인저지/2023/06/0602/Oddities.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = ['even', 'odd']\nfor _ in range(int(input())):\n X = int(input())\n print(f'{X} is {result[X%2]}')" }, { "alpha_fraction": 0.5087336301803589, "alphanum_fraction": 0.539301335811615, "avg_line_length": 20.85714340209961, "blob_id": "a0275f9e30570eaf2de1467cba760dcd56060944", "content_id": "bc18ac8001dd741704bb93da1e6ee733e2b550a3", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 71, "num_lines": 21, "path": "/알고리즘/온라인저지/2022/10/1003/올림픽.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, K = map(int, input().rstrip().split())\narr = []\nfor n in range(N): arr.append(list(map(int, input().rstrip().split())))\narr.sort(key=lambda x:(-x[1], -x[2], -x[3]))\nprev = arr[0][1:]\nrank = 1\nstep = 0\nresult = 1\nfor i in range(1, N):\n now = arr[i][1:]\n if now != prev: \n rank += 1+step\n prev = now[:]\n step = 0\n else: step += 1\n if arr[i][0] == K: result = rank; break\nprint(result)" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 24.399999618530273, "blob_id": "dfc0f7c73b3897bb39c82c577e8f9fd9c8ae4736", "content_id": "9286afeb85c69592efd21df79bd29c676415dd74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/12/1202/3000번 버스.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n result = 1\n for i in range(int(input())-1):\n result = result*2+1\n print(result)" }, { "alpha_fraction": 0.4647887349128723, "alphanum_fraction": 0.48826292157173157, "avg_line_length": 25.75, "blob_id": "f4e1ba7a9ee0342d8c2a53877caec5f68ab65536", "content_id": "5fcf237c8d826108c5611ea0abc552ef1b800b00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 53, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/12/1215/IBM 빼기 1.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfor tc in range(1, N+1):\n print('String #{}'.format(tc))\n for i in 
input():\n tmp = ord(i)+1\n print(chr(tmp) if tmp <= 90 else 'A', end='')\n if tc == N: break\n print('\\n')" }, { "alpha_fraction": 0.5736842155456543, "alphanum_fraction": 0.6052631735801697, "avg_line_length": 20.22222137451172, "blob_id": "48fa14d179b07878114af8fa688e7f07997243aa", "content_id": "ff1400aa71db5a8f38a30eb0ca9760660ac8eac9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 56, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/02/0204/끝말잇기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "input()\nresult = 1\nwords = input().split()\npalindrome = words[0][0]\nfor word in words:\n if word[0] != palindrome or word[-1] != palindrome: \n result = 0\n break\nprint(result)" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 24.5, "blob_id": "06c1e29cc75609b090847c86d43b1bdeacd0ae34", "content_id": "5590086b8620ad2f2f2a3e53b8625e85685d8907", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 42, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/01/0107/사탕 선생 고창영.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor t in range(T):\n input()\n N = int(input())\n arr = [int(input()) for _ in range(N)]\n print('NO' if sum(arr)%N else 'YES')" }, { "alpha_fraction": 0.6027088165283203, "alphanum_fraction": 0.6139954924583435, "avg_line_length": 25.117647171020508, "blob_id": "cb15499481384f67ce6d7be19b79140827ecedfc", "content_id": "35293e16c7169ad9befdd6333433fdada4b8cdb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 589, "license_type": "no_license", "max_line_length": 66, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/11/1113/최소 회의실 개수.py", "repo_name": "sorrow4468/BAEKJOON", 
"src_encoding": "UTF-8", "text": "import sys\nimport heapq\n\ninput = sys.stdin.readline\n\nN = int(input())\narr = [list(map(int, input().rstrip().split())) for _ in range(N)]\narr.sort(key=lambda x:x[0]) # 회의 시작 시간 기준 오름차순 정렬\nrooms = [0] # 비어있는 회의실 하나로 시작\nresult = 1 # 회의실은 한 개 이상 반드시 필요하다\nfor s, e in arr:\n if s >= rooms[0]: # 빈 회의실이 있다면\n heapq.heappop(rooms)\n else: # 빈 회의실이 없다면\n result += 1 # 최소 회의실 개수 추가\n heapq.heappush(rooms, e) # 회의를 시작하지\nprint(result)" }, { "alpha_fraction": 0.4049079716205597, "alphanum_fraction": 0.4171779155731201, "avg_line_length": 18.235294342041016, "blob_id": "2a6ea32fdd66642bbe0e9562aaa155878592a4c0", "content_id": "c73a338b7051d98a9530f1da4753e01175510364", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 41, "num_lines": 17, "path": "/알고리즘/온라인저지/2022/06/0602/양념 반 후라이드 반.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B, C, X, Y = map(int, input().split())\nx = min(X, Y)\ny = max(X, Y)\nresult = 0\nif C > (A+B)//2:\n result += A*X + B*Y\nelse:\n result += C*x*2\n if X != Y:\n if X > Y:\n result += A*(y-x)\n else:\n result += B*(y-x)\n tmp = C*y*2\n if result > tmp:\n result = tmp\nprint(result)" }, { "alpha_fraction": 0.35305342078208923, "alphanum_fraction": 0.4236641228199005, "avg_line_length": 17.75, "blob_id": "fe906653bdfe93056c1e7e1c26a7f7eed12401e1", "content_id": "0637b9a16ce27162b9fd105ad5699ac80f84f5c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "no_license", "max_line_length": 36, "num_lines": 28, "path": "/알고리즘/온라인저지/2022/03/0321/8진수 2진수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def oct_to_bin(x):\n global result\n dictt = {\n '0': '000',\n '1': '001',\n '2': '010',\n '3': '011',\n '4': '100',\n '5': '101',\n '6': '110',\n '7': '111',\n }\n # 
print(dictt[x])\n return dictt[x]\n\nN = input()\nif N == '0':\n print(0)\n exit()\n# print(N, len(N))\nresult = [''] * (len(N))\n# print(result)\nfor i in range(len(N)):\n result[i] = oct_to_bin(N[i])\n # print(result)\nprint(result[0].lstrip('0'), end='')\nfor r in result[1:]:\n print(r, end='')" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.539130449295044, "avg_line_length": 13.5, "blob_id": "0ffca1c2cba7af7d38899a033652919e9ca9935e", "content_id": "32a8cec44ca606761a704c94dda891ee177719c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/6차시 2. 자료구조 – 리스트, 튜플 - 연습문제 4.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "l = []\n\nfor i in range(5):\n l.append(int(input()))\n\navg = sum(l)/5\n\nprint('입력된 값 {}의 평균은 {}입니다.'.format(l, avg))" }, { "alpha_fraction": 0.4297872483730316, "alphanum_fraction": 0.47659575939178467, "avg_line_length": 22.600000381469727, "blob_id": "02d63904efaa4e50d16b1f03247fe37a6cd547ce", "content_id": "ea0ce9d749ad30b4e9f4bd1836cd76ccdc14e8fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/알고리즘/온라인저지/2022/08/0827/럭비 클럽.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "result = ['Senior', 'Junior']\nwhile True:\n tmp = input()\n if tmp == '# 0 0': break\n M = tmp.split()\n print(M[0], end=' ')\n if int(M[1]) > 17 or int(M[2]) >= 80:\n print(result[0])\n else:\n print(result[1])" }, { "alpha_fraction": 0.48630136251449585, "alphanum_fraction": 0.4931506812572479, "avg_line_length": 11.166666984558105, "blob_id": "c4f7639e34470782d29768e0016833d1f3852b8e", "content_id": "9a980c670a15c519fdd03c50afc98d7a17f3432c", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 18, "num_lines": 12, "path": "/알고리즘/온라인저지/2021/12/1213/소음.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A = int(input())\nope = input()\nB = int(input())\n\nresult = 0\n\nif ope == '+':\n result = A + B\nelif ope == '*':\n result = A * B\n\nprint(result)\n" }, { "alpha_fraction": 0.4037036895751953, "alphanum_fraction": 0.4185185134410858, "avg_line_length": 21.58333396911621, "blob_id": "609539741264b02181e82177bc6ca33fdfde4e18", "content_id": "55e7826b75a2500ccbc42998a034884566e65dad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 33, "num_lines": 12, "path": "/알고리즘/온라인저지/2023/04/0403/헤라클레스와 히드라.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "K = int(input())\nfor tc in range(1, K+1):\n H = int(input())\n action = input()\n for a in action:\n if H:\n if a == 'c': H += 1\n elif a == 'b': H -= 1\n else: break\n print(f'Data Set {tc}:')\n print(H)\n if tc != K: print()" }, { "alpha_fraction": 0.3520749807357788, "alphanum_fraction": 0.3627844750881195, "avg_line_length": 43, "blob_id": "8cc834de00fedce201770fc9a60d29d84a773398", "content_id": "eff96d7ea2361b9e9efdb761caad8fbae22a3604", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 64, "num_lines": 17, "path": "/알고리즘/온라인저지/2021/08/0813/컵홀더.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n컵홀더 개수 = 좌석수 - 커플석 개수 + 1\n리스트 받아서 while문으로 리스트 탐색\n\"\"\"\nN = int(input()) # 좌석 수\nseats = list(input()) # 좌석 리스트\ni = 0 # 좌석 리스트 탐색 인덱스\ncouple_go_to_hell = 0 # 커플석 개수 카운트\nwhile i != len(seats): # 좌석 탐색 반복\n if seats[i] == 'L': # 자리가 커플석이면\n couple_go_to_hell 
+= 1 # 커플석 카운트\n i += 1 # 옆칸도 커플석이니까 한 칸 패스\n i += 1 # 다음 좌석으로 ㄱㄱ\nif N <= N - couple_go_to_hell + 1: # 커플석이 없으면\n print(N) # 그냥 좌석수 출력\nelse: # 커플석이 많아서 컵홀더가 좌석보다 적으면\n print(N - couple_go_to_hell + 1) # 컵홀더개수 출력" }, { "alpha_fraction": 0.48120301961898804, "alphanum_fraction": 0.5037593841552734, "avg_line_length": 25.799999237060547, "blob_id": "cc2f98d981d8cd0f0f0fc4bf6cc8b3426c92eeb4", "content_id": "066b1a686c24fd6e83ccaec74b26f921a3194690", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/08/0814/Abdelrahman.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\nfor t in range(1, T+1):\n N, M = map(int, input().split())\n result = M - (N-1)\n print(f'Case {t}: {result}')" }, { "alpha_fraction": 0.4912280738353729, "alphanum_fraction": 0.5065789222717285, "avg_line_length": 23.052631378173828, "blob_id": "73f4238470095f5afc28767fa20ba1413d39b1e7", "content_id": "7d34eb6cdb689eb8e038ba1b097bed082d77c682", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 456, "license_type": "no_license", "max_line_length": 38, "num_lines": 19, "path": "/알고리즘/온라인저지/2023/01/0104/친구.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\ndictt = {}\nfor n in range(1, N+1):\n dictt[n] = set()\nfor _ in range(M):\n A, B = map(int, input().split())\n dictt[A].add(B)\n dictt[B].add(A)\nfor n in range(1, N+1):\n print(len(dictt[n]))\n\n# N, M = map(int, input().split())\n# arr = [set() for _ in range(N+1)]\n# for m in range(M):\n# A, B = map(int, input().split())\n# arr[A].add(B)\n# arr[B].add(A)\n# for n in range(1, N+1):\n# print(len(arr[n]))" }, { "alpha_fraction": 0.44999998807907104, "alphanum_fraction": 0.5, "avg_line_length": 17.81818199157715, "blob_id": 
"27df47c51cb1b1cba93d06622823b436fa082b35", "content_id": "d54622dff050a05e0c029a49c176223e9022cc9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 840, "license_type": "no_license", "max_line_length": 63, "num_lines": 33, "path": "/알고리즘/온라인저지/2021/07/0731/성실한 개미.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "num = 10\nboard = [[int(x) for x in input().split()] for y in range(num)]\n # 10 * 10 배열 받기\n\"\"\"\n스타트는 고정\n개미가 있는 자리가 0이면 \n 있는 자리를 9로 바꾸고\n 그 오른쪽칸이 1이면 다음 경로를 한칸 아래로\n 그 외에는 오른쪽으로\n 2가 있는 자리에 도착할건데 만약 아닐수 있으니\n 2인지 확인하고 9로 바꿔주고 코드 종료\n\"\"\"\n# 개미의 x, y좌표\n# 인덱스에서는 [y][x]로 호출해야함\ny = 1\nx = 1\nant = board[y][x]\n\nwhile board[y][x] == 0:\n board[y][x] = 9\n if board[y][x+1] == 1:\n y += 1\n elif board[y][x+1] == 0:\n x += 1\n elif board[y][x+1] == 2:\n x += 1\nif board[y][x] == 2:\n board[y][x] = 9\n\nfor i in range(10):\n for j in range(10): \n print(board[i][j], end=' ')\n print() # 출력문" }, { "alpha_fraction": 0.4735516309738159, "alphanum_fraction": 0.508816123008728, "avg_line_length": 14.920000076293945, "blob_id": "f8164e7aae03792885c6b87ce456f7ad275e5d15", "content_id": "16502a4146021495b31a0e993b8701743b9f709d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "no_license", "max_line_length": 39, "num_lines": 25, "path": "/알고리즘/온라인저지/2022/01/0131/부녀회장이 될테야.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nks = []\nns = []\n\nfor t in range(T):\n ks.append(int(input()))\n ns.append(int(input()))\n\nmaxk = max(ks)\nmaxn = max(ns)\n\na = [[0]*maxn for _ in range(maxk+1)]\n\na[0] = list(range(1, maxn+1))\n\nfor i in range(maxk+1):\n a[i][0] = 1\n\nfor i in range(1, maxk+1):\n for j in range(1, maxn):\n a[i][j] = a[i][j-1] + a[i-1][j]\n\nfor t in range(T):\n print(a[ks[t]][ns[t]-1])" }, { "alpha_fraction": 0.5095447897911072, "alphanum_fraction": 
0.5433186292648315, "avg_line_length": 28.65217399597168, "blob_id": "1d7a8d8f9315853df1c2fa934226c96939dacf54", "content_id": "144e58f823f09ccd8f27229db96577726501c8a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 681, "license_type": "no_license", "max_line_length": 59, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/11/1114/중간계 전쟁.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\ngandalf = [1, 2, 3, 3, 4, 10]\nsauron = [1, 2, 2, 2, 3, 5, 10]\nfor tc in range(1, int(input().rstrip())+1):\n G = S = 0\n gandalf_army = list(map(int, input().rstrip().split()))\n for i in range(6):\n G += gandalf[i]*gandalf_army[i]\n sauron_army = list(map(int, input().rstrip().split()))\n for i in range(7):\n S += sauron[i]*sauron_army[i]\n result = [\n 'No victor on this battle field', # draw\n 'Good triumphs over Evil', # gandalf win\n 'Evil eradicates all trace of Good', # sauron win\n ]\n i = 0\n if G > S: i = 1\n elif G < S: i = 2\n print('Battle {}: {}'.format(tc, result[i]))" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 21.33333396911621, "blob_id": "44ef0dabf4d89191120e9322ea20c69895822e9e", "content_id": "fb929a41de7ed1d07bc3ac86b303731aaf2ee064", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 41, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/05/0531/오버플로우와 모듈러.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, M = map(int, input().split())\nresult = 1\nfor i in list(map(int, input().split())):\n result *= i%M\n result %= M\nprint(result)" }, { "alpha_fraction": 0.5465465188026428, "alphanum_fraction": 0.5705705881118774, "avg_line_length": 21.266666412353516, "blob_id": "0a99b8bed350199c86e028108367a35c9ad79215", "content_id": 
"69d19f44fe94bb0697854c8a370c0b885952f3fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 39, "num_lines": 15, "path": "/알고리즘/온라인저지/2021/08/0814/음계.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "notes = list(map(int, input().split()))\nascending = 0\ndescending = 0\nfor i in range(1, len(notes)):\n if notes[i-1] < notes[i]:\n ascending += 1\n else:\n descending += 1\nif ascending != 0 and descending != 0:\n print('mixed')\nelse:\n if ascending:\n print('ascending')\n else:\n print('descending')" }, { "alpha_fraction": 0.43448275327682495, "alphanum_fraction": 0.4689655303955078, "avg_line_length": 19.85714340209961, "blob_id": "b04fba16166df29c221b5d00184e9acb22be15f2", "content_id": "ce8c0ed6cec8ac6912d86ef650be66549c3e2e5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "no_license", "max_line_length": 39, "num_lines": 7, "path": "/알고리즘/온라인저지/2022/02/0207/카이사르 암호.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "word = input()\n\nfor w in word:\n if ord(w)-3 < ord('A'):\n print(chr(ord(w)-3+26), end='')\n else:\n print(chr(ord(w)-3), end='')" }, { "alpha_fraction": 0.3789346218109131, "alphanum_fraction": 0.39830508828163147, "avg_line_length": 18.690475463867188, "blob_id": "87b794f2384746ab712e13c8cb075e2fa93092a3", "content_id": "265e74ebcb1b3d3677c2f87d75289616b159e6f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 826, "license_type": "no_license", "max_line_length": 45, "num_lines": 42, "path": "/알고리즘/SWEA/LEARN/Course/2. Programming Intermediate/6. 
파이썬 SW문제해결 기본 Queue/8차시 6일차 - 노드의 거리.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(1, T+1):\n V, E = map(int, input().split())\n \n arr = [[] for _ in range(V+1)]\n\n for e in range(E):\n a, b = map(int, input().split())\n arr[a].append(b)\n arr[b].append(a)\n \n S, G = map(int, input().split())\n\n visited = [0] * (V+1)\n cnt = 0\n\n q = [arr[S]+[cnt]]\n visited[S] = 1\n result = 0\n found = False\n\n while q:\n tmp = q.pop(0)\n cnt = tmp[-1]\n if found:\n break\n\n for i in range(len(tmp)-1):\n if tmp[i] == G:\n result = cnt+1\n found = True\n break\n\n if visited[tmp[i]] == 0:\n q.append(arr[tmp[i]]+[cnt+1])\n visited[tmp[i]] = 1\n\n if S == G:\n result = 0\n\n print('#{} {}'.format(t, result))" }, { "alpha_fraction": 0.3980582654476166, "alphanum_fraction": 0.4417475759983063, "avg_line_length": 33.5, "blob_id": "074d601470bd3339232040a939dd18dd09d237b9", "content_id": "6f6693ce788e50fbbeab0717c4e0ca5be097803d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 306, "license_type": "no_license", "max_line_length": 53, "num_lines": 6, "path": "/알고리즘/온라인저지/2021/07/0730/[기초-산술연산] 정수 2개 입력받아 차 계산하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "a = input() # -> 123\nb = input() # -> -123\nc = int(a) - int(b) # 입력받은 문자열 a와 b를 int로 바꿔주고 뻴셈\nprint(c) # -> 246\n# 깃, 커밋은 양보다 질\n# 한 문제를 올려도, 정말 고민하고 의미있게 풀은 문제들을 올리자" }, { "alpha_fraction": 0.5729166865348816, "alphanum_fraction": 0.5763888955116272, "avg_line_length": 21.230770111083984, "blob_id": "6d3d751f018fb0ce3334b74fd1beaa0ac3a42ba9", "content_id": "6610edd5b21e7fac78a568fa6c20e12dfb9d90f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 40, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/12/1204/파일 완전 삭제.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": 
"UTF-8", "text": "N = int(input())\nfiles = list(map(int, list(input())))\nresult = ''\nfor i in range(len(files)):\n if N%2:\n result += str(int(not files[i]))\n else:\n result += str(files[i])\ntmp = input()\nif result == tmp:\n print('Deletion succeeded')\nelse:\n print('Deletion failed')" }, { "alpha_fraction": 0.5380434989929199, "alphanum_fraction": 0.570652186870575, "avg_line_length": 19.55555534362793, "blob_id": "97a8785a82727cefada1319277f22dfe712284fc", "content_id": "41e60587370956a8762eeb785b45ee431a3d4215", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 184, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/알고리즘/온라인저지/2023/03/0308/노 땡스!.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\narr = sorted(list(map(int, input().split())))\nlast_plus_1 = 0\nresult = 0\nfor a in arr:\n if a != last_plus_1:\n result += a\n last_plus_1 = a+1\nprint(result)" }, { "alpha_fraction": 0.5068492889404297, "alphanum_fraction": 0.5369862914085388, "avg_line_length": 17.299999237060547, "blob_id": "5674f65e6c276720abef73f6eef229b84963d3dc", "content_id": "4c6f05b2b4a6e283efde43654841a258c98a3f74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 36, "num_lines": 20, "path": "/알고리즘/온라인저지/2022/07/0712/트리의 부모 찾기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nsys.setrecursionlimit(99999)\n\n\ndef dfs(i):\n for n in nodes[i]:\n if not parents[n]:\n parents[n] = i\n dfs(n)\n\n\nN = int(input())\nnodes = [[] for _ in range(N+1)]\nparents = [0] * (N+1)\nfor n in range(N-1):\n s, e = map(int, input().split())\n nodes[s].append(e)\n nodes[e].append(s)\ndfs(1)\nfor p in parents[2:]: print(p)" }, { "alpha_fraction": 0.38985738158226013, "alphanum_fraction": 0.4088748097419739, "avg_line_length": 
22.370370864868164, "blob_id": "d5e77ae4f623572b1ace0c7e6fbc619cb623356f", "content_id": "cedb0cc1c95bb36174d8034faea9734680b72c64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 631, "license_type": "no_license", "max_line_length": 45, "num_lines": 27, "path": "/알고리즘/온라인저지/2021/08/0822/딱지놀이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nfor tc in range(N):\n A_cards = list(map(int, input().split()))\n A_count = A_cards.pop(0)\n B_cards = list(map(int, input().split()))\n B_count = B_cards.pop(0)\n A_cards.sort()\n B_cards.sort()\n A = [0] * 5 \n B = [0] * 5\n for a in A_cards:\n A[a] += 1\n for b in B_cards:\n B[b] += 1\n winner = ''\n for i in range(4, -1, -1):\n if i == 0:\n winner = 'D'\n if A[i] > B[i]:\n winner = 'A'\n break\n elif A[i] == B[i]:\n continue\n elif A[i] < B[i]:\n winner = 'B'\n break\n print(winner)\n" }, { "alpha_fraction": 0.5591397881507874, "alphanum_fraction": 0.5806451439857483, "avg_line_length": 17.799999237060547, "blob_id": "654d1ed2b9eb1322656e01ae0932c1719a69a8c9", "content_id": "c850664c96342e3368085908d7532bbae9f40ab7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/알고리즘/온라인저지/2022/09/0907/母音を数える (Counting Vowels).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, S = input(), input()\nresult = 0\nfor s in S:\n if s in 'aeiou': result += 1\nprint(result)" }, { "alpha_fraction": 0.540123462677002, "alphanum_fraction": 0.595678985118866, "avg_line_length": 39.625, "blob_id": "59376e7d942beffa1ca176e04b9526eed994e2e8", "content_id": "b3b651ee76c6ab01914d2e04379e2fa0a01b255e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 58, "num_lines": 8, "path": 
"/알고리즘/온라인저지/2022/10/1012/Mini Fantasy War.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "for _ in range(int(input())):\n inputs = list(map(int, input().split()))\n stats = []\n stats.append(max(1, inputs[0]+inputs[4]))\n stats.append(max(1, inputs[1]+inputs[5]))\n stats.append(max(0, inputs[2]+inputs[6]))\n stats.append(inputs[3]+inputs[7])\n print(stats[0] + stats[1]*5 + stats[2]*2 + stats[3]*2)" }, { "alpha_fraction": 0.4203979969024658, "alphanum_fraction": 0.44029849767684937, "avg_line_length": 24.0625, "blob_id": "6f4ba5881baad944af19c1cd9963f67c97e2cd9c", "content_id": "0f3f6651ada2655d733f04174a785f0530d42ff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 402, "license_type": "no_license", "max_line_length": 43, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/02/0204/ROT13.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "sen = input()\n\nfor s in sen:\n o = ord(s)\n if s in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n if o <= ord('M'):\n print(chr(o + 13), end='')\n else:\n print(chr(o - 13), end='')\n elif s in 'abcdefghijklmnopqrstuvwxyz':\n if o >= ord('n'):\n print(chr(o - 13), end='')\n else:\n print(chr(o + 13), end='')\n else:\n print(s, end='')\n\n" }, { "alpha_fraction": 0.4649122953414917, "alphanum_fraction": 0.5350877046585083, "avg_line_length": 9.363636016845703, "blob_id": "da6070e8c56bce5c816f37104b49fa4025897b1e", "content_id": "ceabc03666e6cc547f5c2554490a295f304672e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 22, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/01/0131/열 개씩 끊어 출력하기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "sen = input()\n\ni = 10\n\nlenn = len(sen)\n\nwhile i < lenn:\n print(sen[i-10:i])\n i += 10\n\nprint(sen[i-10:lenn])\n" }, { "alpha_fraction": 0.3687500059604645, 
"alphanum_fraction": 0.41874998807907104, "avg_line_length": 13.636363983154297, "blob_id": "f2aec11634e7af6ae5ddc2f96284c75470aaf718", "content_id": "25f52b889ebe1d052db7a921757a4a9cc5f2c5aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 19, "num_lines": 11, "path": "/알고리즘/온라인저지/2022/04/0406/3n+1 수열.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\nresult = 0\nwhile True:\n if N == 1:\n break\n if N % 2:\n N = N*3 + 1\n else:\n N //= 2\n result += 1\nprint(result+1)" }, { "alpha_fraction": 0.4918699264526367, "alphanum_fraction": 0.5081300735473633, "avg_line_length": 18, "blob_id": "376dae1f54760f0650d74cd5e194527938eadd47", "content_id": "3ee57821aee9c98b2894dba70c6ed44574f2df8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 35, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/09/0901/회전하는 큐.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, _ = map(int, input().split())\nM = list(map(int, input().split()))\nN = list(range(1, N+1))\nprev = 0\nresult = 0\nfor m in M:\n i = N.index(m)\n D = abs(prev - i)\n D = min(D, len(N)-D)\n result += D\n N.pop(i)\n prev = i\nprint(result)" }, { "alpha_fraction": 0.32826748490333557, "alphanum_fraction": 0.4559270441532135, "avg_line_length": 11.692307472229004, "blob_id": "c93b2bf7fe73a6474a83a5d80f7b5cf25f818aa9", "content_id": "20ca2229d9415f35cf5ecf6a7db53f47cac3094c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 44, "num_lines": 26, "path": "/알고리즘/온라인저지/2022/02/0220/타일 장식물.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n1 : 1+1+1+1\n2 : 4-1+1+1+1\n3 : 6-2+2+2+2\n4 : 10-3+3+3+3\n5 : \n1 
: \n\"\"\"\n\nN = int(input())\n\nresult = [0, 4]\n\ndp = [0, 1]\n\nif N >= 2:\n for i in range(2, N+1):\n dp.append(dp[i-1] + dp[i-2])\n\n for i in range(2, N+1):\n result.append(result[i-1] + dp[i]*2)\n\nif result == 1:\n print(4)\nelse:\n print(result[-1])" }, { "alpha_fraction": 0.4178321659564972, "alphanum_fraction": 0.45279720425605774, "avg_line_length": 23.913043975830078, "blob_id": "1465176bef6fd75580d4ba2386ca304d98343832", "content_id": "1f8f4d4ee7386d933c855d923f70308802e0e710", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 55, "num_lines": 23, "path": "/알고리즘/온라인저지/2021/08/0829/색종이.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\nN = int(sys.stdin.readline())\nboard = [[0]*1002 for _ in range(1002)]\nmax_y = 0\nmax_x = 0\nfor n in range(1, N+1):\n Y, X, H, W = map(int, sys.stdin.readline().split())\n for y in range(Y, Y+H):\n for x in range(X, X+W):\n board[y][x] = n\n if max_y < y:\n max_y = y\n if max_x < x:\n max_x = x\nresult = [0] * N\nfor n in range(1, N+1):\n for i in range(max_y+1):\n for j in range(max_x+1):\n if board[i][j] == n:\n result[n-1] += 1\nfor r in result:\n print(r)" }, { "alpha_fraction": 0.5398229956626892, "alphanum_fraction": 0.5575221180915833, "avg_line_length": 18, "blob_id": "83f17923ca52fd7982129ddbfa25632c9ced2d20", "content_id": "b439f5c8d8838c387fba45109029975454307d38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 29, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/12/1229/MBTI.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "MBTI = input()\nresult = 0\nfor _ in range(int(input())):\n if MBTI == input():\n result += 1\nprint(result)" }, { "alpha_fraction": 0.5419161915779114, "alphanum_fraction": 0.5958083868026733, "avg_line_length": 
16.63157844543457, "blob_id": "805c02f08f828ee25383f6d3a365d3e7c7719cac", "content_id": "ce2d015034a6b8fe30fb183d2ad5b82120459c1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 344, "license_type": "no_license", "max_line_length": 41, "num_lines": 19, "path": "/알고리즘/온라인저지/2022/10/1006/병든 나이트.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().rstrip().split())\nresult = 0\nif N == 1: result = 1\nelif N == 2: result = min(4, (M+1)//2)\nelse: # N >= 3\n if M < 7: result = min(4, M)\n else: result = M-2\nprint(result)\n\n\"\"\"\n<참고한 링크>\nhttps://pacific-ocean.tistory.com/354\n\"\"\"\n\n# https://www.acmicpc.net/problem/1783" }, { "alpha_fraction": 0.3558441698551178, "alphanum_fraction": 0.3636363744735718, "avg_line_length": 15.782608985900879, "blob_id": "8e504ff8c1ed1e7bc665e89545a008c1fd766144", "content_id": "44f8935b3eef84ba35e6716e1b9657c4c438c336", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "no_license", "max_line_length": 42, "num_lines": 23, "path": "/알고리즘/온라인저지/2022/11/1123/우유가 넘어지면.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nrotate = {\n \".\": \".\",\n \"O\": \"O\",\n \"-\": \"|\",\n \"|\": \"-\",\n \"/\": \"\\\\\",\n \"\\\\\": \"/\",\n \"^\": \"<\",\n \"<\": \"v\",\n \"v\": \">\",\n \">\": \"^\",\n}\n\nN, M = map(int, input().rstrip().split())\narr = [input().rstrip() for _ in range(N)]\nfor j in range(M-1, -1, -1):\n for i in range(N):\n print(rotate[arr[i][j]], end='')\n print()" }, { "alpha_fraction": 0.42657342553138733, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 19.5, "blob_id": "05b68bb8397d837db3c96a0bd201e80d5af0b9ce", "content_id": "cee7965588ac63f029fdf4c57d8d1a40bf33ed2f", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 336, "license_type": "no_license", "max_line_length": 32, "num_lines": 14, "path": "/알고리즘/온라인저지/2021/08/0803/사분면 고르기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "x = int(input()) # x와\ny = int(input()) # y값을 int로 받아서 \nquadrant = 0 # 사분면 값을 초기화해주고\nif x >= 0:\n if y >= 0:\n quadrant = 1 # + +\n else:\n quadrant = 4 # + -\nelse:\n if y >= 0:\n quadrant = 2 # - +\n else:\n quadrant = 3 # - -\nprint(quadrant) # 사분면 값을 출력" }, { "alpha_fraction": 0.43379446864128113, "alphanum_fraction": 0.45059287548065186, "avg_line_length": 27.13888931274414, "blob_id": "c85ba4be0659933352281f162df7e30785b7ba3d", "content_id": "9905f6c3c6635a07cbcc2f56eaa345dd6159fca7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1012, "license_type": "no_license", "max_line_length": 62, "num_lines": 36, "path": "/알고리즘/온라인저지/2021/08/0829/주사위 쌓기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ndice_count = int(sys.stdin.readline())\ndices = []\nfor d in range(dice_count):\n dices.append(list(map(int, sys.stdin.readline().split())))\nresult = 0\nfor j in range(6):\n max_sum = 0\n up = 0\n for i in range(dice_count):\n dice_opposite = {\n dices[i][0]:dices[i][5], \n dices[i][1]:dices[i][3],\n dices[i][2]:dices[i][4],\n dices[i][3]:dices[i][1],\n dices[i][4]:dices[i][2],\n dices[i][5]:dices[i][0]\n }\n tmp = [*dices[i]]\n try:\n if up == 0:\n tmp.remove(dices[i][j])\n tmp.remove(dice_opposite[dices[i][j]])\n up = dice_opposite[dices[i][j]]\n max_sum += max(tmp)\n else:\n tmp.remove(up)\n tmp.remove(dice_opposite[up])\n up = dice_opposite[up]\n max_sum += max(tmp)\n except:\n pass\n if result < max_sum:\n result = max_sum\nprint(result)" }, { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 35.25, "blob_id": "4f39cc3ffa1fc9b8a155db24042ee4117aa8bdc5", "content_id": 
"13461c3ae9d201b056b0928d875654620ae879ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 56, "num_lines": 8, "path": "/알고리즘/온라인저지/2021/08/0803/X보다 작은 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, X = map(int, input().split()) # N은 왜준거지? 모르겠다\nA = list(map(int, input().split())) # 수열을 입력받아서 리스트로 만들고\nresult = [] # 결과값을 초기화하고\nfor a in A: # 수열을 순회하면서\n if a < X: # X보다 작은 수는\n result.append(a) # 결과값에 추가\nfor res in result: # 결과값 수열을 순회하면서\n print(res, end=' ') # 띄어쓰기 간격으로 출력" }, { "alpha_fraction": 0.481675386428833, "alphanum_fraction": 0.5078533887863159, "avg_line_length": 11.800000190734863, "blob_id": "9c0d56d6542133b0885779e26c784f764479c824", "content_id": "82fe8cc20233b606ef7e4909072f36528ea78363", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 191, "license_type": "no_license", "max_line_length": 40, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/02/0220/행복.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nscores = list(map(int, input().split()))\n\nmaxx = 0\nminn = 1000\n\nfor s in scores:\n if s >= maxx:\n maxx = s\n \n if s <= minn:\n minn = s\n\nprint(maxx-minn)" }, { "alpha_fraction": 0.5213675498962402, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 13.75, "blob_id": "cd28b5f866cd7215eb883ba25475f7a44b895132", "content_id": "d1c5fca7afb793b1a4e387892a171233dbe51ce6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 40, "num_lines": 8, "path": "/알고리즘/온라인저지/2022/02/0203/네 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "nums1 = input().split()\n\nresult = 0\n\nfor i in range(0, 4, 2):\n result += int(nums1[i] + nums1[i+1])\n\nprint(result)" }, { 
"alpha_fraction": 0.29192546010017395, "alphanum_fraction": 0.3913043439388275, "avg_line_length": 12.5, "blob_id": "b45db5ce4065a63ba1be19a16a1bc3d0c81c70ad", "content_id": "aa8f2197092b708dad2b9d21c94d67141fadb7a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 30, "num_lines": 12, "path": "/알고리즘/SWEA/LEARN/Course/1. Programming Beginner/파이썬 프로그래밍 기초(2)/20차시 2. 자료구조 – 리스트, 튜플 - 연습문제 22.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "x = [5, 6, 77, 45, 22, 12, 24]\n\ni = 0\nwhile True:\n if x[i]%2 == 0:\n x.remove(x[i])\n else:\n i += 1\n if i == len(x):\n break\n\nprint(x)" }, { "alpha_fraction": 0.46783626079559326, "alphanum_fraction": 0.46783626079559326, "avg_line_length": 18.11111068725586, "blob_id": "2cdc85add6b18fe4b5ee54d4b3db7b1a6221571c", "content_id": "619458b2400cbd2bece94328f59b1b25feaa621a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 32, "num_lines": 9, "path": "/알고리즘/온라인저지/2022/02/0206/대소문자 바꾸기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "sen = input()\n\nfor s in sen:\n if s.isupper():\n print(s.lower(), end='')\n elif s.islower():\n print(s.upper(), end='')\n else:\n print(s, end='')" }, { "alpha_fraction": 0.4260089695453644, "alphanum_fraction": 0.4506726562976837, "avg_line_length": 26.9375, "blob_id": "451c7d5e60aef773ade8e8613c3480b8acfc55b6", "content_id": "86b634d8298052d43c40d5407b0e9bd593658394", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 446, "license_type": "no_license", "max_line_length": 63, "num_lines": 16, "path": "/알고리즘/온라인저지/2022/06/0617/박스.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def down():\n global result\n for i in range(N-2, -1, -1):\n for j in 
range(M):\n if arr[i][j] == 1 and arr[i+1][j] == 0:\n arr[i][j], arr[i+1][j] = arr[i+1][j], arr[i][j]\n result += 1\n\nT = int(input())\nfor t in range(T):\n N, M = map(int, input().split())\n arr = [list(map(int, input().split())) for _ in range(N)]\n result = 0\n for n in range(N-1):\n down() \n print(result)" }, { "alpha_fraction": 0.5649717450141907, "alphanum_fraction": 0.5762711763381958, "avg_line_length": 26.30769157409668, "blob_id": "559e249b4f56abfae6ee86d48ffd17124162e203", "content_id": "7c1d976cbfeda2d7704267f3149434c1c5af0e28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 54, "num_lines": 13, "path": "/알고리즘/온라인저지/2022/04/0417/차이를 최대로.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "from itertools import permutations\n\nN = int(input())\ntmp = list(map(int, input().split()))\nperm = list(permutations(tmp, N)) # 배열에서 나올 수 있는 모든 경우\nmaxx = 0 # \"차이를 최대로\"\nfor p in perm:\n a = 0 # 해당 배열의 최대값 초기화\n for i in range(N-1):\n a += abs(p[i]-p[i+1]) # 앞뒤 값의 절대값 더하기\n if a > maxx: # 최대값 갱신이 가능하면\n maxx = a # 최대값 갱신\nprint(maxx) # 출력" }, { "alpha_fraction": 0.5613811016082764, "alphanum_fraction": 0.5664961934089661, "avg_line_length": 47.9375, "blob_id": "b8b1e39f9adcd95e6d957f99cbdd5aca7328ecf2", "content_id": "833231e7898ce1f63534f3e1297c46a67cebdf10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 968, "license_type": "no_license", "max_line_length": 90, "num_lines": 16, "path": "/알고리즘/프로그래머스/Level1/신고 결과 받기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def solution(id_list, report, k):\n answer = [0] * len(id_list) # 정답 배열\n report_list = [[] for _ in range(len(id_list))] # 신고받은 목록\n black_list = [0] * len(id_list) # 신고받은 횟수\n for r in report:\n user, reported = r.split() # 공백을 간격으로, 유저, 피신고자\n if reported not in 
report_list[id_list.index(user)]: # 중복 신고 방지, 아직 신고 받은 적 없는지 확인\n report_list[id_list.index(user)].append(reported) # 신고 받은 목록 추가\n black_list[id_list.index(reported)] += 1 # 신고 받은 횟수 추가\n # print(id_list, report, k) # 디버깅\n # print(report_list, black_list) # 디버깅\n for i in range(len(id_list)): # 해당 유저가\n for a in report_list[i]: # 신고한 사람이\n if black_list[id_list.index(a)] >= k: # k번 이상 신고 되었을 때\n answer[i] += 1 # 제재완료\n return answer" }, { "alpha_fraction": 0.5055928230285645, "alphanum_fraction": 0.5548098683357239, "avg_line_length": 27, "blob_id": "5cf550c23cade46acc6a48779641bd04b1539374", "content_id": "e54f7018f7b512051f5e6aeb599b5451abf422b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 677, "license_type": "no_license", "max_line_length": 44, "num_lines": 16, "path": "/알고리즘/온라인저지/2021/08/0811/거스름돈.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n1000엔에서 물건값 빼고\n거스름돈을 거슬러주면 된다\n동전의 최소개수\"\"\"\nprice = int(input()) # 물건의 가격\nchange = 1000-price # 거스름돈\ncoins = [500, 100, 50, 10, 5, 1] # 동전들\ncount = 0 # 동전의 개수 초기화\nfor coin in coins: # 동전들을 금액이 큰 순서로 순회하면서\n if coin <= change: # 거슬러줄 수 있으면\n while True:\n change -= coin # 해당 동전을 하나 거슬러주고\n count += 1 # 동전 개수 증가\n if coin > change: # 거슬러주지 못하면\n break # 종료하고 다음 작은 동전으로\nprint(count) # 동전 개수 출력" }, { "alpha_fraction": 0.4670184552669525, "alphanum_fraction": 0.4854881167411804, "avg_line_length": 19, "blob_id": "a77b5d42f948a58e61778e036c0a0fc78d26e14e", "content_id": "fbcf9f955be9e09b86c9c9776267e6f533315481", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 379, "license_type": "no_license", "max_line_length": 36, "num_lines": 19, "path": "/알고리즘/온라인저지/2021/08/0822/방 배정.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N, K = map(int, input().split())\narr = [[0]*2 for _ in range(6)]\nfor n in range(N):\n S, Y = map(int, 
input().split())\n arr[Y-1][S] += 1\nnew_arr = []\nfor ar in arr:\n for a in ar:\n new_arr.append(a)\nresult = 0\nfor ar in new_arr:\n if ar > K: \n result += ar//K\n ar = ar%K\n if not ar:\n continue\n else:\n result += 1\nprint(result)" }, { "alpha_fraction": 0.6321070194244385, "alphanum_fraction": 0.654403567314148, "avg_line_length": 21.450000762939453, "blob_id": "b0efe938fe583d8c6f34c96e41c0a659aff0b767", "content_id": "53a7e22be4a1902e58c9e16ff5ad37c8973e0c78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1527, "license_type": "no_license", "max_line_length": 41, "num_lines": 40, "path": "/알고리즘/온라인저지/2022/10/1002/1로 만들기 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\nQ = deque()\nQ.append([N])\nresult = []\nwhile Q:\n now = Q.popleft()\n X = now[-1]\n if X == 1: result = now; break\n if X % 3 == 0: Q.append(now + [X//3])\n if X % 2 == 0: Q.append(now + [X//2])\n Q.append(now + [X-1])\nprint(len(result)-1)\nprint(*result)\n\n\"\"\"\n3으로 나누고, 2로 나누고, 1을 빼는\n탐색기록 그 자체를 큐에 담으며 진행함\n1에 도착했을 때의 탐색기록이 결과가 됨\n스페셜저지 이므로 답은 아무거나 상관이 없음\n해당 코드는 top-down DP코드이지만\n1에서 출발하여 N까지 가는 bottom-up 코드를 짤 수도 있음\n하지만 *연산보다 //연산이 시간이 조금 더 빠를지도..?\nDP의 개념은 매우 단순함\n\"계산한 값을 기억했다가 다시 사용한다\"\n본디 다이나믹 프로그래밍이라는 이름 자체가\n별 뜻이 없고, 만든 사람이 그냥 어그로성으로 붙인 이름이라고 함\n한 번 계산된 값을 기록하여 재활용하는 알고리즘을 전부 DP로 봄\n특징으로는, 계산된 값을 날리지 않고 들고 있는만큼\n메모리가 많이 필요함\n해당 코드는 수많은 리스트들이 Q에 들어가는 것처럼 보이지만\n시간이 logN으로 빠른 이(二)분탐색보다 훨씬 빠른\n코드상 삼(三)분탐색을 사용하므로 \n저렇게 넘치게 Q에 append 하는 것처럼 보여도 메모리는 괜찮다\n\"\"\"\n# https://www.acmicpc.net/problem/12852" }, { "alpha_fraction": 0.6751269102096558, "alphanum_fraction": 0.6751269102096558, "avg_line_length": 32, "blob_id": "50bece0232b422491a7a6d8808a441a22cd0ba13", "content_id": "44e30ccea0f8e6da6aa462490d436e9cbacc8783", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 48, "num_lines": 6, "path": "/알고리즘/온라인저지/2023/01/0128/줄 세우기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "players = [input() for _ in range(int(input()))]\nif players == sorted(players):\n print('INCREASING')\nelif players == sorted(players, reverse=True):\n print('DECREASING')\nelse: print('NEITHER')" }, { "alpha_fraction": 0.5317515730857849, "alphanum_fraction": 0.5415213108062744, "avg_line_length": 30.866666793823242, "blob_id": "66c5d2db3902be5a3155bf5fce3ae62a30239505", "content_id": "720a9223315b3822c2c2c1a8050f5549b954148b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2215, "license_type": "no_license", "max_line_length": 62, "num_lines": 45, "path": "/알고리즘/온라인저지/2022/08/0812/외판원 순회 2.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "def dfs(start, now):\n global result, tmp\n if tmp > result: return # 백트래킹\n for next in range(N): # 다음 방문 지점\n # 다음 방문할 지점이 지금 지점과 다르고\n # 방문한 적이 없으며\n # 방문을 할 수 있을 때(W배열에 값이 존재할 때)\n if now != next and not visited[next] and W[now][next]:\n visited[next] = 1 # 다음 지점 방문 찍고\n tmp += W[now][next] # 그 지점의 코스트를 더하고\n dfs(start, next) # 해당 지점 탐색\n visited[next] = 0 # 방문 지워주고\n tmp -= W[now][next] # 코스트 지워주고\n elif sum(visited) == N and W[now][start]:\n tmp += W[now][start] # 코스트 더하고\n if tmp < result: result = tmp # 최소값 갱신\n tmp -= W[now][start] # 코스트 지워주고\n\nN = int(input())\nW = [list(map(int, input().split())) for _ in range(N)]\nresult = int(1e9) # 가상의 최소값\nvisited = [0] * N # 방문 배열\nfor n in range(N): # 모든 점에서 다 출발해본다\n tmp = 0 # 해당 출발의 임시 결과값\n visited[n] = 1 # 들어가면서 찍고\n dfs(n, n) # 출발 시, 시작점과 탐색점은 같다\n visited[n] = 0 # 나오면서 지운다\nprint(result) # 얻은 최소값 출력\n\n\"\"\"\n시간 : 80분(혼자 고민) + 10분(\"질문검색\"탭에서 반례 및 힌트 참고)\n풀이\n 갈 수 있는 동선을 모두 체크하여(브루트포스)\n dfs로 탐색하면서\n 방문배열을 만들어서, 들어갈 때 찍고, 나올 때 지워준다\n 모든 점을 탐색했을 때 시작점으로 돌아가서\n 해당 
순회의 결과값이 최소값인지 비교하여 갱신한다\n (from itertools import permutation을 사용하여 풀 수도 있을 것이다)\n\n 문제의 함정\n 1. 다음 지점으로 갈 수 없는 경우가 있다\n 순회 도중에 갈 수 없는 경우만 구현하였더니 오답이었고\n 마지막 지점에서 다시 시작점으로 갈 수 없는 경우를 구현해주니 맞았음\n 2. 순회 도중, 이미 구한 최소값보다 값이 커지면 return한다 (백트래킹)\n\"\"\"" }, { "alpha_fraction": 0.5054945349693298, "alphanum_fraction": 0.5164835453033447, "avg_line_length": 14.333333015441895, "blob_id": "9c3430f7d7500c3724f96fc0f3a43a96de3a3dab", "content_id": "84e89fd50779e6550cacb1238064f0e79f6354fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/알고리즘/온라인저지/2022/02/0211/다면체.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "T = int(input())\n\nfor t in range(T):\n V, E = map(int, input().split())\n\n print(E-V+2)" }, { "alpha_fraction": 0.35384616255760193, "alphanum_fraction": 0.5076923370361328, "avg_line_length": 21, "blob_id": "72075ee002a3440075dd7f938d88d65aecacb2d2", "content_id": "0ed46d4655bb4cd672272fa115981f7d35f22ead", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 47, "num_lines": 3, "path": "/알고리즘/온라인저지/2022/02/0221/세금.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "N = int(input())\n\nprint(int(N*0.78), int((N*0.8) + (N*0.2)*0.78))" }, { "alpha_fraction": 0.4741035997867584, "alphanum_fraction": 0.5119521617889404, "avg_line_length": 24.149999618530273, "blob_id": "1cde46789ad61409c9ca3cb5420cff414cd9f00a", "content_id": "0689a6120b1ea835ed59575fd1112c578e164777", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 502, "license_type": "no_license", "max_line_length": 66, "num_lines": 20, "path": "/알고리즘/온라인저지/2022/10/1006/숫자판 점프.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import 
sys\n\ninput = sys.stdin.readline\ndy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]\n\ndef dfs(y, x, move, str_num):\n if move == 5: \n result.add(int(str_num))\n return\n for i in range(4):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<5 and 0<=nx<5:\n dfs(ny, nx, move+1, str_num+str(arr[ny][nx]))\n\narr = [list(map(int, input().rstrip().split())) for _ in range(5)]\nresult = set()\nfor i in range(5):\n for j in range(5):\n dfs(i, j, 0, str(arr[i][j]))\nprint(len(result))" }, { "alpha_fraction": 0.4848484992980957, "alphanum_fraction": 0.5353535413742065, "avg_line_length": 11.5, "blob_id": "31e9830d34873c93249dfee32a85a81c1281f10b", "content_id": "48f90ff8c3b42ca6b65dc1f614d2aea9c28b05d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 16, "num_lines": 8, "path": "/알고리즘/온라인저지/2023/05/0521/11 月 (November).py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A = int(input())\nB = int(input())\nB *= 7\nC = A+B\nresult = 1\nif C > 30:\n result = 0\nprint(result)" }, { "alpha_fraction": 0.48106446862220764, "alphanum_fraction": 0.5076765418052673, "avg_line_length": 22.08661460876465, "blob_id": "69f2bdf44f86c9c5cd90c21dfd1a2468d5eb7bea", "content_id": "1f0c4b40d6bb0aa58b6ee4bce76020495addd8cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3589, "license_type": "no_license", "max_line_length": 61, "num_lines": 127, "path": "/알고리즘/온라인저지/2022/03/0313/배열 돌리기 3.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\narr = 원본 배열\nresult = 연산을 실행한 배열\n매 연산마다 배열을 돌리고\nresult를 다시 arr에 deepcopy하면서 진행\n\"\"\"\n\nimport copy # deepcopy 사용\n\ndef one():\n global arr, result\n result = []\n for a in arr[::-1]: # 세로로 뒤집어서\n result.append(a) # 한 줄 씩 추가\n arr = copy.deepcopy(result) # arr에 result 덮어쓰기\n\ndef two():\n global arr, result\n result = []\n for a in arr:\n 
result.append(a[::-1]) # 가로로 뒤집어서 한 줄 씩 추가\n arr = copy.deepcopy(result) # arr에 result 덮어쓰기\n \ndef three():\n global arr, result\n result = []\n for j in range(M): # 왼쪽 줄을\n tmp = []\n for i in range(N-1, -1, -1): # 거꾸로 읽어와서\n tmp.append(arr[i][j])\n result.append(tmp) # 한 줄 씩 추가\n arr = copy.deepcopy(result) # arr에 result 덮어쓰기\n\ndef four():\n global arr, result\n result = []\n for j in range(M-1, -1, -1): # 오른쪽 줄을\n tmp = []\n for i in range(N): # 읽어와서\n tmp.append(arr[i][j])\n result.append(tmp) # 한 줄 씩 추가\n arr = copy.deepcopy(result) # arr에 result 덮어쓰기\n\n\"\"\"\n5번연산, 6번연산은\n배열의 모양을 \n1 2\n4 3\n으로 나누어 생각하기로 함\n5번연산을 실행한 결과값의 맨 윗줄은\n4번의 첫 줄 + 1번의 첫 줄이 되고\n그렇게 결과값의 반을 채우고 나면 나머지 반은\n3번의 첫 줄 + 2번의 첫 줄\n을 더한 값으로 순서대로 나머지 반을 채우게 됨\n\"\"\"\ndef five():\n global arr, result\n result = []\n for i in range(N//2, N):\n tmp1 = [] # 4\n tmp2 = [] # 1\n for j in range(M//2):\n tmp1.append(arr[i][j])\n tmp2.append(arr[i-(N//2)][j])\n result.append(tmp1+tmp2) # 4와 1을 한 줄 씩 더해서 result에 추가\n for i in range(N//2, N):\n tmp1 = [] # 3\n tmp2 = [] # 2\n for j in range(M//2, M):\n tmp1.append(arr[i][j])\n tmp2.append(arr[i-(N//2)][j])\n result.append(tmp1+tmp2) # 3과 2를 한 줄 씩 더해서 result에 추가\n arr = copy.deepcopy(result) # arr에 result 덮어쓰기\n\n\"\"\"\n6번연산은\n2번의 첫 줄 + 3번의 첫 줄\n을 구해서 결과값의 절반을 채우고\n나머지 절반은\n1번의 첫 줄 + 4번의 첫 줄\n을 더한 값으로 채움\n\"\"\"\ndef six():\n global arr, result\n result = []\n for i in range(N//2):\n tmp1 = [] # 2\n tmp2 = [] # 3\n for j in range(M//2, M):\n tmp1.append(arr[i][j])\n tmp2.append(arr[i+(N//2)][j])\n result.append(tmp1+tmp2) \n for i in range(N//2):\n tmp1 = [] # 1\n tmp2 = [] # 4\n for j in range(M//2):\n tmp1.append(arr[i][j])\n tmp2.append(arr[i+(N//2)][j])\n result.append(tmp1+tmp2)\n arr = copy.deepcopy(result) # arr에 result 덮어쓰기\n\n\nN, M, R = map(int, input().split())\narr = [] # 원본 배열\nfor n in range(N):\n arr.append(list(map(int, input().split())))\norders = list(map(int, input().split())) # 연산들\nresult = [] # 연산을 진행할 배열\n\nfor order in 
orders:\n if order == 1:\n one()\n elif order == 2:\n two()\n elif order == 3:\n three()\n N, M = M, N # 직사각형 배열인 경우 에러 방지\n elif order == 4:\n four() \n N, M = M, N # 직사각형 배열인 경우 에러 방지\n elif order == 5:\n five()\n elif order == 6:\n six()\n\nfor r in result:\n print(*r) # 각 줄 출력" }, { "alpha_fraction": 0.5787781476974487, "alphanum_fraction": 0.5932475924491882, "avg_line_length": 27.272727966308594, "blob_id": "ea3799b49cf2ba67c0b01b1566b1a8060d689f18", "content_id": "ad93b25c07599cc59743bc6368c11a24a5ed2484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1028, "license_type": "no_license", "max_line_length": 70, "num_lines": 22, "path": "/알고리즘/온라인저지/2021/08/0806/평균은 넘겠지.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\n테스트케이스 개수 C개\n학생의 수 N명, 점수 N개\n점수 N개의 평균\n을 넘는 점수의 개수\n를 전체 점수의 개수로 나눈 퍼센트\n을 3자리까지 출력\n을 테스트케이스 개수만큼\"\"\"\n\nC = int(input()) # 테스트케이스 개수\nfor i in range(C): # 테스트케이스 개수만큼 순회하면서\n N_and_scores = list(map(int, input().split())) # 점수개수 N과 점수들을 입력받고\n N = N_and_scores[0] # N값을 잡아주고\n average = (sum(N_and_scores) - N) / N # 평균 구해주고\n count = 0 # 평균이상인 점수 개수 초기화\n for j in range(1, len(N_and_scores)): # 점수 리스트 순회하면서\n if N_and_scores[j] > average: # 평균이상인 점수 있으면\n count += 1 # 카운트 증가\n print(f'{(count / N) * 100:.3f}%') # 출력\n\n# 입력받고 출력하고를 테스트케이스 개수만큼 반복하는 것이 가능하다\n# 모든 값을 입력받고 저장해서 거기서 값을 꺼낼 필요가 없다\n" }, { "alpha_fraction": 0.5025728940963745, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 26.809524536132812, "blob_id": "96a48ac352d10a06be0088c8f4fabf0e9f3805be", "content_id": "a9dc1ea1f3180f1ea0110d252e0f995d56d51fcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 889, "license_type": "no_license", "max_line_length": 49, "num_lines": 21, "path": "/알고리즘/온라인저지/2021/08/0807/동전O.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "\"\"\"\nN = 동전의 종류 가짓수\nK = 목표 
금액\n필요한 동전 개수의 최솟값을 구하는 문제\n\"\"\"\n\nN, K = map(int, input().split()) # 동전개수, 목표금액 입력\ncoins = [] # 입력받을 동전 리스트 초기화\nfor i in range(N): # 동전개수만큼 순회하면서\n coins.append(int(input())) # 동전들 입력받아서 추가\ncoins.reverse() # 입력받은 리스트 뒤집고\ncount = 0 # 필요한 동전개수 count 초기화\nfor coin in coins: # 동전들 순회하면서\n if coin <= K: # 이 동전이 목표금액보다 작으면\n while True: # 반복해서\n K -= coin # 목표금액에서 빼고\n count += 1 # 동전 개수 추가하고\n if coin > K: # 동전값이 목표금액보다 크면\n break # 정지\n# 4200 -> 3200 -> 2200 -> 1200 -> 200 -> 100 -> 0\nprint(count) # 동전갯수 출력" }, { "alpha_fraction": 0.44768211245536804, "alphanum_fraction": 0.4834437072277069, "avg_line_length": 28.076923370361328, "blob_id": "989fad5fac506e5fad6162f0ade46949e5be9ec3", "content_id": "09716823664e4da785982603d2473a6e09aea197", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 755, "license_type": "no_license", "max_line_length": 59, "num_lines": 26, "path": "/알고리즘/온라인저지/2022/10/1002/나이트의 이동.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\ndy = [-1, -2, -2, -1, 1, 2, 2, 1]\ndx = [-2, -1, 1, 2, 2, 1, -1, -2]\n\nfor _ in [0]*int(input().rstrip()):\n L = int(input().rstrip())\n start = tuple(map(int, input().rstrip().split()))\n goal = tuple(map(int, input().rstrip().split()))\n result = 0\n visited = [[0]*L for l in [0]*L]\n Q = deque()\n y, x = start\n Q.append((y, x, 0))\n visited[y][x] = 1\n while Q:\n y, x, move = Q.popleft()\n if (y, x) == goal: result = move; break\n for i in range(8):\n ny, nx = y+dy[i], x+dx[i]\n if 0<=ny<L and 0<=nx<L and not visited[ny][nx]:\n Q.append((ny, nx, move+1))\n visited[ny][nx] = 1\n print(move)" }, { "alpha_fraction": 0.560606062412262, "alphanum_fraction": 0.6212121248245239, "avg_line_length": 14.7619047164917, "blob_id": "1b7c30bee4210564164513959d010e9e807f31b2", "content_id": "ec8bbbd3dd91dde827dea45570e928e76748adcf", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 39, "num_lines": 21, "path": "/알고리즘/온라인저지/2022/10/1006/오르막 수.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "import sys\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip())\ndp = [1]*10\nfor i in range(1,N) :\n for j in range(1,10) :\n dp[j] += dp[j-1]\nprint(sum(dp)%10007)\n\n\"\"\"\n규칙은 찾았는데 구현하는게 막혔다\n구현된 코드를 보고 이해는 가는데\n어떻게 하면 이런 걸 코드로 쓸 수 있는지.. 신기하다\n\n<참고한 링크>\nhttps://jainn.tistory.com/91\n\"\"\"\n\n# https://www.acmicpc.net/problem/11057" }, { "alpha_fraction": 0.5062656402587891, "alphanum_fraction": 0.5137844681739807, "avg_line_length": 25.66666603088379, "blob_id": "0d24da98f36f421c766736b3556c607ed85ce61d", "content_id": "eb9076b1d5eee14f4872856434d15627749ac493", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "no_license", "max_line_length": 46, "num_lines": 15, "path": "/알고리즘/온라인저지/2022/09/0904/크로스워드 만들기.py", "repo_name": "sorrow4468/BAEKJOON", "src_encoding": "UTF-8", "text": "A, B = input().split()\nresult = [['.']*len(A) for _ in range(len(B))]\ny = 0\nchar = ''\nfor a in A[::-1]:\n if a in B:\n y = B.index(a)\n char = a\nx = A.index(char)\nfor j in range(len(A)): result[y][j] = A[j]\nfor i in range(len(B)): result[i][x] = B[i]\nfor i in range(len(result)):\n for j in range(len(result[i])):\n print(result[i][j], end='')\n if i < len(result)-1: print()" } ]
1,015
kristinemaeg/SentimentAnalysis
https://github.com/kristinemaeg/SentimentAnalysis
f775d48ad32eae5e043717d5baa259c8b37bcbf3
55438beb63fce195b77ae051978d5d1d55d89ccf
2513225f9c03fa33f19f6fedb455a91029bc8b3c
refs/heads/main
2023-03-09T21:11:47.928069
2021-02-24T04:21:45
2021-02-24T04:21:45
341,380,331
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4535974860191345, "alphanum_fraction": 0.6538060307502747, "avg_line_length": 15.759259223937988, "blob_id": "06c1e83b60765e67de3d70e5d911b9cead5505c1", "content_id": "88fbb37c9111faf52d89fb80c982b9f7e8d6a2c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 959, "license_type": "no_license", "max_line_length": 28, "num_lines": 54, "path": "/requirements.txt", "repo_name": "kristinemaeg/SentimentAnalysis", "src_encoding": "UTF-8", "text": "backcall==0.2.0\r\nBrotli==1.0.9\r\ncertifi==2020.12.5\r\nchardet==4.0.0\r\nclick==7.1.2\r\ncolorama==0.4.4\r\ncycler==0.10.0\r\ndash==1.19.0\r\ndash-core-components==1.15.0\r\ndash-html-components==1.1.2\r\ndash-renderer==1.9.0\r\ndash-table==4.11.2\r\ndecorator==4.4.2\r\nFlask==1.1.2\r\nFlask-Compress==1.9.0\r\nfuture==0.18.2\r\ngunicorn==20.0.4\r\nidna==2.10\r\nipython==7.20.0\r\nipython-genutils==0.2.0\r\nitsdangerous==1.1.0\r\njedi==0.18.0\r\nJinja2==2.11.3\r\njoblib==1.0.1\r\nkiwisolver==1.3.1\r\nMarkupSafe==1.1.1\r\nmatplotlib==3.3.4\r\nnltk==3.5\r\nnumpy==1.20.1\r\noauthlib==3.1.0\r\npandas==1.2.2\r\nparso==0.8.1\r\npickleshare==0.7.5\r\nPillow==8.1.0\r\nplotly==4.14.3\r\nprompt-toolkit==3.0.16\r\nPygments==2.8.0\r\npyparsing==2.4.7\r\nPySocks==1.7.1\r\npython-dateutil==2.8.1\r\npytz==2021.1\r\nregex==2020.11.13\r\nrequests==2.25.1\r\nrequests-oauthlib==1.3.0\r\nretrying==1.3.3\r\nsix==1.15.0\r\ntextblob==0.15.3\r\ntqdm==4.57.0\r\ntraitlets==5.0.5\r\ntweepy==3.10.0\r\nurllib3==1.26.3\r\nwcwidth==0.2.5\r\nWerkzeug==1.0.1\r\nwordcloud==1.8.1\r\n" }, { "alpha_fraction": 0.5692776441574097, "alphanum_fraction": 0.5773134827613831, "avg_line_length": 32.103641510009766, "blob_id": "486a3891e09abb2872ecdde689a7a900a6f108d2", "content_id": "a4701be62312e8dd1b940b8cd703c9a8284788dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11826, "license_type": "no_license", "max_line_length": 250, 
"num_lines": 357, "path": "/SentimentAnalysis.py", "repo_name": "kristinemaeg/SentimentAnalysis", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#Import the libraries\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\n\nimport tweepy\nfrom textblob import TextBlob\nfrom wordcloud import WordCloud\nimport pandas as pd\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')\n\nimport plotly.graph_objects as go \n\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize \nimport string\n\nimport base64\nfrom io import BytesIO\n\nimport urllib.request\nimport json \nimport datetime\n\nfrom wordcloud import WordCloud, STOPWORDS\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom pprint import pprint\nfrom IPython import display\n\nnltk.download('vader_lexicon')\nnltk.download('stopwords')\nnltk.download('punkt')\n\n#Authentication\nconsumerKey=\"U4y7XJ0ejIy9ThRje9UtZXl8N\"\nconsumerSecret=\"M2QyH0E4VpxBwPnXWmV9Bnwy0ZTWobrWuuoFv02PsMECyLq1oJ\"\naccessToken=\"1268986499728580610-57f201gaeuOB0yHS4LdHcqm0ekNT6P\"\naccessTokenSecret=\"oIWLBcPVBEf5XZ8lQGlBWjC9ARV8PMrgL26mPkspZtYip\"\n\nauth = tweepy.OAuthHandler(consumerKey, consumerSecret)\nauth.set_access_token(accessToken, accessTokenSecret)\napi = tweepy.API(auth, wait_on_rate_limit= True)\n\n#Initiate the app\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, suppress_callback_exceptions=True, external_stylesheets=external_stylesheets)\nserver = app.server\n\ncolors = {\n 'background': '#111111',\n 'text': 'firebrick'\n}\n\napp.layout = html.Div([\n html.H6(\"Sentiment Analysis\",style={\n 'textAlign': 'left-center',\n 'color': colors['text'],\n 'fontWeight': 'bold',\n }),\n dcc.Location(id='url', refresh=False),\n html.Div(id='home_page')\n])\n\n\nindex_page 
= html.Div([\n dcc.Link('Analyze a Twitter Account', href='/twitter'),\n html.Br(),\n dcc.Link('Analyze a Subreddit Forum', href='/reddit'),\n])\n\n\n#Twitter Page\ntwitter_layout = html.Div([\n html.H6('Twitter'),\n dcc.Input(\n id='twitterhandle',\n placeholder='Enter a Twitter Handle',\n type='text',\n value='',\n style={'width': 300}\n ),\n html.Button('Submit', id='button', n_clicks=0),\n html.Div(id='twitter_output', style={'whiteSpace': 'pre-line'}),\n html.Br(),\n dcc.Link('Analyze using Reddit', href='/reddit'),\n html.Br(),\n dcc.Link('Go back to home', href='/'),\n])\n\[email protected](dash.dependencies.Output('twitter_output', 'children'),\n [dash.dependencies.Input('button', 'n_clicks')],\n [dash.dependencies.State('twitterhandle', 'value')])\ndef twitter_page(n_clicks, twitterhandle):\n if n_clicks > 0:\n # Extract 1000 tweets from twitter user\n posts = api.user_timeline(screen_name = twitterhandle, count=10000, lang = \"en\", tweet_mode=\"extended\")\n \n # Create dataframe \n df = pd.DataFrame( [tweet.full_text for tweet in posts] , columns=['Tweets'])\n \n # Clean text\n # Create function to clean tweets\n def cleanTxt(text):\n text = re.sub(r'@[A-Za-z0-9]+', '', text) #removes @mentions\n text = re.sub(r'#', '', text) #removes '#'\n text = re.sub(r'RT[\\s]+', '', text) # removes RT\n text = re.sub(r'https?:\\/\\/\\S+' , '', text) #removes links\n return text\n \n df['Text']= df['Tweets'].apply(cleanTxt)\n \n # Create function to get the subjectivity\n def getSubjectivity(text):\n return TextBlob(text).sentiment.subjectivity\n \n # Create function to get the polarity\n def getPolarity(text):\n return TextBlob(text).sentiment.polarity\n \n # Create two new columns\n df['Subjectivity'] = df['Text'].apply(getSubjectivity)\n df['Polarity'] = df['Text'].apply(getPolarity)\n \n # Calculate the negative, neutral and positive analysis\n def getAnalysis(score):\n if score < 0:\n return 'Negative'\n elif score == 0:\n return 'Neutral'\n else:\n return 
'Positive'\n \n df['Sentiment'] = df['Polarity'].apply(getAnalysis)\n \n #Create Pie Chart\n labels = df['Sentiment'].value_counts().index\n values = df['Sentiment'].value_counts()\n fig = go.Figure(data=[go.Pie(labels=labels, values=values)])\n \n \n # Clean the text\n allWords = ' '.join( [twts for twts in df['Text']] )\n allWords_lower = allWords.lower()\n stop = set(stopwords.words('english') + list(string.punctuation))\n new_stopwords = ['...', '``', \"''\", '’', \"'s\", \"n't\" ]\n new_stopwords_list = stop.union(new_stopwords)\n\n text_tokens = nltk.word_tokenize(allWords_lower)\n text_no_stop_words_punct = [t for t in text_tokens if t not in new_stopwords_list and t not in string.punctuation]\n\n filtered_string = (\" \").join(text_no_stop_words_punct)\n \n # Convert the long string to a dictionary with frequency counts.\n def word_count(str):\n counts = dict()\n words = str.split()\n\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts\n\n twitter_wordcloud = ( word_count(filtered_string))\n \n # Create Word Cloud\n wc = WordCloud().generate_from_frequencies(frequencies=twitter_wordcloud)\n wc_img = wc.to_image()\n with BytesIO() as buffer:\n wc_img.save(buffer, 'png')\n img2 = base64.b64encode(buffer.getvalue()).decode()\n\n\n #Display Pie Chart and Word Cloud\n twitter_results = html.Div([ \n html.Div(\n dcc.Graph(id='graph1', figure=fig)\n ,style={'width': '49%', 'display': 'inline-block'}), \n html.Div( \n children=[html.Img(src=\"data:image/png;base64,\" + img2,\n style={'height':'50%', 'width': '50%'})]\n ,style={'width': '49%', 'display': 'block', 'textAlign':'center'}), \n ])\n \n return twitter_results\n\n\n#Reddit Page\nreddit_layout = html.Div([\n html.H6('Reddit'),\n dcc.Input(\n id='subreddit',\n placeholder='Enter a Subreddit Forum',\n type='text',\n value='',\n style={'width': 300}\n ),\n html.Button('Submit', id='button', n_clicks=0),\n html.Div(id='reddit_output', style={'whiteSpace': 
'pre-line'}),\n html.Br(),\n dcc.Link('Analyze using Twitter', href='/twitter'),\n html.Br(),\n dcc.Link('Go back to home', href='/')\n])\n\[email protected](dash.dependencies.Output('reddit_output', 'children'),\n [dash.dependencies.Input('button', 'n_clicks')],\n [dash.dependencies.State('subreddit', 'value')])\ndef reddit_radios(n_clicks, subreddit):\n if n_clicks > 0:\n #return 'You have selected \"{}\"'.format(subreddit)\n \n #Define function to pull up historical Reddit post information\n def load_results(lower_bound_timestamp, upper_bound_timestamp, target_result_size, target_subreddit, score_threshold):\n headline_collection = set()\n \n reddit_data_url = f\"https://api.pushshift.io/reddit/submission/search/?after={lower_bound_timestamp}&before={upper_bound_timestamp}&sort_type=score&sort=desc&subreddit={target_subreddit}&limit={target_result_size}&score={score_threshold}\"\n \n try:\n with urllib.request.urlopen(reddit_data_url) as url:\n data = json.loads(url.read().decode())\n \n for submission in data['data']:\n headline_collection.add(submission['title'])\n\n return headline_collection\n except urllib.error.HTTPError as e:\n print(e.__dict__)\n return set()\n except urllib.error.URLError as e:\n print(e.__dict__)\n return set()\n \n # Get Reddit posts\n headlines = set()\n\n time_now = datetime.datetime.now()\n\n limit_delta = 7\n limit_lower_delta = 6\n\n result_size = 1000\n score_limit = \">0\"\n\n for i in range(0, 8):\n previous_timestamp = int((time_now - datetime.timedelta(days=limit_delta)).timestamp())\n current_timestamp = int((time_now - datetime.timedelta(days=limit_lower_delta)).timestamp())\n\n full_collection = load_results(previous_timestamp, current_timestamp, result_size, subreddit, score_limit)\n headlines = headlines.union(full_collection)\n \n limit_delta = limit_delta - 1\n limit_lower_delta = limit_lower_delta - 1\n \n display.clear_output()\n \n # Calculate polarity to get the sentiment\n sia = SentimentIntensityAnalyzer()\n 
results = []\n\n for line in headlines:\n pol_score = sia.polarity_scores(line)\n pol_score['headline'] = line\n results.append(pol_score)\n \n #Convert the results to a dataframe\n df = pd.DataFrame.from_records(results)\n \n #Label the results accordingly\n df['label'] = 'Neutral'\n df.loc[df['compound'] > 0.1, 'label'] = 'Positive'\n df.loc[df['compound'] < -0.1, 'label'] = 'Negative'\n \n #Create Pie Chart\n labels = df['label'].value_counts().index\n values = df['label'].value_counts()\n fig = go.Figure(data=[go.Pie(labels=labels, values=values)])\n reddit_pie = html.Div([\n dcc.Graph(figure=fig)])\n \n # Clean the text\n allWords = ' '.join( [rdf for rdf in df['headline']] )\n allWords_lower = allWords.lower()\n stop = set(stopwords.words('english') + list(string.punctuation))\n new_stopwords = ['...', '``', \"''\", '’', \"'s\", \"n't\" ]\n new_stopwords_list = stop.union(new_stopwords)\n\n text_tokens = nltk.word_tokenize(allWords_lower)\n text_no_stop_words_punct = [t for t in text_tokens if t not in new_stopwords_list and t not in string.punctuation]\n\n filtered_string = (\" \").join(text_no_stop_words_punct)\n \n # Convert the long string to a dictionary with frequency counts.\n def word_count(str):\n counts = dict()\n words = str.split()\n\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts\n\n reddit_wordcloud = ( word_count(filtered_string))\n \n # Create Word Cloud\n wc = WordCloud().generate_from_frequencies(frequencies=reddit_wordcloud)\n wc_img = wc.to_image()\n with BytesIO() as buffer:\n wc_img.save(buffer, 'png')\n img2 = base64.b64encode(buffer.getvalue()).decode()\n \n #Display Pie Chart and Word Cloud\n reddit_results = html.Div([ \n html.Div(\n dcc.Graph(id='graph1', figure=fig)\n ,style={'width': '49%', 'display': 'inline-block'}), \n html.Div( \n children=[html.Img(src=\"data:image/png;base64,\" + img2,\n style={'height':'50%', 'width': '50%'})]\n ,style={'width': '49%', 'display': 
'block', 'textAlign':'center'}), \n ])\n \n return reddit_results\n \n\n# Update the index\[email protected](dash.dependencies.Output('home_page', 'children'),\n [dash.dependencies.Input('url', 'pathname')])\ndef display_page(pathname):\n if pathname == '/twitter':\n return twitter_layout\n elif pathname == '/reddit':\n return reddit_layout\n else:\n return index_page\n\nif __name__ == '__main__':\n app.run_server(debug=False)\n\n\n# In[ ]:\n\n\n\n\n" } ]
2
indapa/pgmPy
https://github.com/indapa/pgmPy
128c08ad9c201fa7eae0142f7e0f31f3138db028
0378d380e7fb33c941d3daa28a41d33cca2f4bcf
9354f46f726a3750081ea40e703ed3db55fd0a8d
refs/heads/master
2020-12-24T17:45:04.068015
2013-08-30T17:28:42
2013-08-30T17:28:42
6,001,237
15
4
null
null
null
null
null
[ { "alpha_fraction": 0.7966291904449463, "alphanum_fraction": 0.7988764047622681, "avg_line_length": 41.380950927734375, "blob_id": "2f2cb0f9cbc7ca9c6e341920e6f3837f8eff677a", "content_id": "b8e2946911b5163d8d9e528d6c28cb5557092251", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 890, "license_type": "no_license", "max_line_length": 152, "num_lines": 21, "path": "/README.md", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "pgmPy\n=====\n\nBackground:\n\nProbablistic Graphical Models representation in Python\n\nProbablistic Graphical Models (PGMs) use a graph-based representation\nto compactly encode the joint distribution. PGMs use elements of \ngraph theory and probability theory. There are two classes of PGMs. \ndirected acyclic graphical models are called Bayesian Networks (BNs)\nand undirected graphical models are Markov networks.\n\nThe code in this repository is based off the Matlab code\nfrom Daphne Koller's Coursera class on PGMs <https://www.coursera.org/course/pgm> For the assignments in the class students were given starter Matlab code\nand had to implement various algorithms, etc.\n\nWhile I grew to like Matlab, I needed to implement the inference and represenation of PGMs in Python for a personal project of mine. So I went down the \nrabbit hole. 
The code here depends on NumPy and uses Python v2.7.\n\nAmit Indap\n" }, { "alpha_fraction": 0.7048406004905701, "alphanum_fraction": 0.7201889157295227, "avg_line_length": 27.233333587646484, "blob_id": "06fcb001f011f9b775eaa3dce60cef2e4e27bf09", "content_id": "0e3a67446e7f339e59d777938ea0a92eb39acf0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 847, "license_type": "no_license", "max_line_length": 80, "num_lines": 30, "path": "/PythonNotebooks/testCliqueTreeMaxSumCalibrate.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "from Factor import *\nfrom PGMcommon import *\nfrom CliqueTree import *\nfrom CliqueTreeOperations import *\nfrom FactorOperations import *\nimport scipy.io as sio\nimport numpy as np\nimport pprint\nimport pdb\nmatfile='/Users/amit/BC_Classes/PGM/Prog4/PA4Sample.mat'\nmat_contents=sio.loadmat(matfile)\nmat_struct=mat_contents['MaxSumCalibrate']\nval=mat_struct[0,0]\ninput_edges = val['INPUT']['edges'][0][0]\n#print input_edges\ninput_cliqueList= val['INPUT']['cliqueList'][0][0][0]\nclique_list_factorObj=[]\nfor tpl in input_cliqueList:\n (var, card, values)=tpl\n f= Factor( var[0].tolist(), card[0].tolist(), values[0].tolist(), 'factor' )\n #print f \n clique_list_factorObj.append(f)\n\nP=CliqueTree( clique_list_factorObj , input_edges, clique_list_factorObj, [])\n\nP=CliqueTreeCalibrate(P,1)\n\nfor f in P.getNodeList():\n print f\n print \"==\"\n" }, { "alpha_fraction": 0.6499402523040771, "alphanum_fraction": 0.6606929302215576, "avg_line_length": 25.15625, "blob_id": "8c6f9a8ec28ce746f319beaed03ca361e895f07e", "content_id": "a5d7adfd01c27cd7128f476c30ee4a018c5346d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 837, "license_type": "no_license", "max_line_length": 114, "num_lines": 32, "path": "/PythonNotebooks/testMaxDecoding.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "from 
Factor import *\nfrom PGMcommon import *\nfrom CliqueTree import *\nfrom CliqueTreeOperations import *\nfrom FactorOperations import *\nimport scipy.io as sio\nimport numpy as np\nimport pprint\nimport pdb\nmatfile='/Users/amit/BC_Classes/PGM/Prog4/PA4Sample.mat'\nmat_contents=sio.loadmat(matfile)\nmat_struct=mat_contents['MaxDecoded']\nval=mat_struct[0,0]\ninput_factors = val['INPUT']\nfactorList=[]\n\n\n\nfor elm in input_factors:\n #print ary[0]\n #print\n (var, card, values)=elm[0]\n #print var, card, values\n f= Factor( var[0].tolist(), card[0].tolist(), values[0].tolist(), 'factor' )\n print f\n factorList.append( f )\n\nDECODE= MaxDecoding( factorList )\nALPHABET=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n\nprint DECODE\nprint [ALPHABET[idx] for idx in DECODE]\n" }, { "alpha_fraction": 0.6134676337242126, "alphanum_fraction": 0.6289501786231995, "avg_line_length": 37.806583404541016, "blob_id": "6722fc523b10085c87b5dd4e4738154978fbc2d2", "content_id": "0305ab96f4d940ef4fd0f12b0a712221eba3f0cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9430, "license_type": "no_license", "max_line_length": 127, "num_lines": 243, "path": "/PGMcommon.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom Factor import *\nimport numpy as np\nfrom itertools import product\nimport sys\nimport itertools\n\n\ndef getUniqueVar( factorList):\n \"\"\" given factorList which is a list of Factor objects\n return a list of unique variables appearing in the\n Factor objects. 
See http://stackoverflow.com/a/2151553/1735942 \"\"\"\n\n return list(set().union(*[ list(f.getVar()) for f in factorList ] ))\n\n\ndef isMemberBoolean (A, B):\n \"\"\" returns an list of the same length as A containing True where the elements of A are in B and False otherwise \"\"\"\n \n return [ x in B for x in A ]\n\ndef isMember( A, B):\n \"\"\" return a python list containing indices in B where the elements of A are located\n A and B are numpy 1-d arrays\n mapA[i]=j if and only if B[i] == A[j]\"\"\"\n mapA=[]\n for i in range(len(A)):\n mapA.append( np.where(B==A[i])[0].tolist()[0] )\n\n return mapA\n\n\n\n\ndef accum(accmap, a, func=None, size=None, fill_value=0, dtype=None):\n \"\"\" based on the recipie here to implement a numpy equivalent to matlab accumarray:\n http://www.scipy.org/Cookbook/AccumarrayLike\n An accumulation function similar to Matlab's `accumarray` function.\n\n Parameters\n ----------\n accmap : ndarray\n This is the \"accumulation map\". It maps input (i.e. indices into\n `a`) to their destination in the output array. The first `a.ndim`\n dimensions of `accmap` must be the same as `a.shape`. That is,\n `accmap.shape[:a.ndim]` must equal `a.shape`. For example, if `a`\n has shape (15,4), then `accmap.shape[:2]` must equal (15,4). In this\n case `accmap[i,j]` gives the index into the output array where\n element (i,j) of `a` is to be accumulated. If the output is, say,\n a 2D, then `accmap` must have shape (15,4,2). The value in the\n last dimension give indices into the output array. If the output is\n 1D, then the shape of `accmap` can be either (15,4) or (15,4,1)\n a : ndarray\n The input data to be accumulated.\n func : callable or None\n The accumulation function. The function will be passed a list\n of values from `a` to be accumulated.\n If None, numpy.sum is assumed.\n size : ndarray or None\n The size of the output array. 
If None, the size will be determined\n from `accmap`.\n fill_value : scalar\n The default value for elements of the output array.\n dtype : numpy data type, or None\n The data type of the output array. If None, the data type of\n `a` is used.\n\n Returns\n -------\n out : ndarray\n The accumulated results.\n\n The shape of `out` is `size` if `size` is given. Otherwise the\n shape is determined by the (lexicographically) largest indices of\n the output found in `accmap`.\n\n\n Examples\n --------\n from numpy import array, prod\n a = array([[1,2,3],[4,-1,6],[-1,8,9]])\n a\n array([[ 1, 2, 3],\n [ 4, -1, 6],\n [-1, 8, 9]])\n # Sum the diagonals.\n accmap = array([[0,1,2],[2,0,1],[1,2,0]])\n s = accum(accmap, a)\n array([9, 7, 15])\n # A 2D output, from sub-arrays with shapes and positions like this:\n # [ (2,2) (2,1)]\n # [ (1,2) (1,1)]\n accmap = array([\n [[0,0],[0,0],[0,1]],\n [[0,0],[0,0],[0,1]],\n [[1,0],[1,0],[1,1]],\n ])\n # Accumulate using a product.\n accum(accmap, a, func=prod, dtype=float)\n array([[ -8., 18.],\n [ -8., 9.]])\n # Same accmap, but create an array of lists of values.\n accum(accmap, a, func=lambda x: x, dtype='O')\n array([[[1, 2, 4, -1], [3, 6]],\n [[-1, 8], [9]]], dtype=object)\n \"\"\"\n\n if accmap.shape[:a.ndim] != a.shape:\n raise ValueError(\"The initial dimensions of accmap must be the same as a.shape\")\n if func is None:\n func = np.sum\n if dtype is None:\n dtype = a.dtype\n if accmap.shape == a.shape:\n accmap = np.expand_dims(accmap, -1)\n adims = tuple(range(a.ndim))\n if size is None:\n size = 1 + np.squeeze(np.apply_over_axes(np.max, accmap, axes=adims))\n size = np.atleast_1d(size)\n size=np.array(size, dtype=int)\n # Create an array of python lists of values.\n vals = np.empty(size, dtype='O')\n for s in product(*[range(k) for k in size]):\n vals[s] = []\n for s in product(*[range(k) for k in a.shape]):\n indx = tuple(accmap[s])\n val = a[s]\n vals[indx].append(val)\n\n # Create the output array.\n out = np.empty(size, 
dtype=dtype)\n for s in product(*[range(k) for k in size]):\n if vals[s] == []:\n out[s] = fill_value\n else:\n out[s] = func(vals[s])\n\n return out\n\n\n\ndef getIndex ( V, I):\n \"\"\" this method finds for every element of 1d- NumPy array, the index in another 1d-NumPy array\n based off of this StackOverflow answer http://stackoverflow.com/a/8251757/1735942\n V is a numpy 1d array listing the variables of a factor in a PGM\n I is the numpy list reprsenting the intersection of variables between two factors\n\n This method returns the index of the intersection variables in the list V \n \"\"\"\n index=np.argsort(V) #np.argsort gives the index ordering of an array that would result in sorted order\n #http://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html\n sorted_v=V[index] #sorted version of V array\n sorted_index=np.searchsorted( sorted_v, I) #i ndices where elements of I would be inserted into sorted_v to maintain order\n #http://docs.scipy.org/doc/numpy/reference/generated/numpy.searchsorted.html\n vindex=np.take(index, sorted_index, mode='clip') #Take elements from an array along an axis.\\\n #http://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html\n #print vindex\n return vindex.tolist()\n\ndef generateAlleleGenotypeMappers( numAlleles):\n \"\"\" analogous to AssignmentToIndex and IndexToAssignment\n this function maps alleles to genotypes and genotypes to alleles\n each allele's id is its index in the list of alleles/list of allele freqs\n Similar for genotypes, its id is its index in list of genotypes\n\n This function returns to numpy 2-d arrays. 
if n is the number of alleles\n and m is the number of genotypes, then the following NumPy data structures\n are returned\n\n allelesToGenotypes: n x n matrix that maps pairs of allele IDs to\n genotype IDs -- if allelesToGenotypes[i, j] = k, then the genotype with\n ID k comprises of the alleles with IDs i and j\n\n genotypesToAlleles: m x 2 matrix of allele IDs, where m is the number of\n genotypes -- if genotypesToAlleles(k, :) = i, j, then the genotype with ID k\n is comprised of the allele with ID i and the allele with ID j\n\n \"\"\"\n allelesToGenotypes=np.zeros([numAlleles,numAlleles], dtype=np.int)\n index=0\n for i in range(numAlleles):\n for j in range (i,numAlleles):\n allelesToGenotypes[i, j] = index;\n index+=1\n\n for i in range(numAlleles):\n for j in range(0,i):\n allelesToGenotypes[i,j] = allelesToGenotypes[j,i]\n\n\n numGenotypes= (numAlleles * (numAlleles - 1))/2 + numAlleles\n genotypesToAlleles=np.zeros([numGenotypes,2], dtype=np.int)\n\n index=0\n for i in range(numGenotypes):\n for j in range(i,numAlleles):\n #print i, j\n genotypesToAlleles[index, :] = [i, j];\n index+=1\n return ( allelesToGenotypes, genotypesToAlleles)\n\n\ndef genotypeToIndex( geno, alleles='ACGT',ploidy=2):\n \"\"\" given a string enumerating possible alleles\n return the index of geno in enumerated list of genotypes\n by default, the enumerated list is all 10 possibel genotypes\"\"\"\n\n genotypes= [ \"\".join(list(genotype)) for genotype in itertools.combinations_with_replacement(alleles, ploidy) ]\n print genotypes\n \n try:\n return genotypes.index(geno)\n except ValueError:\n print \"genotype not in list of genotypes.\"\n \n\ndef indexToGenotype( index, alleles='ACGT', ploidy=2):\n \"\"\" return genotype at a given index position after\n enumerating all possible genotypes given string of alleles and\n assigning to a list. 
By default the list contains all possible 10 genotypes\"\"\"\n\n \n genotypes= [ \"\".join(list(genotype)) for genotype in itertools.combinations_with_replacement(alleles, ploidy) ]\n \n try:\n return genotypes[index]\n except IndexError:\n print \"Index out of bounds, not a valid index for list of genotypes\"\n\n\n\ndef lognormalize(x):\n \"\"\"http://atpassos.posterous.com/normalizing-log-probabilities-with-numpy\n 'when working with probabilities it's very easy to get floating point underflows,\n so most people use log probabilities. Probability multiplication is very easy in\n log-space (it's just addition), but probability addition (or normalization) is harder,\n and the linked post gives an equation to compute that. I used to implement it myself,\n but today I learned that numpy does almost everything you need.'\n\n x is (natural) log list/array of numbers\n this normalizes it in probabliity space\"\"\"\n a=np.logaddexp.reduce(x)\n return np.exp(x-a)\n" }, { "alpha_fraction": 0.6626373529434204, "alphanum_fraction": 0.6725274920463562, "avg_line_length": 25.764705657958984, "blob_id": "b8ba422539d63a40e51a28785a1acec6f66f6232", "content_id": "699408ae88a86444bb8d779c9baf87a651d0865d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 910, "license_type": "no_license", "max_line_length": 114, "num_lines": 34, "path": "/PythonNotebooks/testComputeExactMaxMarginalsBP.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "from Factor import *\nfrom PGMcommon import *\nfrom CliqueTree import *\nfrom CliqueTreeOperations import *\nfrom FactorOperations import *\nimport scipy.io as sio\nimport numpy as np\nimport pprint\nimport pdb\nmatfile='/Users/amit/BC_Classes/PGM/Prog4/PA4Sample.mat'\nmat_contents=sio.loadmat(matfile)\nmat_struct=mat_contents['MaxMarginals']\nval=mat_struct[0,0]\ninput_factors = 
val['INPUT'][0]\nfactorList=[]\n\nALPHABET=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n\nfor tpl in input_factors:\n (var, card, values)=tpl\n #print var, card, values\n f= Factor( var[0].tolist(), card[0].tolist(), values[0].tolist(), 'factor' )\n #print f\n factorList.append( f )\n\nMARGINALS= ComputeExactMarginalsBP( factorList, [], 1 )\n\nfor m in MARGINALS:\n print m\n print \n\nMAPAssignment=MaxDecoding( MARGINALS )\n\nprint [ALPHABET[idx] for idx in MAPAssignment]\n" }, { "alpha_fraction": 0.6729958057403564, "alphanum_fraction": 0.6870604753494263, "avg_line_length": 29.80434799194336, "blob_id": "f841b9c048f226eb153184190888b776b9c16e91", "content_id": "4050115e93c84fe585b4751c0c64b0e71efca4a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1422, "license_type": "no_license", "max_line_length": 83, "num_lines": 46, "path": "/PythonNotebooks/testGetNextC.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "import scipy.io as sio\nimport numpy as np\nimport pprint\nfrom Factor import *\nfrom PGMcommon import *\nfrom CliqueTree import *\nfrom CliqueTreeOperations import *\nfrom FactorOperations import *\nimport pickle\n\n\"\"\" load the test data from the matlab file \"\"\"\n\nmatfile='/Users/amit/BC_Classes/PGM/Prog4/PA4Sample.mat'\nmat_contents=sio.loadmat(matfile)\nmat_struct=mat_contents['GetNextC']\nnp.shape(mat_struct)\nval=mat_struct[0,0] \n\ninput_edges = val['INPUT1']['edges'][0][0]\ninput_cliqueList= val['INPUT1']['cliqueList'][0][0][0]\nclique_list_factorObj=[]\nfor tpl in input_cliqueList:\n (var, card, values)=tpl\n f= Factor( var[0].tolist(), card[0].tolist(), values[0].tolist(), 'factor' )\n clique_list_factorObj.append(f)\n\nmessages=val['INPUT2']\nmessageFactors=[]\n(nrow,ncol)=np.shape(messages)\nfor i in range( nrow ):\n for j in range ( ncol ):\n (var, card, values)=messages[i][j]\n f=Factor( 
var[0].tolist(), card[0].tolist(), values[0].tolist(), 'factor' )\n messageFactors.append(f)\nMESSAGES= np.reshape( np.array ( messageFactors ), (nrow,ncol) )\n\n\n\"\"\" dump it to Python pickle file \"\"\"\nwith open('GetNextC.INPUT1.pickle', 'wb') as f:\n pickle.dump(clique_list_factorObj,f)\nwith open('GetNextC.INPUT2.pickle', 'wb') as f:\n pickle.dump(MESSAGES, f)\n\nP=CliqueTree( clique_list_factorObj , input_edges, clique_list_factorObj, [])\n(a,b)=getNextClique(P,MESSAGES)\nprint 'a: ', a, ' b:', b\n\n\n\n\n\n" }, { "alpha_fraction": 0.7124645709991455, "alphanum_fraction": 0.723796010017395, "avg_line_length": 25.148147583007812, "blob_id": "3da136b5d711f3610dcdd7012ff1b9369c21a21b", "content_id": "2be847120a8d1e3734495ecd1244c22c27111867", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 706, "license_type": "no_license", "max_line_length": 80, "num_lines": 27, "path": "/PythonNotebooks/testComputeExactMarginalsBP.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "from Factor import *\nfrom PGMcommon import *\nfrom CliqueTree import *\nfrom CliqueTreeOperations import *\nfrom FactorOperations import *\nimport scipy.io as sio\nimport numpy as np\nimport pprint\nimport pdb\nmatfile='/Users/amit/BC_Classes/PGM/Prog4/PA4Sample.mat'\nmat_contents=sio.loadmat(matfile)\nmat_struct=mat_contents['ExactMarginal']\nval=mat_struct[0,0]\ninput_factors = val['INPUT'][0]\nfactorList=[]\nfor tpl in input_factors:\n (var, card, values)=tpl\n #print var, card, values\n f= Factor( var[0].tolist(), card[0].tolist(), values[0].tolist(), 'factor' )\n print f\n factorList.append( f )\n print\nM=ComputeExactMarginalsBP( factorList )\n\n#for marginal in M:\n# print marginal\n# print\n" }, { "alpha_fraction": 0.7310230731964111, "alphanum_fraction": 0.7384488582611084, "avg_line_length": 21.830188751220703, "blob_id": "0732913f71d3a0e37ce1578a80734298d803125d", "content_id": 
"30737560acaabb2078c7491194efc40255b2bc76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1212, "license_type": "no_license", "max_line_length": 82, "num_lines": 53, "path": "/PythonNotebooks/testComputeExactMarginalsBP_SixPersonPedigree.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "from Factor import *\nfrom PGMcommon import *\nfrom CliqueTree import *\nfrom CliqueTreeOperations import *\nfrom FactorOperations import *\nimport scipy.io as sio\nimport numpy as np\nimport pprint\nimport pdb\n\nmatfile='/Users/indapa/software/PGM/Prog4/PA4Sample.mat'\nmat_contents=sio.loadmat(matfile)\n\nmat_struct=mat_contents['SixPersonPedigree']\nval=mat_struct[0]\n\nfactorList=[]\nfor elem in val:\n \n (var, card, val) =elem\n f= Factor( var[0].tolist(), card[0].tolist(), val[0].tolist(), 'factor' )\n #print f\n factorList.append(f)\n#print\n\njointFactor = ComputeJointDistribution(factorList)\n#print jointFactor\n\nMARGINALS= ComputeExactMarginalsBP( factorList, [], 1)\nP=CreatePrunedInitCtree(factorList, [] )\n(P, MESSAGES) = CliqueTreeCalibrate(P, isMax=1)\njointDistribution=ComputeJointDistributionFromCalibratedCliqueTree(P, MESSAGES, 1)\n\nprint jointDistribution\n\n\n #for marginal in MARGINALS:\n #print marginal\n#print\n\nMAPAssignment=MaxDecoding( MARGINALS )\nprint MAPAssignment\n\n\n #for m in MARGINALS:\n #m.setVal( np.log( lognormalize(m.getVal() ) ) )\n#print np.sum( lognormalize(m.getVal() ) )\n\n#print MAPAssignment\n\n #for marginal in MARGINALS:\n #print marginal.getVal()\n#print\n\n\n" }, { "alpha_fraction": 0.5498276948928833, "alphanum_fraction": 0.5565862655639648, "avg_line_length": 34.619049072265625, "blob_id": "28b1213670945b618ae76007849239564e60561a", "content_id": "bc4b62dd851aee8a9a7b141e5692c63d02d25d65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7546, "license_type": "no_license", "max_line_length": 129, 
"num_lines": 210, "path": "/CliqueTree.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "from Factor import *\nfrom FactorOperations import *\nimport numpy as np\nimport networkx as nx\nimport pdb\n\nclass CliqueTree(object):\n 'represent a Clique tree'\n\n def __init__(self, nodeList=[], edges=[], factorList=[],evidence=[]):\n self.nodeList=nodeList\n self.edges=edges\n self.factorList=factorList\n self.evidence=evidence\n self.card= []\n self.factorInds= []\n\n\n def toString(self):\n \n print 'nodes: ', self.nodeList\n print 'card: ', self.card\n print 'factorList: ', len( self.factorList)\n print 'factorInds:', self.factorInds\n print 'edges:\\n',self.edges\n \n \n def setNodeList(self,nodeList):\n self.nodeList=nodeList\n\n def setEdges(self,edges):\n self.edges=edges\n\n def setFactorList(self,factorList):\n self.factorList=factorList\n #self.factorInds= len( factorList ) * [None]\n\n def setEvidence(self,evidence):\n self.evidence=evidence\n\n\n def setCard(self, cardinality):\n self.card = cardinality\n\n def getNodeList(self):\n return self.nodeList\n\n def getNodeCount(self):\n return len(self.nodeList)\n\n\n def getEdges(self):\n return self.edges\n\n def getFactorList(self):\n return self.factorList\n\n def getFactorCount(self):\n return len(self.factorList)\n \n\n def getEvidence(self):\n return self.evidence\n\n def incorporateEvidence(self):\n for j in range ( len(self.evidence)):\n k=j+1\n if self.evidence[j] > 0:\n self.factorList=ObserveEvidence(self.factorList, np.matrix([[k, self.evidence[j] ]] ) )\n \n\n def eliminateVar(self,Z,E,factorList):\n \"\"\" a variable elimination function\n based on https://github.com/indapa/PGM/blob/master/Prog4/EliminateVar.m\n\n Z is the variable to be eliminated. 
We base this code on the matlab file\n linked to above as well as the Sum-product VE pseudo code in Koller and Friedman\n page 298\n\n E is a numpy 2d matrix representing adjacency matrix of variables\n It represents the induced VE graph\n Once a variable is eliminated, its edges are removed from E\n\n \"\"\"\n\n useFactors = []#the index of the factor that contains the variable Z\n scope = []\n\n #print 'Z: ', Z\n\n \n #get a list containining the index in self.factorLlist of factors\n #that contain the variable Z to be eliminated\n # get the scope of variables from the factors that contain variable Z\n for i in range (len(factorList)):\n \n if Z in factorList[i].getVar().tolist():\n useFactors.append(i)#the ith factor is being currently involved in elimination\n scope=list(set.union(set(scope), factorList[i].getVar().tolist() ))\n\n \n # update edge map\n \"\"\" These represent the induced edges for the VE graph.\n once the variable Z is eliminated, its edges are removed from the graph\n but in the process of elimination, we create a new factor. This\n introduces fill edges (see pg. 
307 Koller and Friedman)\n Z is one based, but the indices in E are zero based, hence Z-1\n also the variable names in scope are 1 based, so we subtract 1 when\n updating the induced VE graph \"\"\"\n\n for i in range ( len(scope)):\n for j in range ( len(scope)):\n if i != j:\n E[ scope[i]-1, scope[j]-1 ]=1\n E[ scope[j]-1, scope[i]-1 ]=1\n E[Z-1,:]=0\n E[:,Z-1]=0\n\n #G=nx.from_numpy_matrix(E)\n #print 'induced graph edges:\\n', (G.edges())\n #nx.draw_shell(G)\n #plt.show()\n\n \n #these are the indices of factorList which are not involved in VE\n unusedFactors= list( set.difference ( set(range(len(factorList))), set(useFactors) ) )\n \n newF=None\n #check first if there are any unused factors left!\n if len(unusedFactors) > 0:\n newF=len(unusedFactors)*[None]\n newmap=np.zeros(max(unusedFactors)+1,dtype=int).tolist()\n \n #newF is a new factor list, we populate it first\n #with the unused factors\n #newmap is maps the new location of ith unusedFactor\n for i in range( len(unusedFactors)):\n newF[i]=factorList[ unusedFactors[i] ]\n newmap[ unusedFactors[i] ]= i\n \n #print 'newmap ', newmap,\"\\n\"\n #print 'length of newmap: ', len(newmap), \"\\n\"\n\n newFactor = Factor( [], [], [], 'newFactor')\n\n #we multiple in all the factors that contain the variable Z\n for i in range( len (useFactors)):\n newFactor = FactorProduct(newFactor,factorList[ useFactors[i] ])\n \n\n #then we marginalize Z out and obtain a new factor\n #then append it the end of newF, the new factor list\n newFactor = FactorMarginalization( newFactor,[Z] )\n #print 'newFactor: ',newFactor\n #newF(length(nonUseFactors)+1) = newFactor;\n if newFactor != None:\n newF.append ( newFactor )\n\n \n\n if newF != None:\n factorList=newF\n #return E\n\n ########################################################################\n \"\"\" the remaining code builds the edges of the clique tree \"\"\"\n\n \"\"\" add new node with the factors that contain the variable Z\n adding a new node represents new 
clique.\n The scope of every factor generated during the variable elimination process is a clique pg. 309 Koller & Friedman \"\"\"\n\n self.nodeList.append ( scope )\n \n #newC is the total number of nodes in the clique tree\n newC=len( self.nodeList )\n #print 'newC: ', newC\n\n #factorInds are individual factors with one variable ... I think\n self.factorInds.append ( len(unusedFactors) + 1 )\n \n\n #print 'range( newC -1) ', range( newC-1 )\n #print 'factorInds: ', self.factorInds\n #print 'useFactors: ', useFactors\n #pdb.set_trace()\n \"\"\" we update the edges of the clique tree \"\"\"\n for i in range( newC -1 ):\n \n #if self.factorInds [ i ] -1 in useFactors:\n #there was the off by onoe erorr - the values in factorInds\n #were one-based, need to subtract 1\n if self.factorInds [ i ] -1 in useFactors:\n \n self.edges[ i, newC-1 ] = 1\n self.edges [ newC-1, i ] = 1\n self.factorInds[ i ] = 0\n else:\n if self.factorInds [i] != 0:\n #print 'i: ', i\n #print 'factorInds: ', self.factorInds\n #print 'newmap: ', newmap\n #print 'newmap [ self.factorInds[i] -1: ', newmap [ self.factorInds[i] -1 ]\n #print 'self.factorInds[ i ] = newmap [ self.factorInds[i] - 1 ] + 1 '\n if len(unusedFactors) > 0:\n #self.factorInds[ i ] = newmap [ self.factorInds[i] -1 ] +1\n self.factorInds[ i ] = newmap [ self.factorInds[i] -1 ] +1\n #self.factorInds[ i ] = newmap [ self.factorInds[i] ]\n \n #print 'factorInds right before returning: ', self.factorInds\n return E, factorList\n\n \n \n \n \n \n \n\n\n \n" }, { "alpha_fraction": 0.6040363311767578, "alphanum_fraction": 0.6095243692398071, "avg_line_length": 32.891998291015625, "blob_id": "5c1ec832317f91ec124a82ea127d3d09b357f2f2", "content_id": "7f8e61964fb1ca92ec0a9bb7bc030ecd890dca57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16946, "license_type": "no_license", "max_line_length": 125, "num_lines": 500, "path": "/PedigreeFactors.py", "repo_name": "indapa/pgmPy", 
"src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\nfrom Factor import *\nfrom PGMcommon import *\nfrom FactorOperations import *\nimport itertools\n\nclass PhenotypeFactor (object):\n \"\"\" represents a factor that encodes Pr(phenotype|genotype)\n for purposes here the variable to the left of the conditioning\n bar is the first variable in the PhenotypeFactor var list. \"\"\"\n\n def __init__(self, isDominant, genotypeVar, phenotypeVar, name):\n\n #instantiate a Factor object\n phenotype = Factor( [phenotypeVar, genotypeVar], [2, 3], [], name )\n \n phenotype.setVal( np.zeros ( np.prod(phenotype.getCard())).tolist() )\n #this enumerates the values the factor can take\n # since there are 2x3 cardinality, 6 possible assignments\n assignments=IndexToAssignment( np.arange(np.prod(phenotype.getCard())), phenotype.getCard() )\n val=val = np.zeros(np.prod(phenotype.getCard() ))\n (nrows,ncols)=np.shape(assignments)\n\n for i in range(np.prod([2,3])):\n #if its dominant, if you have at least one copy, you have the phenotype\n (pheno,geno)=assignments[i]\n if isDominant==1:\n if pheno ==1: #affected\n if geno ==1 or geno ==2:\n val[i]=1\n else:\n val[i]=0\n else:#uneffected\n if geno == 3:\n val[i]=1\n\n\n if isDominant == 0:\n if pheno == 1:\n if geno==3:\n val[i]=1\n else:\n if geno ==1 or geno == 2:\n val[i]=1\n\n\n phenotype.setVal( val.tolist() )\n\n self.phenotype=phenotype\n\n\n\n def getVar(self):\n return self.phenotype.getVar()\n def getCard(self):\n return self.phenotype.getCard()\n def getVal(self):\n return self.phenotype.getVal()\n\n def getFactor(self):\n return self.phenotype\n\n def __str__(self):\n return self.phenotype.__str__()\n\n\n\nclass PhenotypeGivenGenotypeFactor(object):\n \"\"\" construct factor of phenotype|genotype\n #prob of being effected, given the ith genotype\n #alphaList[i] is the prob of being effected given the ith genotype \"\"\"\n def __init__(self,alphaList, phenotypeVar, genotypeVar , name):\n 
self.phenotypeFactor=Factor( [ phenotypeVar, genotypeVar], [], [], name)\n self.alpha=np.array ( alphaList)\n\n ngenotypes=len(alphaList)\n self.phenotypeFactor.setCard( [2, ngenotypes])\n\n values=[x for x in range( np.prod(self.phenotypeFactor.getCard()))]\n\n for i in range( len(alphaList )):\n\n values[i]=alphaList[i]\n values[i+1]=1-alphaList[i]\n ctr=0\n alphas=2*len(alphaList)*[None]\n for i in range(len(alphaList)):\n alphas[ctr]=alphaList[i];\n ctr=ctr+1\n alphas[ctr]=1-alphaList[i];\n ctr=ctr+1\n\n values=alphas\n self.phenotypeFactor.setVal( values)\n\n def getVar(self):\n return self.phenotypeFactor.getVar()\n def getCard(self):\n return self.phenotypeFactor.getCard()\n def getVal(self):\n return self.phenotypeFactor.getVal()\n def setVal(self,val):\n self.phenotypeFactor.setVal(val)\n def getFactor(self):\n return self.phenotypeFactor\n\n\n def __str__(self):\n return self.phenotypeFactor.__str__()\n\n\nclass GenotypeAlleleFreqFactor (object):\n \"\"\" construct a factor that has the probability of each genotype\n given allele frequencies Pr(genotype|allele_freq)\"\"\"\n\n def __init__(self, allelefreqs, genotypeVar, name):\n self.allelefreq=allelefreqs\n #number of alleles == number of allele frequencies passed in\n numAlleles=len(allelefreqs)\n self.allelesToGenotypes=None\n self.genotypesToAlleles=None\n self.genotypeFactor=None\n\n #map alleles to genotypes and genotyeps to alleles\n (self.allelesToGenotypes, self.genotypesToAlleles)=generateAlleleGenotypeMappers(numAlleles)\n (ngenos,ploidy)=np.shape(self.genotypesToAlleles)\n\n\n self.genotypeFactor = Factor( [genotypeVar], [], [], name)\n #the cardinality of the factor is the number of genotypes\n self.genotypeFactor.setCard( [ngenos] )\n\n #set the values to zero initially\n values=np.zeros( (np.prod(self.genotypeFactor.getCard()))).tolist()\n \n for i in range (ngenos):\n alleles=self.genotypesToAlleles[i,:].tolist()\n \n\n if alleles[0] == alleles[1]:\n values[i]= np.prod( [ allelefreqs[j] 
for j in alleles ])\n \n else:\n values[i]= np.prod( [ allelefreqs[j] for j in alleles ]) * 2\n \n self.genotypeFactor.setVal( values )\n\n\n def getVar(self):\n return self.genotypeFactor.getVar()\n def getCard(self):\n return self.genotypeFactor.getCard()\n def getVal(self):\n return self.genotypeFactor.getVal()\n def setVal(self,val):\n self.genotypeFactor.setVal(val)\n def getFactor(self):\n return self.genotypeFactor\n\n\n def __str__(self):\n return self.genotypeFactor.__str__()\n\nclass GenotypeGivenParentsFactor (object):\n \"\"\" construct factor that has prob of genotype of child given both parents\n Pr(g_child| g_mother, g_father \"\"\"\n\n def __init__(self,numAlleles, genotypeVarChild, genotypeVarParentOne, genotypeVarParentTwo, name):\n self.genotypeFactor = Factor( [genotypeVarChild, genotypeVarParentOne, genotypeVarParentTwo ], [ ], [ ], name)\n\n #map alleles to genotypes and genotyeps to alleles\n (self.allelesToGenotypes, self.genotypesToAlleles)=generateAlleleGenotypeMappers(numAlleles)\n\n (ngenos,ploidy)=np.shape(self.genotypesToAlleles)\n \n\n \n self.genotypeFactor.setCard([ ngenos,ngenos,ngenos ] )\n #set the values to zero initially\n values=np.zeros( (np.prod(self.genotypeFactor.getCard()))).tolist()\n\n #iterate thru variable assignments to random variables\n #assign probablities based on Punnet square crosses\n assignments=IndexToAssignment( np.arange(np.prod(self.genotypeFactor.getCard())), self.genotypeFactor.getCard() )-1\n for z in range( np.prod(self.genotypeFactor.getCard() ) ):\n curr_assign= assignments[z]\n childAssignment=int(curr_assign[0])\n\n parent1gametes= self.genotypesToAlleles[curr_assign[1],:]\n parent2gametes= self.genotypesToAlleles[curr_assign[2],:]\n #print 'parental gametes: ', parent1gametes, parent2gametes\n #print 'child assignment: ', childAssignment\n #list of tuples containing list of zygote(genotype) tuples\n zygote_list=list(itertools.product(parent1gametes,parent2gametes))\n punnet_freq=[ 
self.allelesToGenotypes[zygote[0],zygote[1]] for zygote in zygote_list ]\n histc={}\n hist=[]\n for g in range( ngenos):\n histc[g]=0.\n for x in punnet_freq:\n histc[x]+=1.\n #print histc.values()\n for g in range (ngenos):\n hist.append ( histc[g] )\n #print punnet_freq\n hist=(np.array ( hist)) /4\n #print 'hist:', hist\n #print zygote_list\n values[z]=hist[childAssignment]\n\n self.genotypeFactor.setVal( values )\n\n def getVar(self):\n return self.genotypeFactor.getVar()\n def getCard(self):\n return self.genotypeFactor.getCard()\n def getVal(self):\n return self.genotypeFactor.getVal()\n def setVal(self, val):\n self.genotypeFactor.setVal(val)\n\n def getFactor(self):\n return self.genotypeFactor\n def genotypeSlice(self):\n pass\n #see this http://stackoverflow.com/q/4257394/1735942\n\n def __str__(self):\n return self.genotypeFactor.__str__()\n\n\nclass ChildCopyGivenParentalsFactor(object):\n \"\"\" this represents a de-coupled factor\n given a parents two haplotypes, returns\n factor whose values are the probablity\n of inheriting (grand)paternal or (grand)maternal\n haplotype. This allows for some more flexibility\n in modeling inheritance, rather than clumping\n a single parent's haplotype into a genotype\n i.e. 
GenotypeGivenParentsFactor \"\"\"\n\n def __init__(self, numAlleles, geneCopyVarChild, geneCopyHapOne, geneCopyHapTwo):\n self.numalleles=numAlleles\n self.hapone=geneCopyVarChild\n self.haptwo=geneCopyHapTwo\n\n #geneCopyFactor = struct('var', [], 'card', [], 'val', []);\n self.geneCopyFactor=Factor( [geneCopyVarChild, geneCopyHapOne, geneCopyHapTwo ], [], [], 'child|hap1,hap2')\n self.geneCopyFactor.setCard( [self.numalleles,self.numalleles,self.numalleles ])\n values=np.zeros( np.prod([ self.numalleles,self.numalleles,self.numalleles])).tolist()\n #this keeps track of what posiiton you are in the values list\n index=0\n #the number of iterations thru the nested for loops should be equal to numallels^3\n\n for i in range(numAlleles):\n #iterate through alleles from\n #grand(paternal) haplotype\n for j in range(numAlleles):\n #iterate through alleles from\n #grand(maternal) haplotype\n for k in range(numAlleles):\n #iterate thru child alleles\n print i, j, k\n if j==k:#child has grandmotherhap\n if i==k:#grandfatherhap is the same\n values[index]=1\n else:\n values[index]=.5\n elif i==k:#child has grandfather hap\n values[index]=.5\n else:\n pass\n index+=1\n #print values\n self.geneCopyFactor.setVal( values )\n\n def getVar(self):\n return self.geneCopyFactor.getVar()\n def getCard(self):\n return self.geneCopyFactor.getCard()\n def getVal(self):\n return self.geneCopyFactor.getVal()\n def getFactor(self):\n return self.geneCopyFactor\n def __str__(self):\n return self.geneCopyFactor.__str__()\n \nclass ChildCopyGivenFreqFactor(object):\n \"\"\" for a founder, its particular haplotype is proprortional to the\n given allelel freq of the locus. 
This factor is part of the decoupled\n Bayesian Genetic network , along with ChildCopyGivenParentalsFactor\"\"\"\n \n def __init__(self, alleleFreqs, geneCopyVar):\n numAlleles = len(alleleFreqs)\n self.geneCopyFactor=Factor( [geneCopyVar], [], [], 'founderHap')\n self.geneCopyFactor.setCard ( [numAlleles])\n self.geneCopyFactor.setVal( alleleFreqs )\n #geneCopyFactor = struct('var', [], 'card', [], 'val', [])\n #geneCopyFactor.var(1) = geneCopyVar;\n #geneCopyFactor.card(1) = numAlleles;\n #geneCopyFactor.val = alleleFreqs';\n\n\n def getVar(self):\n return self.geneCopyFactor.getVar()\n def getCard(self):\n return self.geneCopyFactor.getCard()\n def getVal(self):\n return self.geneCopyFactor.getVal()\n def getFactor(self):\n return self.genCopyFactor\n def __str__(self):\n return self.geneCopyFactor.__str__()\n\nclass phenotypeGivenHaplotypesFactor(object):\n \"\"\" factor represents Pr(phenotype| paternal haplotype, maternal haplotype)\n very similiar to PhenotypeGivenGenotypeFactor, but we are de-coupling into\n paternal and maternal alleles rather than genotype\"\"\"\n\n def __init__(self, alphaList, numAlleles, geneCopyVarOne, geneCopyVarTwo, phenotypeVar):\n \n self.numalleles=numAlleles\n self.alphaList=alphaList\n self.phenotypeFactor=Factor([phenotypeVar,geneCopyVarOne, geneCopyVarTwo], [], [], 'phenotype| geneCopy1, geneCopy2')\n\n ngenos=len(alphaList)\n self.phenotypeFactor.setCard( [ 2, numAlleles, numAlleles])\n #phenotypeFactor.val = zeros(1, prod(phenotypeFactor.card));\n values=np.zeros( (1, np.prod(self.phenotypeFactor.getCard()))).flatten().tolist()\n\n affectedAlphas=alphaList\n unaffectedAlphas=[ 1- alpha for alpha in alphaList]\n\n\n (allelesToGenotypes, genotypesToAlleles) = generateAlleleGenotypeMappers(numAlleles)\n assignments=IndexToAssignment( np.arange(np.prod(self.phenotypeFactor.getCard())), self.phenotypeFactor.getCard() )-1\n for z in range( np.prod(self.phenotypeFactor.getCard() ) ):\n curr_assign= assignments[z]\n 
curr_assign=assignments[z]\n genotype_num=allelesToGenotypes[curr_assign[1], curr_assign[2]]\n if curr_assign[0] == 0:\n values[z] = affectedAlphas[genotype_num]\n else:\n values[z] = unaffectedAlphas[genotype_num]\n self.phenotypeFactor.setVal( values )\n\n\n #genotype_num=allelesToGenotypes(assignment(2), assignment(3));\n\n\n def getVar(self):\n return self.phenotypeFactor.getVar()\n def getCard(self):\n return self.phenotypeFactor.getCard()\n def getVal(self):\n return self.phenotypeFactor.getVal()\n def getFactor(self):\n return self.phenotypeFactor\n def __str__(self):\n return self.phenotypeFactor.__str__()\n\n def __str__(self):\n return self.phenotypeFactor.__str__()\n\n\n##########################\n\nclass Ped(object):\n \"\"\" represents a pedigree record in a Ped file http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped \"\"\"\n\n def __init__(self,famid='.', indv='.', paternal='.', maternal='.', sex='.', phenotype='.'):\n \"\"\" attributes of a Ped object \"\"\"\n self.famid=famid\n self.individ=indv\n self.pid=paternal\n self.mid=maternal\n self.sex=sex\n self.pheno=phenotype\n\n\n def setfamid(self,famid):\n self.famid=famid\n\n def setindvid(self,indv):\n self.individ=indv\n\n def setmid(self,mid):\n self.mid=mid\n\n def setpid(self,pid):\n self.pid=pid\n\n def setsex(self,sex):\n self.sex=sex\n\n def setpheno(self,pheno):\n self.pheno=pheno\n\n\n\n def getfamid(self):\n return self.famid\n\n def getid(self):\n return self.individ\n\n def getmid(self):\n return self.mid\n\n def getpid(self):\n return self.pid\n\n def getsex(self):\n return self.sex\n\n def getpheno(self):\n return self.pheno\n\n\n def isFounder(self):\n return self.pid == '0' and self.mid == '0'\n\n def getParents(self):\n\n return ( self.pid, self.mid)\n\n\n def __str__(self):\n return \"\\t\".join( [ self.famid, self.individ, self.pid, self.mid, self.sex, self.pheno] )\n\n\nclass Pedfile(object):\n \"\"\" a Pedfile object has a list of Ped objects \"\"\"\n\n def 
__init__(self,filename):\n self.filename=filename\n self.fh=open(self.filename, 'r')\n self.pedlist=[]\n\n def parsePedfile(self):\n \"\"\" given a filehandle to a *.ped file read its contents and populate the list pedlist with Ped objects \"\"\"\n for line in self.fh:\n fields=line.strip().split('\\t')\n (famid, indv, pid, mid, sex, pheno)=fields[0:6]\n self.pedlist.append( Ped(famid, indv, pid, mid, sex, pheno) )\n\n \n def returnFounders(self):\n \"\"\" return the founders in a ped file (those with unknown paternal and maternids \"\"\"\n founders=[]\n\n for pedobj in self.pedlist:\n if pedobj.getpid() == \"0\":\n founders.append(pedobj)\n\n return founders\n\n def returnFounderIds(self):\n \"\"\" return the indiv ids of the founders in the ped file\"\"\"\n founderids=[]\n for pedobj in self.pedlist:\n if pedobj.getpid() == \"0\":\n founderids.append( pedobj.getid() )\n return founderids\n\n def returnNonFounderIds(self):\n \"\"\" return the indiv ids of the founders in the ped file\"\"\"\n nonfounderids=[]\n for pedobj in self.pedlist:\n if pedobj.getpid() != \"0\":\n nonfounderids.append( pedobj.getid() )\n return nonfounderids\n\n def returnNonFounders(self):\n \"\"\" return the founders in a ped file (those with unknown paternal and maternids \"\"\"\n nonfounders=[]\n\n for pedobj in self.pedlist:\n if pedobj.getpid() != \"0\":\n nonfounders.append(pedobj)\n\n return nonfounders\n\n def returnIndivids(self):\n \"\"\" return a list of indvi ids from the ped file \"\"\"\n #samplelist=[]\n return [ ped.getid() for ped in self.pedlist]\n\n def getPedList(self):\n return self.pedlist\n def getTotalSize(self):\n return len(self.pedlist)\n\n def yieldMembers(self):\n for pedobj in self.pedlist:\n yield pedobj\n\n def __str__(self):\n return \"\\n\".join( [ x.__str__() for x in self.pedlist ] )\n" }, { "alpha_fraction": 0.6863157749176025, "alphanum_fraction": 0.7442105412483215, "avg_line_length": 29.645160675048828, "blob_id": 
"0a1a76b17b4d700f9be7186e8dd219773d8cc43a", "content_id": "e8c8f724f79ebda14e822822476b89c63b38eef7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "no_license", "max_line_length": 77, "num_lines": 31, "path": "/PythonNotebooks/testFactorMaxMarginal.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "from Factor import *\nfrom PGMcommon import *\nfrom CliqueTree import *\nfrom CliqueTreeOperations import *\nfrom FactorOperations import *\nimport scipy.io as sio\nimport numpy as np\nimport pprint\nimport pdb\nmatfile='/Users/amit/BC_Classes/PGM/Prog4/PA4Sample.mat'\nmat_contents=sio.loadmat(matfile)\nmat_struct=mat_contents['FactorMax']\nval=mat_struct[0,0]\ninput_factors = val['INPUT1'][0][0]\nvar = input_factors[0].flatten().tolist()\ncard=input_factors[1].flatten().tolist()\nvalue=input_factors[2].flatten().tolist()\nprint var\nprint card\nprint value\nINPUT1= Factor( var, card, value, 'test')\nINPUT2= val['INPUT2'].flatten()\nprint INPUT1\nprint INPUT2\nprint FactorMaxMarginalization(INPUT1, INPUT2)\n#example used in section 13.2 pg 555 of Friedman and Koller\nprint \"=====\"\npsi=Factor( [ 1,2,3], [3,2,2], [.25,.05,.15,.08,0,.09,.35,.07,.21,.16,0,.18])\nmaxfactor= FactorMaxMarginalization(psi, [2])\nprint maxfactor\nprint IndexToAssignment(np.arange(6),[3,2])\n" }, { "alpha_fraction": 0.7376237511634827, "alphanum_fraction": 0.7514851689338684, "avg_line_length": 19.612245559692383, "blob_id": "86ff9b6732bcf620cf67d00014f02397c8fa240d", "content_id": "54f67628fbc30361e5623bc83d6eed66e3c68a05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 78, "num_lines": 49, "path": "/PythonNotebooks/testCliqueTreeOperations.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\nfrom Factor import *\nfrom PGMcommon import 
*\nfrom PedigreeFactors import *\nfrom FactorOperations import *\nfrom GeneticNetworkFactory import *\nfrom CliqueTree import *\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom CliqueTreeOperations import *\n#to create a clique tree, we start with a list of factors\n#and potentially some observed evidence\nalphaList=[.8,.6,.1]\nallelefreq=[.1,.9]\nchrom='12'\nposition=1000\ng1=GeneticNetworkFactory('sixperson.ped',alphaList,allelefreq, chrom,position)\ng1.constructNetwork()\nfactorList=g1.getFactorList()\n\n#for f in factorList:\n# print f\n# print \n#print \"+++++++++\"\n\ncTree = createCliqueTree(factorList)\nG=nx.from_numpy_matrix( cTree.getEdges() )\n\nnx.draw_shell(G)\nplt.show()\n#print cTree.getEdges()\n\n#cTree.toString()\n\n\nprunedCTree=PruneTree( cTree )\nP=CliqueTreeInitialPotential( prunedCTree )\n\n#for f in P.getNodeList():\n# print f\n# print\n\n\n\n\nG=nx.from_numpy_matrix( prunedCTree.getEdges() )\nnx.draw_shell(G)\nplt.show()\n" }, { "alpha_fraction": 0.5923784971237183, "alphanum_fraction": 0.5969107151031494, "avg_line_length": 39.27850341796875, "blob_id": "b5132ff6c2652a9dccd8fb63ae78206208620c59", "content_id": "371be786bccdbdfa6ed14e3f39ed6e62084ca22a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21623, "license_type": "no_license", "max_line_length": 172, "num_lines": 535, "path": "/CliqueTreeOperations.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom CliqueTree import *\nfrom FactorOperations import *\n#import matplotlib.pyplot as plt\nimport networkx as nx\nimport pdb\n\ndef createCliqueTree( factorList,E=[]):\n \"\"\" return a Clique Tree object given a list of factors\n it peforms VE and returns the clique tree the VE\n ordering defines. 
See Chapter 9 of Friedman and Koller\n Probabilistic Graphical Models\"\"\"\n\n V=getUniqueVar(factorList)\n \n totalVars=len(V)\n cardinality=np.zeros(len(V)).tolist()\n for i in range(len(V)):\n for j in range(len(factorList)):\n try:\n indx= factorList[j].getVar().tolist().index( V[i] )\n cardinality[i]=factorList[j].getCard().tolist()[indx]\n break\n except:\n continue\n\n edges=np.zeros( (totalVars, totalVars))\n\n\n \"\"\" Set up adjacency matrix: for each factor, get the list of variables in its scope and create an edge between each variable in the factor \"\"\"\n for f in factorList:\n variableList=f.getVar()\n for j in range(len(variableList) ):\n for k in range (len(variableList) ):\n edges[ variableList[j]-1, variableList[k]-1 ]=1\n\n (nrows,nedges)=np.shape(edges) \n\n C=CliqueTree()\n C.setCard( cardinality )\n C.setEdges(np.zeros( (totalVars, totalVars)))\n C.setFactorList(factorList)\n C.setEvidence(E)\n C.setNodeList([])\n #print 'length of factorList: ', len(factorList)\n #print C.toString()\n cliquesConsidered = 0\n #pdb.set_trace()\n while cliquesConsidered < len(V):\n bestClique = 0\n bestScore = sys.maxint\n for i in range(nrows):\n score=np.sum( edges[i,:] )\n if score > 0 and score < bestScore:\n bestScore = score\n bestClique = i+1\n cliquesConsidered+=1\n \n (edges, factorList)=C.eliminateVar(bestClique, edges, factorList)\n\n return C\n\n\ndef PruneTree ( C ):\n \"\"\" prune a clique tree by determing if neighboring cliques are \n supersets of each other. 
E.g.: [A,B,E] -- [A,B] -- [A,D] \n pruned: [A,B,E] -- [A,D] \"\"\"\n\n ctree_edges=C.getEdges()\n (nrows,ncols)=np.shape( ctree_edges )\n totalNodes=nrows\n Cnodes=C.getNodeList()\n \n toRemove=[]\n #print range( totalNodes )\n\n for i in range ( totalNodes ):\n if i in toRemove: continue\n #np.nonzero returns tuple, hence the [0]\n #we collect the neighbors of the ith clique\n neighborsI = np.nonzero ( ctree_edges[i,:] )[0].tolist()\n for c in range ( len(neighborsI) ):\n j= neighborsI[c]\n assert ( i != j), 'i cannot equal j: PruneTree'\n if j in toRemove: continue\n #here is where we look for superset neighboring nodes in the CTree\n if sum ( [ x in Cnodes[j] for x in Cnodes[i] ] ) == len( Cnodes[i] ):\n for nk in neighborsI:\n cnodes_i = set ( Cnodes[i] )\n cnodes_nk= set ( Cnodes[nk] )\n if len( list ( set.intersection( cnodes_i, cnodes_nk) ) ) == len (Cnodes[i]):\n neighborsI_set=set( neighborsI )\n nk_set=set( [nk] )\n ctree_edges [ list( neighborsI_set - nk_set ), nk ] = 1\n ctree_edges [ nk, list( neighborsI_set - nk_set )] = 1\n break\n ctree_edges[i,:]=0\n ctree_edges[:,i]=0\n toRemove.append(i)\n toKeep = list ( set ( range( totalNodes ) ) - set ( toRemove ) )\n for indx in toRemove:\n Cnodes[indx]=[]\n Cnodes=[ item for item in Cnodes if len(item) > 0 ]\n ctree_edges= ctree_edges[np.ix_(toKeep, toKeep)]\n\n C.setNodeList( Cnodes )\n C.setEdges( ctree_edges )\n #pdb.set_trace()\n #return the pruned tree with the updated nodes and edges\n return C\n\ndef CliqueTreeObserveEvidence ( C, E ):\n \"\"\" given a CliqueTree object C and list of values E, which represent evidence, update\n the factors of the cliqueTree C to reflect the observed evidence.\n Note that ObserveEvidence in FactorOperations assumes E is a Nx2 matrix,\n here we build the Nx2 matrix by assuing the jth index of E is the evidence\n for the variable j\"\"\"\n factorList= C.getFactorList()\n for j in range ( len (E)):\n if E[j] > 0:\n factorList=ObserveEvidence( factorList, 
np.array(np.matrix( [ j+1, E[j]] ) ) )\n C.setFactorList(factorList)\n #return the new CliqueTree object with the updated evidence\n return C\n\n\ndef CliqueTreeInitialPotential( C ):\n \"\"\" given a clique tree object C, calculate the initial potentials for each of the cliques\n the factors in the updated clique list are FActor objects\"\"\"\n\n \n N= C.getNodeCount()\n totalFactorCount=C.getFactorCount()\n\n nodeList=C.getNodeList()\n factorList=C.getFactorList()\n\n cliqueList=[ Factor( [], [], [], str(i) ) for i in range(N) ]\n #edges=np.zeros( (N,N) )\n\n \"\"\" First assign the factors to appropriate cliques\n based on the skeleton cliqueTree cTree\"\"\"\n\n factorsUsed=np.zeros( totalFactorCount, dtype=int).tolist()\n #pdb.set_trace()\n for i in range(N):\n cliqueList[i].setVar( nodeList[i] )\n F=[]\n \"\"\" we add factors to the clique if they are in the variable scope of the clique \"\"\"\n for j in range( len(factorList) ):\n if len( factorList[j].getVar().tolist() ) == len ( list( set.intersection ( set(cliqueList[i].getVar().tolist() ), set( factorList[j].getVar().tolist() ) ) ) ):\n \n if factorsUsed[j] == 0:\n F.append( factorList[j] )\n factorsUsed[j] = 1\n #print F\n #pdb.set_trace()\n #F= [ f.getFactor() for f in F ]\n cliqueList[i]=ComputeJointDistribution ( F )\n #pdb.set_trace()\n C.setNodeList(cliqueList)\n #pdb.set_trace()\n return C\n\ndef getNextClique(P, messages):\n\n \"\"\" we need to come up wih a proper message passing order. A clique is ready to pass\n messages upward once its recieved all downstream messages from its neighbor (and vice versa)\n its ready to transmit downstream once it recieves all its upstream messages\n\n the ith clique C_i is ready to transmit to its neighbor C_j when C_i recieves all its\n messages from neigbors except C_j. In cTree message passing, each message is passed\n once. 
To get the process started we start with our initial potential cTree, P\n and an empty matrix of factors, representing messages passed between the nodes on the clique\n tree \"\"\"\n i=j=-1\n edges=P.getEdges()\n #print edges\n (nrow, ncol) = np.shape(edges)\n\n for r in range(nrow):\n \n #we want to ignore nodes with only one neighbor\n #becuae they are ready to pass messages\n if np.sum(edges[r,:] ) == 1:\n continue \n\n foundmatch=0\n\n for c in range(ncol):\n if edges[r,c] == 1 and messages[r,c].getVarCount() == 0:\n #list of indices indicating neighbors or r\n #print 'r,c: ', r, ' ', c\n #print 'edges[r,c]: ', edges[r,c]\n Nbs=np.nonzero(edges[:,r])[0]\n #print 'Nbs before:', Nbs\n Nbs=Nbs[np.nonzero(Nbs!= c)[0]]\n #print 'Nbs after: ', Nbs\n allnbmp=1 #neighbors messages passed?\n \n #find all of r's neighbors have sent messages *to* r\n for z in range( len(Nbs) ):\n #print messages[Nbs[z],r].getVarCount()\n if messages[ Nbs[z],r].getVarCount() == 0:\n allnbmp=0\n\n if allnbmp == 1:\n foundmatch=1\n break\n \n if foundmatch==1:\n #sys.stderr.write(\"found match!\\n\")\n i=r\n j=c\n break\n \n return (i,j)\n\n\ndef CliqueTreeCalibrate( P, isMax=False):\n \"\"\" this function performs sum-product or max-product algorithm for clique tree calibration.\n P is the CliqueTree object. isMax is a boolean flag that when set to True performs Max-Product\n instead of the default Sum-Product. The function returns a calibrated clique tree in which the\n values of the factors is set to final calibrated potentials.\n\n Once a tree is calibrated, in each clique (node) contains the marginal probability over the variables in\n its scope. We can compute the marginal probability of a variable X by choosing a clique that contains the\n variable of interest, and summing out non-query variables in the clique. 
See page 357 in Koller and Friedman\n\n B_i(C_i)= sum_{X-C_i} P_phi(X)\n\n The main advantage of clique tree calibration is that it facilitates the computation of posterior\n probabliity of all variables in the graphical model with an efficient number of steps. See pg 358\n of Koller and Friedman\n\n After calibration, each clique will contain the marginal (or max-mariginal, if isMax is set to True)\n\n \"\"\"\n \n np.set_printoptions(suppress=True)\n ctree_edges=P.getEdges()\n\n ctree_cliqueList=P.getNodeList()\n\n \"\"\" if max-sum, we work in log space \"\"\"\n if isMax == True:\n ctree_cliqueList= [ LogFactor (factor) for factor in ctree_cliqueList ]\n\n \n \n N=P.getNodeCount() #Ni is the total number of nodes (cliques) in cTree\n\n #dummyFactor=Factor( [], [], [], 'factor')\n #set up messsages to be passed\n #MESSAGES[i,j] represents the message going from clique i to clique j\n #MESSAGES will be a matrix of Factor objects\n MESSAGES=np.tile( Factor( [], [], [], 'factor'), (N,N))\n DUMMY=np.reshape( np.arange(N*N)+1, (N,N) )\n \n\n \"\"\"While there are ready cliques to pass messages between, keep passing\n messages. 
Use GetNextCliques to find cliques to pass messages between.\n Once you have clique i that is ready to send message to clique\n j, compute the message and put it in MESSAGES(i,j).\n Remember that you only need an upward pass and a downward pass.\"\"\"\n\n \"\"\" leaf nodes are ready to pass messages right away\n so we initialize MESSAGES with leaf message factors\n recall, a node is a leave if row sum is equal to 1\"\"\"\n for row in range(N):\n rowsum= np.sum( ctree_edges[row,:] )\n if rowsum ==1 :\n #Returns a tuple of arrays, one for each dimension, we want the first, hence the [0]\n leafnode=np.nonzero( ctree_edges[row,:] )[0].tolist()[0]\n #I discovered NumPy set operations http://docs.scipy.org/doc/numpy/reference/routines.set.html\n marginalize=np.setdiff1d( ctree_cliqueList[row].getVar(), ctree_cliqueList[leafnode].getVar() ).tolist()\n sepset=np.intersect1d( ctree_cliqueList[row].getVar(), ctree_cliqueList[leafnode].getVar() ).tolist()\n\n \"\"\" if isMax is false, this is sumproduct, so we do factor marginalization \"\"\"\n if isMax == 0:\n #MESSAGES(row,leafnode)=FactorMarginalization(P.cliqueList(row),marginalize);\n MESSAGES[row,leafnode]=FactorMarginalization(ctree_cliqueList[row], marginalize )\n if np.sum( MESSAGES[row,leafnode].getVal() ) != 1:\n newVal=MESSAGES[row,leafnode].getVal() / np.sum( MESSAGES[row,leafnode].getVal() )\n MESSAGES[row,leafnode].setVal(newVal)\n else:\n \"\"\" if isMax is true, this is max-marginalization\n don't normalize the value just yet\"\"\"\n MESSAGES[row,leafnode]=FactorMaxMarginalization( ctree_cliqueList[row], marginalize )\n\n\n \n \"\"\" now that the leaf messages are initialized, we begin with the rest of the clique tree\n now we do a single pass to arrive at the calibrated clique tree. 
We depend on\n GetNextCliques to figure out which nodes i,j pass messages to each other\"\"\"\n \n while True:\n (i,j)=getNextClique(P,MESSAGES)\n if sum ( [ i, j] ) == -2:\n break\n #print 'i: ', i, 'j: ', j\n \"\"\" similiar to above, we figure out the sepset and what variables to marginalize out\n between the two cliques\"\"\"\n marginalize=np.setdiff1d( ctree_cliqueList[i].getVar(), ctree_cliqueList[j].getVar() ).tolist()\n sepset=np.intersect1d( ctree_cliqueList[i].getVar(), ctree_cliqueList[j].getVar() ).tolist()\n\n \"\"\" find all the incoming neighbors, except j \"\"\"\n Nbs=np.nonzero( ctree_edges[:,i])[0] #returns a tuple ...\n Nbs_minusj=[ elem for elem in Nbs if elem != j ]\n #print 'Nbs_minusj: ', Nbs_minusj, ' [i]: ', [i]\n #see numpy for matlab users http://www.scipy.org/NumPy_for_Matlab_Users\n # these are incoming messages to the ith clique\n Nbsfactors=MESSAGES[np.ix_(Nbs_minusj, [i] )].flatten().tolist()\n #print DUMMY[np.ix_(Nbs_minusj, [i] )].flatten()\n #for f in Nbsfactors:\n #print f\n \"\"\" this is sum/product \"\"\"\n if isMax == 0:\n #print 'total number of Nbs factors: ', len(Nbsfactors)\n if len(Nbsfactors) == 1:\n Nbsproduct=FactorProduct( Nbsfactors[0], IdentityFactor(Nbsfactors[0]) )\n else:\n Nbsproduct=ComputeJointDistribution( Nbsfactors )\n #pdb.set_trace()\n #val=Nbsproduct.getVal()\n #rowcount=len(val)/3\n #print Nbsproduct.getVar()\n #print Nbsproduct.getCard()\n #print np.reshape( val, (rowcount,3))\n #now mulitply wiht the clique factor\n \n CliqueNbsproduct=FactorProduct( Nbsproduct, ctree_cliqueList[i] )\n \n CliqueMarginal= FactorMarginalization ( CliqueNbsproduct, marginalize )\n \n #normalize the marginal\n newVal=CliqueMarginal.getVal() / np.sum( CliqueMarginal.getVal() )\n\n CliqueMarginal.setVal( newVal )\n \n MESSAGES[i,j] = CliqueMarginal\n else:\n if len(Nbsfactors) == 1:\n Nbssum=Nbsfactors[0]\n else:\n Nbssum=reduce ( lambda x,y: FactorSum(x,y), Nbsfactors )\n CliqueNbsSum=FactorSum( Nbssum, 
ctree_cliqueList[i] )\n CliqueMarginal=FactorMaxMarginalization( CliqueNbsSum, marginalize )\n MESSAGES[i,j] = CliqueMarginal\n #print\n\n\n\n #######################################################################\n \"\"\" once out the while True loop, the clique tree has been calibrated\n here is where we compute final belifs (potentials) for the cliques and place them in \"\"\"\n\n for i in range ( len(ctree_cliqueList)):\n Nbs=np.nonzero( ctree_edges[:,i])[0]#returns a tuple\n Nbsfactors=MESSAGES[np.ix_(Nbs, [i])].flatten().tolist()\n\n if isMax == 0:\n if len(Nbsfactors) == 1:\n Nbsproduct=FactorProduct( Nbsfactors[0], IdentityFactor(Nbsfactors[0]) )\n else:\n Nbsproduct=ComputeJointDistribution ( Nbsfactors)\n \n CliqueNbsProduct=FactorProduct(Nbsproduct, ctree_cliqueList[i])\n #pdb.set_trace()\n ctree_cliqueList[i]=CliqueNbsProduct\n else:\n if len(Nbsfactors) == 1:\n Nbssum=Nbsfactors[0]\n else:\n Nbssum=reduce ( lambda x,y: FactorSum(x,y), Nbsfactors )\n CliqueNbsSum=FactorSum(Nbssum, ctree_cliqueList[i])\n ctree_cliqueList[i]=CliqueNbsSum\n \n P.setNodeList( ctree_cliqueList )\n #np.savetxt( 'numpy.cTree.edges.calibrated.txt',ctree_edges,fmt='%d', delimiter='\\t')\n \n #pdb.set_trace()\n #return P\n return (P, MESSAGES)\n #for k in range(len(ctree_cliqueList)):\n # print 'k: ', k\n # print ctree_cliqueList[k]\n #IndexToAssignment(1:prod(P.cliqueList(1).card), P.cliqueList(1).card)\n # I=np.arange(np.prod( ctree_cliqueList[k].getCard() ))\n # print IndexToAssignment( I, ctree_cliqueList[k].getCard() )\n # print \"==\"\n\n #return P\n\n\ndef CreatePrunedInitCtree(F,E=[]):\n \"\"\" 1. create cTree from list of factors F and evidence E\n 2. prune it\n 3. compute initial potential of the tree\n 4. 
return it\"\"\"\n\n cTree = createCliqueTree(F,E)\n prunedCTree=PruneTree( cTree )\n prunedCTree.incorporateEvidence()\n return CliqueTreeInitialPotential( prunedCTree )\n\n \n\ndef ComputeExactMarginalsBP( F, E=[], isMax=False, computeJoint=0):\n \"\"\" We take a list of Factor objects, observed Evidence E\n and returns marignal proabilities for the variables in the\n Bayesian network. If isMax is 1 it runs MAP inference ( *still need to\n do this *) otherwise it runs exact inference using Sum/Product algorithm.\n The ith element of the returned list represents the ith variable in the\n network and its marginal prob of the variable\n\n Note, we implicitly create, prune, initialize, and calibrate a clique tree\n constructed from the factor list F \"\"\"\n\n MARGINALS=[]\n #pdb.set_trace()\n P = CreatePrunedInitCtree(F,E)\n #G=nx.from_numpy_matrix( P.getEdges() )\n #nx.draw_shell(G)\n #plt.show()\n \n #plt.savefig('cliqueTree.png', bbox_inches=0)\n (P,MESSAGES) = CliqueTreeCalibrate(P,isMax)\n #pdb.set_trace()\n if computeJoint==1:\n jointDistribution=ComputeJointDistributionFromCalibratedCliqueTree(P, MESSAGES, isMax)\n else:\n jointDistribution=None\n #pdb.set_trace()\n #P = CliqueTreeCalibrate(P,isMax)\n cliqueList=P.getNodeList()\n \n fh=open('ExactMarginals.CliqueTree.log','a')\n for i in range(len(cliqueList)):\n out = \",\".join( map(str, cliqueList[i].getVar().tolist() )) \n outstring= \"node \" + str(i) + \":\\t\" + out +'\\n'\n fh.write(outstring)\n fh.write(\"\\n\")\n #np.savetxt( 'numpy.cTree.edges.calibrated.txt',P.getEdges(),fmt='%d', delimiter='\\t')\n np.savetxt('ExactMarginals.CliqueTree.log',P.getEdges(),fmt='%d', delimiter='\\t')\n \n \n \n \n \"\"\" get the list of unique variables \"\"\"\n V=getUniqueVar(F)\n \n\n for i in range ( len(V ) ):\n for j in range ( len(cliqueList ) ):\n if V[i] in cliqueList[j].getVar():\n marginalize=np.setdiff1d ( cliqueList[j].getVar(), V[i] ).tolist()\n if not marginalize:\n MARGINALS.append( cliqueList[j] )\n 
else:\n if isMax == 0:\n #mfactor=FactorMarginalization(P.cliqueList(j), marginalize);\n mfactor=FactorMarginalization( cliqueList[j], marginalize )\n newVal=mfactor.getVal() / np.sum( mfactor.getVal() )\n mfactor.setVal( newVal )\n MARGINALS.append ( mfactor )\n else:\n mfactor=FactorMaxMarginalization( cliqueList[j], marginalize )\n MARGINALS.append( mfactor )\n break\n\n \n return (MARGINALS,jointDistribution)\n\ndef ComputeJointDistributionFromCalibratedCliqueTree( P, MESSAGES, isMax=0):\n \n \"\"\" this is a function to attempt to compute the joint distribution from\n a calibrated clique tree (cTree). The arguments are: \n 1. The calibrated cTree, P, which is a CliqueTree object\n 2. The sepset beliefs are is the matrix MESSAGES\n The MESSAGES matrix is a matrix of Factor objects\n We can get the indices of the sepset messages\n from the edges of the clique tree. \n \n This function attempts to implement equation 10.10 in Koller and Friedman\n in section 10.2.3: A Calibrated Clique Tree as a Distribution\n \n P_\\Phi(X) = \\frac{ \\prod_{i \\in V_T} \\Beta_i(C_i) } { \\prod_{i-j} \\in E_T \\mu_i,j(S_i,j) }\n \n Basically A) multiply the clique beliefs \n B) multiply the sepset beliefs\n C) divide A/B \n \n \n \"\"\"\n cliqueFactors=P.getNodeList()\n \n \"\"\" if this is a max-marginal calibrated clique tree the values are in log space\n Just re-exponentiate them \"\"\"\n if isMax==1:\n cliqueFactors = [ ExpFactorNormalize(c) for c in cliqueFactors ]\n \n \"\"\" this is the numerator \"\"\"\n cliqueBeliefProducts=reduce(lambda x, y: FactorProduct(x,y), cliqueFactors)\n \n \"\"\" get the adjacency matrix of the clique tree \"\"\"\n adj_matrix=P.getEdges()\n \n \"\"\" get the nonzero indices, then compute the product of the sepset beliefs \"\"\"\n nonzero_rows=adj_matrix.nonzero()[0].tolist()\n nonzero_cols=adj_matrix.nonzero()[1].tolist()\n \n \n\n \n \n sepsetBeliefsFactors= [ MESSAGES[x,y] for (x,y) in zip(nonzero_rows, nonzero_cols) ] \n \n \"\"\" if this is 
a max-marginal calibrated clique tree the values are in log space\n Just re-exponentiate them \"\"\"\n if isMax == 1:\n sepsetBeliefsFactors = [ ExpFactorNormalize(c) for c in sepsetBeliefsFactors ]\n \n \n \n sepsetBeliefProducts= reduce( lambda x,y: FactorProduct(x,y), sepsetBeliefsFactors)\n \n \"\"\" the re-parameterization of the joint (clique tree invariant) \n divide the clique beliefs by the sepset messages \"\"\"\n jointDistrbution=FactorDiv(cliqueBeliefProducts, sepsetBeliefProducts)\n \n val=jointDistrbution.getVal()/np.sum( jointDistrbution.getVal() )\n jointDistrbution.setVal( val )\n \n return jointDistrbution\n \n \n \n \n \n \n \n \n" }, { "alpha_fraction": 0.6553258895874023, "alphanum_fraction": 0.6626390814781189, "avg_line_length": 47.35384750366211, "blob_id": "052a0afcbd86e4459fa3a6420618a716e35992f8", "content_id": "ed61124800b8bd503bbb5d69a2e2582775581716", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3145, "license_type": "no_license", "max_line_length": 194, "num_lines": 65, "path": "/GeneticNetworkFactory.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "from Factor import *\nfrom FactorOperations import *\nfrom PedigreeFactors import *\nimport itertools\nimport numpy as np\n\"\"\"\" Still not sure how this is going to work\n This class is a factory for generating a genetic network\n If we consider each location in the genome independent\n we generate a new network for each position along an interval.\n A network is a collection of factors, so we return a python\n list of Pedigree factors (either GenotypeAlleleFreqFactor for founders\n or GenotypeGivenParentsFactor for non-founders. 
Each phenotype is conditionally\n independent given its genotoype, so each member of the pedigree has a\n PhenotypeGivenGenotypeFactor\"\"\"\n\nclass GeneticNetworkFactory(object):\n \n\n def __init__(self, pedfile, alphaList, allelefreq, chrom, position):\n #parse pedfile\n self.alphaList=alphaList\n self.allelefreq=allelefreq\n self.totalAlleles=len(allelefreq)\n self.chrom=chrom\n self.pos=position\n\n self.pedigree=Pedfile(pedfile)\n self.pedigree.parsePedfile()\n self.pedlist=self.pedigree.getPedList()\n self.pedids=self.pedigree.returnIndivids()\n #print self.pedids\n\n #list of factors that will comprise the Genetic network\n self.totalFactors=self.pedigree.getTotalSize() * 2\n self.factorList=self.totalFactors*[None]\n\n \n def constructNetwork(self):\n totalPeople=self.pedigree.getTotalSize()\n for i in range( totalPeople ):\n \n \n if self.pedlist[i].isFounder():\n #print self.pedlist[i].getid()\n self.factorList[i]=GenotypeAlleleFreqFactor(self.allelefreq,i+1,self.pedlist[i].getid() + \" genotype \")\n #self.factorList[i]=GenotypeAlleleFreqFactor(self.allelefreq,self.pedlist[i].getid(),self.pedlist[i].getid())\n #factorList(i)=genotypeGivenAlleleFreqsFactor(alleleFreqs,i);\n else:\n #3print self.pedlist[i].getParents(), self.pedlist[i].getid()\n #GenotypeGivenParentsFactor(2,\"bart\",\"homer\",\"marge\",\"\"\"Bart | Homer, Marge \"\"\")\n #self.factorList[i]=GenotypeGivenParentsFactor(self.totalAlleles, self.pedlist[i].getid(), self.pedlist[i].getParents()[0], self.pedlist[i].getParents()[1], \"child|Father,Child\")\n parent1Index=self.pedids.index( self.pedlist[i].getParents()[0] )\n parent2Index=self.pedids.index( self.pedlist[i].getParents()[1] )\n child=self.pedlist[i].getid()\n parent1name=self.pedlist[parent1Index].getid()\n parent2name=self.pedlist[parent2Index].getid()\n name=child+\" genotype |\"+parent1name+\",\"+parent2name\n self.factorList[i]=GenotypeGivenParentsFactor(self.totalAlleles, i+1, parent1Index+1 , parent2Index+1 , name)\n \n 
name=self.pedlist[i].getid()+\" phenotype | \" + self.pedlist[i].getid() + \" genotype\"\n\n self.factorList[i+totalPeople]=PhenotypeGivenGenotypeFactor(self.alphaList,i+totalPeople+1,i+1, name )\n\n def getFactorList(self):\n return self.factorList\n\n\n" }, { "alpha_fraction": 0.69027179479599, "alphanum_fraction": 0.6959942579269409, "avg_line_length": 26.41176414489746, "blob_id": "9c2992617817dbf6c1fbdb56f4e689d9ea99368e", "content_id": "10d1a9842a0bf1c1fdead15c69d0ccf7db82f50d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1398, "license_type": "no_license", "max_line_length": 114, "num_lines": 51, "path": "/PythonNotebooks/ocr_MAP.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "from Factor import *\nfrom PGMcommon import *\nfrom CliqueTree import *\nfrom CliqueTreeOperations import *\nfrom FactorOperations import *\nimport scipy.io as sio\nimport numpy as np\nimport pprint\nimport pdb\nimport matplotlib.pyplot as plt\nimport networkx as nx\nmatfile='/Users/amit/BC_Classes/PGM/Prog4/PA4Sample.mat'\nmat_contents=sio.loadmat(matfile)\nmat_struct=mat_contents['OCRNetworkToRun']\nval=mat_struct\nfactorList=[]\n\nALPHABET=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n\nfor data in val:\n \n (var, card, values)=data[0]\n f= Factor( var[0].tolist(), card[0].tolist(), values[0].tolist(), 'factor' )\n factorList.append( f )\n\n#MARGINALS= ComputeExactMarginalsBP( factorList, [], 1 )\n#MAPAssignment=MaxDecoding( MARGINALS )\n#print \"\".join( [ALPHABET[idx] for idx in MAPAssignment] )\n\nMARGINALS= ComputeExactMarginalsBP( factorList, [], 1 )\nfor m in MARGINALS:\n log_val= m.getVal()\n prob_val_normalized=np.log( lognormalize( log_val ) )\n m.setVal(prob_val_normalized)\n\n\nMAPAssignment=MaxDecoding( MARGINALS )\nprint \"\".join( [ALPHABET[idx] for idx in MAPAssignment] )\n\nfor m in MARGINALS:\n print np.sum( 
lognormalize(m.getVal() ) )\n\n\n#V=getUniqueVar(factorList)\n#print 'unique variables:'\n#print V\n\n#cTree=CreatePrunedInitCtree(factorList)\n#G=nx.from_numpy_matrix( cTree.getEdges() )\n#nx.draw_shell(G)\n#plt.show()\n" }, { "alpha_fraction": 0.5952702760696411, "alphanum_fraction": 0.5993243455886841, "avg_line_length": 27.941177368164062, "blob_id": "e6d8e78b6e65a7eea8f3f483a8ce9e82cf7a1392", "content_id": "3217b3e442a436bf70fc4fe7ed9919eb6a80733a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "no_license", "max_line_length": 121, "num_lines": 51, "path": "/Factor.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\nclass Factor(object):\n\n \"\"\" Represents a factor in a PGM. A factor has variable scope, cardinality, value, and potentially a name.\n A factor's values (which can potentially be multi-dimensional table, are represented as NumPy 1d-arrays.\n See Chapter10 Friedman pg. 358 and Koller for more info. 
\"\"\"\n\n\n\n def __init__(self, var=[], card=[], val=[], name= 'None'):\n \"\"\" a factor has list of variables, each with a cardinality, and for each possible assignment to its variable(s),\n a position in the val array.\"\"\"\n self.var= np.array(var)\n self.card=np.array(card)\n self.val=np.array(val)\n self.name=name\n\n def __str__(self):\n varstring= \" \".join ( map(str, self.var) )\n cardstring=\" \".join ( map(str, self.card) )\n valstring= \" \".join( map(str, self.val))\n return \"\\n\".join( [ 'name: ' + self.name,'var: '+ varstring, 'card: '+ cardstring, 'val: ' + valstring])\n\n\n def setVar(self, var):\n self.var=np.array(var)\n\n def getVar(self):\n return self.var\n\n def getVarCount(self):\n return len( self.var.tolist() )\n\n def setVal(self, val):\n self.val=np.array(val)\n\n def getVal(self):\n return self.val\n\n def setCard(self,card):\n self.card=np.array(card)\n\n def getCard(self):\n return self.card\n\n def getName(self):\n return self.name\n\n def setName(self, name):\n self.name=name\n\n\n\n\n" }, { "alpha_fraction": 0.6409705877304077, "alphanum_fraction": 0.6484430432319641, "avg_line_length": 39.885555267333984, "blob_id": "80f59e493196baae8ffed0576e2078272e0110c1", "content_id": "cdf59801c73aa21c25d1fb91b720b1549a6e36a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36802, "license_type": "no_license", "max_line_length": 174, "num_lines": 900, "path": "/FactorOperations.py", "repo_name": "indapa/pgmPy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom Factor import *\nimport numpy as np\nfrom PGMcommon import *\nimport sys\nimport itertools\nimport common\nimport pdb\n\ndef IndexToAssignment( I, D):\n\n \"\"\" given and index I (a row vector representing the indices of values a factor object's val field\n and D, an array representing the cadinality of variables in a factor object, this function produces\n a matrix of assignments, one assignment per row. 
See https://github.com/indapa/PGM/blob/master/Prog1/IndexToAssignment.m \"\"\"\n\n a=np.reshape ( np.arange(np.prod(D)).repeat(len(D)), (np.prod(D),len(D)))\n \n\n b=tmp=list( D[:-1] )\n tmp.insert(0,1)\n tmp =np.cumprod ( np.array (tmp) )\n b=np.tile( np.cumprod(b), (len(I), 1))\n #print b\n\n #print np.floor ( a /b )\n c=np.tile ( D, ( len(I), 1) )\n\n assignment = np.mod ( np.floor( a/b), c) +1\n return assignment\n\n\ndef AssignmentToIndex ( A, D):\n \"\"\" I = AssignmentToIndex(A, D) converts an assignment, A, over variables\n with cardinality D to an index into the .val vector for a factor.\n If A is a matrix then the function converts each row of A to an index.\n See https://github.com/indapa/PGM/blob/master/Prog1/AssignmentToIndex.m \"\"\"\n \n D=D.flatten(0) #turn array into vector (note that this forces a copy), see http://www.scipy.org/NumPy_for_Matlab_Users#head-fd74115e6798fbf3a628094a55d1cb2b2b5cdd3c\n I=np.array( [] )\n (nrowA,ncolA)=np.shape(A)\n\n if nrowA== 1 or ncolA ==1: #if assginments are 1 row or 1 col\n #sys.stderr.write(\"if block ...\\n\")\n b=tmp=list( D[:-1] )\n tmp.insert(0,1)\n \n tmp =np.cumprod ( np.array (tmp) )\n tmp=(np.array(np.matrix(tmp)))\n #print \"tmp: \", tmp\n \n a_flat=np.array ( np.matrix( A.flatten(0) ).transpose() )\n #print \"a flat: \", a_flat\n I= ( tmp * (a_flat-1) ) + 1\n return I\n \n\n else:\n #sys.stderr.write(\"else block ...\\n\")\n b=tmp=list( D[:-1] )\n tmp.insert(0,1)\n tmp =np.cumprod ( np.array (tmp) )\n tmp = np.tile( tmp, (nrowA,1) )\n #print tmp\n #print (A-1)\n I= np.sum( np.multiply(tmp, (A-1)), 1) + 1\n\n \n return np.array( np.matrix( I ).transpose() )\n\n\ndef SetValueOfAssignment( F, A, v, Vorder=None):\n \"\"\" % SetValueOfAssignment Sets the value of a variable assignment in a factor.\n%\n% F = SetValueOfAssignment(F, A, v) sets the value of a variable assignment,\n% A, in factor F to v. 
The order of the variables in A are assumed to be the\n% same as the order in F.var.\n%\n% F = SetValueOfAssignment(F, A, v, VO) sets the value of a variable\n% assignment, A, in factor F to v. The order of the variables in A are given\n% by the vector VO. See https://github.com/indapa/PGM/blob/master/Prog1/SetValueOfAssignment.m \"\"\"\n\n \n\n if Vorder == None:\n indx=AssignmentToIndex( A, F.getCard() )\n else:\n sys.stderr.write(\"assumes the order of variables in A are the sayme as in F.var ...\\n\")\n pass\n\n #http://stackoverflow.com/a/5183720, How to make List from Numpy Matrix in Python\n #http://stackoverflow.com/a/8373103, numpy function to set elements of array to a value given a list of indices\n indices=np.array(indx-1).flatten().tolist()\n zeros=np.zeros(len(A))\n zeros[indices]=v\n F.setVal( zeros.tolist() )\n\ndef GetValueOfAssignment( F, A, Vorder = None ):\n \"\"\" % GetValueOfAssignment Gets the value of a variable assignment in a factor.\n%\n% v = GetValueOfAssignment(F, A) returns the value of a variable assignment,\n% A, in factor F. The order of the variables in A are assumed to be the\n% same as the order in F.var.\n%\n% v = GetValueOfAssignment(F, A, VO) gets the value of a variable assignment,\n% A, in factor F. The order of the variables in A are given by the vector VO. 
See https://github.com/indapa/PGM/blob/master/Prog1/GetValueOfAssignment.m \"\"\"\n\n if Vorder == None:\n indx= AssignmentToIndex ( A, F.getCard() )\n else:\n sys.stderr.write(\"The order of the variables in A are assumed to be the same as the order in F var\\n\")\n pass\n #pdb.set_trace()\n indices=np.array(indx-1).flatten().tolist()\n return np.array ( np.matrix ( F.getVal()[indices] ))\n\ndef FactorProduct ( A, B):\n \"\"\" FactorProduct Computes the product of two factors.\n% C = FactorProduct(A,B) computes the product between two factors, A and B,\n% where each factor is defined over a set of variables with given dimension.\n% The factor data structure has the following fields:\n% .var Vector of variables in the factor, e.g. [1 2 3]\n% .card Vector of cardinalities corresponding to .var, e.g. [2 2 2]\n% .val Value table of size prod(.card)\n%\n% See also FactorMarginalization IndexToAssignment,\n% AssignmentToIndex, and https://github.com/indapa/PGM/blob/master/Prog1/FactorProduct.m \"\"\"\n\n #print \"A: \", A\n #print \"====\"\n #print \"B: \", B\n C=Factor()\n\n #check for empty factors\n if len( A.getVar() ) == 0 :\n sys.stderr.write(\"A factor is empty!\\n\")\n return B\n if len( B.getVar() ) == 0:\n sys.stderr.write(\"B factor is empty!\\n\")\n return A\n\n\n #check of variables that in both A and B have the same cardinality\n #print 'A.getVar(): ', A.getVar()\n #print 'B.getVar(): ',B.getVar()\n #setA= set( A.getVar() )\n #setB= set( B.getVar() )\n #intersect=np.array( list( setA.intersection(setB)))\n intersect=np.intersect1d( A.getVar(), B.getVar() ).tolist()\n #print \"Intersection of variables in FactorProduct \", intersect\n #print \"A var: \", A.getVar()\n #print \"B var: \", B.getVar()\n\n #if the intersection of variables in the two factors\n #is non-zero, then make sure they have the same cardinality\n if len(intersect) > 0:\n #iA=np.nonzero(intersect - A.getVar()==0)[0].tolist() # see this http://stackoverflow.com/a/432146, return the index 
of something in an array?\n iA=getIndex( A.getVar(), intersect )\n #print \"iA: \", iA\n #iB=np.nonzero(intersect - B.getVar()==0)[0].tolist()\n iB = getIndex ( B.getVar(), intersect )\n #print \"iB: \", iB\n\n # check to see if any of the comparisons in the array resulting from of a.getCard()[iA] == b.getCard()[iB] \n # are all False. If so print an error and exit\n if len( np.where( A.getCard()[iA].all() == B.getCard()[iB].all() ==False)[0].tolist() ) > 0:\n sys.stderr.write(\"dimensionality mismatch in factors!\\n\")\n sys.exit(1)\n\n #now set the variables of C to the union of variables in factors A and B\n #print 'setA ' ,setA\n #print 'setB ', setB\n #print list( setA.union(setB) )\n C.setVar( np.union1d ( A.getVar(), B.getVar() ).tolist() )\n #C.setVar ( list( setA.union(setB) ) )\n mapA=isMember(A.getVar(), C.getVar() )\n mapB=isMember(B.getVar(), C.getVar() )\n\n \n\n #Set the cardinality of variables in C\n C.setCard( np.zeros( len(C.getVar())).tolist() )\n C.getCard()[mapA]=A.getCard()\n C.getCard()[mapB]=B.getCard()\n\n #intitialize the values of the factor C to be zero\n C.setVal( np.zeros(np.prod(C.getCard())).tolist() )\n\n #some helper indices to tell what indices of A and B values to multiply\n assignments=IndexToAssignment( np.arange(np.prod(C.getCard())), C.getCard() ) #get the assignment of values of C\n indxA=AssignmentToIndex( assignments[:,mapA], A.getCard())-1 # re-arrange the assignment of C, to what it would be in factor A\n indxB=AssignmentToIndex( assignments[:,mapB], B.getCard())-1 # re-arange the assignment of C to what it would be in factorB\n\n \n\n c_val=A.getVal()[indxA.flatten().tolist()] * B.getVal()[indxB.flatten().tolist()] #now that we have the index into A.val and B.val vector, multiply them to factor product\n C.setVal ( c_val.tolist() )\n\n return C\n\ndef FactorMarginalization(A,V):\n \"\"\" FactorMarginalization Sums given variables out of a factor.\n B = FactorMarginalization(A,V) computes the factor with the variables\n 
in V summed out. The factor data structure has the following fields:\n .var Vector of variables in the factor, e.g. [1 2 3]\n .card Vector of cardinalities corresponding to .var, e.g. [2 2 2]\n .val Value table of size prod(.card)\n\n The resultant factor should have at least one variable remaining or this\n function will throw an error. See also FactorProduct, IndexToAssignment , and AssignmentToIndex\n Based on matlab code found here: https://github.com/indapa/PGM/blob/master/Prog1/FactorMarginalization.m \"\"\"\n\n #the resulting factor after marginalizing out variables in python list V that are in \n #the factor A\n B=Factor()\n\n #check for empy factor or variable list\n if len( A.getVar() ) == 0 or len(V) == 0:\n return A\n\n #construct the variables of the marginalized factor by \n #computing the set difference between A.var and V\n #These variables in the difference set will be the scope of the new factor\n setA=set( A.getVar() )\n setV=set(V)\n Bvar=np.array( list( setA.difference(setV)))\n mapB=isMember(Bvar, A.getVar()) #indices of the variables of the new factor in the original factor A\n #print mapB, Bvar\n\n #check to see if the new factor has empty scope\n if len(Bvar) == 0:\n sys.stderr.write(\"FactorMarginalization:Error, resultant factor has empty scope...\\n\")\n return None\n #set the marginalized factor's variable scope and cardinality\n B.setVar( Bvar.tolist() )\n B.setCard( A.getCard()[mapB] )\n B.setVal( np.zeros(np.prod(B.getCard())).tolist() )\n\n #compute some helper indices\n assignments=IndexToAssignment ( np.arange(np.prod(A.getCard()) ), A.getCard() )\n #indxB tells which values in A to sum together when marginalizing out the variable(s) in B\n indxB=AssignmentToIndex( assignments[:,mapB], B.getCard())-1\n\n #accum is a numpy implementation of matlab accumarray\n #accumarray sums data in each group\n #here the group(s) are defined in indxB\n #indxB is a map to tell which value in A.val to map the sum to\n #see 
http://blogs.mathworks.com/loren/2008/02/20/under-appreciated-accumarray/\n marginal_vals=accum(indxB, A.getVal() )\n \n #set the marginal values to the new factor with teh variable(s) in V summed(marginalized) out\n B.setVal( marginal_vals.tolist() )\n return B\n\n\n\ndef ObserveEvidence (INPUTS, EVIDENCE):\n\n \"\"\" ObserveEvidence Modify a vector of factors given some evidence.\n F = ObserveEvidence(INPUTS, EVIDENCE) sets all entries in the vector of factors,INPUTS,\n that are not consistent with the evidence, E, to zero. F is a vector of\n factors, each a data structure with the following fields:\n .var Vector of variables in the factor, e.g. [1 2 3]\n .card Vector of cardinalities corresponding to .var, e.g. [2 2 2]\n .val Value table of size prod(.card)\n EVIDENCE is an N-by-2 matrix, where each row consists of a variable/value pair.\n Variables are in the first column and values are in the second column. \"\"\"\n (nrows, ncols)=np.shape(EVIDENCE)\n #total_factors=len(INPUTS)\n #iterate through evidence\n for i in range(nrows):\n variable=EVIDENCE[i,0]\n value=EVIDENCE[i,1]\n #print 'var: ', variable, 'value: ', value\n if int(value) == 0:\n print \"Evidence is not set for variable: ', variable, ' in evidence matrix.\\n\"\n continue\n\n for factor in INPUTS:\n #the following returns a list\n indx=np.where( factor.getVar() == variable )[0].tolist()\n if indx: #if the indx is not empty, it contains the index value of the evidence variable in factor.val array\n indx=indx[0]\n \n if value > factor.getCard()[indx] or value < 0:\n sys.stderr.write(\"invalid evidene for variable X_'\" + str(variable) + \" = \" + str(value) + \"\\n\")\n sys.exit(1)\n\n #get the assignments of variables for the factor\n assignments=IndexToAssignment( np.arange(np.prod( factor.getCard() )), factor.getCard() )\n # now get the indices in the assignments that don't agree with the observed value (evidence)\n mask=np.where( assignments[:,indx] != value )[0].tolist()\n #we are going to 
update the val array for the current factor\n newvals=factor.getVal()\n #set the mask indices to zero and reset the val array of the factor\n newvals[mask]=0\n factor.setVal( newvals.tolist() )\n\n #now check to see the validity of the updated values of the factor\n #given the observed evidence. We cannot have all zero values for the factor!\n zeroIndices=np.where ( factor.getVal() == 0)[0].tolist()\n if len(zeroIndices) == len (factor.getVal() ):\n sys.stderr.write(\"All variable values are zero, which is not possible.\\n\")\n return INPUTS\n\ndef ComputeJointDistribution(INPUTS):\n \"\"\" ComputeJointDistribution Computes the joint distribution defined by a set of given factors\n\n Joint = ComputeJointDistribution(INPUTS) computes the joint distribution\n defined by a set of given factors\n\n Joint is a factor that encapsulates the joint distribution given by INPUTS\n INPUTS is a vector of Factor objects containing the factors defining the distribution\n\n \"\"\"\n\n totalFactors = len(INPUTS)\n #check for empty list of INPUTS\n\n if totalFactors== 0:\n sys.stderr.write(\"Empty factor list given as input\\n\")\n return Factor( [], [], [] )\n \n else:\n # see http://docs.python.org/library/functions.html#reduce for description of Python reduce function\n return reduce(lambda x, y: FactorProduct(x,y), INPUTS)\n\n\ndef ComputeMarginal(V, F, E):\n \"\"\"\n ComputeMarginal Computes the marginal over a set of given variables\n M = ComputeMarginal(V, F, E) computes the marginal over variables V\n in the distribution induced by the set of factors F, given evidence E\n\n M is a factor containing the marginal over variables V\n\n V is a vector containing the variables in the marginal e.g. [1 2 3] for X_1, X_2 and X_3.\n i.e. 
a result of FactorMarginalization\n\n F is a vector of factors (struct array) containing the factors\n defining the distribution\n\n E is an N-by-2 matrix, each row being a variable/value pair.\n Variables are in the first column and values are in the second column.\n If there is no evidence, pass in the empty matrix [] for E.\n\n \"\"\"\n totalFactors=len(F)\n #reshape a 1d array to 1 x ncol array\n #since ObserveEvidence requires Nx2 array, we reshape to a 2 column array\n #see http://stackoverflow.com/a/12576163 for reshaping 1d array to 2d array\n EVIDENCE= np.reshape( np.array ( E ), (-1,2) )\n #print np.shape(EVIDENCE)\n \n if totalFactors == 0:\n sys.stderr.write(\"empty factor list given as input.\\n\")\n return Factor( [], [], [])\n # union of all variables in list of factors F\n variableList=[] # a list of of lists, where each element is a list containing the variables of the factor in F\n for factor in F:\n var=factor.getVar().tolist()\n variableList.append( var )\n\n #get the union of variables across all the factor in F\n #see this http://stackoverflow.com/a/2151553, Pythonic Way to Create Union of All Values Contained in Multiple Lists\n union_variables = set().union(*variableList)\n #print union_variables\n #v contains the variables not in the list of variables in the marginal\n v=list( union_variables.difference(V) )\n \n # compute the joint distribution, but then reduce it, given the evidence\n # ComputeJointDistribution returns a factor, but ObserveEvidence expects a list\n # of factors as the first argument, so hence the need for brackets [ ]\n # ObserveEvidence returns a list, but we want the first element so thats why the [0]\n jointE= ObserveEvidence ( [ComputeJointDistribution ( F )], EVIDENCE )[0]\n\n #now we need to re-normaize the joint, since observe evidence doesn't do it for us\n jointE_normalizedVal = jointE.getVal()/np.sum( jointE.getVal() )\n jointE.setVal( jointE_normalizedVal.tolist() )\n\n return FactorMarginalization ( jointE, 
v)\n \n\ndef IdentityFactor( F ):\n return Factor ( F.getVar().tolist(), F.getCard().tolist(), np.ones( np.prod( F.getCard() ) ), F.getName()+ '_identity' )\n\n\n\ndef SumProductEliminateVar(z, factorList):\n\n \"\"\" this is a non-graph based sum-product variable elimination function\n z is a variable to eliminate\n pass in a list of factors\n\n 1. figure out which factor contain z in their variable scope\n 2. figure out which factors don't contain z in their scope\n 3. mulitply in all factors that have z\n 4. sum out z (marginalize) and return new factor with variable z eliminated\"\"\"\n\n useFactors = []# list of factors that contains the variable Z\n unusedFactors=[] #list of factors that don't contain variable Z\n scope = []\n\n \"\"\"get a list containining the index in self.factorLlist of factors\n that contain the variable Z to be eliminated\n get the scope of variables from the factors that contain variable Z \"\"\"\n for fi in factorList:\n if z in fi.getVar().tolist():\n useFactors.append(fi)#the ith factor is being currently involved in elimination\n scope=list(set.union(set(scope), fi.getVar().tolist() ))\n else:\n unusedFactors.append( fi )\n\n #for f in useFactors:\n # print 'useFactor: ', f\n #print '==='\n #for f in unusedFactors:\n # print 'unusedFactor: ', f\n\n psiFactor= ComputeJointDistribution ( useFactors )\n tauFactor=FactorMarginalization( psiFactor,[z] )\n\n #print 'psiFactor: ', psiFactor\n #print 'tauFactor: ', tauFactor\n return unusedFactors + [ tauFactor ]\n\ndef SumProductVE ( Z, F ):\n\n \"\"\" A wrapper function for SumProductEliminateVar\n sum-product variable elimination based on pseudocode algorithm 9.1 in Koller and Friedman\n We are giving a list of variables to eliminate in Z (in the order we want to\n elimiinate them) and a list of factors F\n eliminate each one getting getting the marginal distribution of the last variable in the list\n Z. 
\"\"\"\n\n for z in Z:\n F=SumProductEliminateVar(z, F)\n return reduce(lambda x, y: FactorProduct(x,y), F)\n\n\ndef FactorMaxMarginalization( A, V ):\n \"\"\" computes the factor with the variables in V *maxed* out.\n The resulting factor will have all the variables in A minus\n those variables in V. This is quite similiar to FactorMarginalization, but rather then summing out variables in V\n we take the max. In the code, this translates passing np.max as the function to accum\n See section 13.2 in Koller and Friedman for more information\"\"\"\n\n B=Factor()\n #check for empy factor or variable list\n if len( A.getVar() ) == 0 or len(V) == 0:\n return A\n Bvar=np.setdiff1d( A.getVar(), V)\n mapB=isMember(Bvar, A.getVar())\n\n if len(Bvar) == 0:\n sys.stderr.write(\"FactorMaxMarginalization: Error, resultant factor has empty scope...\\n\")\n return np.max (A.getVal() )\n #set the marginalized factor's variable scope and cardinality\n B.setVar( Bvar.tolist() )\n B.setCard( A.getCard()[mapB] )\n B.setVal( np.zeros(np.prod(B.getCard())).tolist() )\n\n #compute some helper indices\n assignments=IndexToAssignment ( np.arange(np.prod(A.getCard()) ), A.getCard() )\n #indxB tells which values in A to sum together when marginalizing out the variable(s) in B\n indxB=AssignmentToIndex( assignments[:,mapB], B.getCard())-1\n\n #here we pass in the function np.max\n #NumPy and Python are awesome\n max_vals=accum(indxB, A.getVal(), np.max )\n B.setVal( max_vals.tolist() )\n\n return B\n\n\ndef MaxProductEliminateVar(z, factorList):\n\n \"\"\" this is a non-graph based MAX-product variable elimination function\n z is a variable to eliminate\n pass in a list of factors\n\n 1. figure out which factor contain z in their variable scope\n 2. figure out which factors don't contain z in their scope\n 3. mulitply in all factors that have z\n 4. 
max marginalize out z and return new factor with variable z eliminated\"\"\"\n\n useFactors = []# list of factors that contains the variable Z\n unusedFactors=[] #list of factors that don't contain variable Z\n scope = []\n\n \"\"\"get a list containining the index in self.factorLlist of factors\n that contain the variable Z to be eliminated\n get the scope of variables from the factors that contain variable Z \"\"\"\n for fi in factorList:\n if z in fi.getVar().tolist():\n useFactors.append(fi)#the ith factor is being currently involved in elimination\n scope=list(set.union(set(scope), fi.getVar().tolist() ))\n else:\n unusedFactors.append( fi )\n\n \n \"\"\" psiFactor is an intermediate factor, prior to max-marginalization \"\"\"\n psiFactor= ComputeJointDistribution ( useFactors )\n tauFactor=FactorMaxMarginalization( psiFactor,[z] )\n\n \n \"\"\" we return tuple consisting of\n 1. a list factors that are unused, plus the result of max-marginal\n such that the variable z is not eliminated from the list of factors remaining.\n 2. For traceback, we return the intermediate factor generated in the process of eliminating\n variable z. \"\"\"\n return unusedFactors + [ tauFactor ], psiFactor\n\ndef TracebackMAP(FI, Z):\n \"\"\"We are back-tracing to the most probable assingments here ...\n See psuedo-code in Koller and Friedman page 557\n\n In order to return the most probable assignment from MaxProductVE\n we take in a list of intermediate factors, FI, that were generated in the process\n of MaxProductVE. Z is the same elimination ordering used as MaxProductVE.\n We traceback our steps by iterating in reverse order the elimination ordering Z.\n\n Following arguments section 13.2.2 page 558 in Koller and Friedman, as one eliminates\n variables you cannot determine their maximizing value. But you can compute their 'conditional'\n maximizing value - their max value given the other variables not eliminate yet. 
Once\n the last variable is eliminated, we can traceback to get the maximzing value of the remaining\n variables. Hence the reason for iterating thru the elimination ordering Z in reverse order\n \n Returns a python dictionary with key: variable value: variable assignment in the MAP\"\"\"\n\n z_i=Z[-1]\n f_i=FI[-1]\n Z=Z[:-1]\n FI=FI[:-1]\n\n #print 'z_i:', z_i\n #print 'f_i:', f_i\n\n values=f_i.getVal().tolist()\n fidx= IndexToAssignment( np.arange( np.prod( f_i.getCard() ) ), f_i.getCard() )\n maxidx=values.index(max(values))\n \n\n maxed_vars={} #key variable, value: max assignment value\n\n #print 'variable: ', z_i\n #print 'max value: ', fidx.flatten()[maxidx]\n maxed_vars[z_i]=int(fidx.flatten()[maxidx])\n #print maxed_vars\n #print\n for (z, f) in itertools.izip( reversed(Z), reversed(FI) ):\n #print z\n #print f\n #print 'setdiff: ', np.setdiff1d( f_i.getVar(), [z]).tolist()\n \n variables=np.setdiff1d( f_i.getVar(), [z]).tolist()\n evidence=[ [v, maxed_vars[v] ] for v in variables]\n #print 'Evidence: ',evidence\n f=ObserveEvidence( [f], np.matrix(evidence) )[0]\n #print f\n values=f.getVal().tolist()\n fidx= IndexToAssignment( np.arange( np.prod( f.getCard() ) ), f.getCard() )\n #print fidx\n #print max(values)\n maxidx=values.index(max(values))\n\n maxed_vars[z]=int(fidx.flatten()[maxidx])\n #print 'variable: ', z\n #print 'max value: ', fidx.flatten()[maxidx]\n \n #print\n #print maxed_vars\n return maxed_vars\n\n\n\ndef MaxDecoding ( F ):\n \"\"\" F is a list of max marginal factors passed in. 
The factors have a variable scope over a single variable only\n So no backtracing is inovlved, we just get the index of the highest number in the value array.\n The code here is based on https://github.com/indapa/PGM/blob/master/Prog4/MaxDecoding.m \"\"\"\n ASSIGNMENTS=[]\n for f in F:\n values=f.getVal().tolist()\n ASSIGNMENTS.append ( values.index( max(values) ) )\n return ASSIGNMENTS\n \ndef MaxDecodingNonUniq ( F ):\n \"\"\" F is a list of max marginal factors passed in. We don't assume that there is a unique\n max value. So we get the indices of the non-uniq max value as a tuple and add it to \"\"\"\n ASSIGNMENTS=[]\n for f in F:\n values=f.getVal().tolist()\n maxvalue=max(values)\n \"\"\" if the maxvalue is duplicated, we get the indices of where it resides in the value array \"\"\"\n if common.isMaxDuplicated(values):\n dup_indices_list=[dup for dup in sorted(common.list_duplicates(values)) ]\n dup_values= [ x for (x, y) in dup_indices_list ]\n dup_indices= [ y for (x, y) in dup_indices_list ]\n non_uniq_max_indices=tuple(dup_indices [ dup_values.index(maxvalue) ])\n ASSIGNMENTS.append ( non_uniq_max_indices )\n else:\n ASSIGNMENTS.append( values.index(maxvalue))\n return ASSIGNMENTS\n\n\ndef posterior_genotypes_values(factorList, ALPHABET,samplenames,bedstring,fh):\n \"\"\" given the factorlist of posterior marginals, the genotype alphabet, samplenames,bedstring position,\n and prettybase file handle, print to file the posterior marginals for all 10 possibel genotypes\n for each sample. 
\"\"\"\n genotype_factors=factorList[0:len(samplenames)]\n sample_factorObj_zip=zip(samplenames, genotype_factors)\n #print bedstring\n for sample, f in sample_factorObj_zip:\n #print sample, \": \"\n #values=f.getVal().tolist()\n \n #prob_val_normalized=( lognormalize( f.getVal() ) )\n prob_val_normalized=f.getVal()/np.sum(f.getVal())\n #print sample\n #val=f.getVal()\n #print np.sum(val)\n #print val/np.sum(val)\n #pdb.set_trace()\n #print prob_val_normalized.tolist()\n #genotype_probZip=zip(ALPHABET,values)\n posteriors=[]\n #print prob_val_normalized.tolist()\n for posterior_val in prob_val_normalized.tolist():\n #for posterior_val in values:\n posteriors.append(str(posterior_val))\n #posteriors.append(str(round(posterior_val,5) ))\n #print posteriors\n gstring=\"\\t\".join(posteriors)\n #print gstring\n outstring=\"\\t\".join([bedstring, sample,gstring])\n \n fh.write(outstring + \"\\n\")\n \n\n \ndef MaxProductVE ( Z, F ):\n\n \"\"\" A wrapper function for MaxProductEliminateVar\n sum-product variable elimination based on pseudocode algorithm 9.1 in Koller and Friedman\n We are giving a list of variables to eliminate in Z (in the order we want to\n elimiinate them) and a list of factors F\n eliminate each one getting getting the marginal distribution of the last variable in the list\n Z.\n\n Returns the probabliity of the MAP configuration as well as the variable assignments of the MAP configuration\"\"\"\n intermediateMaxFactors=[]\n for z in Z:\n (F, intermediateFactor)=MaxProductEliminateVar(z, F)\n intermediateMaxFactors.append ( intermediateFactor )\n \n #intermediateMaxFactors.append ( intermediateFactor )\n \n #MaxDecodingBT( intermediateMaxFactors, Z )\n bt_results=TracebackMAP( intermediateMaxFactors, Z )\n return (reduce(lambda x, y: FactorProduct(x,y), F), bt_results)\n\n\ndef FactorSum ( A, B):\n \"\"\" FactorSum Computes the sum of two factors.\n% Similiar to FactorProduct\n We would use this in log space where multiplication becomes addition\n% 
Based on the code here https://github.com/indapa/PGM/blob/master/Prog4/FactorSum.m \"\"\"\n\n\n C=Factor()\n\n #check for empty factors\n if len( A.getVar() ) == 0 :\n sys.stderr.write(\"A factor is empty!\\n\")\n return B\n if len( B.getVar() ) == 0:\n sys.stderr.write(\"B factor is empty!\\n\")\n return A\n\n\n #check of variables that in both A and B have the same cardinality\n #print 'A.getVar(): ', A.getVar()\n #print 'B.getVar(): ',B.getVar()\n #setA= set( A.getVar() )\n #setB= set( B.getVar() )\n #intersect=np.array( list( setA.intersection(setB)))\n intersect=np.intersect1d( A.getVar(), B.getVar() ).tolist()\n #print \"Intersection of variables in FactorProduct \", intersect\n #print \"A var: \", A.getVar()\n #print \"B var: \", B.getVar()\n\n #if the intersection of variables in the two factors\n #is non-zero, then make sure they have the same cardinality\n if len(intersect) > 0:\n #iA=np.nonzero(intersect - A.getVar()==0)[0].tolist() # see this http://stackoverflow.com/a/432146, return the index of something in an array?\n iA=getIndex( A.getVar(), intersect )\n #print \"iA: \", iA\n #iB=np.nonzero(intersect - B.getVar()==0)[0].tolist()\n iB = getIndex ( B.getVar(), intersect )\n #print \"iB: \", iB\n\n # check to see if any of the comparisons in the array resulting from of a.getCard()[iA] == b.getCard()[iB]\n # are all False. 
If so print an error and exit\n if len( np.where( A.getCard()[iA].all() == B.getCard()[iB].all() ==False)[0].tolist() ) > 0:\n sys.stderr.write(\"dimensionality mismatch in factors!\\n\")\n sys.exit(1)\n\n #now set the variables of C to the union of variables in factors A and B\n #print 'setA ' ,setA\n #print 'setB ', setB\n #print list( setA.union(setB) )\n C.setVar( np.union1d ( A.getVar(), B.getVar() ).tolist() )\n #C.setVar ( list( setA.union(setB) ) )\n mapA=isMember(A.getVar(), C.getVar() )\n mapB=isMember(B.getVar(), C.getVar() )\n\n\n\n #Set the cardinality of variables in C\n C.setCard( np.zeros( len(C.getVar())).tolist() )\n C.getCard()[mapA]=A.getCard()\n C.getCard()[mapB]=B.getCard()\n\n #intitialize the values of the factor C to be zero\n C.setVal( np.zeros(np.prod(C.getCard())).tolist() )\n\n #some helper indices to tell what indices of A and B values to multiply\n assignments=IndexToAssignment( np.arange(np.prod(C.getCard())), C.getCard() ) #get the assignment of values of C\n indxA=AssignmentToIndex( assignments[:,mapA], A.getCard())-1 # re-arrange the assignment of C, to what it would be in factor A\n indxB=AssignmentToIndex( assignments[:,mapB], B.getCard())-1 # re-arange the assignment of C to what it would be in factorB\n #print 'indxA ', indxA\n #print 'indxB ', indxB\n\n\n c_val=A.getVal()[indxA.flatten().tolist()] + B.getVal()[indxB.flatten().tolist()] #now that we have the index into A.val and B.val vector, multiply them to factor product\n C.setVal ( c_val.tolist() )\n\n return C\n\n\ndef LogFactor( F ):\n \"\"\" return a factor whose values are the natural log of the orginal factor F \"\"\"\n \n return Factor ( F.getVar().tolist(), F.getCard().tolist(), np.log ( F.getVal() ).tolist(), F.getName() )\n\n\ndef ExpFactorNormalize ( logF ):\n \"\"\" exponentiate a factor to probablity space from log space\n Since moving from log space to probablity space incures a decrease in dynamic range,\n factors should be normalized before applying the 
transform. One trick we use here is\n to shift every entry by the maximum entry. For example: phi[i] = exp{logPhi[i] -c}\n The value of c is max(logPhi). This type of transformation ensures the resulting factor\n has a maximum entry of 1 and prevents overflow. See page 360 of Koller and Friedman text\"\"\"\n logPhi=logF.getVal()\n #phi=lognormalize( logPhi )\n phi=np.exp(logPhi-np.max(logPhi) )\n logF.setVal( phi )\n return logF\n\ndef ExpFactor( logF ):\n \"\"\" similiar to above, but don't normalize \"\"\"\n logPhi=logF.getVal()\n phi=np.exp( logPhi )\n logF.setVal( phi )\n return logF\n\n\n\n\n\n\ndef FactorDiv ( A, B):\n \"\"\" FactorProduct Computes the dividend of two factors.\n% Similiar to Factor Product, but if we divide 0/0, return 0\n see page 365 in Koller and Friedman for definition of FactorDivision \"\"\"\n\n #print \"A: \", A\n #print \"====\"\n #print \"B: \", B\n C=Factor()\n\n #check for empty factors\n if len( A.getVar() ) == 0 :\n sys.stderr.write(\"A factor is empty!\\n\")\n return B\n if len( B.getVar() ) == 0:\n sys.stderr.write(\"B factor is empty!\\n\")\n return A\n\n\n #check of variables that in both A and B have the same cardinality\n #print 'A.getVar(): ', A.getVar()\n #print 'B.getVar(): ',B.getVar()\n #setA= set( A.getVar() )\n #setB= set( B.getVar() )\n #intersect=np.array( list( setA.intersection(setB)))\n intersect=np.intersect1d( A.getVar(), B.getVar() ).tolist()\n #print \"Intersection of variables in FactorProduct \", intersect\n #print \"A var: \", A.getVar()\n #print \"B var: \", B.getVar()\n\n #if the intersection of variables in the two factors\n #is non-zero, then make sure they have the same cardinality\n if len(intersect) > 0:\n #iA=np.nonzero(intersect - A.getVar()==0)[0].tolist() # see this http://stackoverflow.com/a/432146, return the index of something in an array?\n iA=getIndex( A.getVar(), intersect )\n #print \"iA: \", iA\n #iB=np.nonzero(intersect - B.getVar()==0)[0].tolist()\n iB = getIndex ( B.getVar(), 
intersect )\n #print \"iB: \", iB\n\n # check to see if any of the comparisons in the array resulting from of a.getCard()[iA] == b.getCard()[iB] \n # are all False. If so print an error and exit\n if len( np.where( A.getCard()[iA].all() == B.getCard()[iB].all() ==False)[0].tolist() ) > 0:\n sys.stderr.write(\"dimensionality mismatch in factors!\\n\")\n sys.exit(1)\n\n #now set the variables of C to the union of variables in factors A and B\n #print 'setA ' ,setA\n #print 'setB ', setB\n #print list( setA.union(setB) )\n C.setVar( np.union1d ( A.getVar(), B.getVar() ).tolist() )\n #C.setVar ( list( setA.union(setB) ) )\n mapA=isMember(A.getVar(), C.getVar() )\n mapB=isMember(B.getVar(), C.getVar() )\n\n \n\n #Set the cardinality of variables in C\n C.setCard( np.zeros( len(C.getVar())).tolist() )\n C.getCard()[mapA]=A.getCard()\n C.getCard()[mapB]=B.getCard()\n\n #intitialize the values of the factor C to be zero\n C.setVal( np.zeros(np.prod(C.getCard())).tolist() )\n\n #some helper indices to tell what indices of A and B values to multiply\n assignments=IndexToAssignment( np.arange(np.prod(C.getCard())), C.getCard() ) #get the assignment of values of C\n indxA=AssignmentToIndex( assignments[:,mapA], A.getCard())-1 # re-arrange the assignment of C, to what it would be in factor A\n indxB=AssignmentToIndex( assignments[:,mapB], B.getCard())-1 # re-arange the assignment of C to what it would be in factorB\n \n numerator=A.getVal()[indxA.flatten().tolist()]\n denominator=B.getVal()[indxB.flatten().tolist()]\n \n #print numerator\n #print denominator\n #print zip(numerator, denominator)\n val= map( lambda x: common.zerodiv_tuple(x), zip(numerator,denominator) )\n #print val\n C.setVal ( val )\n \n return C\n\n\n\n\n\n\ndef variableStride( f ):\n \"\"\" given a Factor object f, calculate its variable stride in value array of the factor \"\"\"\n strides=[]\n iF=IndexToAssignment ( np.arange(np.prod(f.getCard()) ), f.getCard() )\n variables=f.getVar()\n 
cardinalties=f.getCard()\n \n \n for i in range(0, len(variables) ):\n #assignment_slice=iF[:,i]\n #var_card=cardinalties[i]\n #print variables[i], cardinalties[i]\n curr_stride=iF[:,i]\n if cardinalties[i] > 1:\n stride=np.where(curr_stride==2)\n else:\n stride=0\n strides.append(stride)\n continue\n #print variables[i], cardinalties[i], stride[0][0]\n strides.append( stride[0][0] )\n #print \n \n return strides\n\n\n\n\ndef IndexOfAssignment( f, strides, assignment):\n \"\"\" given a Factor object f and the strides of each of the variables of f and \n a list of assignments of those variables, return the position in the value array of that assignment\n see page 358 box 10.a in Friedman and Koller text on how efficiently map a particular variable\n assignment to the and index in the value array of a factor\n \"\"\"\n \n idx=0\n \n #cardinalities=f.getCard()\n for (ass, stride) in zip( assignment, strides):\n #print ass, stride\n idx +=(ass *stride)\n\n return idx\n\n\n\n\n\n" } ]
17
JulinaM/Deep-Networks-for-Graph-Representation
https://github.com/JulinaM/Deep-Networks-for-Graph-Representation
1b698ef96b612d95e48e10051e215d5068c637f2
30430344c198117a8fe79f841d2fe8ac5a12206a
b27f405120762a5f32f3eb10ed739d0298ea5c98
refs/heads/main
2023-03-29T21:48:40.396374
2021-04-07T08:09:47
2021-04-07T08:09:47
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5860384702682495, "alphanum_fraction": 0.5926457643508911, "avg_line_length": 30.926605224609375, "blob_id": "2a140f398ddbf1ffedc9cb406ee87230e7013aaf", "content_id": "bde05c68ab394d7bcd7f82600fa9126232e7cc22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3481, "license_type": "no_license", "max_line_length": 141, "num_lines": 109, "path": "/src/autoencoder.py", "repo_name": "JulinaM/Deep-Networks-for-Graph-Representation", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nimport torch.autograd as ag\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchsummary import summary\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom scipy.optimize import minimize, NonlinearConstraint\nimport random\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\n\nfrom math import sqrt\nimport time\n\ndef timeit(method):\n def timed(*args, **kw):\n ts = time.time()\n result = method(*args, **kw)\n te = time.time()\n if 'log_time' in kw:\n name = kw.get('log_name', method.__name__.upper())\n kw['log_time'][name] = int((te - ts) * 1000)\n else:\n print('%r %2.2f ms' % \\\n (method.__name__, (te - ts) * 1000))\n return result\n return timed\n\nclass GaussianNoise(nn.Module):\n def __init__(self, stddev):\n super().__init__()\n self.stddev = stddev\n\n def forward(self, din):\n if self.training:\n return din + torch.randn(din.size()) * self.stddev\n return din\n\nclass DropoutNoise(nn.Module):\n def __init__(self, p):\n super().__init__()\n self.p = p\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n def forward(self, x):\n t = torch.rand(x.size()).to(self.device)\n a = t > self.p\n \n return(x*a)\n\nclass BasicBlock(nn.Module):\n def __init__(self, input_shape, n_neurons, activation='relu', noise=None, noise_arg=None):\n super().__init__()\n 
self.n_neurons = n_neurons\n self.input_shape = input_shape\n \n self.has_noise = False\n \n if noise=='gaussian':\n self.has_noise = True\n self.noise = GaussianNoise(noise_arg)\n elif noise=='dropout':\n self.has_noise = True\n self.noise = DropoutNoise(noise_arg)\n \n self.dense_layer = nn.Linear(self.input_shape, self.n_neurons)\n \n activations_map = {'relu':nn.ReLU, 'tanh':nn.Tanh, 'sigmoid':nn.Sigmoid}\n self.activation = activations_map[activation]()\n\n def forward(self, features):\n x=features\n \n if self.has_noise:\n x = self.noise(x)\n\n x = self.dense_layer(features)\n x = self.activation(x)\n \n return(x)\n\nclass SDAE(nn.Module):\n def __init__(self, features, input_shape, hidden_layers, activation='relu', last_activation='relu', noise_type='dropout', noise_arg=0.2):\n super().__init__()\n self.features = features\n\n self.inputs = [input_shape] + hidden_layers\n \n n = len(self.inputs)\n encoder_units = [BasicBlock(self.inputs[0], self.inputs[1], activation=activation, noise=noise_type, noise_arg=noise_arg)]\n encoder_units.extend([BasicBlock(self.inputs[i], self.inputs[i+1], activation=activation) for i in range(1, n-1)])\n \n self.encoder = nn.Sequential(*encoder_units)\n \n decoder_units = [BasicBlock(self.inputs[i], self.inputs[i-1], activation=activation) for i in range(n-1,1,-1)]\n decoder_units.append(BasicBlock(self.inputs[1], self.inputs[0], activation=last_activation))\n \n self.decoder = nn.Sequential(*decoder_units)\n \n def forward(self, idx):\n encoded = self.encoder(self.features[idx])\n \n decoded = self.decoder(encoded)\n \n return(decoded)\n\n" }, { "alpha_fraction": 0.5990557670593262, "alphanum_fraction": 0.6121699810028076, "avg_line_length": 28.183673858642578, "blob_id": "1e6dbeafe604000312ac7da086d6def3c6682800", "content_id": "beb7dc830d76e123e5347a2c87070a78127e9b55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5719, "license_type": "no_license", 
"max_line_length": 168, "num_lines": 196, "path": "/src/dgnr.py", "repo_name": "JulinaM/Deep-Networks-for-Graph-Representation", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nimport torch.autograd as ag\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchsummary import summary\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom scipy.optimize import minimize, NonlinearConstraint\nimport random\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\n\nfrom math import sqrt\nimport time\n\nfrom autoencoder import SDAE\nfrom utils import *\nfrom random_graphs import connected_components\n\ndef timeit(method):\n def timed(*args, **kw):\n ts = time.time()\n result = method(*args, **kw)\n te = time.time()\n if 'log_time' in kw:\n name = kw.get('log_name', method.__name__.upper())\n kw['log_time'][name] = int((te - ts) * 1000)\n else:\n print('%r %2.2f ms' % \\\n (method.__name__, (te - ts) * 1000))\n return result\n return timed\n\ndef normalize(M):\n #Put diagonal elements to 0\n M = M - np.diag(np.diag(M))\n \n #Normalizing by row\n D_inv = np.diag(np.reciprocal(np.sum(M,axis=0)))\n M = np.dot(D_inv, M)\n\n return M\n\ndef zero_one_normalisation(matrix, e=1e-5):\n M = np.max(matrix)\n m = np.min(matrix)\n r = (matrix-m) / (M-m + e)\n return(r)\n\ndef PCO(A, K, alpha):\n \"\"\"\n For a graph represented by its adjacency matrix *A*, computes the co-occurence matrix by random \n surfing on the graph with returns. 
1-alpha is the probability to make, at each step, a return \n to the original step.\n \"\"\"\n A=np.array(A, dtype=float)\n \n #The adjacency matrix A is first normalized\n A=normalize(A) \n \n n=A.shape[0]\n \n I=np.eye(n)\n \n P=I\n M=np.zeros((n, n))\n \n for i in range(K):\n P = alpha*np.dot(P,A) + (1-alpha)*I\n M = M+P\n \n return(M)\n\ndef PPMI(M):\n \"\"\"Computes the shifted positive pointwise mutual information (PPMI) matrix\n from the co-occurence matrix (PCO) of a graph.\"\"\"\n \n M=normalize(M)\n cols = np.sum(M, axis=0)\n rows = np.sum(M, axis=1).reshape((-1,1))\n s = np.sum(rows)\n \n P = s*M\n P /= cols\n P /= rows\n \n #P[np.where(P<0)] = 1.0\n P = np.log(P)\n\n #To avoid NaN when applying log\n P[np.isnan(P)] = 0.0\n P[np.isinf(P)] = 0.0\n P[np.isneginf(P)] = 0.0\n P[np.where(P<0)] = 0.0\n \n return(P)\n\ndef sdae(input_net, input_number, hidden_layers, n_epochs=100, batch_size=1, activation='sigmoid', last_activation='sigmoid'):\n #hidden_layers=[500,200,100]\n #input_numer=784\n # use gpu if available\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n normalized_input_net = zero_one_normalisation(input_net)\n tensor_net = torch.Tensor(normalized_input_net).to(device)\n N = tensor_net.size()[0]\n idx = torch.arange(N).long()\n\n model = SDAE(tensor_net, input_number, hidden_layers, activation=activation, last_activation=last_activation).to(device)\n\n # create an optimizer object\n # Adam optimizer with learning rate 1e-3\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n\n # mean-squared error loss\n criterion = nn.MSELoss()\n\n #summary(model, (input_number,))\n \n train = torch.utils.data.TensorDataset(idx, idx)\n train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=False)\n\n for epoch in range(n_epochs): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n # get the inputs; data is a list of [inputs, labels]\n idx, _ = data\n 
\n idx=idx.long().to(device)\n inputs=tensor_net[idx]\n\n #inputs=torch.flatten(inputs)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(idx)\n \n loss = criterion(outputs, inputs)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n\n print('[%d, %5d] loss: %.5f' %\n (epoch + 1, i + 1, running_loss / input_number))\n\n print('Finished Training')\n \n return(model, train_loader, tensor_net)\n\ndef get_embeddings(train_loader, N, model, size_encoded=100):\n trainiter = iter(train_loader)\n embeddings = np.zeros((N, size_encoded))\n\n for i,q in enumerate(trainiter):\n idx = q[0]\n for j in idx:\n embedded = model.encoder(model.features[j]).cpu().detach().numpy()\n embeddings[j,:] = embedded.reshape((size_encoded,))\n \n return(embeddings)\n \n@timeit\ndef dngr_pipeline(network, N, hidden_layers, K=10, alpha=0.2, n_epochs=100, batch_size=1, activation='sigmoid', last_activation='sigmoid'):\n ppmi_net = PPMI(PCO(network, K, alpha))\n model, train_loader, tensor_net = sdae(ppmi_net, N, hidden_layers, n_epochs=n_epochs, batch_size=batch_size, activation=activation, last_activation=last_activation)\n \n print(\"[*] Visualizing an example's output...\")\n trainiter = iter(train_loader)\n idx, _ = trainiter.next()\n\n print(tensor_net[idx])\n print(model(idx))\n\n print(mean_squared_error(tensor_net[idx].cpu().detach().numpy(), model(idx).cpu().detach().numpy()))\n \n print(\"[*] Getting the embeddings and visualizing t-SNE...\")\n embeddings=get_embeddings(train_loader, N, model, size_encoded=hidden_layers[-1])\n \n cmps = connected_components(network)\n targets = [0 for i in range(N)]\n\n for i, cmp in enumerate(cmps):\n for n in cmp:\n targets[n] = i\n \n visualize_TSNE(embeddings, targets)\n \n return(embeddings, model, train_loader)" }, { "alpha_fraction": 0.5961456298828125, "alphanum_fraction": 0.6111348867416382, "avg_line_length": 23.851064682006836, "blob_id": 
"9bd7d5e5b68e0a57b356adcb319bc583c5f5883b", "content_id": "bb474ab01a8c3c3e959aa88f93564cd6616d0e5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2335, "license_type": "no_license", "max_line_length": 98, "num_lines": 94, "path": "/src/random_graphs.py", "repo_name": "JulinaM/Deep-Networks-for-Graph-Representation", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nimport torch.autograd as ag\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchsummary import summary\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom scipy.optimize import minimize, NonlinearConstraint\nimport random\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\n\nfrom math import sqrt\nimport time\n\ndef random_graph(p, size=(100,100)):\n return(np.array([[int(random.random() < p) for i in range(size[1])] for j in range(size[0])]))\n\ndef random_undirected_graph(p, size=(100,100)):\n graph = random_graph(p, size=size)\n graph[np.arange(size[0]),np.arange(size[1])]=0 #nullify the diagonal\n graph = np.maximum(graph, graph.T) #make it symmetric\n \n return(graph)\n\ndef random_graph_with_fixed_components(p, nodes_per_component=[50,50]):\n nodes_per_component = np.array(nodes_per_component)\n n_nodes = nodes_per_component.sum()\n n_cmp = nodes_per_component.shape[0]\n \n graph = np.zeros((n_nodes, n_nodes))\n nodes = np.arange(n_nodes)\n np.random.shuffle(nodes)\n \n cmp_nodes = []\n acc=0\n \n for i in range(n_cmp):\n cmp = nodes[acc:(acc+nodes_per_component[i])]\n cmp_nodes.append(cmp)\n acc += nodes_per_component[i]\n \n size = cmp.shape[0]\n submatrix=np.ix_(cmp,cmp)\n\n graph[submatrix] = random_undirected_graph(p, (size,size))\n \n return(graph)\n\ndef neighbors(adj, i):\n return (np.where(adj[i,:]==1)[0])\n\ndef dfs(adj, i):\n n = adj.shape[0] #number of nodes in the graph\n visited = [False for k in 
range(n)]\n \n stack = [i]\n \n while(len(stack)>0):\n k = stack.pop()\n neighborhood = neighbors(adj, k)\n visited[k] = True\n \n for n in neighborhood:\n if not visited[n]:\n stack.append(n)\n \n return(np.where(visited))\n\ndef connected_components(adj):\n n = adj.shape[0]\n \n visited = np.array([0 for k in range(n)])\n s = np.sum(visited)\n \n comp=[]\n \n while s<n:\n i = np.where(1-visited)[0][0]\n \n cmp = dfs(adj, i)\n visited[cmp] = 1\n s = np.sum(visited)\n \n comp.append(list(cmp[0]))\n \n return(np.array(comp))\n\nif __name__ == '__main__':\n pass" }, { "alpha_fraction": 0.5650569796562195, "alphanum_fraction": 0.5818243026733398, "avg_line_length": 25.16666603088379, "blob_id": "bef4e8073443578ea40a6c86402a1a4e2b8cf5b1", "content_id": "f1a2ab4b4cb3f6e4e80227602e20b69bfd0eb937", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2982, "license_type": "no_license", "max_line_length": 100, "num_lines": 114, "path": "/src/utils.py", "repo_name": "JulinaM/Deep-Networks-for-Graph-Representation", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nimport time\nfrom sklearn.manifold import TSNE\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import roc_auc_score, accuracy_score, confusion_matrix, average_precision_score\n\nfrom scipy.spatial.distance import pdist, squareform\n\ndef compute_pr_auc(P,S):\n y_pred = S.clone().detach()\n\n y_pred[y_pred < 0.] = 0.\n y_pred[y_pred > 1.] 
= 1.\n \n y_pred = S.cpu().detach().numpy().reshape((-1,))\n y_true = P.cpu().detach().numpy().reshape((-1,))\n\n return(average_precision_score(y_true, y_pred))\n\ndef compute_confusion_matrix(y_true, y_pred):\n y_pred = y_pred.clone().detach()\n\n y_pred[y_pred >= 0.5] = 1.\n y_pred[y_pred < 0.5] = 0.\n\n y_pred = y_pred.cpu().detach().numpy().reshape((-1,))\n y_true = y_true.cpu().detach().numpy().reshape((-1,))\n\n return(confusion_matrix(y_true, y_pred))\n\ndef compute_auc(P,S):\n y_pred = S.clone().detach()\n\n y_pred[y_pred < 0.] = 0.\n y_pred[y_pred > 1.] = 1.\n\t\n y_pred = S.cpu().detach().numpy().reshape((-1,))\n y_true = P.cpu().detach().numpy().reshape((-1,))\n\n return(roc_auc_score(y_true, y_pred))\n\ndef compute_accuracy(y_true, y_pred):\n y_pred = y_pred.clone().detach()\n\n y_pred[y_pred >= 0.5] = 1.\n y_pred[y_pred < 0.5] = 0.\n\n y_pred = y_pred.cpu().detach().numpy().reshape((-1,))\n y_true = y_true.cpu().detach().numpy().reshape((-1,))\n\n return(accuracy_score(y_true, y_pred))\n\n\ndef timeit(method):\n def timed(*args, **kw):\n ts = time.time()\n result = method(*args, **kw)\n te = time.time()\n if 'log_time' in kw:\n name = kw.get('log_name', method.__name__.upper())\n kw['log_time'][name] = int((te - ts) * 1000)\n else:\n print('%r %2.2f ms' % \\\n (method.__name__, (te - ts) * 1000))\n return result\n return timed\n\ndef visualize_TSNE(embeddings,target):\n tsne = TSNE(n_components=2, init='pca',\n random_state=0, perplexity=30)\n data = tsne.fit_transform(embeddings)\n #plt.figure(figsize=(12, 6))\n plt.title(\"TSNE visualization of the embeddings\")\n plt.scatter(data[:,0],data[:,1],c=target)\n\n return\n\ndef compute_similarity(net):\n M = 1 - pdist(net, metric='Jaccard')\n M = squareform(M)\n M = M + np.eye(*M.shape)\n M[np.isnan(M)] = 0.\n\n return(M)\n\ndef readnet(net_path):\n\treturn(np.genfromtxt(net_path,delimiter='\\t'))\n\ndef extract_samples(K, tensor):\n \"\"\"\n Extract K random samples from tensor.\n\n Parameters\n 
----------\n K : int\n Wanted number of samples\n tensor : tensor of any size\n tensor from which extract the samples \n (the first dimension will be considered as the sample dim)\n\n Returns\n -------\n samples : tensor of same type as the tensor parameter of shape (K,...)\n the K samples extracted from tensor\n\n \"\"\"\n perm = torch.randperm(tensor.size(0))\n idx = perm[:K]\n samples = tensor[idx]\n \n return (samples)" }, { "alpha_fraction": 0.5544765591621399, "alphanum_fraction": 0.5700565576553345, "avg_line_length": 29.828947067260742, "blob_id": "1feb19138e66607907ba3391f057dd6b0549b984", "content_id": "24b18bc730f47568c1cd4e73dceaf10b06d04f34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9371, "license_type": "no_license", "max_line_length": 149, "num_lines": 304, "path": "/src/pu_learner.py", "repo_name": "JulinaM/Deep-Networks-for-Graph-Representation", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nimport torch.autograd as ag\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchsummary import summary\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom scipy.optimize import minimize, NonlinearConstraint\nimport random\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\n\nfrom math import sqrt\nimport time\n\nfrom autoencoder import *\nfrom utils import *\nfrom random_graphs import *\nfrom dgnr import *\n\ndef old_pu_learning(x, y, P, k = 7, alpha = 0.2, gamma = 0.3, maxiter=1000):\n Fd = x.shape[1]\n Ft = y.shape[1]\n Nd = x.shape[0]\n Nt = y.shape[0]\n \n #Number of variables\n N_variables = Fd * k + Ft * k\n \n print(\"Number of variables:\", N_variables) \n print(\"Finding positive and negative examples...\")\n \n Ipos = np.where(P==1.)\n Ineg = np.where(P==0.)\n\n print(\"Number of positive examples:\", Ipos[0].shape[0])\n print(\"Number of negative/unlabelled 
examples:\", Ineg[0].shape[0])\n \n alpha_rac = sqrt(alpha)\n \n @timeit\n def objective(z):\n H = z[:Fd*k].reshape((Fd,k))\n W = z[-Ft*k:].reshape((Ft,k))\n \n M = P - (x @ H @ np.transpose(W) @ np.transpose(y))\n \n M[Ineg] *= alpha_rac\n \n L = torch.sum(M**2) + gamma/2 * (np.sum(H**2, axis=(0,1)) + np.sum(W**2, axis=(0,1)))\n print(L)\n\n return(L)\n \n def constraint(z):\n H = z[:Fd*k].reshape((Fd,k))\n W = z[-Ft*k:].reshape((Ft,k))\n S = x @ H @ np.transpose(W) @ np.transpose(y)\n S = S.reshape((-1,))\n \n return(S)\n \n nlc = NonlinearConstraint(constraint, np.zeros(Nt*Nd), np.ones(Nt*Nd))\n\n print(\"Going to minimize... Maximum number of iterations:\", maxiter)\n res=minimize(objective, x0 = np.random.randn(N_variables), options={'maxiter':maxiter, 'disp':'True'}, constraints=[nlc], method='trust-constr')\n \n print(\"\\n\\nSolved.\")\n \n z=res['x']\n H = z[:Fd*k].reshape((Fd,k))\n W = z[-Ft*k:].reshape((Ft,k))\n\n print(\"Now computing Z=HW^T, then will compute S...\")\n \n S = x @ H @ np.transpose(W) @ np.transpose(y)\n \n return(S)\n\ndef pu_learning(x, y, P, pos_train_mask, neg_train_mask, k = 7, alpha = 0.2, gamma = 0.3, maxiter=1000, lr=0.1):\n Fd = x.shape[1]\n Ft = y.shape[1]\n Nd = x.shape[0]\n Nt = y.shape[0]\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n #Number of variables\n N_variables = Fd * k + Ft * k\n \n print(\"Number of variables:\", N_variables) \n print(\"Finding positive and negative examples...\")\n \n P = torch.Tensor(P).to(device)\n x = torch.Tensor(x).to(device)\n y = torch.Tensor(y).to(device)\n \n x_norm = torch.linalg.norm(x)\n y_norm = torch.linalg.norm(y)\n\n Ipos = pos_train_mask\n Ineg = neg_train_mask\n train_mask = torch.logical_or(Ipos,Ineg)\n\n print(\"Number of positive examples:\", P[Ipos].size()[0])\n print(\"Number of negative/unlabelled examples:\", P[Ineg].size()[0])\n \n alpha_rac = sqrt(alpha)\n \n def objective(H,W):\n M = P - torch.chain_matmul(x, H, torch.transpose(W, 0, 
1), torch.transpose(y, 0, 1))\n \n M[Ineg] *= alpha_rac\n M[~train_mask] = 0.\n \n L = torch.sum(M**2) + gamma/2 * (torch.sum(H**2) + torch.sum(W**2))\n\n return(L)\n \n def constraint(z):\n z = torch.Tensor(z).to(device)\n H = z[:Fd*k].resize(Fd,k)\n W = z[-Ft*k:].resize(Ft,k)\n\n S = torch.chain_matmul(x, H, torch.transpose(W, 0, 1), torch.transpose(y, 0, 1))\n S = S.reshape((-1,)).cpu().detach().numpy()\n \n return(S)\n \n #nlc = NonlinearConstraint(constraint, np.zeros(Nt*Nd), np.ones(Nt*Nd))\n\n print(\"Going to minimize... Maximum number of iterations:\", maxiter)\n #res=minimize(objective, x0 = np.random.randn(N_variables), options={'maxiter':maxiter, 'disp':'True'}, constraints=[nlc], method='trust-constr')\n W = ag.Variable(torch.rand(Ft,k).to(device)/y_norm, requires_grad=True)\n H = ag.Variable(torch.rand(Fd,k).to(device)/x_norm, requires_grad=True)\n \n opt = torch.optim.Adam([H,W], lr=lr)\n\n for i in range(maxiter):\n # Zeroing gradients\n opt.zero_grad()\n\n # Evaluating the objective\n obj = objective(H,W)\n\n # Calculate gradients\n obj.backward() \n opt.step()\n if i%1000==0: \n S = torch.chain_matmul(x, H, torch.transpose(W,0,1), torch.transpose(y,0,1))\n auc = compute_auc(P[train_mask],S[train_mask])\n print(\"Objective:\", obj, \"(auc:\", auc, \")\")\n\n print(\"\\n\\nSolved.\")\n \n print(\"Now computing Z=HW^T, then will compute S...\")\n \n S = torch.chain_matmul(x, H, torch.transpose(W,0,1), torch.transpose(y,0,1))\n \n return(S, H, W)\n\ndef get_train_test_masks(P, train_size=0.8):\n P = torch.Tensor(P)\n Ipos = (P == 1.)\n Ineg = (P == 0.)\n\n pos_train = Ipos * torch.rand(P.size())\n pos_train[Ineg] = 1.\n pos_train = pos_train < train_size\n train_neg_rel_size = torch.sum(pos_train) / torch.sum(Ineg)\n \n neg_train = Ineg * torch.rand(P.size())\n neg_train[Ipos] = 1.\n neg_train = neg_train < train_neg_rel_size\n\n train = pos_train + neg_train\n test = ~train\n pos_test = torch.logical_and(test, Ipos)\n neg_test = 
torch.logical_and(test, Ineg)\n\n return(pos_train, neg_train, pos_test, neg_test)\n\ndef new_pu_learning(x, y, P, k = 7, alpha = 0.2, gamma = 0.3, maxiter=1000, lr=1e-3, batch_size=100):\n Fd = x.shape[1]\n Ft = y.shape[1]\n Nd = x.shape[0]\n Nt = y.shape[0]\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n #Number of variables\n N_variables = Fd * k + Ft * k\n N_examples = Nd*Nt\n \n P = torch.Tensor(P).to(device)\n cartesian_product = torch.Tensor([[u, v] for u in x for v in y]).to(device)\n \n print(\"Spliting train and test sets...\")\n pos_train, neg_train, pos_test, neg_test = get_train_test_masks(P, train_size=0.8)\n\n train_mask = torch.logical_or(pos_train, neg_train)\n\n flat_train_mask = train_mask.flatten()\n\n print(\"Building the train loader...\")\n train = torch.utils.data.TensorDataset(cartesian_product[flat_train_mask], P[train_mask].flatten())\n train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=False)\n\n print(\"Number of variables:\", N_variables)\n print(\"Finding positive and negative examples...\")\n \n x = torch.Tensor(x).to(device)\n y = torch.Tensor(y).to(device)\n \n print(\"Number of train examples:\", P.size()[0])\n print(\"Number of positive examples in train set:\", P[pos_train].size()[0])\n print(\"Number of negative/unlabelled examples in train set:\", P[neg_train].size()[0])\n \n alpha_rac = sqrt(alpha)\n \n def objective(a, H, W, b, P):\n M = P.clone().detach()\n for i in range(P.size()[0]):\n s = torch.chain_matmul(a[i].reshape(1,Fd), H, torch.transpose(W, 0, 1), torch.transpose(b[i].reshape(1,Ft), 0, 1))\n M[i] -= s.item()\n\n Ineg = (P==0.)\n\n M[Ineg] *= alpha_rac\n \n L = torch.mean(M**2) + gamma/2 * (torch.sum(H**2) + torch.sum(W**2))\n\n return(L)\n \n def objective_prime(a, H, W, b, P):\n M = P - torch.einsum('ij,jk,kl,li->i', a, H, torch.transpose(W, 0, 1), torch.transpose(b, 0, 1))\n \n Ineg = (P==0.)\n\n M[Ineg] *= alpha_rec\n L = torch.mean(M**2) + 
gamma/2 * (torch.sum(H**2) + torch.sum(W**2))\n\n return(L)\n\n def total_loss(x,H,W,y,P):\n M = P - torch.chain_matmul(x, H, torch.transpose(W, 0, 1), torch.transpose(y, 0, 1))\n\n M[~train_mask] = 0.\n\n M[neg_train] *= alpha_rac\n\n L = torch.mean(M**2) + gamma/2 * (torch.sum(H**2) + torch.sum(W**2))\n\n return(L)\n\n print(\"Going to minimize... Maximum number of epochs:\", maxiter)\n \n W = torch.zeros(Ft,k).to(device)\n W /= torch.linalg.norm(W)*torch.linalg.norm(y) #for initial W and H not to be too big\n H = torch.zeros(Fd,k).to(device)\n H /= torch.linalg.norm(H)*torch.linalg.norm(x)\n\n W = ag.Variable(W, requires_grad=True)\n H = ag.Variable(H, requires_grad=True)\n \n opt = torch.optim.Adam([H,W], lr=lr)\n \n batch_num = N_examples//batch_size\n\n for k in range(maxiter):\n for i, batch in enumerate(train_loader, 0):\n # Zeroing gradients\n opt.zero_grad()\n\n inputs, labels = batch\n\n a, b = inputs[:,0,:], inputs[:,1,:]\n \n # Evaluating the objective\n obj = objective(a,H,W,b,labels)\n\n # Calculate gradients\n obj.backward() \n opt.step()\n \n if i%(batch_size//4) == 0:\n S = torch.chain_matmul(x, H, torch.transpose(W,0,1), torch.transpose(y,0,1))\n stdm = torch.std(S[train_mask])\n auc = compute_auc(P[train_mask],S[train_mask])\n loss = total_loss(x,H,W,y,P)\n\n print(\"[epoch=\",k,\", batch_num=\",i,\"]\",\"Objective: \", loss.item(), \"(standard deviation: \", stdm.item(), \", auc: \", auc.item(), \")\")\n\n print(\"\\n\\nSolved.\")\n \n print(\"Now computing Z=HW^T, then will compute S...\")\n \n S = torch.chain_matmul(x, H, torch.transpose(W,0,1), torch.transpose(y,0,1))\n \n return(S, H, W)" }, { "alpha_fraction": 0.5642725825309753, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 34.26258850097656, "blob_id": "4f9305052e62adc12225284a46cfae829924c69c", "content_id": "4707245c67a3e5d43eb3d30d2e3924f81b39cec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9802, 
"license_type": "no_license", "max_line_length": 171, "num_lines": 278, "path": "/src/pu_learning.py", "repo_name": "JulinaM/Deep-Networks-for-Graph-Representation", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom math import sqrt\nfrom sklearn.model_selection import KFold\n\nfrom autoencoder import *\nfrom utils import *\nfrom random_graphs import *\nfrom dgnr import *\n\ndef get_train_test_masks(P, train_size=0.8, test_balance=True):\n P = torch.Tensor(P)\n Ipos = (P == 1.)\n Ineg = (P == 0.)\n\n pos_train = Ipos * torch.rand(P.size())\n pos_train[Ineg] = 1.\n pos_train = pos_train < train_size\n train_neg_rel_size = torch.sum(pos_train) / torch.sum(Ineg)\n \n neg_train = Ineg * torch.rand(P.size())\n neg_train[Ipos] = 1.\n neg_train = neg_train < train_neg_rel_size\n\n train = pos_train + neg_train\n\n if not test_balance:\n test = ~train\n pos_test = torch.logical_and(test, Ipos)\n neg_test = torch.logical_and(test, Ineg)\n else:\n test = ~train\n pos_test = torch.logical_and(test,Ipos)\n neg_test = torch.logical_and(test, Ineg)\n\n num_pos_test = torch.sum(pos_test)\n test_neg_rel_size = num_pos_test / torch.sum(neg_test)\n \n neg_test = neg_test * torch.rand(P.size())\n neg_test[train] = 1.\n neg_test[Ipos] = 1.\n \n neg_test = neg_test < test_neg_rel_size\n\n return(pos_train, neg_train, pos_test, neg_test)\n\nclass CustomMSELoss(nn.Module):\n def __init__(self, alpha=0.2):\n super(CustomMSELoss, self).__init__()\n self.alpha=sqrt(alpha)\n\n def forward(self, inputs, targets):\n #comment out if your model contains a sigmoid or equivalent activation layer\n neg_mask = (targets == 0.)\n \n M = (targets-inputs)**2\n M[neg_mask] *= self.alpha\n\n loss_res = torch.mean(M)\n\n return loss_res\n\nclass PU_Learner(nn.Module):\n def __init__(self, k, Fd, Ft, X, Y, Nd, Nt, activation='identity', has_bias=False):\n super().__init__()\n self.k = k\n self.Fd = Fd\n self.Ft = Ft\n \n self.H = 
torch.nn.Parameter(torch.randn(Fd, k)*1/sqrt(k))\n self.W = torch.nn.Parameter(torch.randn(k, Ft)*1/sqrt(k))\n if not has_bias:\n self.b_x = torch.zeros(Nd)\n self.b_y = torch.zeros(Nt)\n else:\n self.b_x = torch.nn.Parameter(torch.randn(Nd))\n self.b_y = torch.nn.Parameter(torch.randn(Nt))\n\n self.X = (X - X.mean(0))/(X.std(0)+1e-7)\n self.Y = (Y - Y.mean(0))/(Y.std(0)+1e-7)\n\n activ_dict = {'sigmoid': torch.nn.Sigmoid, 'identity': torch.nn.Identity}\n self.activation = activ_dict[activation]()\n\n def forward(self, id_x, id_y):\n \tdot = torch.einsum('ij,jk,kl,li->i', self.X[id_x], self.H, self.W, torch.transpose(self.Y[id_y], 1, 0))\n\n \treturn(self.activation(dot + self.b_x[id_x] + self.b_y[id_y]))\n\ndef pu_learning(k, x, y, P, pos_train, neg_train, pos_test, neg_test, n_epochs=100, batch_size=100, lr=1e-3, alpha=1.0, gamma=0., activation='identity'):\n #hidden_layers=[500,200,100]\n #input_numer=784\n # use gpu if available\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n Fd = x.shape[1]\n Ft = y.shape[1]\n Nd = x.shape[0]\n Nt = y.shape[0]\n \n #Number of variables\n N_variables = Fd * k + Ft * k\n\n cartesian_product = torch.Tensor([[i, j] for i in range(Nd) for j in range(Nt)]).long().to(device)\n\n x = torch.Tensor(x).to(device)\n y = torch.Tensor(y).to(device)\n\n P = torch.Tensor(P).to(device)\n\n train_mask = torch.logical_or(pos_train, neg_train)\n test_mask = torch.logical_or(pos_test, neg_test)\n flat_train_mask = train_mask.flatten()\n\n print(\"Building the train loader...\")\n train = torch.utils.data.TensorDataset(cartesian_product[flat_train_mask], P[train_mask].flatten())\n train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)\n \n batch_num = len(train_loader)\n\n print(\"Number of variables:\", N_variables)\n print(\"Finding positive and negative examples...\") \n\n print(\"Number of train examples:\", P[train_mask].size()[0])\n print(\"Number of positive examples in train 
set:\", P[pos_train].size()[0])\n print(\"Number of negative/unlabelled examples in train set:\", P[neg_train].size()[0])\n\n model = PU_Learner(k, Fd, Ft, x, y, Nd, Nt, activation=activation).to(device)\n\n # create an optimizer object\n # Adam optimizer with learning rate 1e-3\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=gamma)\n\n # mean-squared error loss\n criterion = CustomMSELoss(alpha=alpha)\n\n for epoch in range(n_epochs): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n \n inputs=inputs.to(device)\n \n id_x_batch, id_y_batch = inputs[:,0], inputs[:,1]\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(id_x_batch, id_y_batch)\n \n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n \n S = model.activation(torch.chain_matmul(model.X, model.H, model.W, torch.transpose(model.Y,0,1)) + model.b_x.unsqueeze(-1).expand(-1,Nt) + model.b_y.expand(Nd,-1))\n \"\"\"print(S)\n print(model.H)\n print(model.W)\n print(model.b_x)\n print(model.b_y)\"\"\"\n auc = compute_auc(P[train_mask].clone(), S[train_mask].clone())\n acc = compute_accuracy(P[train_mask].clone(), S[train_mask].clone())\n\n print('[%d] loss: %.3f, auc: %.3f, acc: %.3f' %\n (epoch + 1, running_loss / (batch_num), auc, acc))\n running_loss = 0.0\n\n print('Finished Training')\n print(\"Now computing Z=HW^T, then will compute S...\")\n \n print(x.size(), model.H.size(), model.W.size(), y.size())\n S = model.activation(torch.chain_matmul(model.X, model.H, model.W, torch.transpose(model.Y,0,1)) + model.b_x.unsqueeze(-1).expand(-1,Nt) + model.b_y.expand(Nd,-1))\n \n return(S, model.H, model.W, model.b_x, model.b_y, train_mask, test_mask)\n\ndef pu_learning_new(k, x, y, P, n_epochs=100, batch_size=100, lr=1e-5, 
train_size=0.8, alpha=1.0, gamma=0., test_balance=True, activation='identity'):\n print(\"Spliting train and test sets...\")\n pos_train, neg_train, pos_test, neg_test = get_train_test_masks(P, train_size=train_size, test_balance=test_balance)\n \n return(pu_learning(k, x, y, P, pos_train, neg_train, pos_test, neg_test, \n n_epochs=n_epochs, batch_size=batch_size, lr=lr, \n alpha=alpha, gamma=gamma, activation=activation))\n\ndef cross_validate(k, x, y, P, N_folds, n_epochs=100, batch_size=100, lr=1e-5, train_size=0.8, alpha=1.0, gamma=0., activation='identity'):\n pos_mask = (P==1)\n neg_mask = (P==0)\n \n N_pos = pos_mask.sum()\n N_neg = neg_mask.sum()\n \n N = min(N_pos, N_neg)\n \n pos_idx = pos_mask.nonzero()\n neg_idx = neg_mask.nonzero()\n \n pos_idx = extract_samples(N, pos_idx)\n neg_idx = extract_samples(N, neg_idx)\n \n kfold = KFold(n_splits=N_folds, shuffle=False)\n \n kfold_pos = list(kfold.split(pos_idx))\n kfold_neg = list(kfold.split(neg_idx))\n \n S_auc = 0\n S_acc = 0\n S_pr = 0\n \n S_sol = 0\n\n for fold in range(N_folds):\n print(\"Fold %d\" % (fold+1))\n print(\"Preparing the masks...\")\n pos_train_idx = pos_idx[kfold_pos[fold][0]]\n #print(pos_train_idx.max(0))\n pos_test_idx = pos_idx[kfold_pos[fold][1]]\n neg_train_idx = neg_idx[kfold_neg[fold][0]]\n neg_test_idx = neg_idx[kfold_neg[fold][1]]\n \n pos_train_mask = torch.zeros(pos_mask.size(),dtype=bool)\n pos_train_mask[pos_train_idx[:,0],pos_train_idx[:,1]] = True\n \n pos_test_mask = torch.zeros(pos_mask.size(),dtype=bool)\n pos_test_mask[pos_test_idx[:,0],pos_test_idx[:,1]] = True\n \n neg_train_mask = torch.zeros(neg_mask.size(),dtype=bool)\n neg_train_mask[neg_train_idx[:,0],neg_train_idx[:,1]] = True\n \n neg_test_mask = torch.zeros(neg_mask.size(),dtype=bool)\n neg_test_mask[neg_test_idx[:,0],neg_test_idx[:,1]] = True\n \n test_mask = torch.logical_or(pos_test_mask, neg_test_mask)\n \n print(\"Starting to learn...\")\n S, H, W, b_x, b_y, _, _ = pu_learning(k, x, y, P, \n 
pos_train_mask, neg_train_mask, pos_test_mask, neg_test_mask, \n n_epochs=n_epochs, batch_size=batch_size, lr=lr, \n alpha=alpha, gamma=gamma, activation=activation)\n \n print(\"Evaluating on test set...\")\n auc, pr, acc, _ = eval_test_set(P, S, test_mask)\n \n S_auc += auc\n S_acc += acc\n S_pr += pr\n\n S_sol += S * auc\n\n return(S_sol/S_auc, S_auc/N_folds, S_pr/N_folds, S_acc/N_folds)\n\ndef eval_test_set(P, S, test):\n print(\"Evaluation on the test set...\")\n print(\"Test set statistics:\")\n n_pos = int(P[test].sum().item())\n n_neg = int((1-P[test]).sum().item())\n\n print(\"Number of positive examples:\", n_pos)\n print(\"Number of negative/unlabelled examples:\", n_neg)\n \n auc = compute_auc(P[test],S[test])\n pr = compute_pr_auc(P[test], S[test])\n acc = compute_accuracy(P[test],S[test])\n confusion = compute_confusion_matrix(P[test], S[test])\n \n print(\"\\nROC auc: %f\" % auc)\n print(\"PR auc: %f\" % pr)\n print(\"Accuracy: %f\" % acc)\n print(\"Confusion matrix:\")\n print(confusion)\n \n return(auc,pr,acc,confusion)" } ]
6
tebeka/rs2019-words
https://github.com/tebeka/rs2019-words
aa24557f2697ce28fecb8d1941810d7588a7b9dc
8f26e1c2d6c14a4167ebada3a60f144c426408aa
6e5f7ac5a27c0f811ab516c283125436f23921a8
refs/heads/master
2022-06-26T20:30:54.134796
2022-06-14T06:41:50
2022-06-14T06:41:50
175,226,520
1
0
BSD-3-Clause
2019-03-12T14:16:43
2021-12-26T06:14:27
2022-06-14T06:41:55
Python
[ { "alpha_fraction": 0.6830188632011414, "alphanum_fraction": 0.7283018827438354, "avg_line_length": 13.722222328186035, "blob_id": "f1a1fbb62e5cc9aeabee13d0de78dfa68857d530", "content_id": "ac398db76ef4766a42fd0f75de22839c50cdaf58", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 265, "license_type": "permissive", "max_line_length": 86, "num_lines": 18, "path": "/README.md", "repo_name": "tebeka/rs2019-words", "src_encoding": "UTF-8", "text": "# Reversim 2019 Top Words\n\nA small survey on [Reversim 2019 proposals](https://summit2019.reversim.com/proposals)\n\nResults [here](words.txt)\n\n## Depedencies\n\nSee [requirements.txt](requirements.txt)\n\n## Running\n\n python words.py\n\n\n## Licence\n\n[BSD](LICENSE.txt)\n" }, { "alpha_fraction": 0.6297151446342468, "alphanum_fraction": 0.6420323252677917, "avg_line_length": 23.50943374633789, "blob_id": "01c4c9c37429976de3112dd6fe2ed7054208890c", "content_id": "0b2ba6c65a657267e8507ecb1fac89f47c8093eb", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1299, "license_type": "permissive", "max_line_length": 69, "num_lines": 53, "path": "/words.py", "repo_name": "tebeka/rs2019-words", "src_encoding": "UTF-8", "text": "import bz2\nfrom collections import Counter\n\nimport spacy\nfrom bs4 import BeautifulSoup\nfrom spacy.parts_of_speech import PUNCT, SPACE, SYM\n\n\ndef is_word(tok):\n if tok.is_stop:\n return False\n if tok.pos in {PUNCT, SPACE, SYM}:\n return False\n\n text = tok.text\n if text in stop_words:\n return False\n\n # \"that's\" is tokenized to [\"that\", \"'s\"]\n return text[:1].isalnum()\n\n\ndef top_words(nlp, words, n=20):\n text = '\\n'.join(words)\n doc = nlp(text)\n counts = Counter(tok.text.lower() for tok in doc if is_word(tok))\n total = sum(counts.values())\n for word, count in counts.most_common(40):\n percent = count / total * 100\n 
print(f'{word:<20}{percent:.2f}%')\n\n\nnlp = spacy.load('en')\n# Custom(?) stop words\nstop_words = {'i', 'in', 'talk', 'we', 'the', 'it'}\n\nwith bz2.open('proposals.html.bz2') as fp:\n soup = BeautifulSoup(fp, 'lxml')\n\ntitles = soup('h4')\nprint(f'Total of {len(titles)} submissions')\n\ntitle_words = [t.text.lower() for t in titles]\nprint('\\nTitle words:')\ntop_words(nlp, title_words)\n\nabstracts = soup('div', class_='session__abstract')\nabstract_words = [a.text.lower() for a in abstracts]\nprint('\\nAbstract words:')\ntop_words(nlp, abstract_words)\n\nprint('\\nAll words:')\ntop_words(nlp, title_words + abstract_words)\n" }, { "alpha_fraction": 0.460317462682724, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 14.75, "blob_id": "8ca3362c460059ba6057e362c4cf70c590fcf285", "content_id": "27071f01ad5ecb9e25f1944825710610b4eeccf4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 63, "license_type": "permissive", "max_line_length": 22, "num_lines": 4, "path": "/requirements.txt", "repo_name": "tebeka/rs2019-words", "src_encoding": "UTF-8", "text": "beautifulsoup4 ~= 4.11\nipython ~= 8.4\nspacy ~= 3.3\nlxml ~= 4.9\n" } ]
3
nihar-mukhiya/text-generator
https://github.com/nihar-mukhiya/text-generator
af2a5ecad74d9b94132a8547d94672117394f954
2d34ca11e6e8e34a1891a7fdaf5d2e988dc51714
15959118ecbb6a497806ef426bc2c4739a7ef7eb
refs/heads/master
2020-03-28T08:20:18.200660
2018-09-10T09:55:08
2018-09-10T09:55:08
147,958,582
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.419205904006958, "alphanum_fraction": 0.4302862286567688, "avg_line_length": 12.064934730529785, "blob_id": "8536907f601ed9ae8b9c979a9c232c35961a4766", "content_id": "be18b0ccb0a24c3cd70d9aa7cdfee271c25c62b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1083, "license_type": "no_license", "max_line_length": 48, "num_lines": 77, "path": "/tg.py", "repo_name": "nihar-mukhiya/text-generator", "src_encoding": "UTF-8", "text": "\r\nimport pprint\r\n\r\n\r\n\r\nf = open(\"test.txt\")\r\nmessage = f.read()\r\nlist = message.split()\r\n#print(list)\r\ncounts = {}\r\nprint(list)\r\nstart_at = -1\r\ni = 1\r\nmy_set = set(list)\r\nfor w in my_set:\r\n print(w)\r\n w = {}\r\nprint(w)\r\nfor word in list:\r\n if(word not in counts.keys()):\r\n p = list.index(word, start_at + 1)\r\n start_at = p\r\n p+=1\r\n\r\n\r\n\r\n\r\n else:\r\n if(list.index(word) != (len(list) - 1)):\r\n p =list.index(word, start_at + 1)\r\n\r\n p+=1\r\n else:\r\n p = list.index(word)\r\n\r\n\r\n\r\n m = (list[p])\r\n\r\n w[i] = m\r\n i+=1\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n #counts[word] = []\r\n #counts[word] = p\r\n #print(counts[word])\r\n\"\"\"\r\n if word in counts:\r\n\r\n counts[word] += 1\r\n p = list.index(word, start_at + 1)\r\n start_at = p\r\n\r\n print(p)\r\n else:\r\n\r\n counts[word] = 1\r\n #print(counts)\r\n #p = list.index(word)\r\n #print(p)\r\n print(counts)\r\n\r\n p+=1\r\n print(p)\r\n next = list[p]\r\n print(next)\r\n #pprint.pprint(counts)\r\n\r\n\"\"\"" }, { "alpha_fraction": 0.45306122303009033, "alphanum_fraction": 0.5061224699020386, "avg_line_length": 13.4375, "blob_id": "d204769b195a01d1edc148998b1dbedabed1b6ed", "content_id": "9e23f67f4ffee3464aead1b10299ccb5fe02b56e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 55, "num_lines": 16, "path": "/matmul.py", 
"repo_name": "nihar-mukhiya/text-generator", "src_encoding": "UTF-8", "text": "\"\"\"\r\n\r\nthis program displays probability\r\n\r\n\r\nimport numpy as np\r\na = [0.6, 0.4]\r\n\r\nb = [[0.7, 0.3], [0.6, 0.4]]\r\nn = int(input(\"prediction of how many ahead u want: \"))\r\nwhile(n):\r\n x = np.matmul(a, b)\r\n a = x\r\n n-=1\r\n print(x)\r\n\"\"\"" } ]
2
beallio/media-server-status
https://github.com/beallio/media-server-status
6c23ba34c5b81f291f20a803db0e3e5d9ba249fa
50936dd807cc3bccf40f6521cf00c35413bf6ed2
49a664d37799db9d9e93208939afa9d19632ef1b
refs/heads/master
2021-01-13T02:37:23.340474
2015-01-04T07:15:47
2015-01-04T07:15:47
24,974,710
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5072992444038391, "alphanum_fraction": 0.6970803141593933, "avg_line_length": 15.606060981750488, "blob_id": "00bfae7cfef839c25e4111d932eb99e7d1dfacd7", "content_id": "e570a145f75e6306ec40d9083497f68d6bd8992b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 548, "license_type": "no_license", "max_line_length": 24, "num_lines": 33, "path": "/requirements.txt", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "Babel==1.3\nFlask==0.10.1\nFlask-Assets==0.10\nFlask-Babel==0.9\nFlask-Images==2.0.0\nFlask-Mail==0.9.0\nFlask-OpenID==1.2.1\nFlask-SQLAlchemy==2.0\nFlask-WTF==0.10.2\nJinja2==2.7.3\nMarkupSafe==0.23\nPIL==1.1.7\nPillow==2.5.3\nSQLAlchemy==0.9.7\nWTForms==2.0.1\nWerkzeug==0.9.6\nargparse==1.2.1\nblinker==1.3\ncertifi==14.05.14\nflup==1.0.2\ngunicorn==19.1.1\nitsdangerous==0.24\npillowcase==2.0.0\npsutil==2.1.1\npy-sonic==0.2.2\npython-forecastio==1.2.1\npython-openid==2.2.5\npytz==2014.7\nrequests==2.4.0\nspeaklater==1.3\nwebassets==0.10.1\nwsgiref==0.1.2\nxmltodict==0.9.0\n" }, { "alpha_fraction": 0.7245013117790222, "alphanum_fraction": 0.7467067837715149, "avg_line_length": 30.270587921142578, "blob_id": "f3173e37dfc481dd141b1f9bf3ea860682c0a389", "content_id": "de367bcfd765680edace5aeb1d0f222e7feab7df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2657, "license_type": "no_license", "max_line_length": 161, "num_lines": 85, "path": "/README.md", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "# Server Status #\n\n## Screenshots ##\n\n#### Now Playing ####\n![nowplaying](https://bytebucket.org/beallio/media-server-dashboard/raw/3ddb5482ea252ef402eb5d65f90514fac8cb94fa/serverstatus/docs/now_playing_screenshot.png)\n\n\n#### Recently Added 
####\n![nowplaying](https://bytebucket.org/beallio/media-server-dashboard/raw/3ddb5482ea252ef402eb5d65f90514fac8cb94fa/serverstatus/docs/recently_added_screenshot.png)\n\n\n## Introduction ##\nServer status dashboard written in python 2.7, using flask on the backend, and bootstrap and jQuery for design.\n\n\n## Setup ##\n\nInstallation instructions are for a Debian-based distro. You will need to adjust accordingly to your linux-based setup.\n\nRun the following commands (assuming the following is not installed on your system)\n\n sudo apt-get update && sudo apt-get -y upgrade\n sudo apt-get install python-dev libjpeg-dev zlib1g-dev libpng12-dev pip virtualenv virtualenvwrapper git\n\nNote the image libraries are in support of Pillow\n\nNow to setup the folder to contain the app and virtual environment\n\n cd mkdir /var/www/serverstatus\n mkvirtualenv venv\n \nClone the repository to your system\n\n sudo git clone https://bitbucket.org/beallio/media-server-dashboard.git\n\n\nInstall additional python requirements in virtual environment\n\n pip install -r requirements.txt\n\nSetup config file\n \n vim config.py\n \nMove setup file outside of root app directory (by default the app assumes the location is \"/var\". \nYou'll need to adjust the import in serverstatus/__init__.py if you place it elsewhere).\n\n sudo mv config.py /var/config.py\n \nChange permissions the user that will run gunicorn and the WSGI (e.g. $APPUSER/$APPGROUP)\n\n sudo chown $APPUSER:$APPGROUP /var/config.py\n\nRun test server to ensure repository and python requirements installed correctly\n\n ./__init__.py\n\n\n### Gunicorn on Apache ### \n\n sudo -u $USER gunicorn wsgi:application -b $INTERNAL_IP:$PORT --workers=5\n\n$USER = user dedicated to running application (e.g. 'www', 'status', 'flask')\n\n$INTERNAL_IP = Internal IP address of your server (e.g. 
'10.0.10.1')\n\n$PORT = internal port Gunicorn will run on\n\n\n### Apache Configuration Edits for Proxying ###\n\n ProxyPass /$SUBPATH http://$INTERNAL_IP:$PORT\n\n ProxyPassReverse /$SUBPATH http://$INTERNAL_IP:$PORT\n\nEnsure Apache module [\"mod_proxy\"](http://httpd.apache.org/docs/2.2/mod/mod_proxy.html) is installed \n\nFor security it's recommended you move \"config.py\" from a web accessible directory \nto a protected directory, ensuring $USER has read privileges\n\n\n\n### Acknowledgement ###\nLast, but not least, kudos to this [person](http://d4rk.co/) for the inspiration." }, { "alpha_fraction": 0.48670491576194763, "alphanum_fraction": 0.4942648708820343, "avg_line_length": 30.442623138427734, "blob_id": "9b09ba0289bd7ce056d105f6839d3eea19a5bbdb", "content_id": "1e33c2fbcc3db14ea5a41f300f5e0874efb7f5ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3836, "license_type": "no_license", "max_line_length": 82, "num_lines": 122, "path": "/serverstatus/static/js/functions.js", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "var api_base_url = window.location.href + \"api/\";\nvar html_base_url = window.location.href + \"html/\";\n\n// Auto refresh elements\n(function ($) {\n $(document).ready(function () {\n $.ajaxSetup(\n {\n cache: false\n });\n\n var $systeminfo = $(\".system-info\");\n var $storage = $(\".storage\");\n var $weather = $(\".weather\");\n var $services = $(\".services\");\n var $media = $(\".media\");\n\n\n function load_systeminfo() {\n $systeminfo.load(\"html/system_info\", function () {\n var $transcodes = $(\".transcodes\");\n $transcodes.load(\"html/plex_transcodes\");\n });\n\n }\n\n var load_storage = $storage.load(\"html/storage\");\n var load_services = $services.load(\"html/services\");\n var load_weather = $weather.load(\"html/forecast\");\n var load_media = $media.load(\"html/media\");\n\n function get_server_ip() {\n 
$.getJSON(api_base_url + \"ip_address\", function (data) {\n $(\"#server_ip\").text(data.wan_ip);\n });\n }\n\n function get_client_ip() {\n $.getJSON('http://api.hostip.info/get_json.php', function (data) {\n $(\"#client_ip\").text(data.ip);\n });\n }\n\n // FUNCTIONS TO UPDATE NETWORK SPEED AND PING\n function update_ping() {\n $.getJSON(api_base_url + \"ping\", function (data) {\n $(\"#ping\").text(data.ping + \" ms\");\n });\n }\n\n function update_network_speed() {\n var $downspeed = $(\"#download\");\n var $downspeed_progressbar = $(\"#progress-bar-down\");\n var $upspeed = $(\"#upload\");\n var $upspeed_progressbar = $(\"#progress-bar-up\");\n $.getJSON(api_base_url + \"network_speed\", function (data) {\n var up = data.up.toFixed(2);\n var down = data.down.toFixed(2);\n $downspeed.text(down + \" Mbps\");\n $upspeed.text(up + \" Mbps\");\n var down_progressbar_width = down * (10 / 6);\n var up_progressbar_width = up * (10 / 6);\n $downspeed_progressbar.css(\"width\", down_progressbar_width + \"%\");\n $upspeed_progressbar.css(\"width\", up_progressbar_width + \"%\");\n });\n };\n\n // END FUNCTIONS TO UPDATE NETWORK SPEED AND PING\n\n function on_local_network(url) {\n var client_ip = getJson('http://api.hostip.info/get_json.php').ip;\n var server_ip = getJson(api_base_url + \"ip_address\").wan_ip;\n console.log(client_ip, server_ip, client_ip === server_ip);\n return client_ip === server_ip;\n };\n\n // Load at start of page\n load_systeminfo();\n load_storage;\n load_services;\n load_weather;\n load_media;\n\n update_network_speed();\n update_ping();\n //get_server_ip();\n //get_client_ip();\n\n // Refresh every 30 seconds\n var refreshId = setInterval(function () {\n load_systeminfo();\n }, 30000);\n\n // Refresh every 1 minute\n var refreshId = setInterval(function () {\n update_network_speed();\n update_ping();\n\n load_media;\n }, 60000);\n\n // Refresh every 10 minutes\n var refreshId = setInterval(function () {\n //get_server_ip();\n 
//get_client_ip();\n load_storage;\n load_weather;\n load_services;\n }, 600000);\n });\n\n\n // Enable bootstrap tooltips\n $(function () {\n $(\"[rel=tooltip]\").tooltip();\n $(\"[rel=popover]\").popover();\n });\n\n $(document).ready(function () {\n $(\"body\").tooltip({ selector: '[data-toggle=tooltip]' });\n });\n})(jQuery);\n" }, { "alpha_fraction": 0.6516736149787903, "alphanum_fraction": 0.6516736149787903, "avg_line_length": 14.949999809265137, "blob_id": "15cb92fce1b380048fc243c0b5c3da97243eeb6c", "content_id": "0ff5cbb5b54c1ee06c077732ecc9939856e2cab5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 956, "license_type": "no_license", "max_line_length": 69, "num_lines": 60, "path": "/serverstatus/assets/exceptions.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "\"\"\"\nCustom exceptions for managing different servers\n\"\"\"\n\n\nclass MissingConfigFile(Exception):\n \"\"\"\n Config file not found\n \"\"\"\n pass\n\n\nclass MissingForecastIOKey(Exception):\n \"\"\"\n No Forecast.IO API key found\n \"\"\"\n pass\n\n\nclass PlexAPIKeyNotFound(Exception):\n \"\"\"\n No Plex API key found\n \"\"\"\n pass\n\n\nclass MissingConfigValue(Exception):\n \"\"\"\n General exception catch all for missing config values\n \"\"\"\n pass\n\n\nclass PlexConnectionError(Exception):\n \"\"\"\n Error connecting to specified Plex server\n \"\"\"\n pass\n\n\nclass PlexAPIDataError(Exception):\n \"\"\"\n Plex returned malformed data, or data in a format unfamiliar with\n (perhaps an API change)\n \"\"\"\n pass\n\n\nclass PlexImageError(Exception):\n \"\"\"\n Error retrieving image cover from Plex server\n \"\"\"\n pass\n\n\nclass SubsonicConnectionError(Exception):\n \"\"\"\n Error connection to specified Subsonic server\n \"\"\"\n pass" }, { "alpha_fraction": 0.5866013169288635, "alphanum_fraction": 0.6096950173377991, "avg_line_length": 38.068084716796875, "blob_id": 
"e3b79c02c351ce456c46bec4909aeec5004dd883", "content_id": "598d806dc4a8cdba0c1da972d5768451c2880977", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9180, "license_type": "no_license", "max_line_length": 116, "num_lines": 235, "path": "/serverstatus/assets/sysinfo.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "import datetime\nimport subprocess\nimport os\nimport time\nimport urllib2\nfrom collections import OrderedDict\nfrom math import floor, log\nimport logging\n\nimport psutil\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef convert_bytes(value, unit, output_str=False, decimals=2, auto_determine=False):\n \"\"\"\n\n :param value: int\n :param unit: str\n :param output_str: bool\n :param decimals: int\n :param auto_determine: bool\n :return: str or int or float\n \"\"\"\n assert any([type(value) == int, type(value) == float, type(value) is long])\n assert all([type(decimals) is int, type(output_str) is bool, type(auto_determine) is bool, value >= 0])\n conversions = dict(B=0, KB=1, MB=2, GB=3, TB=4, PB=5, EB=6, ZB=7, YB=8)\n assert unit in conversions\n base = 1024.0\n converted_value = float(value) / base ** conversions[unit]\n if auto_determine and value > 0:\n # Generate automatic prefix by bytes\n base_power = floor(log(float(value)) / log(base))\n swap_conversion_values = {conversions[x]: x for x in conversions}\n while base_power not in swap_conversion_values:\n # future proofing. 
Not really necessary.\n base_power -= base_power\n unit = swap_conversion_values[base_power]\n converted_value = value / base ** conversions[unit]\n if output_str:\n if decimals < 0:\n decimals = 0\n return '{:,.{decimal}f} {unit}'.format(converted_value, decimal=decimals, unit=unit)\n else:\n return converted_value\n\n\ndef get_wan_ip(site='http://myip.dnsdynamic.org/'):\n return urllib2.urlopen(site).read()\n\n\ndef get_partitions(partitions=None):\n if partitions is None:\n partitions = psutil.disk_partitions(all=True)\n return {p[1]: psutil.disk_usage(p[1]) for p in partitions if p[0] != 0}\n\n\ndef get_ping(host=\"8.8.8.8\", kind='avg', num=4):\n # solution from http://stackoverflow.com/questions/316866/ping-a-site-in-python\n \"\"\"\n returns ping time to selected site\n host: site, ip address to ping\n kind:\n num: number of pings to host\n\n :param host: string\n :param kind: string\n :param num: int\n :return: float\n \"\"\"\n assert kind in ['max', 'avg', 'mdev', 'min']\n assert type(int(num)) is int\n ping = subprocess.Popen([\"ping\", \"-c\", str(num), host], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, error = ping.communicate()\n out = out.split('\\n')\n try:\n out = [x for x in out if x.startswith('rtt')][0]\n out_mapped = zip(out.split()[1].split('/'), out.split()[3].split('/'))\n out_mapped = {x[0]: x[1] for x in out_mapped}\n out = out_mapped[kind]\n except IndexError:\n # most likely no ping returned, system offline\n out = 0\n return float(out)\n\n\ndef get_system_uptime():\n def append_type(x, kind):\n \"\"\"\n Return 0 if days/hours/minutes equals 0 otherwise append correct plural \"s\" to type\n ex. 
if systems up for 2 hours, returns \"2 hours\" likewise return \"1 hour\" if system has been up for 1 hour\n \"\"\"\n assert type(x) is int and type(kind) is str\n if x == 0:\n return x\n else:\n return '{} {}'.format(str(x), kind + 's' if x != 1 else kind)\n\n boot_time = datetime.datetime.fromtimestamp(psutil.boot_time()).replace(microsecond=0)\n time_now = datetime.datetime.now().replace(microsecond=0)\n delta = time_now - boot_time\n formatted_time = str(delta).split(',')\n try:\n # System's been up a day or more\n hours = formatted_time[1].strip().split(':')\n except IndexError:\n # System's been up for less than day\n hours = formatted_time[0].strip().split(':')\n formatted_time[0] = 0\n hours.pop(2)\n hours, mins = [int(hour) for hour in hours]\n formatted_time = dict(days=formatted_time[0], hours=append_type(hours, 'hour'), min=append_type(mins, 'minute'))\n output = dict(\n boottime=boot_time,\n uptime=delta,\n uptime_formatted=formatted_time)\n return output\n\n\ndef return_network_io():\n \"\"\"\n returns Network bytes sent, and received\n :rtype : list\n \"\"\"\n network_io = psutil.net_io_counters()\n return [network_io.bytes_sent, network_io.bytes_recv]\n\n\ndef get_network_speed(sleep=1):\n assert type(sleep) is int\n start_time = datetime.datetime.now()\n start_data = return_network_io()\n time.sleep(sleep)\n time_delta = datetime.datetime.now() - start_time\n end_data = return_network_io()\n bits = 8\n return dict(up=convert_bytes((end_data[0] - start_data[0]) / time_delta.seconds * bits, 'MB'),\n down=convert_bytes((end_data[1] - start_data[1]) / time_delta.seconds * bits, 'MB'))\n\n\ndef get_total_system_space(digits=1):\n \"\"\"\n returns total system disk space formatted, ex.\n {'total': '8,781.9 GB', 'used': '3,023.0 GB', 'pct': 34.4, 'free': '5,313.4 GB'}\n\n :rtype : dict\n :param digits: int\n :return: dict\n \"\"\"\n assert type(digits) is int\n all_partitions = psutil.disk_partitions(all=True)\n # limit total disk space to those paritions 
mounted in \"/dev/\"\n partitions_to_keep = [partition for partition in all_partitions if partition.device.startswith('/dev/')]\n partitions = get_partitions(partitions_to_keep)\n disk_space = dict(total=sum([partitions[partition].total for partition in partitions]),\n used=sum([partitions[partition].used for partition in partitions]),\n free=sum([partitions[partition].free for partition in partitions]))\n disk_space_formatted = {k: convert_bytes(disk_space[k], 'GB', True, digits, True) for k in disk_space}\n disk_space_formatted['pct'] = round(float(disk_space['used']) / float(disk_space['total']) * 100.0, digits)\n return disk_space_formatted\n\n\ndef get_partitions_space(partitions, digits=1, sort='alpha'):\n \"\"\"\n {'Home': {'total': '168.8 GB', 'pct': 44.4, 'free': '85.3 GB', 'used': '74.9 GB'},\n 'Incoming': {'total': '293.3 GB', 'pct': 48.2, 'free': '137.0 GB', 'used': '141.4 GB'}}\n :param partitions:\n :param digits:\n :return:\n \"\"\"\n assert type(partitions) is dict\n system_partitions = get_partitions()\n # return disk space for each partition listed in config\n # test if listed partition actually exists in system first to avoid throwing an error\n disk_space = {p: system_partitions[partitions[p]] for p in partitions if partitions[p] in system_partitions}\n disk_space_formatted = {p: dict(total=convert_bytes(disk_space[p].total, 'GB', True, digits, True),\n used=convert_bytes(disk_space[p].used, 'GB', True, digits, True),\n free=convert_bytes(disk_space[p].free, 'GB', True, digits, True)) for p in\n disk_space}\n for p in disk_space:\n disk_space_formatted[p]['pct'] = round(float(disk_space[p].used) / float(disk_space[p].total) * 100.0,\n digits)\n if sort.lower() == 'alpha':\n # place in ordered dictionary so paths always display in alphabetical order on page\n disk_space_formatted = OrderedDict(sorted(disk_space_formatted.items(), key=lambda x: x[0]))\n return disk_space_formatted\n\n\ndef get_load_average():\n os_averages = os.getloadavg()\n 
cpu_count = psutil.cpu_count()\n final_averages = [average / cpu_count for average in os_averages]\n return final_averages\n\n\nclass GetSystemInfo(object):\n def __init__(self):\n pass\n\n def get_info(self):\n \"\"\"\n Returns system information in a dictionary\n mem_total: Total RAM in the system in megabytes as float, ex. \"7876.88671875\"\n mem_available: Unused RAM in the system in megabytes as float, ex. \"4623.8671875\"\n mem_used_pct: mem_available / mem_total as float, ex. \"41.3\"\n load_avg: tuple of avg loads at 1 min, 5 min, and 15 min, respectively, ex. \"(0.52, 0.51, 0.43)\"\n partitions: dictionary of partitions on system, truncated ex, {''/mnt/Entertainment'':\n sdiskusage(total=56955559936, used=15403667456, free=38635122688, percent=27.0)}\n uptime_formatted: dictionary of uptime split in days, hours, min, ex.\n {'hours': '2 hours', 'days': '6 days', 'min': '26 minutes'}\n :return: dict\n \"\"\"\n mem_info = psutil.virtual_memory()\n system_uptime = get_system_uptime()\n load_avg = get_load_average()\n return dict(mem_total=convert_bytes(mem_info[0], 'MB'),\n mem_available=convert_bytes(mem_info[1], 'MB'),\n mem_used_pct=mem_info[2],\n mem_bars=self._memory_bars(mem_info[2]),\n load_avg=load_avg,\n uptime_formatted=system_uptime['uptime_formatted'])\n\n @staticmethod\n def _memory_bars(val_pct):\n mid = 50\n upper = 80\n ret = dict(xmin=min(val_pct, mid),\n xmid=min(val_pct - mid, upper - mid),\n xmax=min(val_pct - upper, 100 - upper))\n return {k: max(ret[k], 0) for k in ret}\n\n\nif __name__ == '__main__':\n pass" }, { "alpha_fraction": 0.5777337551116943, "alphanum_fraction": 0.5846654176712036, "avg_line_length": 33.149757385253906, "blob_id": "fb315c2adfa750709b46f81628d5f8fc4f119d6d", "content_id": "78361ff94d37ff8962d209a246658a039ed0094c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7069, "license_type": "no_license", "max_line_length": 87, "num_lines": 207, "path": 
"/serverstatus/views.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "\"\"\"\nRouting file for flask app\nHandles routing for requests\n\"\"\"\n\nimport json\nimport datetime\n\nfrom flask import render_template, Response, request\n\nfrom serverstatus import app\nfrom assets import apifunctions\n\n\[email protected]('/')\[email protected]('/index')\ndef index():\n \"\"\"\n Base index view at \"http://www.example.com/\"\n \"\"\"\n start_time = datetime.datetime.now()\n return render_template('index.html',\n title=app.config['WEBSITE_TITLE'],\n time=datetime.datetime.now() - start_time,\n testing=app.config['TESTING'])\n\n\[email protected]('/api/<data>', methods=['GET'])\ndef get_json(data):\n \"\"\"\n Returns API data data based on \"http://www.example.com/api/<data>\" \n call where <data> is function is a function in the APIFunction \n class in the apifunctions module. \n Returns data in JSON format.\n \"\"\"\n values, status = BACKENDCALLS.get_data(data)\n json_data = json.dumps(values)\n # set mimetype to prevent client side manipulation since we're not using\n # jsonify\n return Response(json_data, status=status, mimetype='application/json')\n\n\[email protected]('/html/<data>')\ndef html_generator(data):\n \"\"\"\n Returns html rendered jinja templates based on \"http://www.example.com/html/<data>\"\n call where <data> is a jinja template in the \"templates\" directory.\n Returns rendered html in plain text to client, so we use this data to \n load divs via jQuery on the client side\n \"\"\"\n values, status = BACKENDCALLS.get_data(data)\n start = datetime.datetime.now()\n rendered_html = render_template(data + '.html', values=values)\n app.logger.debug(\n 'Render time for {}: {}'.format(data, datetime.datetime.now() - start))\n # set mimetype to prevent users browser from rendering rendered HTML\n return Response(rendered_html, status=status, mimetype='text/plain')\n\n\[email protected]('/img/<data>')\ndef 
get_img_data(data):\n \"\"\"\n Returns image to client based on \"http://www.example.com/img/<data>\"\n request where <data> is a flask request such as \n \"http://www.example.com/img/subsonic?cover=28102\"\n \"\"\"\n start = datetime.datetime.now()\n resp = BACKENDCALLS.get_image_data(request)\n app.logger.debug('Image request time for {}: {}'\n .format(data, datetime.datetime.now() - start))\n return resp\n\n\nclass BackEndCalls(object):\n \"\"\"\n Provides access points into the API Functions of the backend.\n Also loads API configs to remedy issues where the config hasn't been\n loaded for a particular server.\n\n Provides access for images requests to Plex and Subsonic\n \"\"\"\n\n def __init__(self):\n self.api_functions = None\n self.api_functions = self.get_api_functions()\n\n def get_api_functions(self):\n \"\"\"\n Provides access to API Functions module through class\n :return: API_Functions\n \"\"\"\n self._load_apis()\n return self.api_functions\n\n def get_data(self, data):\n \"\"\"\n From flask request at http://servername.com/api/{api_call} fetches\n {api_call} from apifunctions module, and returns data.\n\n Disallows public access to any function in apifunctions starting with\n \"_\" (underscore)\n :type data: unicode or LocalProxy\n :return:\n \"\"\"\n values = None\n status = 404\n values = getattr(self.api_functions, str(data).lstrip('_'))()\n status = 200\n \"\"\"\n try:\n values = getattr(self.api_functions, str(data).lstrip('_'))()\n status = 200\n except (AttributeError, TypeError) as err:\n app.logger.error(err)\n # no api function for call, return empty json\n except:\n app.logger.error('An unknown error occurred')\"\"\"\n return values, status\n\n def get_image_data(self, flask_request):\n \"\"\"\n Parses flask request from\n http://servername.com/img/{plex | subsonic}?14569852 where\n {plex|subsonic} is the server requested. 
Routes request to appropriate\n server to get thumbnail image data\n\n :type flask_request: werkzeug.local.Request\n :return:\n \"\"\"\n\n def parse_request(request_args):\n parsed_values = dict()\n for arg in request_args:\n if request_args[arg] == '':\n parsed_values['plex_id'] = arg\n continue\n try:\n parsed_values[arg] = bool(request_args[arg])\n except ValueError:\n parsed_values[arg] = request_args[arg]\n return parsed_values\n\n resp = Response('null', status=404, mimetype='text/plain')\n # convert to string since flask requests returns unicode\n data_low = str(flask_request.view_args.get('data', None).lower())\n if data_low == 'plex':\n args = parse_request(flask_request.args)\n resp = Response(\n self.api_functions._get_plex_cover_art(args), status=200,\n mimetype='image/jpeg')\n elif data_low == 'subsonic':\n resp = Response(self._check_subsonic_request(flask_request),\n status=200,\n mimetype='image/jpeg')\n return resp\n\n def _load_apis(self):\n \"\"\"\n Check if api_functions is set, set if not.\n\n :return:\n \"\"\"\n if self.api_functions is None:\n self.api_functions = apifunctions.APIFunctions(app.config)\n\n def _check_subsonic_request(self, request_args):\n \"\"\"\n Parses flask request to determine parameters for requesting cover art\n from Subsonic server\n\n Parameters\n ----------\n request_args : flask.Request.args\n Description of parameter `request_args`.\n \"\"\"\n query_string = request_args.query_string\n args = request_args.args\n try:\n # check if only cover id was submitted\n # e.g. /img/subsonic?28102\n cover_id = int(query_string)\n cover_size = None\n except ValueError:\n try:\n # check if cover id is included in request_args\n # e.g. /img/subsonic?cover=28102\n cover_id = args['cover']\n except KeyError:\n # we need a cover to look up\n raise\n try:\n # check if cover size is included in request_args\n # e.g. 
/img/subsonic?cover=28102&size=145\n cover_size = args['size']\n try:\n # check if cover size is an integer\n cover_size = int(cover_size)\n except ValueError:\n # incorrect cover size requested\n cover_size = None\n except KeyError:\n # cover size not included in request_args\n cover_size = None\n return self.api_functions._get_subsonic_cover_art(cover_id, cover_size)\n\n\nBACKENDCALLS = BackEndCalls()\n" }, { "alpha_fraction": 0.6527777910232544, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 23.08333396911621, "blob_id": "c934c507496df510fc29a9e1b7d12f68b7359168", "content_id": "637e6b8d9639a50f6ca139041aad5c3a1aa66dce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 78, "num_lines": 12, "path": "/__init__.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"\nMain initializing file. If called from command line starts WSGI debug testing\nserver\n\"\"\"\n\nif __name__ == '__main__':\n from serverstatus import app\n\n app.config.update(DEBUG=True, TESTING=True)\n app.run(host='0.0.0.0')\n print 'Test server running...'" }, { "alpha_fraction": 0.5970109701156616, "alphanum_fraction": 0.5996797680854797, "avg_line_length": 33.063636779785156, "blob_id": "bc8fed43354f05bfed25c31e3b677957ef5b2f14", "content_id": "d6154379f7b27c5cb27798d22fc103589208ae7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3747, "license_type": "no_license", "max_line_length": 78, "num_lines": 110, "path": "/serverstatus/__init__.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "\"\"\"\nInitialize and setup flask app\n\"\"\"\n\nimport imp\nimport os\nimport logging\nimport logging.handlers as handlers\n\nfrom flask import Flask\n\n\napp = Flask(__name__)\n\n# update config for flask app\napp.config.update(\n 
APPNAME='server_status',\n LOGGINGMODE=logging.DEBUG,\n APPLOCATION=os.path.join(os.path.dirname(os.path.dirname(\n os.path.realpath(__file__)))),\n LOG_LOCATION='/tmp',\n TEMP_LOCATION='/tmp',\n CONFIG_LOCATION='/var/config.py')\napp.config['TEMP_IMAGES'] = os.path.join(app.config['TEMP_LOCATION'],\n 'flask-images')\napp.config['APP_MODULESLOCATION'] = os.path.join(app.config['APPLOCATION'],\n 'serverstatus')\n\nimport views\nimport assets\nfrom assets.exceptions import MissingConfigFile\nfrom assets.services import SubSonic\n\n\ndef _setup_logger():\n \"\"\"\n Setup application logging object\n\n :return: logging object\n \"\"\"\n mod_logger = None\n # use dir name thrice to return to base module path\n log_directory = app.config.get('LOG_LOCATION', None)\n log_location = os.path.join(log_directory,\n '_'.join([app.config['APPNAME'], '.log']))\n if not os.path.isdir(log_directory):\n try:\n os.mkdir(log_directory)\n except IOError:\n pass\n if os.path.isdir(log_directory):\n file_handler = handlers.RotatingFileHandler(filename=log_location,\n maxBytes=3145728)\n formatter = logging.Formatter(\n '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n \"%Y-%m-%d %H:%M:%S\")\n file_handler.setFormatter(formatter)\n logging.getLogger('').addHandler(file_handler)\n mod_logger = logging.getLogger(__name__)\n mod_logger.setLevel(app.config['LOGGINGMODE'])\n mod_logger.debug('LOGGER initialized at {}'.format(log_location))\n return mod_logger\n\n\ndef _load_config_file(mod_logger=None):\n def gen_contents(config_data):\n # list module contents\n \"\"\"\n Generator to return modules from config file\n :type config_data: __builtin__.module\n \"\"\"\n mods = dir(config_data)\n for config_attrib in mods:\n # exclude objects that aren't our data\n if not config_attrib.startswith('__'):\n # create dict object since flask app config only accepts dicts\n # on updates\n config_value = getattr(config_data, config_attrib)\n if config_attrib in app.config:\n mod_logger.warning(\n 
'Overwriting existing config value {} with {}'.format(\n config_attrib, config_value))\n result = {config_attrib: config_value}\n yield result\n\n # import config file\n config_location = app.config.get('CONFIG_LOCATION', None)\n try:\n config_data_file = imp.load_source('config', config_location)\n for data in gen_contents(config_data_file):\n app.config.update(data)\n if mod_logger:\n mod_logger.info(\n 'Config file loaded from {}'.format(config_location))\n except IOError as e:\n errs = dict(err=e.strerror, dir_location=config_location)\n logger_msg = ('{err}: Configuration file could not be found at '\n '\"{dir_location}\"').format(**errs)\n mod_logger.critical(logger_msg)\n raise MissingConfigFile(logger_msg)\n\n\nlogger = _setup_logger() # initialize LOGGER\n\n# import config data from config file into flask app object\n_load_config_file(logger)\n\n# remove initialization functions from namespace\ndel _load_config_file\ndel _setup_logger\n" }, { "alpha_fraction": 0.6148949861526489, "alphanum_fraction": 0.6518141031265259, "avg_line_length": 26.068965911865234, "blob_id": "f3e46170b51b636549f3b9e3e4a037c71b6ad42c", "content_id": "abbb507fc628673b5d51c7d2f7aa052eeb3de0e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1571, "license_type": "no_license", "max_line_length": 79, "num_lines": 58, "path": "/config.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "\"\"\"\nInternal configuration file for Server Status app\nChange the values according to your own server setup.\n\nBy default, the app is set to initialize the config file from \"/var/config.py\"\nIf you wish to change the location you'll need to change the location of the\nfile in serverstatus/__init__.py\n\nFor ForecastIO you'll need to go to https://developer.forecast.io/ and sign up\nfor an API key (at the time of writing the first 1,000 calls/day to the API are\nfree.\n\n\n\"\"\"\n\nSUBSONIC_INFO = dict(\n 
url='http://192.168.0.1',\n serverpath='/rest',\n port=4040,\n user='user',\n password='password',\n api=1.8,\n appname='py-sonic',\n external_url='http://www.example.com/subsonic'\n)\n\nPLEX_INFO = dict(\n external_url='http://www.example.com/plex',\n internal_url='http://192.168.0.1',\n internal_port=32400,\n user='user',\n password='password',\n auth_token='AUTH_TOKEN',\n local_network_auth=False\n)\n\nSERVERSYNC_INFO = dict(\n lockfile_path='/tmp/server_sync.lockfile')\n\nCRASHPLAN_INFO = dict(\n logfile_path='/usr/local/crashplan/log/app.log')\n\nPARTITIONS = dict(Partition_Name_1='/mnt/partition1',\n Partition_Name_2='/mnt/partition2',\n Partition_Name_3='/mnt/partition3',\n Root='/',\n Home='/home')\n\nINTERNAL_IP = 'http://192.168.0.1'\nWEATHER = dict(\n Forecast_io_API_key='FORECASTIOKEY',\n Latitude=37.8030,\n Longitude=-122.4360,\n units='us')\nSERVER_URL = 'http://www.example.com'\nDEBUG = False\nSECRET_KEY = 'my secret'\nWEBSITE_TITLE = 'Your title here'\n\n" }, { "alpha_fraction": 0.6029059290885925, "alphanum_fraction": 0.6035446524620056, "avg_line_length": 30.315000534057617, "blob_id": "143b48c1fca147ea94faed0f1302431eb3930787", "content_id": "1f8e07a9c04ee7de49b6f32e34ce9c5a839d9140", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6263, "license_type": "no_license", "max_line_length": 80, "num_lines": 200, "path": "/serverstatus/assets/apifunctions.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "\"\"\"\nServes as backend for returning information about server to jQuery and Jinja\ntemplates. 
Data is returned in the form of dicts to mimic JSON formatting.\n\"\"\"\nfrom collections import OrderedDict\nimport logging\n\nfrom serverstatus.assets.weather import Forecast\nfrom serverstatus.assets.services import CheckCrashPlan, ServerSync, Plex, \\\n SubSonic\nfrom serverstatus.assets.sysinfo import GetSystemInfo, get_network_speed, \\\n get_ping, get_wan_ip, get_partitions_space, get_total_system_space\nimport serverstatus.assets.wrappers as wrappers\n\n\nclass APIFunctions(object):\n \"\"\"\n Serves as backend for returning information about server to jQuery and Jinja\n templates. Data is returned in the form of dicts to mimic JSON formatting.\n\n Any function within the APIFunctions class maybe called externally as long\n as the function does not start with \"_\". For example, a user/website may\n return data from http://foobar.com/api/system_info but not\n http://foobar.com/api/_get_plex_cover_art\n\n Examples to return data:\n To return system info:\n http://foobar.com/api/system_info\n\n To return network speed:\n http://foobar.com/api/network_speed\n \"\"\"\n def __init__(self, config):\n self.logger = LOGGER\n LOGGER.debug('{} initialized'.format(__name__))\n self.config = config\n self.subsonic = None\n self.plex = None\n self.server_sync = None\n self.crashplan = None\n self.weather = None\n\n @staticmethod\n @wrappers.logger('debug')\n def system_info():\n \"\"\"\n Returns data for system info section (memory, load, uptime)\n\n :return: dict\n \"\"\"\n get_system_info = GetSystemInfo()\n output = get_system_info.get_info()\n return output\n\n @staticmethod\n @wrappers.logger('debug')\n def network_speed():\n \"\"\"\n Returns server network speed. 
Sleep defines the length of time in\n between polling for network IO data to calculate speed based off delta\n\n :return: dict\n \"\"\"\n return get_network_speed(sleep=5)\n\n @staticmethod\n @wrappers.logger('debug')\n def ping():\n \"\"\"\n Returns ping from Google DNS (default)\n\n :return: dict\n \"\"\"\n return dict(ping='{:.0f}'.format(get_ping()))\n\n @wrappers.logger('debug')\n def storage(self):\n \"\"\"\n Returns formatted storage data based off options selected in Config file\n\n :return: dict\n \"\"\"\n paths = get_partitions_space(self.config['PARTITIONS'])\n return dict(total=get_total_system_space(), paths=paths)\n\n @wrappers.logger('debug')\n def ip_address(self):\n \"\"\"\n Returns servers internal and external IP addresses\n\n :return: dict\n \"\"\"\n return dict(wan_ip=get_wan_ip(), internal_ip=self.config['INTERNAL_IP'])\n\n @wrappers.logger('debug')\n def services(self):\n \"\"\"\n Returns sorted status mappings for servers listed in config file\n :return: dict\n \"\"\"\n self._load_configs()\n servers = [self.plex, self.subsonic, self.server_sync, self.crashplan]\n servers_mapped = [getattr(server, 'status_mapping') for server in\n servers]\n servers_dict = OrderedDict()\n for server in servers_mapped:\n servers_dict = OrderedDict(servers_dict.items() + server.items())\n return servers_dict\n\n @wrappers.logger('debug')\n def media(self):\n \"\"\"\n Returns now playing data for Plex and Subsonic (if any), and recently\n added items for both\n\n :return: dict\n \"\"\"\n self._load_configs()\n subsonic = self.subsonic\n plex = self.plex\n return dict(\n subsonic_nowplaying=subsonic.now_playing(),\n plex_nowplaying=plex.now_playing(),\n subsonic_recentlyadded=subsonic.recently_added(num_results=6),\n plex_recentlyadded=plex.recently_added(num_results=6))\n\n @wrappers.logger('debug')\n def forecast(self):\n \"\"\"\n Gets forecast data from forecast.io\n\n :return: dict\n \"\"\"\n self._load_configs()\n self.weather.reload_data()\n return 
self.weather.get_data()\n\n @wrappers.logger('debug')\n def plex_transcodes(self):\n \"\"\"\n Gets number of transcodes from Plex\n\n :return: dict\n \"\"\"\n self._load_configs()\n return dict(plex_transcodes=self.plex.transcodes)\n\n def _get_plex_cover_art(self, args):\n \"\"\"\n Gets Plex cover art passing flask requests into Plex class\n\n :return: image\n \"\"\"\n self._load_configs()\n return self.plex.get_cover_image(**args)\n\n def _get_subsonic_cover_art(self, cover_id, size):\n \"\"\"\n Gets subsonic cover art passing flask requests into Subsonic class\n\n :return: image\n \"\"\"\n self._load_configs()\n cover_id = int(cover_id)\n return self.subsonic.get_cover_art(cover_id, size)\n\n def _load_configs(self):\n \"\"\"\n Loads config data for Service subclasses if not already loaded to\n prevent errors.\n :return: Service class\n \"\"\"\n if self.subsonic is None:\n try:\n self.subsonic = SubSonic(self.config['SUBSONIC_INFO'])\n except KeyError:\n LOGGER.debug('Subsonic not loaded yet')\n if self.plex is None:\n try:\n self.plex = Plex(self.config['PLEX_INFO'])\n except KeyError:\n LOGGER.debug('Plex not loaded yet')\n if self.server_sync is None:\n try:\n self.server_sync = ServerSync(self.config['SERVERSYNC_INFO'])\n except KeyError:\n LOGGER.debug('Server Sync not loaded yet')\n if self.crashplan is None:\n try:\n self.crashplan = CheckCrashPlan(self.config['CRASHPLAN_INFO'])\n except KeyError:\n LOGGER.debug('CrashPlan not loaded yet')\n if self.weather is None:\n try:\n self.weather = Forecast(self.config['WEATHER'])\n except KeyError:\n LOGGER.debug('weather not loaded yet')\n\n\nLOGGER = logging.getLogger(__name__)\n" }, { "alpha_fraction": 0.6300366520881653, "alphanum_fraction": 0.636884868144989, "avg_line_length": 31.538860321044922, "blob_id": "18c1e1f8f33dac2920ba62e898caba0aa842bf14", "content_id": "263d3362b3d20892229e879c8db7559950f2ffde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 6279, "license_type": "no_license", "max_line_length": 78, "num_lines": 193, "path": "/serverstatus/tests/test_serverstatus.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "import urllib2\nimport unittest\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nfrom flask import Flask\nfrom flask.ext.testing import LiveServerTestCase\n\nfrom serverstatus import app\nfrom serverstatus.assets.apifunctions import APIFunctions\nfrom serverstatus.assets.services import ServerSync, SubSonic\nfrom serverstatus.assets.weather import Forecast\n\n\nclass TestApiFunctions(unittest.TestCase):\n def setUp(self):\n self.app = app.test_client()\n self.apifunctions = APIFunctions(app.config)\n\n def test_ping(self):\n self.assertTrue(isinstance(self.apifunctions.ping(), dict))\n\n def test_system_info(self):\n self.assertTrue(isinstance(self.apifunctions.system_info(), dict))\n\n def test_storage(self):\n self.assertTrue(isinstance(self.apifunctions.storage(), dict))\n\n def test_network_speed(self):\n self.assertTrue(isinstance(self.apifunctions.network_speed(), dict))\n\n def test_services(self):\n self.assertTrue(isinstance(self.apifunctions.services(), OrderedDict))\n\n def test_weather(self):\n self.assertTrue(isinstance(self.apifunctions.forecast(), dict))\n\n def test_media(self):\n results = self.apifunctions.media()\n self.assertIsInstance(results, dict)\n for key in results:\n if 'plex_nowplaying' in key:\n self.plex_nowplaying(results[key])\n if 'plex_recentlyadded' in key:\n self.plex_recentlyadded(results[key])\n if 'subsonic_nowplaying' in key:\n self.subsonic_nowplaying(results[key])\n\n def test_plex_transcodes(self):\n self.assertTrue(isinstance(self.apifunctions.plex_transcodes(), dict))\n\n def plex_recentlyadded(self, result):\n self.assertIsInstance(result, dict)\n for vid_type in result:\n self.assertIsInstance(result[vid_type], list)\n for video in result[vid_type]:\n self.assertIsInstance(video, 
dict)\n\n def subsonic_recentlyadded(self, result):\n self.assertIsInstance(result, list)\n for album in result:\n self.assertIsInstance(result[album], dict)\n\n def plex_nowplaying(self, result):\n if not result:\n self.assertIs(result, None)\n if result:\n self.assertIsInstance(result, list)\n for video in result:\n self.assertIsInstance(video, dict)\n\n def subsonic_nowplaying(self, result):\n if not result:\n self.assertIs(result, None)\n if result:\n for key in result:\n self.assertIsInstance(result[key], dict)\n\n\nclass TestSubSonicServer(unittest.TestCase):\n def setUp(self):\n self.app = app.test_client()\n self.config = app.config['SUBSONIC_INFO']\n self.config_test_values = dict(\n url='http://192.168.1.100',\n port=40,\n user='guestuser',\n password='password',\n serverpath='/subbybad/'\n )\n\n def est_bad_config_values(self):\n for key in self.config_test_values:\n config = deepcopy(self.config)\n config[key] = self.config_test_values[key]\n self.subsonic = SubSonic(config)\n print key, self.config_test_values[key]\n self.assertTrue(self.subsonic.connection_status)\n\n def test_bad_server_url(self):\n bad_url = 'http://192.168.1.100'\n config = deepcopy(self.config)\n config['url'] = bad_url\n self.subsonic = SubSonic(config)\n self.assertFalse(self.subsonic.connection_status)\n\n def test_bad_port(self):\n config = deepcopy(self.config)\n config['port'] = 40\n self.subsonic = SubSonic(config)\n self.assertFalse(self.subsonic.connection_status)\n\n def test_bad_username(self):\n config = deepcopy(self.config)\n config['user'] = 'guestuser'\n self.subsonic = SubSonic(config)\n self.assertFalse(self.subsonic.connection_status)\n\n def test_bad_password(self):\n config = deepcopy(self.config)\n config['password'] = 'password'\n self.subsonic = SubSonic(config)\n self.assertFalse(self.subsonic.connection_status)\n\n def test_bad_serverpath(self):\n config = deepcopy(self.config)\n config['serverpath'] = '/subbybad/'\n self.subsonic = SubSonic(config)\n 
self.assertFalse(self.subsonic.connection_status)\n\n\nclass TestForecastIO(unittest.TestCase):\n def setUp(self):\n self.app = app.test_client()\n self.config = app.config['WEATHER']\n\n def test_bad_apikey(self):\n config = deepcopy(self.config)\n config['Forecast_io_API_key'] = 'thisisabadkey'\n with self.assertRaises(ValueError):\n Forecast(config)\n\n\nclass TestServerSync(unittest.TestCase):\n def setUp(self):\n self.app = app.test_client()\n self.config = app.config['SERVERSYNC_INFO']\n\n def test_no_lockfile_path(self):\n config = deepcopy(self.config)\n del config['lockfile_path']\n serversync = ServerSync(config)\n self.assertFalse(serversync.connection_status)\n\n def test_bad_lockfile_path(self):\n config = deepcopy(self.config)\n config['lockfile_path'] = '/tmp/badfile.lock'\n serversync = ServerSync(config)\n self.assertFalse(serversync.connection_status)\n\n\nclass TestLiveServer(LiveServerTestCase):\n server_address = 'http://192.168.1.101/status/'\n\n def create_app(self):\n self.app = Flask(__name__)\n self.app.config.update(TESTING=True, LIVESERVER_PORT=8943)\n return self.app\n\n def test_server_is_up_and_running(self):\n response = urllib2.urlopen(TestLiveServer.server_address)\n self.assertEqual(response.code, 200)\n\n\n\"\"\"\nclass TestDebugServer(flaskTestCase):\n def create_app(self):\n app = Flask(__name__)\n app.config['TESTING'] = True\n return app\n\n def test_some_json(self):\n functions_to_test = (func for func in dir(APIFunctions) if not\n func.startswith('_'))\n for func in functions_to_test:\n test_api = '/api/' + func\n response = self.client.get(test_api)\n print 'hi'\n self.assertEquals(response.json, dict())\n\"\"\"\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.5412545204162598, "alphanum_fraction": 0.5444556474685669, "avg_line_length": 39.60304260253906, "blob_id": "6a931ffce662672dd2b25f35ea791d6fc04a2fae", "content_id": "8b6cc241a4151aba0d6fc31a33dfbc7f2add7687", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34675, "license_type": "no_license", "max_line_length": 80, "num_lines": 854, "path": "/serverstatus/assets/services.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "import os\nimport logging\nimport urllib2\nimport urlparse\nfrom collections import OrderedDict\nfrom operator import itemgetter\nfrom time import localtime, strftime\nimport datetime\nfrom cStringIO import StringIO\n\nfrom PIL import Image, ImageOps\nimport libsonic\nimport xmltodict\n\nfrom serverstatus import app\nimport serverstatus.assets.exceptions as exceptions\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Service(object):\n def __init__(self, service_config):\n assert isinstance(service_config, dict)\n self.logger = LOGGER\n self.logger.debug(\n '{} class initialized'.format(self.__class__.__name__))\n self.service_config = service_config\n self._services_status_mapping = self._status_mappings_dict()\n self._service_name = None\n self._connect_status = None\n self._server_full_url = None\n self._resolved_status_mapping = dict()\n self._temp_img_dir = app.config.get('TEMP_IMAGES', '/tmp')\n\n @property\n def service_name(self):\n return self._service_name\n\n @property\n def status_mapping(self):\n self._resolved_status_mapping = self._map_connection_status()\n return self._resolved_status_mapping\n\n @property\n def connection_status(self):\n self._connect_status = self._test_server_connection()\n return self._connect_status\n\n @property\n def server_full_url(self):\n return self._server_full_url\n\n @property\n def external_url(self):\n return self._get_config_attrib('external_url')\n\n @staticmethod\n def convert_date_fmt(date_str, fmt_str_in, fmt_str_out):\n dt_value = datetime.datetime.strptime(date_str, fmt_str_in)\n return dt_value.strftime(fmt_str_out)\n\n def _test_server_connection(self):\n # method to be overridden by subclasses\n return\n\n def 
_map_connection_status(self):\n service_name = self._service_name\n output = {service_name: dict()}\n try:\n output = {service_name: self._services_status_mapping[\n str(self.connection_status)]}\n output[service_name][\n 'title'] = self._add_service_name_to_status_mapping()\n if self.external_url:\n output[service_name]['external_url'] = self.external_url\n except KeyError:\n pass\n return output\n\n def _add_service_name_to_status_mapping(self):\n delim = '-'\n service_name = self._service_name\n if delim in service_name:\n title = service_name.split(delim)\n title = ' '.join([w.title() for w in title])\n else:\n title = service_name.title()\n return title\n\n def _get_config_attrib(self, attrib):\n try:\n return self.service_config[attrib]\n except KeyError:\n # Config attribute not found\n return None\n\n @staticmethod\n def _status_mappings_dict():\n return dict(\n False=dict(\n text='Offline',\n icon='icon-off icon-white',\n css_class='btn_mod btn btn-xs btn-danger',\n ),\n True=dict(\n text='Online',\n icon='icon-ok icon-white',\n css_class='btn_mod btn btn-xs btn-success',\n ),\n ServerSyncActive=dict(\n text='Online',\n icon='icon-download icon-white',\n css_class='btn_mod btn btn-xs btn-success',\n ),\n BackupServerActive=dict(\n text='Active',\n icon='icon-upload icon-white',\n css_class='btn_mod btn btn-xs btn-success',\n ),\n Waiting=dict(\n text='Pending',\n icon='icon-pause icon-white',\n css_class='btn_mod btn btn-xs btn-warning',\n )\n )\n\n def _log_warning_for_missing_config_value(self, cls_name, config_val,\n default):\n # Log warning that config value for plex is missing from config file.\n # Using default value instead\n self.logger.warning(\n ('{config_val} missing from config value for {cls_name}. 
'\n 'Using {default} instead').\n format(cls_name=cls_name, default=default, config_val=config_val))\n\n @staticmethod\n def _convert_xml_to_json(resp_output):\n return xmltodict.parse(resp_output)\n\n @staticmethod\n def _build_external_img_path(service_name):\n base_path = 'img/'\n return ''.join([base_path, service_name, '?'])\n\n def _test_file_path(self, file_path_key):\n # //TODO Needed\n output = None\n try:\n file_path = self.service_config[file_path_key]\n if os.path.exists(file_path):\n output = file_path\n except KeyError as err:\n self.logger.error(err)\n finally:\n return output\n\n\nclass SubSonic(Service):\n def __init__(self, server_info):\n Service.__init__(self, server_info)\n self._service_name = 'subsonic'\n self.conn = libsonic.Connection(baseUrl=self.service_config['url'],\n username=self.service_config['user'],\n password=self.service_config[\n 'password'],\n port=self.service_config['port'],\n appName=self.service_config['appname'],\n apiVersion=self.service_config['api'],\n serverPath=self.service_config[\n 'serverpath'])\n self._connect_status = self._test_server_connection()\n self._server_full_url = self._get_server_full_url()\n self._resolved_status_mapping = self._map_connection_status()\n self._img_base_url = self._build_external_img_path(\n self._service_name) + 'cover='\n\n def recently_added(self, num_results=None):\n \"\"\"\n Returns recently added entries.\n\n :param num_results: number of recently added results to return\n :type num_results: int\n :return: list of [dict]\n \"\"\"\n\n def recently_added_generator(num):\n recently_added = self.conn.getAlbumList(\"newest\", num)['albumList'][\n 'album']\n for album in recently_added:\n yield album\n return\n\n if num_results is None:\n num_results = 10\n return [self._get_entry_info(entry, min_size=145, max_size=500) for\n entry in recently_added_generator(num_results)]\n\n def get_cover_art(self, cover_art_id, size=None):\n assert isinstance(cover_art_id, int)\n if any([size is 
None, size <= 0, type(size) is not int]):\n return self.conn.getCoverArt(aid=cover_art_id)\n else:\n if size > 2000:\n # set max limit on size of photo returned\n size = 2000\n return self.conn.getCoverArt(aid=cover_art_id, size=size)\n\n def now_playing(self):\n \"\"\"\n Returns now playing entries from Subsonic server in list format. Each\n entry in list represents one song currently playing from server. Each\n entry in list is a dict\n\n :returns: list of [dict]\n \"\"\"\n entries = []\n now_playing = self.conn.getNowPlaying()\n try:\n many_songs_playing = isinstance(now_playing['nowPlaying']['entry'],\n list)\n except (KeyError, TypeError):\n # no songs playing\n return None\n if many_songs_playing:\n # multiple songs playing\n entries = [self._get_entry_info(entry) for entry in\n now_playing['nowPlaying']['entry']]\n elif not many_songs_playing:\n # single song playing\n entries.append(\n self._get_entry_info(now_playing['nowPlaying']['entry']))\n # remove entries from now playing if user hasn't touched them or\n # playlist auto advanced in X min\n results = [self._get_entry_info(entry, max_size=800) for entry in\n entries if entry['minutesAgo'] <= 10]\n if results:\n return results\n else:\n return None\n\n def set_output_directory(self, directory):\n # //TODO remove extraneous code\n self._temp_img_dir = directory\n return self._temp_img_dir == directory\n\n def _test_server_connection(self):\n \"\"\"\n Test if we're able to connect to Subsonic server.\n\n :return: bool - True if able to connect, false otherwise\n :raise: exceptions.SubsonicConnectionError\n \"\"\"\n connection_status = False\n try:\n connection_status = self.conn.ping()\n assert connection_status\n except AssertionError:\n err = 'Unable to reach Subsonic server'\n self.logger.error(err)\n # raise exceptions.SubsonicConnectionError(err)\n finally:\n return connection_status\n\n def _create_cover_art_file(self, cover_art_id, size=None):\n \"\"\"\n size in get_cover_art method for subsonic 
returns a square image with\n dimensions in pixels equal to size\n :param cover_art_id:\n :return:\n \"\"\"\n # set default image size in pixels\n if size is None:\n size = 600\n img_data = self.conn.getCoverArt(aid=cover_art_id, size=size)\n cover_dir = self._temp_img_dir # temp storage for created image files\n filename = 'cover'\n ext = '.jpg'\n short_filepath = filename + str(cover_art_id) + '_' + str(size) + ext\n full_filepath = os.path.join(cover_dir, short_filepath)\n if not os.path.exists(cover_dir):\n # check if filepath exists. Attempt to create if it doesn't\n try:\n os.mkdir(cover_dir)\n except IOError:\n self.logger.error(\n 'Failed to create cover art directory: {}'.format(\n full_filepath))\n return\n if not os.path.isfile(full_filepath):\n self.logger.info('Write cover art file: {}'.format(full_filepath))\n with open(full_filepath, 'wb') as img_file:\n img_file.write(img_data.read())\n return full_filepath\n\n def _get_entry_info(self, entry, min_size=None, max_size=None):\n \"\"\"\n appends URL coverart link to Subsonic entry dict\n :param entry: subsonic entry\n :type entry: dict\n :return: dict\n \"\"\"\n assert type(entry) == dict\n if min_size:\n min_size = 145\n if max_size:\n max_size = 1200\n # create url link to thumbnail coverart, and full-size coverart\n cover_art_link = [''.join([self._img_base_url,\n str(entry.get('coverArt', entry['id'])),\n '&size=',\n str(size)]) for size in (min_size, max_size)]\n entry.update(coverArtExternalLink_sm=cover_art_link[0],\n coverArtExternalLink_xl=cover_art_link[1])\n try:\n created_date = self.convert_date_fmt(entry[u'created'],\n '%Y-%m-%dT%H:%M:%S.%fZ',\n '%m/%d/%Y %I:%M%p')\n except ValueError as dt_conv_err:\n self.logger.error('Error converting date: {}'.format(dt_conv_err))\n else:\n entry[u'created'] = created_date\n try:\n # Return progress on currently playing song(s). No good way to do\n # this since Subsonic doesn't have access to this info through\n # it's API. 
Calculate progress by taking last time\n # song was accessed divide by progress\n entry['progress'] = min(\n float(entry['minutesAgo']) / float(entry['duration'] / 60), 1)\n except KeyError:\n entry['progress'] = 1\n finally:\n entry.update(progress_pct='{:.2%}'.format(entry['progress']),\n progress=entry['progress'] * 100)\n return entry\n\n def _get_server_full_url(self):\n serverpath = self.service_config['serverpath'].strip('/')\n try:\n serverpath, _ = serverpath.split('/')\n except ValueError as err:\n self.logger.warning(\n 'Issue parsing Subsonic server path: {}'.format(err))\n return '{url}:{port:d}/{path}'.format(url=self.service_config['url'],\n port=self.service_config['port'],\n path=serverpath)\n\n\nclass CheckCrashPlan(Service):\n def __init__(self, server_info):\n Service.__init__(self, server_info)\n self._service_name = 'backups'\n self.file_path = self._test_file_path('logfile_path')\n self._connect_status = self._test_server_connection()\n self._resolved_status_mapping = self._map_connection_status()\n\n def _test_server_connection(self):\n items_to_keep = ['scanning', 'backupenabled']\n with open(self.file_path, 'r') as log_file:\n items = [line.lower().split() for line in log_file.readlines()\n for x in items_to_keep if x in line.lower()]\n # remove \"=\" from list\n for item in items:\n item.remove('=')\n items_values = [True if item[1] == 'true' else False for item in items]\n if all(items_values):\n return 'BackupServerActive'\n elif any(items_values):\n return 'Waiting'\n else:\n return False\n\n\nclass ServerSync(Service):\n def __init__(self, server_info):\n Service.__init__(self, server_info)\n self.server_info = server_info\n self.lockfile_path = self.server_info.get('lockfile_path', None)\n self._service_name = 'server-sync'\n self._connect_status = self._test_server_connection()\n self._resolved_status_mapping = self._map_connection_status()\n\n def _test_server_connection(self):\n try:\n return os.path.exists(self.lockfile_path)\n 
except TypeError:\n self.logger.debug('Server Sync Lockfile does not exist at {}'.\n format(self.lockfile_path))\n return False\n\n\nclass Plex(Service):\n \"\"\"\n Note: Plex requires a PlexPass for access to the server API. Plex won't\n allow you to connect to API otherwise\n\n Provides media metadata information from Plex\n \"\"\"\n url_scheme = 'http://'\n\n def __init__(self, server_config):\n Service.__init__(self, server_config)\n assert type(server_config) is dict\n self.service_config = server_config\n self._service_name = 'plex'\n self.server_internal_url_and_port = self._get_full_url_and_port\n try:\n self._server_full_url = server_config['external_url']\n except KeyError as err:\n self.logger.error(\n 'Missing config value {config_value} from {cls}'.format(\n config_value='external_url',\n cls=self.__class__.__name__))\n raise exceptions.MissingConfigValue(err)\n self._connect_status = self._test_server_connection()\n self._resolved_status_mapping = self._map_connection_status()\n self._transcodes = 0\n self._cover_mapping = dict()\n self._img_base_url = self._build_external_img_path(self._service_name)\n\n def recently_added(self, num_results=None):\n \"\"\"\n\n :type num_results: int or unknown\n :return: dict of [lists]\n \"\"\"\n\n def process_video_data(videos):\n # sort the recently added list by date in descending order\n videos = sorted(videos, key=itemgetter('@addedAt'), reverse=True)\n # trim the list to the number of results we want\n videos_trimmed = videos[:num_results]\n return [self._get_video_data(video) for video in videos_trimmed]\n\n if not self._connect_status:\n return None\n if any([num_results is None, type(num_results) is not int]):\n # Check if correct for maximum number of results is entered\n # if not set default\n num_results = 6\n api_call = 'recentlyadded'\n json_data = self._get_xml_convert_to_json(api_call)\n # the media value we want are contained in lists so loop through the\n # MediaContainer, find the lists of data, 
and return each value in\n # the lists. The lists contain Movies and Shows separately.\n\n movies = [media for value in json_data['MediaContainer'] if\n type(json_data['MediaContainer'][value]) == list for\n media in json_data['MediaContainer'][value] if\n media['@type'] != 'season']\n tv_shows = [media for value in json_data['MediaContainer'] if\n type(json_data['MediaContainer'][value]) == list for\n media in json_data['MediaContainer'][value] if\n media['@type'] == 'season']\n # remove extra data\n del json_data\n return dict(Movies=process_video_data(movies),\n TVShows=process_video_data(tv_shows))\n\n def now_playing(self):\n \"\"\"\n Returns now playing data from Plex server in a JSON-like dictionary\n\n :return: dict()\n \"\"\"\n\n def generate_video_data(vid_data, api_call=None):\n \"\"\"\n Generator function for creating relevant video data. Takes JSON\n data, checks if is data is an OrderedDict\n then grabs the relevant data if the video is a TV show or Movie.\n \"\"\"\n # In JSON form Plex returns multiple videos as a list of\n # OrderedDicts, and a single video as an OrderedDict\n # Convert the single video to a list for processing\n if isinstance(vid_data, OrderedDict):\n video_list = list()\n video_list.append(vid_data)\n elif isinstance(vid_data, list):\n video_list = vid_data\n else:\n # Plex returned data that we haven't seen before.\n # Raise exception to warn user.\n msg = (\n 'Plex returned API data that does not match to known '\n 'standards.Plex return data as {} when it should return a '\n 'list or OrderedDict').format(type(vid_data))\n self.logger.error(msg)\n raise exceptions.PlexAPIDataError(msg)\n for video in video_list:\n # Grab relevant data about Video from JSON data, send the API\n # call to calculate _transcodes, otherwise it will skip and\n # return 0\n yield self._get_video_data(video, api_call)\n return\n\n self._transcodes = 0 # reset serverinfo count\n api_call = 'nowplaying'\n now_playing_relevant_data = list()\n json_data = 
self._get_xml_convert_to_json(api_call)\n if not int(json_data['MediaContainer']['@size']):\n # Nothing is currently playing in plex\n return None\n for vid in generate_video_data(json_data['MediaContainer']['Video'],\n api_call):\n now_playing_relevant_data.append(vid)\n return now_playing_relevant_data\n\n def get_cover_image(self, plex_id, thumbnail=None, local=None):\n \"\"\"\n Returns binary jpeg object for Plex item found local temp directory as\n set in config file. Checks request argument against mapped value from\n Plex item ID\n\n :param plex_id: metadata coverart ID that corresponds to mapping\n dictionary\n :type plex_id: str\n :param thumbnail: boolean values that tells us to return thumbnail\n image if True. Returns full scale image if False\n :type thumbnail: bool or NoneType\n :param local: boolean value that tells us to pull image from Plex\n server or return local copy\n :type local: bool or NoneType\n :return: binary\n :raises: exceptions.PlexImageError\n \"\"\"\n\n def open_image(ext):\n try:\n return open(os.path.join(self._temp_img_dir, plex_id + ext),\n 'rb')\n except IOError as img_err:\n raise exceptions.PlexImageError(img_err)\n\n thumbnail = thumbnail is not None\n local = local is not None\n if self._cover_mapping is None:\n # if _cover_mapping is empty we need to initialize Now Playing\n self.now_playing()\n if thumbnail:\n resp = open_image('.thumbnail')\n elif local:\n resp = open_image('.jpg')\n else:\n try:\n resp = urllib2.urlopen(\n urlparse.urljoin(self.server_internal_url_and_port,\n self._cover_mapping[plex_id]))\n except (TypeError, urllib2.HTTPError) as err:\n raise exceptions.PlexImageError(err)\n return resp\n\n @property\n def transcodes(self):\n \"\"\"\n Returns number of current number of Plex transcode sessions\n\n >>> 0\n >>> 1\n\n :return: int\n \"\"\"\n server_info = self.plex_server_info()\n self._transcodes = server_info.get('transcoderActiveVideoSessions', 0)\n return self._transcodes\n\n def 
plex_server_info(self):\n json_show_data = self._get_xml_convert_to_json('serverinfo')\n server_data = json_show_data.get('MediaContainer', None)\n data_dict = {str(key.strip('@')): server_data[key] for key in\n server_data if type(server_data[key]) is unicode or\n type(server_data[key]) is str}\n for key in data_dict:\n try:\n data_dict[key] = int(data_dict[key])\n except ValueError:\n if ',' in data_dict[key]:\n split_values = data_dict[key].split(',')\n data_dict[key] = [int(val) for val in split_values]\n return data_dict\n\n def _test_server_connection(self):\n \"\"\"\n Test if connection to Plex is active or not\n >>> True\n >>> False\n :return: bool\n \"\"\"\n resp = None\n try:\n if self.service_config['local_network_auth']:\n # local network authentication required\n # // TODO Need to complete code for authorization if necessary\n pass\n except KeyError:\n pass\n resp = self._get_plex_api_data('serverinfo')\n is_connectable = resp is not None\n if not is_connectable:\n self.logger.error('Could not connect to Plex server')\n return is_connectable\n\n def _get_api_url_suffix(self, url_suffix):\n \"\"\"\n https://code.google.com/p/plex-api/wiki/PlexWebAPIOverview\n contains information required Plex HTTP APIs\n\n serverinfo: Transcode bitrateinfo, myPlexauthentication info\n nowplaying: This will retrieve the \"Now Playing\" Information of the PMS.\n librarysections: Contains all of the sections on the PMS. This acts as\n a directory and you are able to \"walk\" through it.\n prefs: Gets the server preferences\n servers: get the local List of servers\n ondeck: Show ondeck list\n channels_all: Returns all channels installed in Plex Server\n channels_recentlyviewed: Get listing of recently viewed channels\n recentlyadded: Gets listing of recently added media, in descending\n order by date added\n metadata: Returns metadata from media, e.g. 
/library/metadata/<val>\n when <val> is an integer tied to a specific episode or movie\n\n >>> '/library/recentlyAdded'\n\n :param the_data_were_looking_for:\n :return:\n \"\"\"\n url_api_mapping = dict(\n serverinfo='/',\n nowplaying='/status/sessions',\n librarysections='/library/sections',\n prefs='/:/prefs',\n servers='/servers',\n ondeck='/library/onDeck',\n channels_all='/channels/all',\n recentlyadded='/library/recentlyAdded',\n metadata='/library/metadata/'\n )\n try:\n results = url_api_mapping[url_suffix]\n except KeyError as err:\n self.logger.error(err)\n raise exceptions.PlexAPIKeyNotFound(err)\n return results\n\n @property\n def _get_full_url_and_port(self):\n \"\"\"\n builds out internal url with port\n\n >>> 'http://localhost:32400'\n >>> 'http://192.168.0.1:32400'\n\n :return: str\n \"\"\"\n port = str(self.service_config.get('internal_port', '32400'))\n if port != self.service_config.get('internal_port') or str(port) != \\\n self.service_config.get('internal_port'):\n self._log_warning_for_missing_config_value(\n cls_name=self.__class__.__name__, default=port,\n config_val='port')\n try:\n internal_url = self.service_config['internal_url'].replace(\n Plex.url_scheme, '').lstrip('/')\n except KeyError:\n internal_url = 'localhost'\n self._log_warning_for_missing_config_value(\n cls_name=self.__class__.__name__,\n default=internal_url, config_val='internal_url')\n return ''.join([Plex.url_scheme, internal_url, ':', port])\n\n def _get_plex_api_data(self, api_call, api_suffix=None):\n \"\"\"\n Call plex api, and return XML data\n\n For /status/sessions:\n >>> '<MediaContainer size=\"0\"></MediaContainer>'\n\n :param api_call:\n :return: str\n :raises: exceptions.PlexConnectionError\n \"\"\"\n if api_suffix is None:\n # no extra api call for this\n api_suffix = ''\n try:\n full_api_call = ''.join(\n [self._get_api_url_suffix(api_call), api_suffix])\n resp = urllib2.urlopen(\n urlparse.urljoin(self.server_internal_url_and_port,\n full_api_call))\n 
output = resp.read()\n except urllib2.URLError as err:\n self.logger.error('Error connecting to Plex')\n raise exceptions.PlexConnectionError(err)\n else:\n resp.close()\n return output\n\n def _get_xml_convert_to_json(self, api_key, api_suffix=None):\n \"\"\"\n Gets Plex data based on api key and converts Plex XML response to JSON\n format\n\n :type api_key: str\n :type api_suffix: unknown or str\n :return:\n \"\"\"\n xml_data = self._get_plex_api_data(api_key, api_suffix)\n return self._convert_xml_to_json(xml_data)\n\n def _get_video_data(self, video, get_type=None):\n is_now_playing = get_type == 'nowplaying'\n # need a separate dict for section mapping since Plex returns different\n # data for Now Playing and Recently Added\n library_section_mapping = {'1': 'Movies', '2': 'TV Shows'}\n # need a separate dict for section mapping since Plex returns different\n # data for Now Playing and Recently Added\n # all the video.gets below are to handle the different mappings\n # Plex sends for Now Playing/Recently Added.\n vidtype = video.get('@librarySectionTitle',\n library_section_mapping.get(\n video.get('@librarySectionID', 0)))\n if vidtype == 'TV Shows':\n video_data = self._get_tv_show_data(video, get_type)\n elif vidtype == 'Movies':\n release_date = video['@originallyAvailableAt']\n video_data = dict(showtitle=video['@title'],\n summary=video['@summary'],\n releasedate=self.convert_date_fmt(release_date,\n '%Y-%m-%d',\n '%m/%d/%Y'))\n else:\n # encountered an unexpected video type\n msg = 'Unexpected media type {} encountered'.format(vidtype)\n self.logger.error(msg)\n raise exceptions.PlexAPIDataError(msg)\n if is_now_playing:\n # only applicable if we want to retrieve now playing data from Plex\n plex_path_to_art = video.get('@grandparentThumb', video['@thumb'])\n try:\n # this is only relevant for videos that are currently playing\n video_data['progress'] = (float(video['@viewOffset']) / float(\n video['@duration'])) * 100.0\n except KeyError:\n # video's 
not playing - not an issue\n video_data['progress'] = 0\n # add common elements to video dict\n else:\n plex_path_to_art = video['@thumb']\n self._save_cover_art(self.server_internal_url_and_port +\n plex_path_to_art)\n arturlmapped_value = os.path.basename(plex_path_to_art)\n video_data.update(type=vidtype,\n art_external_url=''.join([self._img_base_url,\n arturlmapped_value]),\n added_at=strftime('%m/%d/%Y %I:%M %p',\n localtime(int(video['@addedAt']))))\n # converts direct plex http link to thumbnail to internal mapping\n # security through obfuscation /s\n self._cover_mapping[arturlmapped_value] = plex_path_to_art\n video_data['rating'] = float(video.get('@rating', 0))\n return video_data\n\n def _save_cover_art(self, cover_loc):\n # retrieve image data from Plex server metadata\n img_data = StringIO(urllib2.urlopen(\n urlparse.urljoin(self.server_internal_url_and_port,\n cover_loc)).read())\n # check if temp directory exists, if not attempt to create directory\n if not os.path.exists(self._temp_img_dir):\n try:\n os.mkdir(self._temp_img_dir)\n self.logger.info('Creating temporary image directory {}'.\n format(self._temp_img_dir))\n except OSError as err:\n self.logger.error(('Failure creating temporary image directory'\n ' {}.\\nError message {}').format(\n self._temp_img_dir, err))\n raise\n img = Image.open(img_data)\n exts = ('.jpg', '.thumbnail')\n sizes = [(568, 852), (144, 214)]\n # create filepaths to temp images in temp directory\n img_filepaths = [os.path.join(self._temp_img_dir, ''.join(\n [str(cover_loc.split('/')[-1]), ext])) for ext in exts]\n # index 0 = size tuple\n # index 1 = path to file\n size_and_fps = zip(sizes, img_filepaths)\n for img_file in size_and_fps:\n # preserve original file for multiple manipulations\n temp_img = img.copy()\n size = img_file[0]\n filepath = img_file[1]\n if not os.path.exists(filepath):\n # create plex cover art file if file does not exist\n try:\n temp_img = ImageOps.fit(image=temp_img, size=size,\n 
method=Image.ANTIALIAS)\n temp_img.save(filepath, \"JPEG\")\n self.logger.info(\n 'Write image file: {}'.format(filepath))\n except IOError as pil_err:\n self.logger.error(\n 'Image file write failure at {}. Reason: {}'.\n format(filepath, pil_err))\n else:\n self.logger.debug('Image file already exists at: {}'.\n format(filepath))\n return img_filepaths[0]\n\n def _get_tv_show_data(self, video, get_type=None):\n is_now_playing = get_type == 'nowplaying'\n video_data = dict(showtitle=\n video.get('@parentTitle',\n video.get('@grandparentTitle')),\n episode_number=int(video.get('@leafCount',\n video.get('@index'))),\n summary=video.get('@parentSummary',\n video.get('@summary'))\n if video['@summary'] != '' else 'Not available',\n season=video['@title'] if\n video['@title'].lower() == 'specials'\n else int(video['@title'].lstrip('Season ')) if not\n is_now_playing else int(video['@parentIndex']))\n if isinstance(video_data['season'], int):\n video_data['season'] = '{0:02d}'.format(video_data['season'])\n if not is_now_playing:\n json_show_data = self._get_xml_convert_to_json('serverinfo',\n video['@key'].\n lstrip('/'))\n video = json_show_data['MediaContainer']\n video_data.update(rating=video.get('@grandparentContentRating', ''),\n studio=video['@grandparentStudio'])\n try:\n # if there's more than one episode in the season\n video = video['Video'][\n int(video_data['episode_number']) - 1]\n except KeyError:\n # first show in season\n video = video['Video']\n # get originally date playing on TV\n try:\n aired_date = video['@originallyAvailableAt']\n aired_date = self.convert_date_fmt(aired_date, \"%Y-%m-%d\",\n \"%m/%d/%Y\")\n except KeyError:\n aired_date = 'Not available'\n video_data.update(title=video['@title'], aired_date=aired_date)\n\n # Set individual show summary to parent summary if show summary does\n # not exist\n if video['@summary'] != '':\n video_data['summary'] = video['@summary']\n return video_data\n" }, { "alpha_fraction": 0.6156351566314697, 
"alphanum_fraction": 0.6188924908638, "avg_line_length": 33.13888931274414, "blob_id": "162c8da2be6af14f740776746536ce3f6e2824e8", "content_id": "e6fbf2ba73122d4cbb62bcb18f46cbe70bfb6349", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "no_license", "max_line_length": 94, "num_lines": 36, "path": "/setup.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nreadme_file = 'README.md'\nreadme_file_full_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), readme_file)\nwith open(readme_file_full_path, 'r') as f:\n readme_contents = f.read()\nif not readme_contents:\n readme_contents = ''\n\nsetup(name='server-status',\n version='0.0.1',\n author='David Beall',\n author_email='[email protected]',\n url='http://ww.beallio.com',\n description='Server Status',\n long_description='{}'.format(readme_contents),\n packages=['serverstatus'],\n package_dir={'serverstatus': 'serverstatus'},\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: GNU General Public License (GPL)',\n 'Natural Language :: English',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: System',\n ])" }, { "alpha_fraction": 0.5667939186096191, "alphanum_fraction": 0.5687022805213928, "avg_line_length": 23.395349502563477, "blob_id": "ccb08695a52f2fa560b09365fb1637e59bcd4653", "content_id": "0ccdc40b87970f030b22c9559fc7840d452d1e3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 68, 
"num_lines": 43, "path": "/serverstatus/assets/wrappers.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "\"\"\"\nfunction wrappers module\n\"\"\"\n\nimport logging\nfrom inspect import stack, getmodule\n\n\ndef logger(log_type):\n \"\"\"\n decorator to log output of functions\n :param log_type: logger level as string (debug, warn, info, etc)\n :type log_type: str\n \"\"\"\n def log_decorator(func):\n \"\"\"\n wrapped function\n \"\"\"\n def wrapped(*args, **kwargs):\n # preserve calling module name for LOGGER\n frm = stack()[1]\n mod = getmodule(frm[0])\n wrapped_logger = logging.getLogger(mod.__name__)\n result = func(*args, **kwargs)\n try:\n getattr(wrapped_logger, log_type)(result)\n except AttributeError as err:\n wrapped_logger.error(err)\n return result\n\n return wrapped\n\n return log_decorator\n\n\ndef log_args(function):\n \"\"\"\n Logs arguments passed to function\n \"\"\"\n def wrapper(*args, **kwargs):\n print 'Arguments:', args, kwargs\n return function(*args, **kwargs)\n return wrapper" }, { "alpha_fraction": 0.49355217814445496, "alphanum_fraction": 0.5064478516578674, "avg_line_length": 43.894737243652344, "blob_id": "9f8ec88ff5f9f77ce5bf63ecfd596edb736829c5", "content_id": "cbd1a9ad9c65d43a32a5e96cf91f04e24026ea61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5120, "license_type": "no_license", "max_line_length": 106, "num_lines": 114, "path": "/serverstatus/assets/weather.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom collections import namedtuple\nfrom time import localtime, strftime\nimport logging\n\nimport forecastio\n\nfrom serverstatus.assets.exceptions import MissingForecastIOKey\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Forecast(object):\n def __init__(self, weather_config):\n assert type(weather_config) is dict\n self.logger = LOGGER\n self.logger.debug(\n '{} class 
initialized'.format(self.__class__.__name__))\n self.forcastio_link_url = 'http://forecast.io/#/f/'\n try:\n self.api_key = weather_config['Forecast_io_API_key']\n except KeyError:\n raise MissingForecastIOKey('No ForecastIO API key found. API key required for weather data')\n # default weather to Stanford, CA and US units\n self.lat = weather_config.get('Latitude', 37.4225)\n self.lng = weather_config.get('Longitude', 122.1653)\n self.units = weather_config.get('units', 'us')\n self.forecast = self._get_forecast_io()\n\n def get_data(self):\n json = self.forecast.json\n current = json['currently']\n hourly = json['hourly']\n minutely = json['minutely']\n daily = json['daily']['data'][0]\n output = dict(current_summary=current['summary'],\n current_summary_icon=self._get_weather_icons(current['icon']),\n current_temp=u'{:0.0f}°'.format(round(current['temperature'], 0)),\n feels_like_temp=u'{:0.0f}°'.format(round(current['apparentTemperature'], 0)),\n current_windspeed='{:0.0f}'.format(round(current['windSpeed'], 0)),\n minutely_summary=minutely['summary'],\n hourly_summary=hourly['summary'],\n sunset=self._convert_time_to_text(daily['sunsetTime']),\n sunrise=self._convert_time_to_text(daily['sunriseTime']),\n url_link='{url}{lat},{lng}'.format(\n url=self.forcastio_link_url,\n lat=self.lat, lng=self.lng))\n if output['current_windspeed'] != 0:\n output['current_windbearing'] = self._get_wind_bearing_text(current['windBearing'])\n return output\n\n def reload_data(self):\n self.forecast.update()\n\n def _get_forecast_io(self):\n return forecastio.load_forecast(self.api_key, self.lat, self.lng,\n units=self.units)\n\n @staticmethod\n def _get_weather_icons(weather_icon):\n assert type(weather_icon) is unicode\n weather_icon = weather_icon.replace(\"-\", \"_\")\n weather_mappings = dict(clear_day='B',\n clear_night='C',\n rain='R',\n snow='W',\n sleet='X',\n wind='F',\n fog='L',\n cloudy='N',\n partly_cloudy_day='H',\n partly_cloudy_night='I')\n assert weather_icon in 
weather_mappings\n return weather_mappings[weather_icon]\n\n @staticmethod\n def _get_wind_bearing_text(degrees):\n # normalize windbearing so N starts at 0 degrees\n deg_norm = (float(degrees) + 11.25) / 22.5\n # convert range of windbearing degrees to lookup patterns\n deg_norm_lookup = int(deg_norm) + int((deg_norm // 1) > 0)\n direction_mappings = {1: ('North', 'N'),\n 2: ('North-northeast', 'NNE'),\n 3: ('Northeast', 'NE'),\n 4: ('East-northeast', 'ENE'),\n 5: ('East', 'E'),\n 6: ('East-southeast', 'ESE'),\n 7: ('Southeast', 'SE'),\n 8: ('South-southeast', 'SSE'),\n 9: ('South', 'S'),\n 10: ('South-southwest', 'SSW'),\n 11: ('Southwest', 'SW'),\n 12: ('West-southwest', 'WSW'),\n 13: ('West', 'W'),\n 14: ('West-northwest', 'WNW'),\n 15: ('Northwest', 'NW'),\n 16: ('North-northwest', 'NNW')}\n try:\n bearing_text = direction_mappings[int(deg_norm_lookup)]\n except KeyError:\n # Key values exceeds max in dictionary, which means it's blowing North\n bearing_text = direction_mappings[1]\n # output namedtuple for Cardinal direction, and abbrevation text\n return namedtuple(typename='bearing_text', field_names=['cardinal', 'abbrev'])._make(bearing_text)\n\n @staticmethod\n def _convert_time_to_text(time_var):\n assert type(time_var) is int\n time_var = strftime('%I:%M %p', localtime(time_var))\n # Remove '0' values from time if less than 10hrs or 10mins\n if time_var.startswith('0'):\n time_var = time_var[1:]\n return time_var\n" }, { "alpha_fraction": 0.7539432048797607, "alphanum_fraction": 0.7570977807044983, "avg_line_length": 36.235294342041016, "blob_id": "eed5b3839bec997e087fb490b153d8a5fdcd44d8", "content_id": "05099ee7654eb0dc456546bd239f64166fe1e260", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "no_license", "max_line_length": 78, "num_lines": 17, "path": "/wsgi.py", "repo_name": "beallio/media-server-status", "src_encoding": "UTF-8", "text": "\"\"\"\nAPACHE MOD_WSGI Load 
script\nSome of the variables in this file may need to be adjusted depending on\nserver setup and/or location of virtual environment and application\n\"\"\"\nimport sys\nimport os\n\nPROJECT_DIR = '/var/www/status' # change to the root of your app\n# 'venv/bin' is the location of the project's virtual environment\nVIRTUAL_ENV_DIR = 'venv/bin'\nPACKAGES = 'lib/python2.7/site-packages'\n\nactivate_this = os.path.join(PROJECT_DIR, VIRTUAL_ENV_DIR, 'activate_this.py')\nexecfile(activate_this, dict(__file__=activate_this))\nsys.path.append(PROJECT_DIR)\nsys.path.append(os.path.join(PROJECT_DIR, VIRTUAL_ENV_DIR, PACKAGES))\n\n" } ]
16
cnu-cse-datacom/2-packetcapture-201502038
https://github.com/cnu-cse-datacom/2-packetcapture-201502038
bbd25c2802ddde56b72d581111e255f93663c69a
271a219eddefdd1a67ef91e0b115c3949e7b7247
6e0116ff609c2b31eda5718bd141852394f0e6f2
refs/heads/master
2020-04-29T01:52:45.914054
2019-03-23T09:42:56
2019-03-23T09:42:56
175,744,966
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5444238185882568, "alphanum_fraction": 0.5891568660736084, "avg_line_length": 33.650001525878906, "blob_id": "b3a582f7d0f04506dcd8e46edb9ef903c8e3daa2", "content_id": "e571a73c3fc3fe632e21eeb4bdc70ffdde65be1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4851, "license_type": "no_license", "max_line_length": 90, "num_lines": 140, "path": "/DC02_02_201502038_KimJeongWoo.py", "repo_name": "cnu-cse-datacom/2-packetcapture-201502038", "src_encoding": "UTF-8", "text": "import socket\nimport struct\n\ndef parsing_ip_header(data):\n ip_header = struct.unpack(\"!6c6c2cccHHHccHccccccccHHIIHHHH\",data)\n ether_src = convert_ip_address(ip_header[0:6])\n ether_dest = convert_ip_address(ip_header[6:12])\n ip_head = \"0x\"+convert_ip_address(ip_header[12:13])\n ip_version = int(convert_ip_address(ip_header[14:15]),16) >> 4\n ip_HLEN = int(convert_ip_address(ip_header[14:15]),16) & 0xf\n differentiated_service_codepoint = int(convert_ip_address(ip_header[15:16]),16) >> 2 \n Explicit_Congestion_Notification = int(convert_ip_address(ip_header[15:16]),16) & 0x11\n total_length = convert_ip_int(ip_header[16:17])\n Identification = convert_ip_int(ip_header[17:18])\n flag = convert_hex(ip_header[18:19])\n Reserved_bit = (int(flag,16) >> 15) & 0x1\n not_fragment = (int(flag,16) >> 14) & 0x1\n fragments = (int(flag,16) >> 13) & 0x1\n fragments_offset = int(flag,16) & 0x1fff\n Time_to_live = convert_ip_address(ip_header[19:20])\n Protocol = convert_ip_address(ip_header[20:21]) \n Header_checksum = convert_hex(ip_header[21:22])\n source_ip = convert_ip(ip_header[22:26])\n dest_ip = convert_ip(ip_header[26:30])\n #tcp_parsing\n source_port = convert_ip_int(ip_header[30:31])\n dest_port = convert_ip_int(ip_header[31:32])\n sequence_number = convert_ip_int(ip_header[32:33])\n udp_length = sequence_number >> 16\n udp_check = sequence_number & 0xff\n acknowledgment = convert_ip_int(ip_header[33:34])\n 
header_length = int(convert_hex(ip_header[34:35]),16) >> 12\n _flag = hex(int(convert_hex(ip_header[34:35]),16) & 0xfff)\n Reserved = ((int(_flag,16)) >> 9) & 0x111\n Nonce = ((int(_flag,16)) >> 8) & 0x1\n CWR = ((int(_flag,16)) >> 7) & 0x1\n ECN = ((int(_flag,16)) >> 6) & 0x1\n URG = ((int(_flag,16)) >> 5) & 0x1\n ACK = ((int(_flag,16)) >> 4) & 0x1\n PUSH = ((int(_flag,16)) >> 3) & 0x1\n Reset = ((int(_flag,16)) >> 2) & 0x1\n SYN = ((int(_flag,16)) >> 1) & 0x1\n FIN = int(_flag,16) & 0x1\n window = convert_ip_int(ip_header[35:36])\n checkSum = convert_ip_int(ip_header[36:37])\n urgentpointer = convert_ip_int(ip_header[37:38])\n \n print(\"===============ETH===============\")\n print(\"src_mac_address: \",ether_src)\n print(\"dest_mac_address: \",ether_dest)\n print(\"IP Header: \",ip_head)\n print(\"===============IPH===============\")\n print(\"ip_version: \",ip_version)\n \n print(\"ip_HLEN: \",ip_HLEN)\n \n print(\"differentiated_service_codepoint: \",differentiated_service_codepoint) \n print(\"Explicit_Congestion_Notification: \",Explicit_Congestion_Notification)\n\n print(\"Total Length: \",total_length) \n \n print(\"Identification: \",Identification)\n \n print(\"Flag: \",flag)\n \n print(\"Reserved_bit: \",Reserved_bit)\n print(\"not_fragment: \",not_fragment)\n print(\"fragments: \",fragments)\n print(\"fragments_offset: \",fragments_offset)\n print(\"Time_to_live: \",Time_to_live)\n print(\"Protocol\",int(Protocol,16))\n print(\"Header_checksum\",Header_checksum)\n print(\"source_ip: \",source_ip)\n print(\"dest_ip: \",dest_ip)\n\n if int(Protocol) == 6 :\n print(\"===============tcp===============\")\n print(\"source_poert: \",source_port)\n print(\"dest_port\",dest_port)\n print(\"sequence_number: \",sequence_number)\n print(\"acknowledgment: \",acknowledgment)\n print(\"header_length: \",header_length)\n print(\"Flag: \",_flag)\n print(\"reserved: \",Reserved)\n print(\"Nonce: \",Nonce)\n print(\"CWR: \",CWR)\n print(\"ECN: \",ECN)\n print(\"URG: 
\",URG)\n print(\"ACK: \",ACK)\n print(\"push: \",PUSH)\n print(\"Reset: \",Reset)\n print(\"SYN: \",SYN)\n print(\"FIN: \",FIN)\n print(\"window: \",window)\n print(\"checkSum: \",checkSum)\n print(\"urgentPointer: \",urgentpointer)\n\n if int(Protocol) == 11 :\n print(\"===============UDP===============\")\n print(\"source_port: \",source_port)\n print(\"dest_port: \",dest_port)\n print(\"UDP length: \",udp_length)\n print(\"UDP Checksum: \",udp_check)\n\ndef convert_ip_address(data):\n ip_addr = list()\n for i in data:\n ip_addr.append(i.hex()) \n ip_addr = \":\".join(ip_addr)\n return ip_addr\n\ndef convert_byte(data):\n byte = list()\n for i in data:\n byte.append(i.hex())\n return byte\n\ndef convert_ip_int(data):\n ip_int = list()\n for i in data:\n ip_int.append(i)\n return ip_int[0]\n\ndef convert_hex(data):\n ip_flag = list()\n for i in data:\n ip_flag.append(i)\n return hex(ip_flag[0])\n\ndef convert_ip(data):\n ip = list()\n for i in data:\n ip.append(int(i.hex(),16))\n return ip\n\nrecv_socket = socket.socket(socket.PF_PACKET,socket.SOCK_RAW,socket.ntohs(0x0800))\n\ndata = recv_socket.recvfrom(65565)\nwhile True:\n\tparsing_ip_header(data[0][0:54])\n" } ]
1
marty-Wallace/FibbonacciServer
https://github.com/marty-Wallace/FibbonacciServer
5eb10dcbde29fc9470fde30b36e82030543ca3f8
bee68eda66535349fcf3b0579a33d1721cf79b34
87efed27c2654d6f5181e4eec7e4c1e2e5ea1764
refs/heads/master
2020-06-20T16:04:39.742452
2017-03-03T07:00:07
2017-03-03T07:00:07
81,610,047
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6461588740348816, "alphanum_fraction": 0.6516926884651184, "avg_line_length": 38.89610290527344, "blob_id": "4c5f704ac0776b10bcb5ddabe68e372f4f959051", "content_id": "69066ad46bca611da030d2efc4c1c6c438aaef36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3072, "license_type": "no_license", "max_line_length": 108, "num_lines": 77, "path": "/Fibonacci/fib_server.py", "repo_name": "marty-Wallace/FibbonacciServer", "src_encoding": "UTF-8", "text": "from socketserver import ThreadingMixIn, TCPServer, BaseRequestHandler\n\n\nclass FibonacciThreadedTCPServer(ThreadingMixIn, TCPServer):\n \"\"\"\n FibonacciThreadedTCPServer used to serve concurrent TCP requests for a fibonacci\n number. The server holds the lookup table fib_dict shared by each instance of\n FibonacciThreadedTCPRequestHandler to make optimized calculations.\n \"\"\"\n\n def __init__(self, server_address):\n TCPServer.__init__(self, server_address, FibonacciThreadedTCPRequestHandler, bind_and_activate=True)\n self.fib_dict = {0: 0, 1: 1, 2: 1}\n\n\nclass FibonacciThreadedTCPRequestHandler(BaseRequestHandler):\n \"\"\"\n FibonacciThreadedTCPRequestHandler class for our server. One instance will be created to\n serve each request that comes into the server. 
Must override the handle() method which will\n be called by the server on each new instance for each incoming request\n \"\"\"\n\n def handle(self):\n \"\"\"\n reads in an integer from the incoming socket connection, calculates the fibonacci value of\n that number then returns that value to the socket\n\n :return: None\n \"\"\"\n\n data = self.request.recv(1024).strip()\n print('Serving new request, data=%s' % data)\n try:\n num = int(data)\n if num < 0:\n raise ValueError\n except ValueError:\n self.request.sendall(bytes('Must send a valid number >= 0\\n', 'ascii'))\n return\n\n # calculate the result of fib(num)\n result = self.calc_fib(self.server.fib_dict, num)\n # encode into bytes\n ret = bytes(str(result) + '\\n', 'ascii')\n # return result\n self.request.sendall(ret)\n\n @staticmethod\n def calc_fib(fib_dict, n):\n \"\"\"\n Calculates the fibonacci value of n in an optimized way using a lookup table\n and a linear calculation. Since the fib_table is a dictionary shared between\n multiple threads we can only write to the dict. Any type of read->modify->write\n sequence may be interrupted mid-execution, creating a race condition. 
If n is in\n the fib_dict we can simply return it, otherwise we can begin calculating each value\n of fib between the current highest value ( which is fib(len(fib_dict)-1) ) and n.\n\n :param fib_dict: the dictionary of fib numbers shared between threads\n :param n: the value of fib to calculate\n :return: fib(n)\n \"\"\"\n length = len(fib_dict)\n while length <= n:\n fib_dict[length] = fib_dict[length - 1] + fib_dict[length - 2]\n length = len(fib_dict)\n return fib_dict[n]\n\n# if module is imported this code won't run\nif __name__ == '__main__':\n # port of 0 will request an open port from the kernel\n HOST, PORT = 'localhost', 0\n\n with FibonacciThreadedTCPServer((HOST, PORT)) as server:\n ip, port = server.server_address\n print(\"Starting FibServer at %s:%d\" % (ip, port))\n print(\"Waiting for fibonacci requests...\")\n server.serve_forever()\n" }, { "alpha_fraction": 0.5699896216392517, "alphanum_fraction": 0.5748847126960754, "avg_line_length": 36.273685455322266, "blob_id": "425d3c69d8e8431d6d7481a4f1ea64a8e72f1533", "content_id": "1cadb6a432d08fe7bc7a2996e866ea91499752b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10623, "license_type": "no_license", "max_line_length": 116, "num_lines": 285, "path": "/Fibonacci/fib_client.py", "repo_name": "marty-Wallace/FibbonacciServer", "src_encoding": "UTF-8", "text": "import socket\nimport sys\nimport getopt\n\nfrom threading import Thread\nfrom random import randint\n\n\nclass FibClient(object):\n \"\"\"\n Base Class for the AutoClient and HumanClient to extend from. 
Implements some of the shared methods/attributes\n \"\"\"\n def __init__(self, ip, port):\n self.ip = ip\n self.port = port\n\n @staticmethod\n def receive_from_sock(sock, buffer_size):\n \"\"\"\n Generator function to yield the current buffer received from sock.\n Can be used in the form of b''.join(recv_all(sock, buffer_size)) to\n receive the full transmission from a socket\n\n :param sock: the socket to receive data from\n :param buffer_size: the size of the buffer to load on each yield\n :return: yields the current buffer as a byte object\n \"\"\"\n message_buffer = sock.recv(buffer_size)\n while message_buffer:\n yield message_buffer\n message_buffer = sock.recv(buffer_size)\n\n @staticmethod\n def receive_all_from_sock(sock, buffer_size=2048):\n \"\"\"\n Builds the full message received from a socket in bytes\n\n :param sock: the socket to receive data from\n :param buffer_size: the size of the buffer to load while building full result, defaults to 2048\n :return: byte object containing full message\n \"\"\"\n return b''.join(FibClient.receive_from_sock(sock, buffer_size))\n\n def get_fibonacci_number(self, number):\n \"\"\"\n Make a request to the fib server for a single fib number. If there is a socket or value error, return None\n\n :param number: the fib number to request from the server\n :return: fib of n, if an error occurs then None\n \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.ip, self.port))\n response = None\n try:\n sock.sendall(bytes(str(number), 'ascii'))\n response = int(FibClient.receive_all_from_sock(sock))\n except socket.error as err:\n print(err, file=sys.stderr)\n except ValueError as err:\n print(err, file=sys.stderr)\n finally:\n sock.close()\n return response\n\n\nclass AutoClient(FibClient):\n \"\"\"\n Class to do automated testing on the fibonacci server. 
Capable of spinning up multiple threads\n and requesting random fib numbers then testing their correctness.\n \"\"\"\n\n def _test_fib(self, number, verbose, silent):\n \"\"\"\n Requests a single fib number from the server then does the calculation locally to\n ensure that the number is correct\n\n :param number: the fib number to request/test\n :param verbose flag if the printing level is high\n :param silent flag if the printing level is for errors only\n :return: None\n \"\"\"\n\n def local_fib(n):\n \"\"\"\n Generate the fib number locally to test against the server's result\n :param n: the fib number to generate\n :return: fib of n\n \"\"\"\n a, b = 1, 1\n for i in range(n-1):\n a, b = b, a+b\n return a\n\n # get server result\n result = self.get_fibonacci_number(number)\n # server errors will return None so check for None\n if result is None:\n if verbose:\n print('Received None from server')\n return None\n\n if not silent:\n print('Received result %d from server for fib(%d)' % (result, number))\n\n # get local result\n local_result = local_fib(number)\n if verbose:\n print('Calculated local value to be %d for fib(%d)' % (local_result, number))\n\n # compare results\n if result != local_result:\n # even on silent we will display errors of this kind.\n # if we enter this block it means the server is returning wrong numbers\n print(\"Server returned %d for fib(%d) should have been %d\" % (result, number, local_result))\n\n def connect(self, num_threads=15, fib_min=1, fib_max=2000, verbose=False, silent=False):\n \"\"\"\n Runs some automated tests on the server by spinning up multiple concurrent clients, one to a thread,\n each requesting a random fib number and double checking the results returned by the server\n\n :param num_threads: the number of threads/clients to spin up concurrently. Defaults to 15\n :param fib_min: the minimum fib number to request from the server. Defaults to 1\n :param fib_max: the maximum fib number to request from the server. 
Defaults to 2000\n :param verbose: sets the highest level of printing out whats going on\n :param silent: sets the lowest level of printing out whats going on\n :return: None\n \"\"\"\n threads = []\n\n for i in range(num_threads):\n num = randint(fib_min, fib_max)\n if verbose:\n print('Starting thread with target number %d' % num)\n threads.append(Thread(target=self._test_fib, args=(num, verbose, silent)))\n\n for thread in threads:\n thread.start()\n\n\nclass HumanClient(FibClient):\n\n def __init__(self, ip, port):\n super().__init__(ip, port)\n\n def connect(self):\n \"\"\"\n A loop that allows a human to repeatedly request fib numbers from the server.\n\n :return: None\n \"\"\"\n while True:\n bad_input = True\n num = 0\n while bad_input:\n try:\n num = int(input('Please enter which fibonacci number you would like: '))\n if num <= 0:\n print(\"Please enter a positive number. Negative fibonacci numbers are undefined.\")\n else:\n bad_input = False\n except ValueError as err:\n print(\"Please enter a number\")\n continue\n fib = self.get_fibonacci_number(num)\n if fib is None:\n print('Error: None returned by get_fibonacci_number(%s, %d, %d)' % (ip, port, num))\n continue\n print(\"Fib of %d is %d\" % (num, fib))\n print() # blank line\n\n\ndef usage(message=''):\n \"\"\"\n Displays a set of messages describing how to use the program\n :param message: an optional message to display at the beggining of the output\n :return: None\n \"\"\"\n if message != '':\n print(message)\n\n print('fib_client.py improper usage')\n print('Usage: python fib_client.py --port=<portnumber> [options] ')\n print('Options are:')\n print(' -i, --ip= ip address of the fib server, defaults to localhost')\n print(' -p, --port= port address of the server, required argument')\n print(' -a, --auto sets that we are going to use the auto tester client rather than the human client')\n print(' -t, --threads= only applies to the auto tester and it sets how many concurrent requests to make')\n 
print(' -l, --low= sets the lowest fib number to randomly request for the auto client defaults to 1')\n print(' -h, --high= sets the highest fib number to randomly request for the auto client defaults to 2000')\n print(' -s, --silent sets the output level to silent for auto-testing (useful for large numbers)')\n print(' -v, --verbose sets output level to verbose for auto-testing')\n print(' --help requests this usage screen')\n exit()\n\n\ndef main():\n \"\"\"\n Reads in opts and args from the command line and then takes the appropriate action\n to either start up the human client or the auto-tester client.\n \"\"\"\n\n ip = '127.0.0.1' # ip address of the server\n port = -1 # port of the server must be set by args\n auto = False # flag to run auto_client over human_client\n threads = 15 # number of threads to run auto_client with\n low = 1 # lowest fib number to request with auto_client\n high = 2000 # highest fib number to request with auto_client\n silent = False # print nothing during auto_testing\n verbose = False # print everything during auto-testing\n\n # reads in all opts and args and sets appropriate variables\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"i:p:at:l:h:sv\",\n [\"ip=\", \"port=\", \"auto\", \"threads=\", \"low=\", \"high=\", \"silent\", \"verbose\"])\n except getopt.GetoptError:\n usage()\n for o, a in opts:\n # ip address\n if o in ('-i', '--ip'):\n ip = a\n # port number\n elif o in ('-p', '--port'):\n try:\n port = int(a)\n except ValueError:\n usage(\"Port must be a number\")\n # auto client\n elif o in ('-a', '--auto'):\n auto = True\n # threads\n elif o in ('-t', '--threads'):\n try:\n threads = int(a)\n except ValueError:\n usage(\"Number of threads must be a number\")\n # low value\n elif o in ('-l', '--low'):\n try:\n low = int(a)\n if low < 1:\n raise ValueError\n except ValueError:\n usage(\"Low must be a number greater than 0\")\n # high value\n elif o in ('-h', '--high'):\n try:\n high = int(a)\n if high < 1:\n raise 
ValueError\n except ValueError:\n usage(\"High must be a number greater than 0\")\n # verbose\n elif o in ('-v', '--verbose'):\n if silent:\n usage('Cannot set both verbose and silent to be true')\n verbose = True\n # silent\n elif o in ('-s', '--silent'):\n if verbose:\n usage('Cannot set both verbose and silent to be true')\n silent = True\n # any other args/opts show usage\n else:\n usage()\n\n # ensure port is set\n if port == -1:\n usage('The port number must be set')\n\n # make sure our numbers make sense, take low if they don't\n if high < low:\n high = low\n\n if auto:\n if verbose:\n print('Target server at %s:%d' % (ip, port))\n print('Starting %d threads requesting numbers between %d-%d' % (threads, low, high))\n\n AutoClient(ip, port).connect(num_threads=threads, fib_min=low, fib_max=high, verbose=verbose, silent=silent)\n else:\n HumanClient(ip, port).connect()\n\n# Won't run if code is imported\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6936936974525452, "alphanum_fraction": 0.7162162065505981, "avg_line_length": 25.058822631835938, "blob_id": "1971c1c6c2a8374379d1733f3c454aa2b1e4a4f3", "content_id": "014d173d1e0cebb7ce4e14872f72aa1ed43bdc24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "no_license", "max_line_length": 91, "num_lines": 17, "path": "/example_client.py", "repo_name": "marty-Wallace/FibbonacciServer", "src_encoding": "UTF-8", "text": "from Fibonacci import HumanClient, AutoClient\n\n'''\nExample file showing how to use and how to test out the Fibonacci client\n'''\n\nip = 'localhost'\nport = int(input('Please enter the port number of the Fibonacci server: '))\n\ntest_auto = True\n\nif test_auto:\n client = AutoClient(ip, port)\n client.connect(num_threads=50, fib_min=4000, fib_max=5000, verbose=False, silent=False)\nelse:\n client = HumanClient(ip, port)\n client.connect()\n\n" }, { "alpha_fraction": 0.7815126180648804, 
"alphanum_fraction": 0.7857142686843872, "avg_line_length": 22.600000381469727, "blob_id": "eb341dfb76934920a74a40af33303c35a56b2c59", "content_id": "8aa14ea7435790ee0ba6c25dd598fbe506686b61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 52, "num_lines": 10, "path": "/example_server.py", "repo_name": "marty-Wallace/FibbonacciServer", "src_encoding": "UTF-8", "text": "from Fibonacci import FibonacciThreadedTCPServer\n\n'''\nExample file showing how to use the Fibonacci server\n'''\n\naddress = ('localhost', 0)\nserver = FibonacciThreadedTCPServer(address)\nprint(server.server_address)\nserver.serve_forever()\n\n\n" }, { "alpha_fraction": 0.752136766910553, "alphanum_fraction": 0.752136766910553, "avg_line_length": 28.25, "blob_id": "0aa9e98be13cfab094cc5f0fa2e8bedccc82421e", "content_id": "3b293e570539489cae2ffab4dd5dda2b8abea57e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 63, "num_lines": 4, "path": "/Fibonacci/__init__.py", "repo_name": "marty-Wallace/FibbonacciServer", "src_encoding": "UTF-8", "text": "from .fib_server import *\nfrom .fib_client import *\n\n__all__ = [FibonacciThreadedTCPServer, AutoClient, HumanClient]\n" }, { "alpha_fraction": 0.7276967763900757, "alphanum_fraction": 0.7411078810691833, "avg_line_length": 49.44117736816406, "blob_id": "03a89c93317f814e39b413804a7748c3282019a8", "content_id": "18234fd3d6f5c85eec49267a01c2dd23155e6914", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1715, "license_type": "no_license", "max_line_length": 397, "num_lines": 34, "path": "/README.md", "repo_name": "marty-Wallace/FibbonacciServer", "src_encoding": "UTF-8", "text": "# FibonacciServer\nA class assignment to create a multi-threaded fibonacci server/client\n\n\nMy 
implementation of a school assignment with the following requirements\n\n>Given that our Fibonacci calculation looks like:\n>```\n>def fib(n):\n> if n == 0:\n> return 0\n> elif n == 1:\n> return 1\n> else:\n> return fib(n-1) + fib(n-2)\n>```\n>\n>Using Python, design a socket server that will accept an integer(properly packed) and return a fibonacci value for the integer. Design a\nreliable protocol and implement both a client and server for calculating our Fibonacci values. Ensure that you can have more than one calculation occurring at any moment.\n\n####Implementation\n\nThe server is set up to serve many concurrent Fibonacci requests for very large values up to around fib(100000). The server uses a lookup table that is shared among each thread. All fibonacci numbers are calculated using a linear definition rather than a recursive one and each value is stored in the cache table. To keep the dictionary \"safe\" the data is written to the dictionary in the form of:\n```\ndef calc_fib(fib_dict, n):\n length = len(fib_dict)\n while length <= n:\n fib_dict[length] = fib_dict[length - 1] + fib_dict[length - 2]\n length = len(fib_dict)\nreturn fib_dict[n]\n```\nIn this format it is possible for many threads to write to the same location in the dictionary but since each number is based on the previous two numbers the data will stay intact. The length is updated to the length of the actual dict at the end of each loop iteration to avoid doing work done in other threads (as opposed to length += 1). 
\n\nhttp://stackoverflow.com/questions/42564437/why-is-this-python-code-not-thread-safe\n" }, { "alpha_fraction": 0.7823365926742554, "alphanum_fraction": 0.7927677035331726, "avg_line_length": 129.72727966308594, "blob_id": "d25ca74ee16705848b922c873d0502e214d60d53", "content_id": "fa97b36312e2f0b76b1611dfbff37a463fb3c08d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1438, "license_type": "no_license", "max_line_length": 727, "num_lines": 11, "path": "/Fibonacci/README.md", "repo_name": "marty-Wallace/FibbonacciServer", "src_encoding": "UTF-8", "text": "##Fibonacci module \n\n###fib_server\nfib_server.py contains two classes **FibonacciThreadedTCPRequestHandler** and **FibonacciThreadedTCPServer**. Which can be implemented by following the example in *test_client.py*. I have optimized the calculations of the fibonacci numbers as far as I can and on my Dell Inspirion I am able to have 50 threads requesting fib numbers up to fib(50000) concurrently without any timeouts or incorrect answers.\n\n###fib_client\nfib_client.py also contains two classes as well as command line main function which accepts several args and options for it's auto tester function. An example of how to use the two fib_client classes is available in *test_client.py*. The two classes are **HumanClient** and **AutoClient**. **HumanClient** runs a loop forever requesting a number from the user then requesting the fib value of that number from the server. 
**AutoClient** accepts several parameters such as the number of threads/concurrent requests to use, the minimum and maximum fib values to request and the level of verbosity it should use when printing out (printing all the numbers when requesting 100 fib(50000)'s will fill up your terminal very quickly).\n\nTo view the command line opts and args for the auto tester simply run _python fib_client.py --help_.\n\nTo run an instance of the server from the command line simply run _python fib_server.py_. It will request a port number from the kernel automatically.\n" } ]
7
sstollenwerk/roguelike_learn
https://github.com/sstollenwerk/roguelike_learn
2317bc457ca2c6fd1efc53fc41d57a71ce4fbd6c
148891b8da9a5517533062ef1d0548f50e467055
a639cd77ff940b3274107312dda8b363d30f6213
refs/heads/main
2023-04-29T17:45:59.362969
2021-05-16T06:32:30
2021-05-16T06:32:30
367,771,111
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6532663106918335, "alphanum_fraction": 0.6557788848876953, "avg_line_length": 22.41176414489746, "blob_id": "dd88cd660d0ee39f9e9450820ccc2a9f3888ac29", "content_id": "2f76626f592eb3f0524e949e42a6de1bb4b613b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "no_license", "max_line_length": 74, "num_lines": 17, "path": "/entity.py", "repo_name": "sstollenwerk/roguelike_learn", "src_encoding": "UTF-8", "text": "from dataclasses import dataclass, asdict, replace\n\nfrom actions import EscapeAction, MovementAction\nfrom basic_types import Color\n\n\n@dataclass\nclass Entity:\n x: int\n y: int\n string: str # len == 1\n fg: Color\n\n def move(self, action: MovementAction):\n ##return replace(self, x=self.x + action.dx, y=self.y + action.dy)\n self.x += action.dx\n self.y += action.dy\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.795918345451355, "avg_line_length": 31.66666603088379, "blob_id": "f51fb45929ee6ce8c825c5e881959964e28778ba", "content_id": "5e0181216e075d18ebc2f2030269dae8101e2d57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "no_license", "max_line_length": 78, "num_lines": 3, "path": "/README.md", "repo_name": "sstollenwerk/roguelike_learn", "src_encoding": "UTF-8", "text": "# roguelike_learn\n\ngoing off of the http://www.rogueliketutorials.com/tutorials/tcod/v2/ tutorial\n" }, { "alpha_fraction": 0.5867508053779602, "alphanum_fraction": 0.6340693831443787, "avg_line_length": 24.360000610351562, "blob_id": "ad6f4ff611dbf71a531608b564669cd8baec4168", "content_id": "5899a6c5fab85ed3eaefe3039976300f6bc7a25d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "no_license", "max_line_length": 78, "num_lines": 25, "path": "/tile_types.py", "repo_name": 
"sstollenwerk/roguelike_learn", "src_encoding": "UTF-8", "text": "import numpy as np # type: ignore\n\nfrom basic_types import Color, graphic_dt, tile_dt\n\n\ndef new_tile(\n *, # Enforce the use of keywords, so that parameter order doesn't matter.\n walkable: bool,\n transparent: bool,\n dark: tuple[int, Color, Color],\n) -> np.ndarray:\n \"\"\"Helper function for defining individual tile types\"\"\"\n return np.array((walkable, transparent, dark), dtype=tile_dt)\n\n\nfloor = new_tile(\n walkable=True,\n transparent=True,\n dark=(ord(\" \"), (255, 255, 255), (50, 50, 150)),\n)\nwall = new_tile(\n walkable=False,\n transparent=False,\n dark=(ord(\" \"), (255, 255, 255), (0, 0, 100)),\n)\n" }, { "alpha_fraction": 0.6348797082901001, "alphanum_fraction": 0.6348797082901001, "avg_line_length": 24.866666793823242, "blob_id": "2bb85afc9106173063254003bac9c25cbf483042", "content_id": "ff047a4490fdce039c13f1556bc7de1ca7f5a599", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 65, "num_lines": 45, "path": "/engine.py", "repo_name": "sstollenwerk/roguelike_learn", "src_encoding": "UTF-8", "text": "from typing import Iterable, Any\nfrom dataclasses import dataclass, asdict\n\nfrom tcod.context import Context\nfrom tcod.console import Console\n\nfrom actions import EscapeAction, MovementAction\nfrom entity import Entity\nfrom input_handlers import EventHandler\nfrom game_map import GameMap\n\n\nclass Engine:\n def __init__(\n self,\n entities: list[Entity],\n event_handler: EventHandler,\n game_map: GameMap,\n player: Entity,\n ):\n if player not in entities:\n entities += [player]\n self.entities = entities\n self.event_handler = event_handler\n self.player = player\n self.game_map = game_map\n\n def handle_events(self, events: Iterable[Any]) -> None:\n for event in events:\n action = self.event_handler.dispatch(event)\n\n if action is None:\n continue\n\n 
action.perform(self, self.player)\n\n def render(self, console: Console, context: Context) -> None:\n self.game_map.render(console)\n\n for entity in self.entities:\n console.print(**asdict(entity))\n\n context.present(console)\n\n console.clear()\n" }, { "alpha_fraction": 0.5859246850013733, "alphanum_fraction": 0.5941080451011658, "avg_line_length": 25.565217971801758, "blob_id": "6abab909900304aec4d15ecdb9938d62c6426077", "content_id": "00028583e8d32569c5994a1c8fd3e9046bb09906", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 611, "license_type": "no_license", "max_line_length": 75, "num_lines": 23, "path": "/basic_types.py", "repo_name": "sstollenwerk/roguelike_learn", "src_encoding": "UTF-8", "text": "import numpy as np # type: ignore\n\n\nColor = tuple[int, int, int]\n\n\n# Tile graphics structured type compatible with Console.tiles_rgb.\ngraphic_dt = np.dtype(\n [\n (\"ch\", np.int32), # Unicode codepoint.\n (\"fg\", \"3B\"), # 3 unsigned bytes, for RGB colors.\n (\"bg\", \"3B\"),\n ]\n)\n\n# Tile struct used for statically defined tile data.\ntile_dt = np.dtype(\n [\n (\"walkable\", np.bool), # True if this tile can be walked over.\n (\"transparent\", np.bool), # True if this tile doesn't block FOV.\n (\"dark\", graphic_dt), # Graphics for when this tile is not in FOV.\n ]\n)\n" } ]
5
dimbler/Zabbix
https://github.com/dimbler/Zabbix
cd81064b099a383b1de4ebdded2a2db4996dddba
34a00da5acce21fa418c6bd29f4471396a62dcfa
d71713677ec27bf50d0db7b21317e6d3b4573bcf
refs/heads/master
2021-05-06T02:46:24.217903
2017-12-18T12:02:14
2017-12-18T12:02:14
114,636,232
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6751569509506226, "alphanum_fraction": 0.6920179128646851, "avg_line_length": 32.18452453613281, "blob_id": "b6b65a538cf167a57b9ddb3d8e375c6c8ce3722c", "content_id": "dca652f3fa07204de6262e8d5e247143be071980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5607, "license_type": "no_license", "max_line_length": 154, "num_lines": 168, "path": "/send_traf_down.py", "repo_name": "dimbler/Zabbix", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n#\n# import needed modules.\n# pyzabbix is needed, see https://github.com/lukecyca/pyzabbix\n#\n# Pillow is also needed, see https://github.com/python-pillow/Pillow\n#\n#\nimport argparse\nimport ConfigParser\nimport os\nimport os.path\nimport distutils.util\nimport requests\nimport time\nimport sys\nfrom cStringIO import StringIO\nfrom PIL import Image\nfrom pyzabbix import ZabbixAPI\nimport cStringIO\n\nusername = \"root\"\npassword = \"zabbix123\"\ngraphid = 14931\napi = \"http://localhost\"\nperiod=9600\n\ndef getGraph(graphid, username, password, api, period):\n zapi = ZabbixAPI(url='http://localhost', user='root', password='zabbix123')\n zapi.session.verify = False\n\n # Find graph from API\n graph = zapi.graph.get(output=\"extend\", graphids=graphid)\n\n if graph:\n #print(format(graph))\n # Set width and height\n width = graph[0]['width']\n height = graph[0]['height']\n\n # Select the right graph generator according to graph type\n # type 3 = Exploded graph\n if graph[0]['graphtype'] == \"3\":\n generator = \"chart6.php\"\n # type 2 = Pie graph\n elif graph[0]['graphtype'] == \"2\":\n generator = \"chart6.php\"\n # type 1 = Stacked graph\n elif graph[0]['graphtype'] == \"1\":\n generator = \"chart2.php\"\n # type 0 = Normal graph\n elif graph[0]['graphtype'] == \"0\":\n generator = \"chart2.php\"\n # catch-all in case someone invents a new type/generator\n else:\n generator = \"chart2.php\"\n\n # Set login URL 
for the Frontend (frontend access is needed, as we cannot retrieve graph images via the API)\n loginurl = api + \"/index.php\"\n # Data that needs to be posted to the Frontend to log in\n logindata = {'autologin' : '1', 'name' : username, 'password' : password, 'enter' : 'Sign in'}\n # We need to fool the frontend into thinking we are a real browser\n headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0', 'Content-type' : 'application/x-www-form-urlencoded'}\n\n # setup a session object so we can reuse session cookies\n session=requests.session()\n verify=False\n\n # Login to the frontend\n login=session.post(loginurl, params=logindata, headers=headers, verify=verify)\n\n # See if we logged in successfully\n try:\n if session.cookies['zbx_sessionid']:\n\n # Build the request for the graph\n graphurl = api + \"/\" + generator + \"?graphid=\" + str(graphid) + \"&period=\" + str(period)\n\n\n # get the graph\n graphreq = session.get(graphurl,verify=verify)\n # read the data as an image\n graphpng = Image.open(StringIO(graphreq.content))\n memf = cStringIO.StringIO()\n graphpng.save(memf, \"JPEG\")\n return memf\n\n except:\n sys.exit(\"Error: Could not log in to retrieve graph\")\n else:\n sys.exit(\"Error: Could not find graphid \"+ graphid)\n\n\n# Arguments parser\nparser = argparse.ArgumentParser(description='Send Zabbix notification with Graphics')\nparser.add_argument('recipient', metavar=('Recipient'), type=str, help='Email recepient')\nparser.add_argument('subject', metavar=('Subject'), type=str, help='Subject you want to push.')\nparser.add_argument('message', metavar=('Message'), type=str, help='Message you want to push.')\n\n# Argument processing\nargs = parser.parse_args()\nrecipient = args.recipient\nsubject = args.subject\nmessage = args.message\n\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\nfrom email.MIMEImage import MIMEImage\n\n# Define these once; use them twice!\nstrFrom 
= '[email protected]'\nstrTo = recipient\n\n# Create the root message and fill in the from, to, and subject headers\nmsgRoot = MIMEMultipart('related')\nmsgRoot['Subject'] = subject\nmsgRoot['From'] = strFrom\nmsgRoot['To'] = strTo\nmsgRoot.preamble = 'This is a multi-part message in MIME format.'\n\n# Encapsulate the plain and HTML versions of the message body in an\n# 'alternative' part, so message agents can decide which they want to display.\nmsgAlternative = MIMEMultipart('alternative')\nmsgRoot.attach(msgAlternative)\n\nmsgText = MIMEText('This is the alternative plain text message.')\nmsgAlternative.attach(msgText)\n\nzapi = ZabbixAPI(url='http://localhost', user='root', password='zabbix123')\nzapi.session.verify = False\n\ntrig = zapi.trigger.get(output='extend',\n itemids = ['135536','135537'],\n only_true=1,\n)\n\nif trig:\n message += \"<br><b>Присутствуют проблемы доступа к СМЭВ</b><br>\"\n\n# We reference the image in the IMG SRC attribute by the ID we give it below\nmsgText = MIMEText(message+'<br><img src=\"cid:image1\"><br><br><img src=\"cid:image2\"><br><br><img src=\"cid:image3\"><br>', 'html')\nmsgAlternative.attach(msgText)\n\n# This example assumes the image is in the current directory\ngraphImage1 = getGraph(graphid, username, password, api, period)\nmsgImage1 = MIMEImage(graphImage1.getvalue())\nmsgImage1.add_header('Content-ID', '<image1>')\nmsgRoot.attach(msgImage1)\n\ngraphImage2 = getGraph(18485, username, password, api, period)\nmsgImage2 = MIMEImage(graphImage2.getvalue())\nmsgImage2.add_header('Content-ID', '<image2>')\nmsgRoot.attach(msgImage2)\n\ngraphImage3 = getGraph(11706, username, password, api, period)\nmsgImage3 = MIMEImage(graphImage3.getvalue())\nmsgImage3.add_header('Content-ID', '<image3>')\nmsgRoot.attach(msgImage3)\n\n# Send the email (this example assumes SMTP authentication is required)\n\nimport smtplib\nsmtp = smtplib.SMTP('localhost', 25)\nsmtp.ehlo()\nsmtp.sendmail(strFrom, strTo, 
msgRoot.as_string())\nsmtp.quit()\n" } ]
1
LiquidNalee/MiniPkmnShowdown
https://github.com/LiquidNalee/MiniPkmnShowdown
fadb560ad7a7e0ab599910b046cf0d9062326555
56520e351f984fee710860b0949a19eefa8ee05f
71d9b9fd559e05fd0d961b505973a1e86e5b7f3c
refs/heads/main
2023-02-13T05:12:13.285627
2021-01-11T17:44:54
2021-01-11T17:44:54
325,296,231
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5402510166168213, "alphanum_fraction": 0.5512703061103821, "avg_line_length": 47.04411697387695, "blob_id": "4d9d411e31e91cdb6db2f0a89436e44ee82e0cb7", "content_id": "dfc0e92419f727f075b6071763436d6991f687c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3267, "license_type": "permissive", "max_line_length": 168, "num_lines": 68, "path": "/models/pkmn/moves/PokemonMove.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from enum import Enum\nfrom os.path import join\nfrom json import load\n\nfrom models.pkmn.types.PokemonType import PokemonType\nfrom models.pkmn.stats.StatsDict import StatsDict\n\nfrom database.DatabaseConfig import database_dir\n\n\nclass MoveCategory(Enum):\n Physical = \"[Ph]\"\n Special = \"[Sp]\"\n Status = \"[St]\"\n\n\nclass PokemonMove:\n\n def __init__(self, name: str, move_type: PokemonType, category: MoveCategory, pp: int,\n power: int = 0, accuracy: int = 100, priority: int = 0, effect_rate: int = 0,\n self_stat_mod: StatsDict = None, trgt_stat_mod: StatsDict = None):\n self.name = name\n self.type = move_type\n self.category = category\n self.pp = pp\n self.max_pp = pp\n self.power = power\n self.accuracy = accuracy\n self.priority = priority\n self.effect_rate = effect_rate\n self.self_stat_mod = self_stat_mod if self_stat_mod is not None else StatsDict()\n self.trgt_stat_mod = trgt_stat_mod if trgt_stat_mod is not None else StatsDict()\n\n def __str__(self):\n prio_str = f\"prio:{'+' if self.priority > 0 else ''}{self.priority}\"\n return f\"{str(self.type):^15} {self.category.value}{self.pp:>9}/{self.max_pp:<3}\\n\" \\\n f\"{self.name:^34}\\n\" \\\n f\" {str(self.power):>3}BP -{str(self.accuracy) + '% acc.':^15}- {prio_str:>8} \\n\" \\\n f\"{'-':^34}\" if self.effect_rate == 0 else \\\n f\"{self.effect_rate:<3}%: \" if self.effect_rate != 100 else \"\" \\\n + f\"{', '.join([key + ' x' + val for key, val in 
self.self_stat_mod if val != 0]):^20} [Self]\" \\\n if self.self_stat_mod != StatsDict() else \\\n f\"{', '.join([key + ' x' + val for key, val in self.trgt_stat_mod if val != 0]):^18} [Target]\" \\\n if self.trgt_stat_mod != StatsDict() else \\\n \"\"\n\n @staticmethod\n def fromDb(name: str):\n with open(join(str(database_dir), \"moves\", f\"{name.lower()}.json\")) as move_data_file:\n move_data = load(move_data_file)\n return PokemonMove(\n name=name,\n move_type=PokemonType[move_data[\"type\"]],\n category=MoveCategory[move_data[\"category\"]],\n pp=int(move_data[\"pp\"]),\n power=int(move_data[\"power\"])\n if \"power\" in move_data and move_data[\"power\"] is not None else 0,\n accuracy=int(move_data[\"accuracy\"])\n if \"accuracy\" in move_data and move_data[\"accuracy\"] is not None else 100,\n priority=int(move_data[\"priority\"])\n if \"priority\" in move_data and move_data[\"priority\"] else 0,\n effect_rate=int(move_data[\"effect_rate\"])\n if \"effect_rate\" in move_data and move_data[\"effect_rate\"] is not None else 0,\n self_stat_mod=StatsDict(move_data[\"self_stat_mod\"])\n if \"self_stat_mod\" in move_data and move_data[\"self_stat_mode\"] is not None else StatsDict(),\n trgt_stat_mod=StatsDict(move_data[\"trgt_stat_mod\"])\n if \"trgt_stat_mod\" in move_data and move_data[\"trgt_stat_mode\"] else StatsDict()\n )\n" }, { "alpha_fraction": 0.7914438247680664, "alphanum_fraction": 0.7914438247680664, "avg_line_length": 17.799999237060547, "blob_id": "954d472d89e9073e7c52ba6242c78676f01bd807", "content_id": "01a702e295e2571e5deb314e8cd3c745c12f5092", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 187, "license_type": "permissive", "max_line_length": 47, "num_lines": 10, "path": "/Makefile", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "build:\n\tdocker build -t minishowdown:latest .\n\nrun:\n\tdocker run -d --name minishowdown 
minishowdown\n\nclean:\n\tdocker stop minishowdown\n\tdocker rm minishowdown\n\tdocker image rm minishowdown" }, { "alpha_fraction": 0.49754709005355835, "alphanum_fraction": 0.5299889445304871, "avg_line_length": 40.56578826904297, "blob_id": "3cef88bfa9bc6e012796d2e14d7412eff13deae4", "content_id": "c708bf83ee5d447ca884a24a67222f7821aaa032", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6319, "license_type": "permissive", "max_line_length": 105, "num_lines": 152, "path": "/tests/models/game/battle/test_BattleGameState.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom models.game.battle.BattleGameState import BattleGameState\nfrom models.game.trainer.PokemonTrainer import PokemonTrainer\nfrom models.game.trainer.utils.ArenaBadge import ArenaBadge\nfrom models.pkmn.PokemonModel import PokemonModel\nfrom models.pkmn.moves.PokemonMove import MoveCategory, PokemonMove\nfrom models.pkmn.natures.PokemonNature import PokemonNature\nfrom models.pkmn.stats.StatsDict import StatsDict\nfrom models.pkmn.types.PokemonType import PokemonType\n\n\nclass TestBattleGameState(TestCase):\n\n def setUp(self) -> None:\n self.Pikachu = PokemonModel(\n name=\"Pikachu\",\n types=(PokemonType.Electric, None),\n level=100,\n nature=PokemonNature.Jolly,\n moves=[\n PokemonMove(\n name=\"Volt Tackle\",\n move_type=PokemonType.Electric,\n category=MoveCategory.Physical,\n pp=24,\n power=120\n ),\n PokemonMove(\n name=\"Iron Tail\",\n move_type=PokemonType.Steel,\n category=MoveCategory.Physical,\n pp=24,\n power=100,\n accuracy=75\n ),\n PokemonMove(\n name=\"Thunderbolt\",\n move_type=PokemonType.Electric,\n category=MoveCategory.Special,\n pp=24,\n power=90\n )\n ],\n base_stats=StatsDict(hp=35, atk=55, phys_def=40, spe_atk=50, spe_def=50, spd=90),\n evs=StatsDict(hp=0, atk=252, phys_def=0, spe_atk=4, spe_def=0, spd=252),\n ivs=StatsDict(hp=31, atk=31, 
phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n self.Charizard = PokemonModel(\n name=\"Charizard\",\n types=(PokemonType.Fire, PokemonType.Flying),\n level=100,\n nature=PokemonNature.Jolly,\n moves=[\n PokemonMove(\n name=\"Fire Blast\",\n move_type=PokemonType.Fire,\n category=MoveCategory.Special,\n pp=8,\n power=110,\n accuracy=85\n ),\n PokemonMove(\n name=\"Hurricane\",\n move_type=PokemonType.Flying,\n category=MoveCategory.Special,\n pp=16,\n power=110,\n accuracy=70\n )\n ],\n base_stats=StatsDict(hp=78, atk=84, phys_def=78, spe_atk=109, spe_def=85, spd=100),\n evs=StatsDict(hp=0, atk=0, phys_def=0, spe_atk=252, spe_def=4, spd=252),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n self.Pidgeot = PokemonModel(\n name=\"Pidgeot\",\n types=(PokemonType.Flying, PokemonType.Normal),\n level=100,\n nature=PokemonNature.Jolly,\n moves=[\n PokemonMove(\n name=\"Double Edge\",\n move_type=PokemonType.Normal,\n category=MoveCategory.Physical,\n pp=24,\n power=120\n ),\n PokemonMove(\n name=\"Brave Bird\",\n move_type=PokemonType.Flying,\n category=MoveCategory.Physical,\n pp=24,\n power=120\n )\n ],\n base_stats=StatsDict(hp=83, atk=80, phys_def=75, spe_atk=70, spe_def=70, spd=101),\n evs=StatsDict(hp=0, atk=252, phys_def=0, spe_atk=0, spe_def=4, spd=252),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n self.Blastoise = PokemonModel(\n name=\"Blastoise\",\n types=(PokemonType.Water, None),\n level=100,\n nature=PokemonNature.Modest,\n moves=[\n PokemonMove(\n name=\"Hydro Pump\",\n move_type=PokemonType.Water,\n category=MoveCategory.Special,\n pp=8,\n power=110,\n accuracy=80\n ),\n PokemonMove(\n name=\"Ice Beam\",\n move_type=PokemonType.Ice,\n category=MoveCategory.Special,\n pp=16,\n power=90\n )\n ],\n base_stats=StatsDict(hp=79, atk=83, phys_def=100, spe_atk=85, spe_def=105, spd=78),\n evs=StatsDict(hp=252, atk=0, phys_def=0, spe_atk=252, spe_def=4, spd=0),\n ivs=StatsDict(hp=31, atk=31, 
phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n self.Red = PokemonTrainer(\n name=\"Red\",\n team=[self.Pikachu, self.Charizard],\n badges=[ArenaBadge.Boulder, ArenaBadge.Cascade, ArenaBadge.Thunder, ArenaBadge.Rainbow,\n ArenaBadge.Soul, ArenaBadge.Marsh, ArenaBadge.Marsh, ArenaBadge.Earth]\n )\n self.Blue = PokemonTrainer(\n name=\"Blue\",\n team=[self.Pidgeot, self.Blastoise],\n badges=[ArenaBadge.Boulder, ArenaBadge.Cascade, ArenaBadge.Thunder, ArenaBadge.Rainbow,\n ArenaBadge.Soul, ArenaBadge.Marsh, ArenaBadge.Marsh, ArenaBadge.Earth]\n )\n self.BattleGameState = BattleGameState(player=self.Red, opponent=self.Blue)\n\n def test_battle_game_state(self):\n\n assert self.BattleGameState.player.name == \"Red\" and self.BattleGameState.opponent.name == \"Blue\"\n assert self.BattleGameState.turn == 1\n assert self.BattleGameState.getPlayerActivePkmn() == self.Pikachu\n assert self.BattleGameState.getOpponentActivePkmn() == self.Pidgeot\n self.BattleGameState.setPlayerActivePkmn(1)\n assert self.BattleGameState.getPlayerActivePkmn() == self.Charizard\n assert self.BattleGameState.getOpponentActivePkmn() == self.Pidgeot\n self.BattleGameState.setOpponentActivePkmn(1)\n assert self.BattleGameState.getPlayerActivePkmn() == self.Charizard\n assert self.BattleGameState.getOpponentActivePkmn() == self.Blastoise\n\n" }, { "alpha_fraction": 0.6862886548042297, "alphanum_fraction": 0.6961396932601929, "avg_line_length": 57.61971664428711, "blob_id": "003e87c83f1b6010c83cdc0819d2299d7c93a6bf", "content_id": "a7e8757dd8e587e24156e95d5ada6d5000ba1660", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12486, "license_type": "permissive", "max_line_length": 111, "num_lines": 213, "path": "/engine/pkmn/types/ClassicTypesRuleSet.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from pandas import DataFrame\nfrom engine.pkmn.types.TypesBaseRuleSet import TypesBaseRuleSet\nfrom 
models.pkmn.types.PokemonType import PokemonType\n\n\nclass ClassicTypesRuleSet(TypesBaseRuleSet):\n\n def __init__(self):\n # Init all at 1\n type_effectiveness_chart = DataFrame({ref_pkmn_type: {pkmn_type: float(1) for pkmn_type in PokemonType}\n for ref_pkmn_type in PokemonType})\n\n # Normal is:\n # ineffective against\n type_effectiveness_chart[PokemonType.Normal][PokemonType.Ghost] = float(0)\n # not very effective against\n type_effectiveness_chart[PokemonType.Normal][PokemonType.Steel] = float(.5)\n type_effectiveness_chart[PokemonType.Normal][PokemonType.Rock] = float(.5)\n\n # Fire is:\n # very effective against\n type_effectiveness_chart[PokemonType.Fire][PokemonType.Steel] = float(2)\n type_effectiveness_chart[PokemonType.Fire][PokemonType.Grass] = float(2)\n type_effectiveness_chart[PokemonType.Fire][PokemonType.Ice] = float(2)\n type_effectiveness_chart[PokemonType.Fire][PokemonType.Bug] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Fire][PokemonType.Fire] = float(.5)\n type_effectiveness_chart[PokemonType.Fire][PokemonType.Water] = float(.5)\n type_effectiveness_chart[PokemonType.Fire][PokemonType.Rock] = float(.5)\n type_effectiveness_chart[PokemonType.Fire][PokemonType.Dragon] = float(.5)\n\n # Water is:\n # very effective against\n type_effectiveness_chart[PokemonType.Water][PokemonType.Fire] = float(2)\n type_effectiveness_chart[PokemonType.Water][PokemonType.Ground] = float(2)\n type_effectiveness_chart[PokemonType.Water][PokemonType.Rock] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Water][PokemonType.Water] = float(.5)\n type_effectiveness_chart[PokemonType.Water][PokemonType.Grass] = float(.5)\n type_effectiveness_chart[PokemonType.Water][PokemonType.Dragon] = float(.5)\n\n # Electric is:\n # ineffective against\n type_effectiveness_chart[PokemonType.Electric][PokemonType.Ground] = float(0)\n # very effective against\n 
type_effectiveness_chart[PokemonType.Electric][PokemonType.Water] = float(2)\n type_effectiveness_chart[PokemonType.Electric][PokemonType.Flying] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Electric][PokemonType.Electric] = float(.5)\n type_effectiveness_chart[PokemonType.Electric][PokemonType.Grass] = float(.5)\n type_effectiveness_chart[PokemonType.Electric][PokemonType.Dragon] = float(.5)\n\n # Grass is:\n # very effective against\n type_effectiveness_chart[PokemonType.Grass][PokemonType.Water] = float(2)\n type_effectiveness_chart[PokemonType.Grass][PokemonType.Ground] = float(2)\n type_effectiveness_chart[PokemonType.Grass][PokemonType.Rock] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Grass][PokemonType.Fire] = float(.5)\n type_effectiveness_chart[PokemonType.Grass][PokemonType.Grass] = float(.5)\n type_effectiveness_chart[PokemonType.Grass][PokemonType.Poison] = float(.5)\n type_effectiveness_chart[PokemonType.Grass][PokemonType.Flying] = float(.5)\n type_effectiveness_chart[PokemonType.Grass][PokemonType.Bug] = float(.5)\n type_effectiveness_chart[PokemonType.Grass][PokemonType.Dragon] = float(.5)\n type_effectiveness_chart[PokemonType.Grass][PokemonType.Steel] = float(.5)\n\n # Ice is:\n # very effective against\n type_effectiveness_chart[PokemonType.Ice][PokemonType.Grass] = float(2)\n type_effectiveness_chart[PokemonType.Ice][PokemonType.Ground] = float(2)\n type_effectiveness_chart[PokemonType.Ice][PokemonType.Flying] = float(2)\n type_effectiveness_chart[PokemonType.Ice][PokemonType.Dragon] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Ice][PokemonType.Fire] = float(.5)\n type_effectiveness_chart[PokemonType.Ice][PokemonType.Water] = float(.5)\n type_effectiveness_chart[PokemonType.Ice][PokemonType.Ice] = float(.5)\n type_effectiveness_chart[PokemonType.Ice][PokemonType.Steel] = float(.5)\n\n # Fighting is:\n # ineffective against\n 
type_effectiveness_chart[PokemonType.Fighting][PokemonType.Ghost] = float(0)\n # very effective against\n type_effectiveness_chart[PokemonType.Fighting][PokemonType.Normal] = float(2)\n type_effectiveness_chart[PokemonType.Fighting][PokemonType.Ice] = float(2)\n type_effectiveness_chart[PokemonType.Fighting][PokemonType.Rock] = float(2)\n type_effectiveness_chart[PokemonType.Fighting][PokemonType.Dark] = float(2)\n type_effectiveness_chart[PokemonType.Fighting][PokemonType.Steel] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Fighting][PokemonType.Poison] = float(.5)\n type_effectiveness_chart[PokemonType.Fighting][PokemonType.Flying] = float(.5)\n type_effectiveness_chart[PokemonType.Fighting][PokemonType.Psychic] = float(.5)\n type_effectiveness_chart[PokemonType.Fighting][PokemonType.Bug] = float(.5)\n type_effectiveness_chart[PokemonType.Fighting][PokemonType.Fairy] = float(.5)\n\n # Poison is:\n # ineffective against\n type_effectiveness_chart[PokemonType.Poison][PokemonType.Steel] = float(0)\n # very effective against\n type_effectiveness_chart[PokemonType.Poison][PokemonType.Grass] = float(2)\n type_effectiveness_chart[PokemonType.Poison][PokemonType.Fairy] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Poison][PokemonType.Poison] = float(.5)\n type_effectiveness_chart[PokemonType.Poison][PokemonType.Ground] = float(.5)\n type_effectiveness_chart[PokemonType.Poison][PokemonType.Rock] = float(.5)\n type_effectiveness_chart[PokemonType.Poison][PokemonType.Ghost] = float(.5)\n\n # Ground is:\n # ineffective against\n type_effectiveness_chart[PokemonType.Ground][PokemonType.Flying] = float(0)\n # very effective against\n type_effectiveness_chart[PokemonType.Ground][PokemonType.Fire] = float(2)\n type_effectiveness_chart[PokemonType.Ground][PokemonType.Electric] = float(2)\n type_effectiveness_chart[PokemonType.Ground][PokemonType.Poison] = float(2)\n 
type_effectiveness_chart[PokemonType.Ground][PokemonType.Rock] = float(2)\n type_effectiveness_chart[PokemonType.Ground][PokemonType.Steel] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Ground][PokemonType.Grass] = float(.5)\n type_effectiveness_chart[PokemonType.Ground][PokemonType.Bug] = float(.5)\n\n # Flying is:\n # very effective against\n type_effectiveness_chart[PokemonType.Flying][PokemonType.Grass] = float(2)\n type_effectiveness_chart[PokemonType.Flying][PokemonType.Fighting] = float(2)\n type_effectiveness_chart[PokemonType.Flying][PokemonType.Bug] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Flying][PokemonType.Electric] = float(.5)\n type_effectiveness_chart[PokemonType.Flying][PokemonType.Rock] = float(.5)\n type_effectiveness_chart[PokemonType.Flying][PokemonType.Steel] = float(.5)\n\n # Psychic is:\n # ineffective against\n type_effectiveness_chart[PokemonType.Psychic][PokemonType.Dark] = float(0)\n # very effective against\n type_effectiveness_chart[PokemonType.Psychic][PokemonType.Poison] = float(2)\n type_effectiveness_chart[PokemonType.Psychic][PokemonType.Fighting] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Psychic][PokemonType.Psychic] = float(.5)\n type_effectiveness_chart[PokemonType.Psychic][PokemonType.Steel] = float(.5)\n\n # Bug is:\n # very effective against\n type_effectiveness_chart[PokemonType.Bug][PokemonType.Grass] = float(2)\n type_effectiveness_chart[PokemonType.Bug][PokemonType.Psychic] = float(2)\n type_effectiveness_chart[PokemonType.Bug][PokemonType.Dark] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Bug][PokemonType.Fire] = float(.5)\n type_effectiveness_chart[PokemonType.Bug][PokemonType.Fighting] = float(.5)\n type_effectiveness_chart[PokemonType.Bug][PokemonType.Poison] = float(.5)\n type_effectiveness_chart[PokemonType.Bug][PokemonType.Flying] = float(.5)\n 
type_effectiveness_chart[PokemonType.Bug][PokemonType.Rock] = float(.5)\n type_effectiveness_chart[PokemonType.Bug][PokemonType.Ghost] = float(.5)\n type_effectiveness_chart[PokemonType.Bug][PokemonType.Steel] = float(.5)\n type_effectiveness_chart[PokemonType.Bug][PokemonType.Fairy] = float(.5)\n\n # Rock is:\n # very effective against\n type_effectiveness_chart[PokemonType.Rock][PokemonType.Fire] = float(2)\n type_effectiveness_chart[PokemonType.Rock][PokemonType.Ice] = float(2)\n type_effectiveness_chart[PokemonType.Rock][PokemonType.Flying] = float(2)\n type_effectiveness_chart[PokemonType.Rock][PokemonType.Bug] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Rock][PokemonType.Fighting] = float(.5)\n type_effectiveness_chart[PokemonType.Rock][PokemonType.Ground] = float(.5)\n type_effectiveness_chart[PokemonType.Rock][PokemonType.Steel] = float(.5)\n\n # Ghost is:\n # ineffective against\n type_effectiveness_chart[PokemonType.Ghost][PokemonType.Normal] = float(0)\n # very effective against\n type_effectiveness_chart[PokemonType.Ghost][PokemonType.Psychic] = float(2)\n type_effectiveness_chart[PokemonType.Ghost][PokemonType.Ghost] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Ghost][PokemonType.Dark] = float(.5)\n\n # Dragon is:\n # ineffective against\n type_effectiveness_chart[PokemonType.Dragon][PokemonType.Fairy] = float(0)\n # very effective against\n type_effectiveness_chart[PokemonType.Dragon][PokemonType.Dragon] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Dragon][PokemonType.Steel] = float(.5)\n\n # Dark is:\n # very effective against\n type_effectiveness_chart[PokemonType.Dark][PokemonType.Psychic] = float(2)\n type_effectiveness_chart[PokemonType.Dark][PokemonType.Ghost] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Dark][PokemonType.Fighting] = float(.5)\n 
type_effectiveness_chart[PokemonType.Dark][PokemonType.Dark] = float(.5)\n type_effectiveness_chart[PokemonType.Dark][PokemonType.Fairy] = float(.5)\n\n # Steel is:\n # very effective against\n type_effectiveness_chart[PokemonType.Steel][PokemonType.Ice] = float(2)\n type_effectiveness_chart[PokemonType.Steel][PokemonType.Rock] = float(2)\n type_effectiveness_chart[PokemonType.Steel][PokemonType.Fairy] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Steel][PokemonType.Fire] = float(.5)\n type_effectiveness_chart[PokemonType.Steel][PokemonType.Water] = float(.5)\n type_effectiveness_chart[PokemonType.Steel][PokemonType.Electric] = float(.5)\n type_effectiveness_chart[PokemonType.Steel][PokemonType.Steel] = float(.5)\n\n # Fairy is:\n # very effective against\n type_effectiveness_chart[PokemonType.Fairy][PokemonType.Fighting] = float(2)\n type_effectiveness_chart[PokemonType.Fairy][PokemonType.Dark] = float(2)\n type_effectiveness_chart[PokemonType.Fairy][PokemonType.Dragon] = float(2)\n # not very effective against\n type_effectiveness_chart[PokemonType.Fairy][PokemonType.Fire] = float(.5)\n type_effectiveness_chart[PokemonType.Fairy][PokemonType.Poison] = float(.5)\n type_effectiveness_chart[PokemonType.Fairy][PokemonType.Steel] = float(.5)\n\n super().__init__(type_effectiveness_chart=type_effectiveness_chart)\n" }, { "alpha_fraction": 0.5425220131874084, "alphanum_fraction": 0.5451287031173706, "avg_line_length": 40.47297286987305, "blob_id": "bbbdafe4f3acd16c681e34d7d039daaed72851eb", "content_id": "bd3bf742a6db194b679cc20a3143801bf39b5ee8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3069, "license_type": "permissive", "max_line_length": 120, "num_lines": 74, "path": "/database/DatabaseConfig.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from os.path import join, dirname, realpath\nfrom pathlib import Path\nfrom typing 
import List\nfrom json import dump\nfrom requests import request\nfrom re import sub\n\ndatabase_dir = Path(dirname(realpath(__file__)))\n__pokeApi_url = \"https://pokeapi.co/api/v2/\"\n\n\ndef updatePkmnDatabase(pkmn_list: List):\n for pkmn_name, pkmn_data in getPkmnDatabase(pkmn_list).items():\n print(f\"Uploaded {pkmn_name}.json\")\n with open(join(database_dir, \"pkmn\", f\"{pkmn_name}.json\"), mode='w') as pkmn_data_file:\n dump(pkmn_data, pkmn_data_file)\n\n\ndef getPkmnDatabase(pkmn_list: List[str]) -> {}:\n db = {}\n for pkmn in pkmn_list:\n res = request(\"GET\", f\"{__pokeApi_url}/pokemon/{pkmn}\")\n\n if res.status_code == 200:\n pkmn_json = {\n \"type\": {str(pkmn_type[\"slot\"] - 1): pkmn_type[\"type\"][\"name\"].capitalize()\n for pkmn_type in res.json()[\"types\"]},\n \"base_stats\": {__convertApiStatNameToDbStatName(pkmn_stat[\"stat\"][\"name\"]): pkmn_stat[\"base_stat\"]\n for pkmn_stat in res.json()[\"stats\"]}\n }\n db[res.json()[\"name\"]] = pkmn_json\n return db\n\n\ndef updateMoveDatabase(move_list: List):\n for move_name, move_data in getMoveDatabase(move_list).items():\n print(f\"Uploaded {move_name.lower()}.json\")\n with open(join(database_dir, \"moves\", f\"{move_name.lower()}.json\"), mode='w') as move_data_file:\n dump(move_data, move_data_file)\n\n\ndef getMoveDatabase(move_list: List[str]) -> {}:\n db = {}\n for move in move_list:\n move_url = sub('\\\\s', '-', move.lower())\n res = request(\"GET\", f\"{__pokeApi_url}/move/{move_url}\")\n\n if res.status_code == 200:\n move_json = {\n \"type\": res.json()[\"type\"][\"name\"].capitalize(),\n \"category\": res.json()[\"damage_class\"][\"name\"].capitalize(),\n \"pp\": res.json()[\"pp\"],\n \"power\": res.json()[\"power\"],\n \"accuracy\": res.json()[\"accuracy\"],\n \"priority\": res.json()[\"priority\"],\n \"effect_rate\": res.json()[\"effect_chance\"],\n \"self_stat_mod\": {__convertApiStatNameToDbStatName(pkmn_stat[\"stat\"][\"name\"]): pkmn_stat[\"change\"]\n for pkmn_stat in 
res.json()[\"stat_changes\"]} if res.json()[\"target\"][\"name\"] == \"user\"\n else {},\n \"trgt_stat_mod\": {__convertApiStatNameToDbStatName(pkmn_stat[\"stat\"][\"name\"]): pkmn_stat[\"change\"]\n for pkmn_stat in res.json()[\"stat_changes\"]} if res.json()[\"target\"][\"name\"] != \"user\"\n else {}\n }\n db[move] = move_json\n return db\n\n\ndef __convertApiStatNameToDbStatName(api_stat: str) -> str:\n return \"atk\" if api_stat == \"attack\" \\\n else \"phys_def\" if api_stat == \"defense\" \\\n else \"spe_atk\" if api_stat == \"special-attack\" \\\n else \"spe_def\" if api_stat == \"special-defense\" \\\n else \"spd\" if api_stat == \"speed\" \\\n else \"hp\"\n" }, { "alpha_fraction": 0.6633986830711365, "alphanum_fraction": 0.6633986830711365, "avg_line_length": 37.25, "blob_id": "aa5b9721a95d829d81ef13270225cd7736f9644c", "content_id": "e4e12cd6e9461c77cb375bb7da6a9807cc9e9d61", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 612, "license_type": "permissive", "max_line_length": 92, "num_lines": 16, "path": "/models/pkmn/PokemonBaseModel.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from models.pkmn.stats.StatsDict import StatsDict\nfrom models.pkmn.types.PokemonType import PokemonType, format_types_tuple, type_equals\n\n\nclass PokemonBaseModel:\n\n def __init__(self, name: str, types: (PokemonType, PokemonType), base_stats: StatsDict):\n self.name = name\n self.type = format_types_tuple(types)\n self.base_stats = base_stats\n\n def __eq__(self, other):\n if self.__class__ != other.__class__:\n return NotImplementedError\n return self.name == other.name and type_equals(self.type, other.type) \\\n and self.base_stats == other.base_stats\n" }, { "alpha_fraction": 0.7446808218955994, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 30, "blob_id": "6ac61a64f7cef1b3d25d2b29d0e6c52661d7cd27", "content_id": "00050388debf012360c916d942911f973b600195", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 94, "license_type": "permissive", "max_line_length": 72, "num_lines": 3, "path": "/run_tests.sh", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "make build\nmake run\ndocker run -a stdin -a stdout -i -t minishowdown python -m pytest tests/\n\n" }, { "alpha_fraction": 0.7653429508209229, "alphanum_fraction": 0.7689530849456787, "avg_line_length": 18.85714340209961, "blob_id": "c24e7384ea2c44628f167ee77fba7b59e038f56e", "content_id": "9ecc07e6494676e554e7966d7bf8865f855d8f72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 277, "license_type": "permissive", "max_line_length": 70, "num_lines": 14, "path": "/README.md", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "1st time:\n\npython -m minishowdown -p trainer_example.json --update_db True\n\nelse:\n\npython -m minishowdown -p trainer_example.json\n\nfor a tougher battle:\n\npython -m minishowdown -p trainer_example.json -o trainer_example.json\n\n\nFeel free to write your own teams, pkmns and moves" }, { "alpha_fraction": 0.4875912368297577, "alphanum_fraction": 0.5352798104286194, "avg_line_length": 36.3636360168457, "blob_id": "03f6035d410e9c49c9c5964a014410220f8c369b", "content_id": "b8b0e75c46459ac5a07a847afea0a2ae86318208", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2055, "license_type": "permissive", "max_line_length": 116, "num_lines": 55, "path": "/tests/database/test_DatabaseInit.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom database.DatabaseConfig import getPkmnDatabase\n\nfrom models.game.trainer.PokemonTrainer import PokemonTrainer\nfrom models.pkmn.natures.PokemonNature import PokemonNature\nfrom 
models.pkmn.stats.StatsDict import StatsDict\nfrom models.pkmn.types.PokemonType import PokemonType\n\n\nclass TestDatabaseInit(TestCase):\n\n def test_database_init(self):\n trainer_json = {\n \"name\": \"Red\",\n \"team\": {\n \"0\": {\n \"name\": \"Scizor\",\n \"level\": 100,\n \"nature\": \"Adamant\",\n \"moves\": [\"Bullet Punch\"],\n \"evs\": {\"hp\": 120, \"atk\": 252, \"phys_def\": 0, \"spe_atk\": 0, \"spe_def\": 0, \"spd\": 136},\n \"ivs\": {\"hp\": 31, \"atk\": 31, \"phys_def\": 31, \"spe_atk\": 31, \"spe_def\": 31, \"spd\": 31}\n }\n }\n }\n trainer = PokemonTrainer.fromJson(trainer_json)\n\n assert trainer.name == \"Red\"\n assert trainer.team[1] is None\n assert trainer.team[0].name == \"Scizor\"\n assert trainer.team[0].level == 100\n assert trainer.team[0]._nature == PokemonNature.Adamant\n assert trainer.team[0].moves[0].type == PokemonType.Steel\n assert trainer.team[0].moves[0].power == 40\n assert trainer.team[0].moves[0].accuracy == 100\n assert trainer.team[0].base_stats == StatsDict(hp=70, atk=130, phys_def=100, spe_atk=55, spe_def=80, spd=65)\n assert trainer.team[0].stats == StatsDict(hp=311, atk=394, phys_def=236, spe_atk=131, spe_def=196, spd=200)\n\n def test_database_update(self):\n db = getPkmnDatabase([\"tyranitar\"])\n\n assert \"tyranitar\" in db\n expected_json = {\n \"type\": {\"0\": \"rock\", \"1\": \"dark\"},\n \"base_stats\": {\n \"hp\": 100,\n \"atk\": 134,\n \"phys_def\": 110,\n \"spe_atk\": 95,\n \"spe_def\": 100,\n \"spd\": 61\n }\n }\n assert db[\"tyranitar\"] == expected_json\n" }, { "alpha_fraction": 0.6279069781303406, "alphanum_fraction": 0.6376594305038452, "avg_line_length": 45.36521911621094, "blob_id": "7e97dae375103b69683e4c7453dd35282ab2df61", "content_id": "4fa09cbfdc2533bfd728b0d882e0440d32b39148", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5332, "license_type": "permissive", "max_line_length": 114, "num_lines": 115, "path": 
"/engine/game/BattleEngine.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from random import randint\n\nfrom engine.game import InputHandler\nfrom engine.game.BattleDisplay import displayPlayers, displayBattleGameState, displayUsedMove, \\\n displaySwitch, displayEndOfBattle\nfrom engine.pkmn.types.TypesBaseRuleSet import TypesBaseRuleSet\nfrom models.game.battle.BattleGameState import BattleGameState\nfrom models.pkmn.PokemonModel import PokemonModel\nfrom models.pkmn.moves.PokemonMove import PokemonMove, MoveCategory\nfrom models.pkmn.stats.StatsDict import StatsDict\n\n\nclass BattleEngine:\n\n def __init__(self, battleGameState: BattleGameState, typesRuleSet: TypesBaseRuleSet):\n self.gameState = battleGameState\n self.typesRuleSet = typesRuleSet\n\n def startGame(self) -> bool:\n displayPlayers(self.gameState)\n # Lead Phase\n player_pkmn_selection = InputHandler.pkmnSelection(self.gameState.player.team)\n self.gameState.setPlayerActivePkmn(player_pkmn_selection)\n\n # Battle Phase\n self.__battlePhase()\n\n win = not self.gameState.player.hasLost()\n displayEndOfBattle(win, self.gameState)\n return win\n\n def __battlePhase(self):\n while not self.gameState.player.hasLost() and not self.gameState.opponent.hasLost():\n # Move Selection Phase\n if self.gameState.getPlayerActivePkmn().isKO():\n self.__switchPlayerPokemon()\n if self.gameState.getOpponentActivePkmn().isKO():\n self.__switchOpponentPokemon()\n\n displayBattleGameState(self.gameState)\n plyr_move, plyr_switch = InputHandler.turnDecision(self.gameState.getPlayerActivePkmn(),\n self.gameState.player.team)\n rand = randint(0, len(self.gameState.getOpponentActivePkmn().moves) - 1)\n opponent_move = self.gameState.getOpponentActivePkmn().moves[rand]\n self.gameState.setTurnState(plyr_move=plyr_move, opponent_move=opponent_move)\n\n if plyr_switch:\n self.__switchPlayerPokemon(plyr_switch)\n\n first, second = self.__getMoveOrder()\n self.__useMove(first[0], first[1], 
second[0])\n if not second[0].isKO() and second[1] is not None:\n self.__useMove(second[0], second[1], first[0])\n self.gameState.turn += 1\n\n def __useMove(self, caster: PokemonModel, caster_move: PokemonMove, trgt: PokemonModel) \\\n -> bool:\n damage = 0\n type_effectiveness = 1\n if caster_move.category != MoveCategory.Status:\n offensive_stat = caster.stats.atk if caster_move.category == MoveCategory.Physical \\\n else caster.stats.spe_atk\n defensive_stat = trgt.stats.phys_def if caster_move.category == MoveCategory.Physical \\\n else trgt.stats.spe_def\n damage = (((2 * caster.level / 5 + 2) * caster_move.power * offensive_stat / defensive_stat) / 50 + 2)\n type_effectiveness = self.typesRuleSet.getEffectiveness(caster_move.type, trgt.type)\n modifier = (1.5 if caster_move.type == caster.type else 1) \\\n * type_effectiveness \\\n * randint(85, 100) / 100 \\\n * (1 if randint(0, 100) <= caster_move.accuracy else 0)\n damage = int(damage * modifier)\n\n if damage != 0:\n trgt.takeDamage(damage)\n displayUsedMove(caster, caster_move, trgt, type_effectiveness, damage)\n\n if randint(0, 100) <= caster_move.effect_rate:\n for stat_key in StatsDict.__dict__.keys():\n caster.stats[stat_key] *= caster_move.self_stat_mod[stat_key]\n trgt.stats[stat_key] *= caster_move.trgt_stat_mod[stat_key]\n\n return True\n\n def __switchPlayerPokemon(self, selection: int = None):\n out = self.gameState.getPlayerActivePkmn()\n coming_in = self.gameState.setPlayerActivePkmn(InputHandler.pkmnSelection(self.gameState.player.team)) \\\n if selection is None else selection\n displaySwitch(player_name=self.gameState.player.name, out=out, coming_in=coming_in)\n\n def __switchOpponentPokemon(self):\n out = self.gameState.getOpponentActivePkmn()\n coming_in = self.gameState.sendNextOpponentActivePkmn()\n displaySwitch(player_name=self.gameState.opponent.name, out=out, coming_in=coming_in)\n\n def __getMoveOrder(self) -> ((PokemonModel, PokemonMove), (PokemonModel, PokemonMove)):\n 
move_order = self.gameState.getTurnState()\n (plyr_pkmn, plyr_move), (opponent_pkmn, opponent_move) = move_order\n if plyr_move is None:\n return move_order[1], move_order[0]\n\n if plyr_move.priority == opponent_move.priority:\n if plyr_pkmn.stats.spd == opponent_pkmn.stats.spd:\n if randint(0, 1) == 1:\n move_order = move_order[1], move_order[0]\n else:\n if plyr_pkmn.stats.spd < opponent_pkmn.stats.spd:\n move_order = move_order[1], move_order[0]\n else:\n if plyr_move.priority < opponent_move.priority:\n move_order = move_order[1], move_order[0]\n\n return move_order\n\n def __getActivePkmns(self):\n return self.gameState.getPlayerActivePkmn(), self.gameState.getOpponentActivePkmn()\n" }, { "alpha_fraction": 0.5573957562446594, "alphanum_fraction": 0.5628212690353394, "avg_line_length": 42.775001525878906, "blob_id": "9012645dce48f392af14f29a18cff640fb91ea51", "content_id": "1e429fd728f1f813e961afc4d1675988cdce736a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3506, "license_type": "permissive", "max_line_length": 111, "num_lines": 80, "path": "/models/pkmn/PokemonModel.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from typing import List\nfrom os.path import join\nfrom json import load\n\nfrom models.pkmn.PokemonBaseModel import PokemonBaseModel\nfrom models.pkmn.stats.StatsDict import StatsDict\nfrom models.pkmn.types.PokemonType import PokemonType\nfrom models.pkmn.natures.PokemonNature import PokemonNature\nfrom models.pkmn.moves.PokemonMove import PokemonMove\n\nfrom database.DatabaseConfig import database_dir\n\n\nclass PokemonModel(PokemonBaseModel):\n\n __bar_len = 50\n\n def __init__(self, name: str, types: (PokemonType, PokemonType), level: int, nature: PokemonNature,\n moves: List[PokemonMove], base_stats: StatsDict, evs: StatsDict, ivs: StatsDict):\n super().__init__(name=name, types=types, base_stats=base_stats)\n self.level = level\n 
self._nature = nature\n self.moves = moves\n self._evs = evs\n self._ivs = ivs\n self.stats = self.__stats_compute()\n self.max_hp = self.stats.hp\n\n def __str__(self):\n hp_percentage = self.stats.hp / self.max_hp\n ticks = int(hp_percentage * self.__bar_len + .5)\n return f\"{self.name} (lvl.{self.level}):\\n\" \\\n f\"{'█'.join('' for _ in range(ticks))}{'▁'.join('' for _ in range(self.__bar_len - ticks))}\\n\" \\\n f\"{self.stats.hp}/{self.max_hp} ({int(hp_percentage * 100)}%)\"\n\n def moveListAsStr(self) -> str:\n split_lines = [str(move).splitlines() for move in self.moves]\n return \"\\n\".join([\"| |\".join([split_lines[x][y] for x in range(len(split_lines))])\n for y in range(len(split_lines[0]))])\n\n def takeDamage(self, damage: int):\n self.stats.hp -= min(damage, self.stats.hp)\n\n def isKO(self):\n return self.stats.hp == 0\n\n @staticmethod\n def __compute_stat_baseline(level: int, base_stat: int, iv: int, ev: int) -> int:\n return int((2 * base_stat + iv + ev / 4) * level / 100)\n\n def __stats_compute(self) -> StatsDict:\n stat_values = StatsDict(**{\n stat_key:\n self.__compute_stat_baseline(level=self.level, base_stat=self.base_stats.hp,\n iv=self._ivs.hp, ev=self._evs.hp) + self.level + 10\n if stat_key == \"hp\" else\n self.__compute_stat_baseline(level=self.level, base_stat=self.base_stats[stat_key],\n iv=self._ivs[stat_key], ev=self._evs[stat_key]) + 5\n for stat_key in self.base_stats.__dict__.keys()\n })\n self._nature.apply_modifier(stat_values)\n return stat_values\n\n @staticmethod\n def fromJson(json: {}):\n keys = [\"name\", \"level\", \"nature\", \"moves\", \"evs\", \"ivs\"]\n if all(key in json for key in keys):\n with open(join(str(database_dir), \"pkmn\", f\"{json['name'].lower()}.json\")) as pkmn_data_file:\n pkmn_data = load(pkmn_data_file)\n return PokemonModel(\n name=json[\"name\"],\n types=(PokemonType[pkmn_data[\"type\"]['0']],\n PokemonType[pkmn_data[\"type\"]['1']] if 1 in pkmn_data[\"type\"] else None),\n 
level=int(json[\"level\"]),\n nature=PokemonNature[json[\"nature\"]],\n moves=[PokemonMove.fromDb(move) for move in json[\"moves\"]],\n base_stats=StatsDict(**pkmn_data[\"base_stats\"]),\n evs=StatsDict(**json[\"evs\"]),\n ivs=StatsDict(**json[\"ivs\"])\n )\n" }, { "alpha_fraction": 0.7233532667160034, "alphanum_fraction": 0.7281436920166016, "avg_line_length": 45.38888931274414, "blob_id": "604bd5509dacceef377a1be8700e4463a7310828", "content_id": "bb88ebce0383ecf88b0064f6f10bff7567f159b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 835, "license_type": "permissive", "max_line_length": 117, "num_lines": 18, "path": "/engine/pkmn/types/TypesBaseRuleSet.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from pandas import DataFrame\nfrom models.pkmn.types.PokemonType import PokemonType\n\n\nclass TypesBaseRuleSet:\n\n # .loc on DataFrame: effectiveness of attack on defense\n # .iloc on DataFrame: effectiveness of defense on attack\n\n def __init__(self, type_effectiveness_chart: DataFrame):\n self.type_effectiveness_chart = type_effectiveness_chart\n\n def __getitem__(self, item: PokemonType) -> (DataFrame, DataFrame):\n return self.type_effectiveness_chart.loc(item), self.type_effectiveness_chart.iloc(item)\n\n def getEffectiveness(self, attack_type: PokemonType, defender_type: (PokemonType, PokemonType)) -> float:\n return self.type_effectiveness_chart[attack_type][defender_type[0]] \\\n * self.type_effectiveness_chart[attack_type][defender_type[1]] if defender_type[1] is not None else 1\n" }, { "alpha_fraction": 0.746835470199585, "alphanum_fraction": 0.746835470199585, "avg_line_length": 25.33333396911621, "blob_id": "60f4d48d5de00c627956b2631bed1b714132f04d", "content_id": "a5c50dde53326514e1899cc8145ab1086720ce9e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 79, "license_type": "permissive", 
"max_line_length": 58, "num_lines": 3, "path": "/run.sh", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "make build\nmake run\ndocker run -a stdin -a stdout -i -t minishowdown /bin/bash\n" }, { "alpha_fraction": 0.5025380849838257, "alphanum_fraction": 0.5332769155502319, "avg_line_length": 39.75862121582031, "blob_id": "f911cbd3e09e6e573aabdcd86a7d86bba2d37eef", "content_id": "ff82f110841d97891d07e450cc2a862946c2614f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3546, "license_type": "permissive", "max_line_length": 114, "num_lines": 87, "path": "/tests/models/game/trainer/test_PokemonTrainer.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom models.game.trainer.PokemonTrainer import PokemonTrainer\nfrom models.pkmn.PokemonModel import PokemonModel\nfrom models.pkmn.moves.PokemonMove import PokemonMove, MoveCategory\nfrom models.pkmn.stats.StatsDict import StatsDict\nfrom models.pkmn.types.PokemonType import PokemonType\nfrom models.pkmn.natures.PokemonNature import PokemonNature\nfrom models.game.trainer.utils.ArenaBadge import ArenaBadge\n\n\nclass TestPokemonTrainer(TestCase):\n\n def setUp(self) -> None:\n self.Pikachu = PokemonModel(\n name=\"Pikachu\",\n types=(PokemonType.Electric, None),\n level=100,\n nature=PokemonNature.Jolly,\n moves=[\n PokemonMove(\n name=\"Volt Tackle\",\n move_type=PokemonType.Electric,\n category=MoveCategory.Physical,\n pp=24,\n power=120\n ),\n PokemonMove(\n name=\"Iron Tail\",\n move_type=PokemonType.Steel,\n category=MoveCategory.Physical,\n pp=24,\n power=100,\n accuracy=75\n ),\n PokemonMove(\n name=\"Thunderbolt\",\n move_type=PokemonType.Electric,\n category=MoveCategory.Special,\n pp=24,\n power=90\n )\n ],\n base_stats=StatsDict(hp=35, atk=55, phys_def=40, spe_atk=50, spe_def=50, spd=90),\n evs=StatsDict(hp=0, atk=252, phys_def=0, spe_atk=4, 
spe_def=0, spd=252),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n self.Charizard = PokemonModel(\n name=\"Charizard\",\n types=(PokemonType.Fire, PokemonType.Flying),\n level=100,\n nature=PokemonNature.Jolly,\n moves=[\n PokemonMove(\n name=\"Fire Blast\",\n move_type=PokemonType.Fire,\n category=MoveCategory.Special,\n pp=8,\n power=110,\n accuracy=85\n ),\n PokemonMove(\n name=\"Hurricane\",\n move_type=PokemonType.Flying,\n category=MoveCategory.Special,\n pp=16,\n power=110,\n accuracy=70\n )\n ],\n base_stats=StatsDict(hp=78, atk=84, phys_def=78, spe_atk=109, spe_def=85, spd=100),\n evs=StatsDict(hp=0, atk=0, phys_def=0, spe_atk=252, spe_def=4, spd=252),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n self.Red = PokemonTrainer(\n name=\"Red\",\n team=[self.Pikachu, self.Charizard],\n badges=[ArenaBadge.Boulder, ArenaBadge.Cascade, ArenaBadge.Thunder, ArenaBadge.Rainbow,\n ArenaBadge.Soul, ArenaBadge.Marsh, ArenaBadge.Marsh, ArenaBadge.Earth]\n )\n\n def testPokemonTrainer(self):\n assert self.Red.team[0] == self.Pikachu\n assert self.Red.team[1] == self.Charizard\n assert self.Red.team[2] is None and self.Red.team[5] is None\n assert self.Red.badges == [ArenaBadge.Boulder, ArenaBadge.Cascade, ArenaBadge.Thunder, ArenaBadge.Rainbow,\n ArenaBadge.Soul, ArenaBadge.Marsh, ArenaBadge.Marsh, ArenaBadge.Earth]\n" }, { "alpha_fraction": 0.4969214200973511, "alphanum_fraction": 0.541832685470581, "avg_line_length": 40.83333206176758, "blob_id": "3800ad9560142d58301516486fd9b6dde3b697e1", "content_id": "b88026726652f9bc3c9130c07f302cbefe4f6c8d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2761, "license_type": "permissive", "max_line_length": 107, "num_lines": 66, "path": "/tests/models/pkmn/test_PokemonModel.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from unittest import TestCase\nfrom 
models.pkmn.PokemonModel import PokemonModel\nfrom models.pkmn.moves.PokemonMove import PokemonMove, MoveCategory\nfrom models.pkmn.types.PokemonType import PokemonType, type_equals\nfrom models.pkmn.natures.PokemonNature import PokemonNature\nfrom models.pkmn.stats.StatsDict import StatsDict\n\n\nclass TestPokemonModel(TestCase):\n\n def test_pokemon_model(self):\n scizor = PokemonModel(\n name=\"Scizor\",\n types=(PokemonType.Bug, PokemonType.Steel),\n level=100,\n nature=PokemonNature.Adamant,\n moves=[\n PokemonMove(\n name=\"Bullet Punch\",\n move_type=PokemonType.Steel,\n category=MoveCategory.Physical,\n pp=48,\n power=40,\n priority=1\n ),\n PokemonMove(\n name=\"U-Turn\",\n move_type=PokemonType.Bug,\n category=MoveCategory.Physical,\n pp=32,\n power=70\n ),\n PokemonMove(\n name=\"Steel Beam\",\n move_type=PokemonType.Steel,\n category=MoveCategory.Special,\n pp=8,\n power=140,\n accuracy=95\n ),\n PokemonMove(\n name=\"Swords Dance\",\n move_type=PokemonType.Normal,\n category=MoveCategory.Status,\n pp=32,\n effect_rate=100,\n self_stat_mod=StatsDict(atk=2)\n )\n ],\n base_stats=StatsDict(hp=70, atk=130, phys_def=100, spe_atk=55, spe_def=80, spd=65),\n evs=StatsDict(hp=120, atk=252, phys_def=0, spe_atk=0, spe_def=0, spd=136),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n\n assert scizor.name == \"Scizor\"\n assert type_equals(scizor.type, (PokemonType.Steel, PokemonType.Bug))\n assert scizor.level == 100\n assert scizor._nature == PokemonNature.Adamant\n assert not scizor._nature == PokemonNature.Jolly\n assert scizor.base_stats == StatsDict(hp=70, atk=130, phys_def=100, spe_atk=55, spe_def=80, spd=65)\n assert scizor.base_stats.atk == 130\n assert scizor._evs == StatsDict(hp=120, atk=252, phys_def=0, spe_atk=0, spe_def=0, spd=136)\n assert scizor._ivs == StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n\n expected_stat_values = StatsDict(hp=311, atk=394, phys_def=236, spe_atk=131, spe_def=196, 
spd=200)\n assert scizor.stats == expected_stat_values\n" }, { "alpha_fraction": 0.5978332757949829, "alphanum_fraction": 0.6360822319984436, "avg_line_length": 41.271026611328125, "blob_id": "37435514e7c23d33377737ec64d839fe4b09ea0b", "content_id": "18c9af6c1f1a3f6f84b893fbf28a5befe06a7632", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4523, "license_type": "permissive", "max_line_length": 108, "num_lines": 107, "path": "/tests/models/pkmn/test_PokemonBaseModel.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from unittest import TestCase\nfrom models.pkmn.PokemonBaseModel import PokemonBaseModel\nfrom models.pkmn.types.PokemonType import PokemonType, type_equals\nfrom models.pkmn.stats.StatsDict import StatsDict\n\n\nclass TestPokemonBaseModel(TestCase):\n\n def test_double_type_pokemon(self):\n scizor = PokemonBaseModel(\n name=\"Scizor\",\n types=(PokemonType.Bug, PokemonType.Steel),\n base_stats=StatsDict(hp=70, atk=130, phys_def=100, spe_atk=55, spe_def=80, spd=65)\n )\n\n assert scizor.name == \"Scizor\"\n assert type_equals(scizor.type, (PokemonType.Bug, PokemonType.Steel))\n assert scizor.base_stats == StatsDict(hp=70, atk=130, phys_def=100, spe_atk=55, spe_def=80, spd=65)\n assert scizor.base_stats.atk == 130\n\n def test_double_type_pokemon_valid_comparison(self):\n scizor = PokemonBaseModel(\n name=\"Scizor\",\n types=(PokemonType.Bug, PokemonType.Steel),\n base_stats=StatsDict(hp=70, atk=130, phys_def=100, spe_atk=55, spe_def=80, spd=65)\n )\n\n definitely_not_scizor = PokemonBaseModel(\n name=\"Scizor\",\n types=(PokemonType.Steel, PokemonType.Bug),\n base_stats=StatsDict(hp=70, atk=130, phys_def=100, spe_atk=55, spe_def=80, spd=65)\n )\n\n assert scizor.name == definitely_not_scizor.name\n assert type_equals(scizor.type, definitely_not_scizor.type)\n assert scizor.base_stats == definitely_not_scizor.base_stats\n assert scizor.base_stats.atk == 
definitely_not_scizor.base_stats.atk\n assert scizor == definitely_not_scizor\n\n def test_double_type_pokemon_invalid_comparison(self):\n scizor = PokemonBaseModel(\n name=\"Scizor\",\n types=(PokemonType.Bug, PokemonType.Steel),\n base_stats=StatsDict(hp=70, atk=130, phys_def=100, spe_atk=55, spe_def=80, spd=65)\n )\n\n crawdaunt = PokemonBaseModel(\n name=\"Crawdaunt\",\n types=(PokemonType.Water, PokemonType.Dark),\n base_stats=StatsDict(hp=63, atk=130, phys_def=85, spe_atk=90, spe_def=55, spd=55)\n )\n\n assert scizor.name != crawdaunt.name\n assert not type_equals(scizor.type, crawdaunt.type)\n assert scizor.base_stats != crawdaunt.base_stats\n assert scizor.base_stats.atk == crawdaunt.base_stats.atk\n assert scizor != crawdaunt\n\n def test_simple_type_pokemon(self):\n blissey = PokemonBaseModel(\n name=\"Blissey\",\n types=(PokemonType.Normal, None),\n base_stats=StatsDict(hp=255, atk=10, phys_def=10, spe_atk=75, spe_def=135, spd=55)\n )\n\n assert blissey.name == \"Blissey\"\n assert type_equals(blissey.type, (PokemonType.Normal, None))\n assert blissey.base_stats == StatsDict(hp=255, atk=10, phys_def=10, spe_atk=75, spe_def=135, spd=55)\n assert blissey.base_stats.hp == 255\n\n def test_simple_type_pokemon_valid_comparison(self):\n blissey = PokemonBaseModel(\n name=\"Blissey\",\n types=(PokemonType.Normal, None),\n base_stats=StatsDict(hp=255, atk=10, phys_def=10, spe_atk=75, spe_def=135, spd=55)\n )\n\n definitely_not_blissey = PokemonBaseModel(\n name=\"Blissey\",\n types=(None, PokemonType.Normal),\n base_stats=StatsDict(hp=255, atk=10, phys_def=10, spe_atk=75, spe_def=135, spd=55)\n )\n\n assert blissey.name == definitely_not_blissey.name\n assert type_equals(blissey.type, definitely_not_blissey.type)\n assert blissey.base_stats == definitely_not_blissey.base_stats\n assert blissey.base_stats.atk == definitely_not_blissey.base_stats.atk\n assert blissey == definitely_not_blissey\n\n def test_simple_type_pokemon_invalid_comparison(self):\n blissey = 
PokemonBaseModel(\n name=\"Blissey\",\n types=(PokemonType.Normal, None),\n base_stats=StatsDict(hp=255, atk=10, phys_def=10, spe_atk=75, spe_def=135, spd=55)\n )\n\n barraskewda = PokemonBaseModel(\n name=\"Barraskewda\",\n types=(PokemonType.Water, None),\n base_stats=StatsDict(hp=61, atk=123, phys_def=60, spe_atk=60, spe_def=50, spd=136)\n )\n\n assert blissey.name != barraskewda.name\n assert not type_equals(blissey.type, barraskewda.type)\n assert blissey.base_stats != barraskewda.base_stats\n assert blissey.base_stats.atk != barraskewda.base_stats.atk\n assert blissey != barraskewda\n" }, { "alpha_fraction": 0.5142965912818909, "alphanum_fraction": 0.5432710647583008, "avg_line_length": 40.96799850463867, "blob_id": "1a011e917beb88a49cd39fe9430fad2c2e1c0e89", "content_id": "afbe8def605979165cb2d74d9f5f358ae73d9cc9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5246, "license_type": "permissive", "max_line_length": 119, "num_lines": 125, "path": "/tests/engine/game/test_BattleEngine.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "import builtins\nfrom unittest import TestCase\nimport mock\n\nfrom engine.game import InputHandler\nfrom engine.game.BattleEngine import BattleEngine\nfrom engine.pkmn.types.ClassicTypesRuleSet import ClassicTypesRuleSet\nfrom models.game.battle.BattleGameState import BattleGameState\nfrom models.game.trainer.PokemonTrainer import PokemonTrainer\nfrom models.game.trainer.utils.ArenaBadge import ArenaBadge\nfrom models.pkmn.PokemonModel import PokemonModel\nfrom models.pkmn.moves.PokemonMove import PokemonMove, MoveCategory\nfrom models.pkmn.natures.PokemonNature import PokemonNature\nfrom models.pkmn.stats.StatsDict import StatsDict\nfrom models.pkmn.types.PokemonType import PokemonType\n\n\nclass TestBattleEngine(TestCase):\n\n def setUp(self) -> None:\n self.Pikachu = PokemonModel(\n name=\"Pikachu\",\n 
types=(PokemonType.Electric, None),\n level=100,\n nature=PokemonNature.Jolly,\n moves=[\n PokemonMove(\n name=\"Volt Tackle\",\n move_type=PokemonType.Electric,\n category=MoveCategory.Physical,\n pp=24,\n power=120\n ),\n PokemonMove(\n name=\"Iron Tail\",\n move_type=PokemonType.Steel,\n category=MoveCategory.Physical,\n pp=24,\n power=100,\n accuracy=75\n ),\n PokemonMove(\n name=\"Thunderbolt\",\n move_type=PokemonType.Electric,\n category=MoveCategory.Special,\n pp=24,\n power=90\n )\n ],\n base_stats=StatsDict(hp=35, atk=55, phys_def=40, spe_atk=50, spe_def=50, spd=90),\n evs=StatsDict(hp=0, atk=252, phys_def=0, spe_atk=4, spe_def=0, spd=252),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n self.Pidgeot = PokemonModel(\n name=\"Pidgeot\",\n types=(PokemonType.Flying, PokemonType.Normal),\n level=100,\n nature=PokemonNature.Jolly,\n moves=[\n PokemonMove(\n name=\"Double Edge\",\n move_type=PokemonType.Normal,\n category=MoveCategory.Physical,\n pp=24,\n power=120\n ),\n PokemonMove(\n name=\"Brave Bird\",\n move_type=PokemonType.Flying,\n category=MoveCategory.Physical,\n pp=24,\n power=120\n )\n ],\n base_stats=StatsDict(hp=83, atk=80, phys_def=75, spe_atk=70, spe_def=70, spd=101),\n evs=StatsDict(hp=0, atk=252, phys_def=0, spe_atk=0, spe_def=4, spd=252),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n self.Blastoise = PokemonModel(\n name=\"Blastoise\",\n types=(PokemonType.Water, None),\n level=100,\n nature=PokemonNature.Modest,\n moves=[\n PokemonMove(\n name=\"Hydro Pump\",\n move_type=PokemonType.Water,\n category=MoveCategory.Special,\n pp=8,\n power=110,\n accuracy=80\n ),\n PokemonMove(\n name=\"Ice Beam\",\n move_type=PokemonType.Ice,\n category=MoveCategory.Special,\n pp=16,\n power=90\n )\n ],\n base_stats=StatsDict(hp=79, atk=83, phys_def=100, spe_atk=85, spe_def=105, spd=78),\n evs=StatsDict(hp=252, atk=0, phys_def=0, spe_atk=252, spe_def=4, spd=0),\n ivs=StatsDict(hp=31, 
atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n self.Red = PokemonTrainer(\n name=\"Red\",\n team=[self.Pikachu],\n badges=[ArenaBadge.Boulder, ArenaBadge.Cascade, ArenaBadge.Thunder, ArenaBadge.Rainbow,\n ArenaBadge.Soul, ArenaBadge.Marsh, ArenaBadge.Marsh, ArenaBadge.Earth]\n )\n self.Blue = PokemonTrainer(\n name=\"Blue\",\n team=[self.Pidgeot, self.Blastoise],\n badges=[ArenaBadge.Boulder, ArenaBadge.Cascade, ArenaBadge.Thunder, ArenaBadge.Rainbow,\n ArenaBadge.Soul, ArenaBadge.Marsh, ArenaBadge.Marsh, ArenaBadge.Earth]\n )\n self.BattleGameState = BattleGameState(player=self.Red, opponent=self.Blue)\n\n def test_red_blue_battle(self):\n with mock.patch.object(InputHandler, 'getCancelableNumberInput', return_value=0):\n with mock.patch.object(InputHandler, 'getNumberInput', return_value=0):\n with mock.patch.object(InputHandler, 'getDecisionType', return_value='m'):\n with mock.patch.object(builtins, 'input', return_value=''):\n engine = BattleEngine(battleGameState=self.BattleGameState, typesRuleSet=ClassicTypesRuleSet())\n assert not engine.startGame()\n" }, { "alpha_fraction": 0.5457690358161926, "alphanum_fraction": 0.5751765966415405, "avg_line_length": 37.97190856933594, "blob_id": "f8f45e1e151f7b12d136629e7ba7b33ce6b2320b", "content_id": "2ff07a9a51b2b151e410045e5b43264302815449", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6937, "license_type": "permissive", "max_line_length": 118, "num_lines": 178, "path": "/miniShowdown.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from argparse import ArgumentParser\nfrom json import load\n\nfrom database.DatabaseConfig import updatePkmnDatabase, updateMoveDatabase\n\nfrom engine.game.BattleEngine import BattleEngine\nfrom engine.pkmn.types.ClassicTypesRuleSet import ClassicTypesRuleSet\nfrom models.game.battle.BattleGameState import BattleGameState\nfrom models.game.trainer.PokemonTrainer import 
PokemonTrainer\nfrom models.game.trainer.utils.ArenaBadge import ArenaBadge\nfrom models.pkmn.PokemonModel import PokemonModel\nfrom models.pkmn.moves.PokemonMove import PokemonMove, MoveCategory\nfrom models.pkmn.natures.PokemonNature import PokemonNature\nfrom models.pkmn.stats.StatsDict import StatsDict\nfrom models.pkmn.types.PokemonType import PokemonType\n\nparser = ArgumentParser()\nparser.add_argument(\"--update_db\", \"-u\", help=\"Fill out Pkmn info as json in database folder for future use\",\n type=bool, required=False)\nparser.add_argument(\"--player_trainer_json\", \"-p\", help=\"Path to player's pokemon trainer json\",\n type=str, required=False)\nparser.add_argument(\"--opponent_trainer_json\", \"-o\", help=\"Path to opponent's pokemon trainer json\",\n type=str, required=False)\nargs = parser.parse_args()\n\n\ndef setUpDefaultTeams():\n pikachu = PokemonModel(\n name=\"Pikachu\",\n types=(PokemonType.Electric, None),\n level=100,\n nature=PokemonNature.Jolly,\n moves=[\n PokemonMove(\n name=\"Volt Tackle\",\n move_type=PokemonType.Electric,\n category=MoveCategory.Physical,\n pp=24,\n power=120\n ),\n PokemonMove(\n name=\"Iron Tail\",\n move_type=PokemonType.Steel,\n category=MoveCategory.Physical,\n pp=24,\n power=100,\n accuracy=75\n ),\n PokemonMove(\n name=\"Thunderbolt\",\n move_type=PokemonType.Electric,\n category=MoveCategory.Special,\n pp=24,\n power=90\n )\n ],\n base_stats=StatsDict(hp=35, atk=55, phys_def=40, spe_atk=50, spe_def=50, spd=90),\n evs=StatsDict(hp=0, atk=252, phys_def=0, spe_atk=4, spe_def=0, spd=252),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n charizard = PokemonModel(\n name=\"Charizard\",\n types=(PokemonType.Fire, PokemonType.Flying),\n level=100,\n nature=PokemonNature.Jolly,\n moves=[\n PokemonMove(\n name=\"Fire Blast\",\n move_type=PokemonType.Fire,\n category=MoveCategory.Special,\n pp=8,\n power=110,\n accuracy=85\n ),\n PokemonMove(\n name=\"Hurricane\",\n 
move_type=PokemonType.Flying,\n category=MoveCategory.Special,\n pp=16,\n power=110,\n accuracy=70\n )\n ],\n base_stats=StatsDict(hp=78, atk=84, phys_def=78, spe_atk=109, spe_def=85, spd=100),\n evs=StatsDict(hp=0, atk=0, phys_def=0, spe_atk=252, spe_def=4, spd=252),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n pidgeot = PokemonModel(\n name=\"Pidgeot\",\n types=(PokemonType.Flying, PokemonType.Normal),\n level=100,\n nature=PokemonNature.Jolly,\n moves=[\n PokemonMove(\n name=\"Double Edge\",\n move_type=PokemonType.Normal,\n category=MoveCategory.Physical,\n pp=24,\n power=120\n ),\n PokemonMove(\n name=\"Brave Bird\",\n move_type=PokemonType.Flying,\n category=MoveCategory.Physical,\n pp=24,\n power=120\n )\n ],\n base_stats=StatsDict(hp=83, atk=80, phys_def=75, spe_atk=70, spe_def=70, spd=101),\n evs=StatsDict(hp=0, atk=252, phys_def=0, spe_atk=0, spe_def=4, spd=252),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n blastoise = PokemonModel(\n name=\"Blastoise\",\n types=(PokemonType.Water, None),\n level=100,\n nature=PokemonNature.Modest,\n moves=[\n PokemonMove(\n name=\"Hydro Pump\",\n move_type=PokemonType.Water,\n category=MoveCategory.Special,\n pp=8,\n power=110,\n accuracy=80\n ),\n PokemonMove(\n name=\"Ice Beam\",\n move_type=PokemonType.Ice,\n category=MoveCategory.Special,\n pp=16,\n power=90\n )\n ],\n base_stats=StatsDict(hp=79, atk=83, phys_def=100, spe_atk=85, spe_def=105, spd=78),\n evs=StatsDict(hp=252, atk=0, phys_def=0, spe_atk=252, spe_def=4, spd=0),\n ivs=StatsDict(hp=31, atk=31, phys_def=31, spe_atk=31, spe_def=31, spd=31)\n )\n red = PokemonTrainer(\n name=\"Red\",\n team=[pikachu, charizard],\n badges=[ArenaBadge.Boulder, ArenaBadge.Cascade, ArenaBadge.Thunder, ArenaBadge.Rainbow,\n ArenaBadge.Soul, ArenaBadge.Marsh, ArenaBadge.Marsh, ArenaBadge.Earth]\n )\n blue = PokemonTrainer(\n name=\"Blue\",\n team=[pidgeot, blastoise],\n badges=[ArenaBadge.Boulder, 
ArenaBadge.Cascade, ArenaBadge.Thunder, ArenaBadge.Rainbow,\n ArenaBadge.Soul, ArenaBadge.Marsh, ArenaBadge.Marsh, ArenaBadge.Earth]\n )\n return red, blue\n\n\ndef main():\n if args.update_db:\n updatePkmnDatabase([str(_) for _ in range(10)])\n updateMoveDatabase([\"Swords Dance\", \"Meteor Mash\", \"Close Combat\", \"Mach Punch\", \"Aura Sphere\", \"Bug Bite\",\n \"Hydro Pump\", \"Surf\", \"Waterfall\", \"Moonblast\", \"Play Rough\", \"Solar Beam\", \"Wood Hammer\",\n \"Stone Edge\", \"Meteor Gem\", \"Sludge Bomb\", \"Poison Jab\", \"Draco Meteor\", \"Outrage\",\n \"Dragon Dance\", \"Psychic\", \"Zen Headbutt\", \"Mystical Fire\", \"Fire Blast\", \"Flare Blitz\",\n \"Hurricane\", \"Brave Bird\", \"Earthquake\", \"Earth Power\"])\n\n player, opponent = setUpDefaultTeams()\n\n if args.player_trainer_json:\n with open(args.player_trainer_json) as player_trainer_json_file:\n player = PokemonTrainer.fromJson(load(player_trainer_json_file))\n if args.opponent_trainer_json:\n with open(args.opponent_trainer_json) as opponent_trainer_json_file:\n opponent = PokemonTrainer.fromJson(load(opponent_trainer_json_file))\n\n engine = BattleEngine(battleGameState=BattleGameState(player=player, opponent=opponent),\n typesRuleSet=ClassicTypesRuleSet())\n engine.startGame()\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5387858152389526, "alphanum_fraction": 0.5716694593429565, "avg_line_length": 24.23404312133789, "blob_id": "7650a23911a77fc3e477c36157928db60618e627", "content_id": "343f5a4bf2e820b398e2200103635d6acb678983", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1186, "license_type": "permissive", "max_line_length": 106, "num_lines": 47, "path": "/models/pkmn/types/PokemonType.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from enum import Enum\nfrom re import sub\n\n\nclass PokemonType(Enum):\n Normal = 1\n Fighting = 2\n Flying = 3\n Poison = 
4\n Ground = 5\n Rock = 6\n Bug = 7\n Ghost = 8\n Psychic = 9\n Grass = 10\n Fire = 11\n Water = 12\n Electric = 13\n Ice = 14\n Dragon = 15\n Steel = 16\n Dark = 17\n Fairy = 18\n\n def __lt__(self, other):\n if self.__class__ != other.__class__:\n return NotImplementedError\n return self.value < other.value\n\n def __str__(self):\n return sub(\"PokemonType\\\\.\", \"\", super().__str__())\n\n\ndef format_types_tuple(types: (PokemonType, PokemonType)):\n if types[0] is None:\n if types[1] is None:\n raise Exception(\"PokemonBaseModel - init: Unhandled type (None; None)\")\n else:\n return types[1], None\n else:\n if types[0] == types[1]:\n raise Exception(f\"PokemonBaseModel - init: Unhandled type ({str(types[0])}; {str(types[1])})\")\n return types if types[1] is None or types[0] < types[1] else (types[1], types[0])\n\n\ndef type_equals(self, other: (PokemonType, PokemonType)):\n return format_types_tuple(self) == format_types_tuple(other)\n" }, { "alpha_fraction": 0.6798893213272095, "alphanum_fraction": 0.6881918907165527, "avg_line_length": 37.71428680419922, "blob_id": "d3ffcb376c94525d59eb73a4840fa904c2c46a14", "content_id": "8d202128c06a12dfc60c761811fd0a24f9121a0c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2168, "license_type": "permissive", "max_line_length": 113, "num_lines": 56, "path": "/engine/game/BattleDisplay.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from re import sub, M\nfrom models.game.battle.BattleGameState import BattleGameState\nfrom models.pkmn.PokemonModel import PokemonModel\nfrom models.pkmn.moves.PokemonMove import PokemonMove\n\n__separation_line = '-'.join('' for _ in range(150))\n\n\ndef __shiftDisplayBox(box: str, shift: int):\n return sub(\"^\", \"\\t\".join(\"\" for _ in range(shift)), box, flags=M)\n\n\ndef __displaySideBySide(box_1: str, box_2: str):\n split_lines = (box_1.splitlines(), box_2.splitlines())\n 
print(\"\\n\".join([f\"{split_lines[0][_]:<50}{split_lines[1][_]:>50}\" for _ in range(len(split_lines[0]))]))\n\n\ndef displayPlayers(battleGameState: BattleGameState):\n __displaySideBySide(str(battleGameState.player), str(battleGameState.opponent))\n\n\ndef displayBattleGameState(battleGameState: BattleGameState):\n displayPlayers(battleGameState)\n print(f\"Turn: {battleGameState.turn}\\n\")\n __displaySideBySide(str(battleGameState.getPlayerActivePkmn()), str(battleGameState.getOpponentActivePkmn()))\n print(f\"{__separation_line}\")\n print(battleGameState.getPlayerActivePkmn().moveListAsStr())\n print(f\"{__separation_line}\")\n\n\ndef displayUsedMove(caster: PokemonModel, caster_move: PokemonMove, trgt: PokemonModel,\n type_effectiveness: float, damage: int):\n print(f\"{caster.name} uses {caster_move.name}\")\n if type_effectiveness != 1:\n print(\"It's super effective!\" if type_effectiveness > 1\n else f\"It doesn't affect {trgt.name}\" if type_effectiveness == 0\n else \"It's not very effective...\")\n if damage != 0:\n print(f\"{trgt.name} loses {damage} HP\")\n if trgt.isKO():\n print(f\"{trgt.name} is K.O !\")\n input()\n\n\ndef displaySwitch(player_name: str, out: PokemonModel, coming_in: PokemonModel):\n print(f\"{out.name} goes back into its PokeBall.\")\n print(f\"{player_name} sends {coming_in.name} out !\")\n input()\n\n\ndef displayEndOfBattle(win: bool, battleGameState: BattleGameState):\n print(\"# Finished #\")\n displayPlayers(battleGameState)\n print(f\"Winner: {battleGameState.player.name if win else battleGameState.opponent.name}\")\n print(f\"{'-'.join(__separation_line)}\")\n input()\n" }, { "alpha_fraction": 0.6985074877738953, "alphanum_fraction": 0.7008955478668213, "avg_line_length": 39.85365676879883, "blob_id": "438e0de04a4cd506c9b73a3bb09b9af06e069f68", "content_id": "07b0a88620977d804617a27caf0dbc68f04679e6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1675, 
"license_type": "permissive", "max_line_length": 79, "num_lines": 41, "path": "/models/game/battle/BattleGameState.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from models.game.trainer.PokemonTrainer import PokemonTrainer\nfrom models.pkmn.PokemonModel import PokemonModel\nfrom models.pkmn.moves.PokemonMove import PokemonMove\n\n\nclass BattleGameState:\n\n def __init__(self, player: PokemonTrainer, opponent: PokemonTrainer):\n self.turn = 1\n self.player = player.makeCopy()\n self.__player_active_pkmn_slot = 0\n self.player_move_selection = None\n self.opponent = opponent.makeCopy()\n self.opponent_move_selection = None\n self.__opponent_active_pkmn_slot = 0\n\n def getPlayerActivePkmn(self) -> PokemonModel:\n return self.player.team[self.__player_active_pkmn_slot]\n\n def setPlayerActivePkmn(self, index: int) -> PokemonModel:\n self.__player_active_pkmn_slot = index\n return self.player.team[self.__player_active_pkmn_slot]\n\n def getOpponentActivePkmn(self) -> PokemonModel:\n return self.opponent.team[self.__opponent_active_pkmn_slot]\n\n def setOpponentActivePkmn(self, index: int) -> PokemonModel:\n self.__opponent_active_pkmn_slot = index\n return self.opponent.team[self.__opponent_active_pkmn_slot]\n\n def sendNextOpponentActivePkmn(self) -> PokemonModel:\n self.__opponent_active_pkmn_slot += 1\n return self.opponent.team[self.__opponent_active_pkmn_slot]\n\n def getTurnState(self):\n return (self.getPlayerActivePkmn(), self.player_move_selection),\\\n (self.getOpponentActivePkmn(), self.opponent_move_selection)\n\n def setTurnState(self, plyr_move: PokemonMove, opponent_move: PokemonMove):\n self.player_move_selection = plyr_move\n self.opponent_move_selection = opponent_move\n" }, { "alpha_fraction": 0.5681179761886597, "alphanum_fraction": 0.5730336904525757, "avg_line_length": 37.486488342285156, "blob_id": "c6787e6d18561174603430d8f5be3cdee7ae0515", "content_id": "b059d225782a761fe809c888cc8d5da86be95347", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1424, "license_type": "permissive", "max_line_length": 100, "num_lines": 37, "path": "/models/game/trainer/PokemonTrainer.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from typing import List\nfrom models.game.trainer.utils.ArenaBadge import ArenaBadge\n\nfrom models.pkmn.PokemonModel import PokemonModel\n\n\nclass PokemonTrainer:\n\n def __init__(self, name: str, team: List[PokemonModel], badges: List[ArenaBadge]):\n self.name = name\n self.team = [team[i] if i < len(team) else None for i in range(6)]\n self.badges = list(badges)\n self.badges.sort()\n\n def __str__(self):\n team_str_list = [f\"{x}. {self.team[x].name} ({self.team[x].stats.hp}/{self.team[x].max_hp})\"\n for x in range(len(self.team)) if self.team[x] is not None]\n return f\"Trainer: {self.name}\\n\" \\\n f\"{' - '.join(team_str_list[0:2])}\\n\" \\\n f\"{' - '.join(team_str_list[2:4])}\\n\" \\\n f\"{' - '.join(team_str_list[4:6])}\\n\"\n\n def hasLost(self):\n return all(pkmn.isKO() if pkmn is not None else True for pkmn in self.team)\n\n def makeCopy(self):\n return PokemonTrainer(name=self.name, team=list(self.team), badges=list(self.badges))\n\n @staticmethod\n def fromJson(json: {}):\n if \"name\" in json and \"team\" in json:\n return PokemonTrainer(\n name=json[\"name\"],\n team=[PokemonModel.fromJson(json[\"team\"][_]) for _ in json[\"team\"]],\n badges=[]\n )\n raise AttributeError(\"PokemonTrainer - fromJson: Invalid JSON Input\")\n" }, { "alpha_fraction": 0.729411780834198, "alphanum_fraction": 0.7326797246932983, "avg_line_length": 49.16393280029297, "blob_id": "0d3fd58d8049f1cb8f7db401db8a97237491bf3b", "content_id": "8876830097963204fb93a583be0a14fa2161bbdb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3060, "license_type": "permissive", "max_line_length": 116, 
"num_lines": 61, "path": "/models/pkmn/natures/PokemonNature.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from enum import Enum\n\nfrom models.pkmn.stats.StatsDict import StatsDict\n\n\n# Bonus/Malus percentage from natures\nnature_base_modifier = 100\nnature_bonus = 10\nnature_bonus_modifier = nature_base_modifier + nature_bonus\nnature_malus = 10\nnature_malus_modifier = nature_base_modifier - nature_malus\n\n\nclass _PokemonNatureImplm:\n\n def __init__(self, hp: int = nature_base_modifier,\n atk: int = nature_base_modifier, phys_def: int = nature_base_modifier,\n spe_atk: int = nature_base_modifier, spe_def: int = nature_base_modifier,\n spd: int = nature_base_modifier):\n super().__init__()\n self.__stats_table = StatsDict(hp=hp, atk=atk, phys_def=phys_def, spe_atk=spe_atk, spe_def=spe_def, spd=spd)\n\n def apply_modifier(self, stats: StatsDict):\n for stat_key in self.__stats_table.__dict__.keys():\n stats[stat_key] *= self.__stats_table[stat_key] / 100\n\n\nclass PokemonNature(Enum):\n Lonely = _PokemonNatureImplm(atk=nature_bonus_modifier, phys_def=nature_malus_modifier)\n Adamant = _PokemonNatureImplm(atk=nature_bonus_modifier, spe_atk=nature_malus_modifier)\n Naughty = _PokemonNatureImplm(atk=nature_bonus_modifier, spe_def=nature_malus_modifier)\n Brave = _PokemonNatureImplm(atk=nature_bonus_modifier, spd=nature_malus_modifier)\n\n Bold = _PokemonNatureImplm(phys_def=nature_bonus_modifier, atk=nature_malus_modifier)\n Impish = _PokemonNatureImplm(phys_def=nature_bonus_modifier, spe_atk=nature_malus_modifier)\n Lax = _PokemonNatureImplm(phys_def=nature_bonus_modifier, spe_def=nature_malus_modifier)\n Relaxed = _PokemonNatureImplm(phys_def=nature_bonus_modifier, spd=nature_malus_modifier)\n\n Modest = _PokemonNatureImplm(spe_atk=nature_bonus_modifier, atk=nature_malus_modifier)\n Mild = _PokemonNatureImplm(spe_atk=nature_bonus_modifier, phys_def=nature_malus_modifier)\n Rash = 
_PokemonNatureImplm(spe_atk=nature_bonus_modifier, spe_def=nature_malus_modifier)\n Quiet = _PokemonNatureImplm(spe_atk=nature_bonus_modifier, spd=nature_malus_modifier)\n\n Calm = _PokemonNatureImplm(spe_def=nature_bonus_modifier, atk=nature_malus_modifier)\n Gentle = _PokemonNatureImplm(spe_def=nature_bonus_modifier, phys_def=nature_malus_modifier)\n Careful = _PokemonNatureImplm(spe_def=nature_bonus_modifier, spe_atk=nature_malus_modifier)\n Sassy = _PokemonNatureImplm(spe_def=nature_bonus_modifier, spd=nature_malus_modifier)\n\n Timid = _PokemonNatureImplm(spd=nature_bonus_modifier, atk=nature_malus_modifier)\n Hasty = _PokemonNatureImplm(spd=nature_bonus_modifier, phys_def=nature_malus_modifier)\n Jolly = _PokemonNatureImplm(spd=nature_bonus_modifier, spe_atk=nature_malus_modifier)\n Naive = _PokemonNatureImplm(spd=nature_bonus_modifier, spe_def=nature_malus_modifier)\n\n Quirky = _PokemonNatureImplm()\n Docile = _PokemonNatureImplm()\n Hardy = _PokemonNatureImplm()\n Bashful = _PokemonNatureImplm()\n Serious = _PokemonNatureImplm()\n\n def apply_modifier(self, stats: StatsDict):\n self.value.apply_modifier(stats)\n" }, { "alpha_fraction": 0.5120481848716736, "alphanum_fraction": 0.5192770957946777, "avg_line_length": 30.846153259277344, "blob_id": "2a4f7584fe78e81fd9259bbf384f37c024681e68", "content_id": "374f8123a9618f449dc0ea8805621fc614597310", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 830, "license_type": "permissive", "max_line_length": 119, "num_lines": 26, "path": "/models/pkmn/stats/StatsDict.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "\n\nclass StatsDict:\n\n def __init__(self, hp: int = 0, atk: int = 0, phys_def: int = 0, spe_atk: int = 0, spe_def: int = 0, spd: int = 0):\n self.hp = int(hp)\n self.atk = int(atk)\n self.phys_def = int(phys_def)\n self.spe_atk = int(spe_atk)\n self.spe_def = int(spe_def)\n self.spd = int(spd)\n\n def 
__eq__(self, other):\n if self.__class__ != other.__class__:\n return NotImplementedError\n return self.__dict__ == other.__dict__\n\n def __getitem__(self, item: str):\n return self.__dict__[item]\n\n def __setitem__(self, key, value):\n self.__dict__[key] = int(value)\n\n @classmethod\n def from_json(cls, json: {}):\n keys = cls.__dict__.keys()\n if all(keys) in json:\n return StatsDict(**{key: json[key] for key in keys})\n" }, { "alpha_fraction": 0.4727272689342499, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 14.857142448425293, "blob_id": "e233bf4b74f01ad24a13050b2ac86b580e7781ab", "content_id": "522033f22f21144dcc803b82caa609f241df7f18", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 110, "license_type": "permissive", "max_line_length": 22, "num_lines": 7, "path": "/requirements.txt", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "pandas==1.0.3\nnumpy==1.18.2\npython-dateutil==2.8.1\nmock~=4.0.3\npathlib~=1.0.1\nrequests~=2.23.0\nargparse~=1.4.0" }, { "alpha_fraction": 0.5749621391296387, "alphanum_fraction": 0.5790005326271057, "avg_line_length": 27.299999237060547, "blob_id": "10099c5e6d66dbc75e0e549ee8d44af52f0079b9", "content_id": "6ccdb14e244394ced4c7200587cf044f4b8a7c3d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1981, "license_type": "permissive", "max_line_length": 92, "num_lines": 70, "path": "/engine/game/InputHandler.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from typing import List\n\nfrom models.pkmn.PokemonModel import PokemonModel\nfrom models.pkmn.moves.PokemonMove import PokemonMove\n\n\ndef repeatOnError(*exceptions):\n def checking(function):\n def checked(*args, **kwargs):\n while True:\n try:\n result = function(*args, **kwargs)\n except exceptions as ex:\n print(\"Invalid Input!\")\n else:\n return result\n\n return 
checked\n\n return checking\n\n\n@repeatOnError(NotImplementedError)\ndef getDecisionType():\n i = input(\"Enter 'm' to use a move or 's' to switch out: \")\n if i != \"m\" and i != \"s\":\n raise NotImplementedError()\n return i\n\n\n@repeatOnError(ValueError)\ndef getNumberInput(max_range: int):\n i = input(f'Enter your selection (0 - {max_range - 1}): ')\n val = int(i)\n if val < 0 or val >= max_range:\n raise ValueError\n return val\n\n\n@repeatOnError(ValueError)\ndef getCancelableNumberInput(max_range: int):\n i = input(f'Enter your selection (0 - {max_range - 1}) or Cancel (C): ')\n if i == \"C\":\n return -1\n val = int(i)\n if val < 0 or val >= max_range:\n raise ValueError\n return val\n\n\ndef pkmnSelection(team: List[PokemonModel]) -> int:\n print(\"\\nChoose a Pkmn.\")\n while True:\n i = getNumberInput(len([_ for _ in team if _ is not None]))\n if team[i].isKO():\n print(f\"{team[i].name} is K.O ! It cannot fight anymore...\")\n else:\n break\n return i\n\n\ndef turnDecision(active_pkmn: PokemonModel, team: List[PokemonModel]) -> (PokemonMove, int):\n while True:\n t = getDecisionType()\n max_range = len([_ for _ in active_pkmn.moves if _ is not None]) if t == \"m\" \\\n else len([_ for _ in team if _ is not None])\n i = getCancelableNumberInput(max_range=max_range)\n if i != -1:\n break\n return (active_pkmn.moves[i], None) if t == \"m\" else (None, i)\n" }, { "alpha_fraction": 0.5261538624763489, "alphanum_fraction": 0.5507692098617554, "avg_line_length": 18.058822631835938, "blob_id": "30918d05131bed246946bdd41a9591e10729e937", "content_id": "ae4df15d8b072d29429511e89eff6003beb102af", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 325, "license_type": "permissive", "max_line_length": 45, "num_lines": 17, "path": "/models/game/trainer/utils/ArenaBadge.py", "repo_name": "LiquidNalee/MiniPkmnShowdown", "src_encoding": "UTF-8", "text": "from enum import Enum\n\n\nclass 
ArenaBadge(Enum):\n Boulder = 1\n Cascade = 2\n Thunder = 3\n Rainbow = 4\n Soul = 5\n Marsh = 6\n Volcano = 7\n Earth = 8\n\n def __lt__(self, other):\n if self.__class__ != other.__class__:\n return NotImplementedError\n return self.value < other.value\n\n" } ]
27
firstcase/theSecretForest
https://github.com/firstcase/theSecretForest
d8ab550e357e6411fa5b0ef9b42022bee6646cf6
f475527cfacb85c604951d89b840a0195b88f8ba
69be2c21357dcd8254716c63d896905ebb6fa17f
refs/heads/master
2023-01-10T01:44:14.003939
2020-11-06T15:54:50
2020-11-06T15:54:50
309,050,666
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4482758641242981, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 13.5, "blob_id": "5f38c8c5f7f685f5bbc1d4d4abe43ff39b06578e", "content_id": "2316b9945ca815007a7bda3c05e1fbdd8dd2ef18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 45, "license_type": "no_license", "max_line_length": 19, "num_lines": 2, "path": "/README.md", "repo_name": "firstcase/theSecretForest", "src_encoding": "UTF-8", "text": "# 비밀의 숲 프로젝트(11/1~)\nstart!!!\n" }, { "alpha_fraction": 0.5865615606307983, "alphanum_fraction": 0.6276996731758118, "avg_line_length": 29.38541603088379, "blob_id": "7040f205527aaef568cb2dcd605c280ee8a7674c", "content_id": "bff0437f107b0da066c8742471220b3b069785b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3121, "license_type": "no_license", "max_line_length": 206, "num_lines": 96, "path": "/app.py", "repo_name": "firstcase/theSecretForest", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\nimport urllib3\nimport json\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef home():\n return render_template('index.html')\n\n\[email protected]('/task1')\ndef my_page():\n print(step1())\n print(step3(step2('https://news.v.daum.net/v/20201106110404888')))\n return 'This is My Page!'\n\n#url을 변경해서기 검색 쿼리를 변경하고, 각 뉴스의 url을 가져오기\ndef step1():\n # URL을 읽어서 HTML를 받아오고,\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\n data = requests.get('https://search.daum.net/search?w=news&nil_search=btn&DA=NTB&enc=utf8&cluster=y&cluster_page=1&q=%EA%B2%80%EC%B0%B0%EC%B4%9D%EC%9E%A5%20%EC%9C%A4%EC%84%9D%EC%97%B4', headers=headers)\n\n #clusterResultUL > li.fst > div.wrap_cont > div > span.f_nb.date > a\n # clusterResultUL > li.fst 
> div.wrap_cont > div > span.f_nb.date > a\n # clusterResultUL > li:nth-child(2) > div.wrap_cont > div > span.f_nb.date > a\n soup = BeautifulSoup(data.text, 'html.parser')\n\n links = soup.select('#clusterResultUL > li > div.wrap_cont > div > span.f_nb.date > a')\n news_link = []\n for link in links:\n news_link.append(link['href'])\n\n return news_link\n\n\n#가져온 url에서 본문 text 가져오기\ndef step2(news_link):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\n data = requests.get(news_link,headers=headers)\n\n soup = BeautifulSoup(data.text, 'html.parser')\n contents = soup.select('#harmonyContainer > section > p')\n news_contents = []\n for content in contents:\n news_contents.append(content.getText())\n one_content = \" \".join(news_contents)\n return one_content\n\n\n#가져온 text를 분석기로 보내서 나온 인물명을 총장명칭과 함께 저장하기\ndef step3(text):\n openApiURL = \"http://aiopen.etri.re.kr:8000/WiseNLU\"\n\n accessKey = \"*****\"\n analysisCode = \"ner\"\n\n requestJson = {\n \"access_key\": accessKey,\n \"argument\": {\n \"text\": text,\n \"analysis_code\": analysisCode\n }\n }\n\n http = urllib3.PoolManager()\n response = http.request(\n \"POST\",\n openApiURL,\n headers={\"Content-Type\": \"application/json; charset=UTF-8\"},\n body=json.dumps(requestJson)\n )\n\n api_doc = str(response.data, \"utf-8\")\n print(api_doc)\n json_data = json.loads(api_doc)\n print(json_data)\n\n api_results = []\n# return_object > sentence(배열) 반복문을 돌면서 0(딕셔너리) key 'NE'(배열) 내 요소는 딕서녀리 'type' : PS_NAME 딕셔너리의 text key의 밸\n for sentence in json_data['return_object']['sentence']:\n for a in sentence['NE']:\n if a['type'] == 'PS_NAME':\n api_results.append(a['text'])\n\n return api_results\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=5000, debug=True)\n" } ]
2
stats94/championship-bot
https://github.com/stats94/championship-bot
b369a6ca3439a5ffcc86abedf203718ebc0aefbf
3cf1bd91c63fd5d8e19e9764f3608eca5b8ce2eb
c4d2683882b666f2701faa194fa9874d8195ace6
refs/heads/master
2020-09-02T05:55:38.017064
2019-11-02T12:14:56
2019-11-02T12:14:56
219,148,801
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6404494643211365, "alphanum_fraction": 0.6853932738304138, "avg_line_length": 28.66666603088379, "blob_id": "4c9328936c8e061b7f1a14dfcd025537189c7628", "content_id": "8cd704d538b042435022b0ee65b0059e342086d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 51, "num_lines": 3, "path": "/config.py", "repo_name": "stats94/championship-bot", "src_encoding": "UTF-8", "text": "api_key = **APIKEY**\nendpoint = 'https://api-football-v1.p.rapidapi.com'\nleague_id = 565\n" }, { "alpha_fraction": 0.5471124649047852, "alphanum_fraction": 0.5501520037651062, "avg_line_length": 25.31999969482422, "blob_id": "ab3971f875c3fdc023e86940d5e134195c9415ff", "content_id": "eb49ac6fc477f9cfe2271b8841e2cc35587a744e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 658, "license_type": "no_license", "max_line_length": 78, "num_lines": 25, "path": "/api_service.py", "repo_name": "stats94/championship-bot", "src_encoding": "UTF-8", "text": "import requests;\nimport config;\n\nclass api_service:\n endpoint = config.endpoint\n api_key = config.api_key\n\n def get(self, url):\n response = requests.get(url, headers={'X-RapidAPI-Key': self.api_key})\n \n '''\n api element is just a wrapper.\n \n api: {\n results: 0 -> Number of results\n fixtures/standing etc: [] -> array with data\n }\n '''\n json = response.json()\n return json['api']\n\n def get_table(self, league_id):\n url = '{}/v2/leagueTable/{}'.format(self.endpoint, league_id)\n response = self.get(url)\n return response['standings']\n" }, { "alpha_fraction": 0.5595390796661377, "alphanum_fraction": 0.5608194470405579, "avg_line_length": 44.94117736816406, "blob_id": "75630dfe668b76bd6ee8da7a1c576dce5e7fca44", "content_id": "48b826a17412fa413783e447151405f29023ce8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 781, "license_type": "no_license", "max_line_length": 261, "num_lines": 17, "path": "/bot.py", "repo_name": "stats94/championship-bot", "src_encoding": "UTF-8", "text": "from api_service import api_service\nimport config\n\nclass bot:\n api_service = api_service()\n league_id = config.league_id\n\n def build_table(self):\n # The standings array is wrapped in another array\n table_data = self.api_service.get_table(self.league_id)[0]\n\n headers = '|Pos|Team|Pl|W|D|L|Form|GD|Pts|\\n:-:|:--|:-:|:-:|:-:|:-:|:--|:-:|:-:'\n\n # Position | Team Name | Played | Won | Drawn | Lost | Form | GD | Points |\n teams = list(map(lambda team: '{}|{}|{}|{}|{}|{}|{}|{}|{}'.format(team['rank'], team['teamName'], team['all']['matchsPlayed'], team['all']['win'], team['all']['draw'], team['all']['lose'], team['forme'], team['goalsDiff'], team['points']), table_data))\n\n return '{}\\n{}'.format(headers, '\\n'.join(teams))\n" } ]
3
fikirmulu/ITC172_python2
https://github.com/fikirmulu/ITC172_python2
3302f253c2e6ae1a7731392fcc01f547931f4779
ee3509f9f86d60e9792670274890eb080210f4a7
7ae8d203e4716047405b79dd7c58710867fecc6a
refs/heads/master
2020-04-18T04:40:07.125399
2019-03-04T20:06:06
2019-03-04T20:06:06
167,247,752
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7405660152435303, "alphanum_fraction": 0.7405660152435303, "avg_line_length": 39.33333206176758, "blob_id": "214b212c96f268ea7402d5ddcb680c2a7a187211", "content_id": "9dc3c86d19169e8b8d670d455041adc0dc8aa4f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 848, "license_type": "no_license", "max_line_length": 86, "num_lines": 21, "path": "/club/views.py", "repo_name": "fikirmulu/ITC172_python2", "src_encoding": "UTF-8", "text": "\nfrom django.shortcuts import render\nfrom.models import Meeting, MeetingMinutes, Resource, Event\n# Create your views here.\ndef index (request):\n return render(request, 'club/index.html')\n# importing all theobjects under the productstype\ndef clubmeetings(request):\n meetings_list=Meeting.objects.all()\n return render (request,'club/meetings.html', {'meetings_list': meetings_list})\n\ndef clubminutes(request):\n minutes_list=MeetingMinutes.objects.all()\n return render (request,'club/minutes.html', {'minutes_list': minutes_list}) \n\ndef clubresources (request):\n resources_list=Resource.objects.all()\n return render (request,'club/resources.html', {'resources_list': resources_list}) \n\ndef clubevents (request):\n events_list=Resource.objects.all()\n return render (request,'club/events.html', {'events_list': events_list}) " }, { "alpha_fraction": 0.7098321318626404, "alphanum_fraction": 0.7134292721748352, "avg_line_length": 33.020408630371094, "blob_id": "e94b2764914e06956012c77055bbb1e11d64ab8e", "content_id": "51d206225a346e50be74e624f1d83c108a168a2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1668, "license_type": "no_license", "max_line_length": 84, "num_lines": 49, "path": "/pythonclubproject/club/views.py", "repo_name": "fikirmulu/ITC172_python2", "src_encoding": "UTF-8", "text": "\nfrom django.shortcuts import render, get_object_or_404\nfrom.models import Meeting, MeetingMinutes, 
Resource, Event \nfrom.forms import ResourceForm\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\ndef index (request):\n return render(request, 'club/index.html')\n# importing all theobjects under the productstype\ndef clubresources(request):\n resources_list=Resource.objects.all()\n return render(request,'club/resources.html', {'resources_list': resources_list})\n\ndef getmeeting(request):\n meeting_list=Meeting.objects.all()\n return render (request,'club/meetings.html', {'meeting_list': meeting_list}) \n\ndef meetingdetail(request, id):\n detail=get_object_or_404(Meeting, pk=id)\n context = {'detail': detail}\n return render (request, 'club/details.html', context=context)\n\ndef clubminutes (request):\n minutes_list=MeetingMinutes.objects.all()\n return render (request,'club/minutes.html', {'minutes_list': minutes_list}) \n\ndef clubevents (request):\n events_list=Resource.objects.all()\n return render (request,'club/events.html', {'events_list': events_list}) \n#form view\n@login_required\ndef newResource(request):\n form=ResourceForm\n if request.method=='POST':\n form=ResourceForm(request.POST)\n if form.is_valid():\n post=form.save(commit=True)\n post.save()\n form=ResourceForm()\n else:\n form=ResourceForm()\n return render(request, 'club/newresource.html', {'form':form})\n\ndef loginmessage(request):\n return render(request, 'club/loginmessage.html')\n\ndef logoutmessage(request):\n return render(request, 'club/logoutmessage.html')\n" }, { "alpha_fraction": 0.5206611752510071, "alphanum_fraction": 0.5730027556419373, "avg_line_length": 19.16666603088379, "blob_id": "18ebe3a85329d715f7bf896225ff9566dcca2205", "content_id": "f3d030628732919795758a0c44c65a6e2353c9c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/pythonclubproject/club/migrations/0002_auto_20190207_0913.py", 
"repo_name": "fikirmulu/ITC172_python2", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.4 on 2019-02-07 09:13\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('club', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='meeting',\n old_name='meetingttitle',\n new_name='meetingtitle',\n ),\n ]\n" }, { "alpha_fraction": 0.7105538249015808, "alphanum_fraction": 0.7105538249015808, "avg_line_length": 30.866666793823242, "blob_id": "a07e8971f1a1203942dd6e4c97f4b14038492995", "content_id": "bdfa98cb0da795a95f3cd4e1471dfd9d8876bcae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 957, "license_type": "no_license", "max_line_length": 66, "num_lines": 30, "path": "/pythonclubproject/club/tests.py", "repo_name": "fikirmulu/ITC172_python2", "src_encoding": "UTF-8", "text": "\nfrom django.test import TestCase\nfrom .models import Resource, Meeting, Event\nfrom django.urls import reverse\n\n# Create your tests here.\n# model tests\n\nclass ResourceTest(TestCase):\n def test_stringOutput(self):\n resource=Resource(resourcename='computer')\n self.assertEqual(str(resource), resource.resourcename)\n\n def test_tablename(self):\n self.assertEqual(str(Resource._meta.db_table), 'resource')\n\nclass MeetingTest(TestCase):\n def test_stringOutput(self):\n meeting=Meeting(meetingtitle='salary increment')\n self.assertEqual(str(meeting), meeting.meetingtitle)\n\n def test_tablename(self):\n self.assertEqual(str(Meeting._meta.db_table), 'meeting')\n\nclass EnentTest(TestCase):\n def test_stringOutput(self):\n event=Event(eventtitle='New years')\n self.assertEqual(str(event), event.eventtitle)\n\n def test_tablename(self):\n self.assertEqual(str(Event._meta.db_table), 'event')\n" }, { "alpha_fraction": 0.6655948758125305, "alphanum_fraction": 0.6655948758125305, "avg_line_length": 24.91666603088379, "blob_id": 
"47b39d228f7634c84015c09cbf45608c8fcfd09f", "content_id": "98d5547c3de7dde0758bbafe2a36dece3589eec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 65, "num_lines": 12, "path": "/club/urls.py", "repo_name": "fikirmulu/ITC172_python2", "src_encoding": "UTF-8", "text": "\nfrom django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('clubmeetings/', views.clubmeetings, name='meetings'),\n path('clubminutes/', views.clubminutes, name='minutes'),\n path('clubresources/', views.clubresources, name='resource'),\n \n \n]" } ]
5
eppoha/BGC_Comparative
https://github.com/eppoha/BGC_Comparative
3a0cfab41f4c3d2790f254a9ab2a80e048c87f8a
2c562a8a2b13e136ebba6a7e453c4e37a85519f7
b428f37e3c93258638d318ddced59fa004acbff9
refs/heads/master
2022-04-06T06:45:36.204120
2019-12-30T10:35:26
2019-12-30T10:35:26
257,168,848
3
0
null
2020-04-20T04:13:36
2019-12-30T10:39:34
2019-12-30T10:39:31
null
[ { "alpha_fraction": 0.5076754093170166, "alphanum_fraction": 0.5244883298873901, "avg_line_length": 41.765625, "blob_id": "cf1e1ab4f50abbbb0aca7267a0cbf7a33acb24bf", "content_id": "33b7db6a0e503ab3276caccc0e17b918c06c7485", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2736, "license_type": "no_license", "max_line_length": 85, "num_lines": 64, "path": "/Evaluation.py", "repo_name": "eppoha/BGC_Comparative", "src_encoding": "UTF-8", "text": "import torch\n\n\nclass Result_Eval(object):\n def __init__(self, num_classes):\n self.num_classes = num_classes\n self.mask = torch.zeros(1, dtype=torch.long)\n self.y_hat = torch.zeros(1, dtype=torch.long)\n self.y = torch.zeros(1, dtype=torch.long)\n self.class_dict = {1: \"OBJ\", 2: \"SUB\", 3: \"ATTR\", 4: \"SENTI\", 5: \"KW\"}\n self.truth = {\"OBJ\": 0, \"SUB\": 0, \"ATTR\": 0, \"SENTI\": 0, \"KW\": 0}\n self.prediction = {\"OBJ\": 0, \"SUB\": 0, \"ATTR\": 0, \"SENTI\": 0, \"KW\": 0}\n self.correct = {\"OBJ\": 0, \"SUB\": 0, \"ATTR\": 0, \"SENTI\": 0, \"KW\": 0}\n\n def add(self, output, target, attention_mask):\n self.y = torch.cat((self.y, target.to(\"cpu\")))\n self.y_hat = torch.cat((self.y_hat, output.to(\"cpu\")))\n attention_mask = attention_mask.view(-1)\n self.mask = torch.cat((self.mask, attention_mask.to(\"cpu\")))\n\n def get_dict(self, temp_y):\n res_dict, index, n = [{}, {}, {}, {}, {}, {}], 0, temp_y.size(0)\n while index < n:\n if temp_y[index] == 0 or temp_y[index] >= 6:\n index += 1\n continue\n s_index = index\n while index < n and temp_y[index] == temp_y[s_index]:\n index += 1\n res_dict[temp_y[s_index]][s_index] = index - s_index\n return res_dict\n\n def eval_model(self):\n index = (self.mask == 1)\n temp_y, temp_y_hat = self.y[index], self.y_hat[index]\n\n y_dict = self.get_dict(temp_y)\n for c in range(1, self.num_classes):\n self.truth[self.class_dict[c]] = len(y_dict[c])\n\n y_hat_dict = self.get_dict(temp_y_hat)\n for c in range(1, 
self.num_classes):\n self.prediction[self.class_dict[c]] = len(y_hat_dict[c])\n\n for c in range(1, self.num_classes):\n for key, value in y_hat_dict[c].items():\n if key in y_dict[c] and value == y_dict[c][key]:\n self.correct[self.class_dict[c]] += 1\n F_measure = 0\n for value in self.class_dict.values():\n if self.prediction[value] == 0 or self.truth[value] == 0:\n continue\n\n Precision = self.correct[value] / self.prediction[value]\n Recall = self.correct[value] / self.truth[value]\n if Precision == 0 and Recall == 0:\n continue\n F = 2 * Precision * Recall / (Precision + Recall)\n F_measure += F\n\n print(\"class-type {} precision value is {:.2f}\".format(value, Precision))\n print(\"class-type {} Recall value is: {:.2f}\".format(value, Recall))\n print(\"class-type {} F1_Measure value is: {:.2f}\".format(value, F))\n print(\"Final F_Measure is {:.2f}\".format(F_measure))" }, { "alpha_fraction": 0.5492755770683289, "alphanum_fraction": 0.5654614567756653, "avg_line_length": 41.79888153076172, "blob_id": "90d39100651412d1747e74e4afc3270fa7693872", "content_id": "2fdc54669bfafd9a66c6d65c9a9de05ae908f9af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7661, "license_type": "no_license", "max_line_length": 129, "num_lines": 179, "path": "/Model.py", "repo_name": "eppoha/BGC_Comparative", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\n\nfrom pytorch_pretrained_bert import BertModel, BertTokenizer\n\n\nclass BERT_Cell(nn.Module):\n def __init__(self, model_path):\n super(BERT_Cell, self).__init__()\n self.bert = BertModel.from_pretrained(model_path)\n self.hidden_size = self.bert.config.hidden_size\n self.hidden_dropout_prob = self.bert.config.hidden_dropout_prob\n\n def forward(self, input_ids, token_type_ids, attention_mask):\n self.bert.eval()\n # encoded_layers shape: [layers, batch_size, tokens, hidden_size]\n encoded_layers, _ = self.bert(input_ids, token_type_ids, 
attention_mask)\n\n # Concatenate the tensors for all layers. We use `stack` here to\n # create a new dimension in the tensor.\n token_embeddings = torch.stack(encoded_layers, dim=0)\n\n # token_embeddings size is [encode_layers, batch_size, seq_len, hidden_size]\n token_embeddings = token_embeddings.permute(1, 2, 0, 3)\n # x_embedding = torch.cat((token_embeddings[:, :, -4, :], token_embeddings[:, :, -3, :],\n # token_embeddings[:, :, -2, :], token_embeddings[:, :, -1, :]), dim=2)\n x_embedding = torch.cat((token_embeddings[:, :, -1, :], token_embeddings[:, :, -2, :]), dim=2)\n # x_embedding = token_embeddings[:, :, -1, :]\n return x_embedding\n\n\nclass BiGRU_Cell(nn.Module):\n def __init__(self, config):\n super(BiGRU_Cell, self).__init__()\n # define hyper-parameters\n self.input_size = config.input_size\n self.hidden_size = config.hidden_size\n self.num_layer = config.num_layers\n self.batch_size = config.batch_size\n self.device = config.device\n\n self.gru = nn.GRU(config.input_size, config.hidden_size, config.num_layers,\n batch_first=True, bidirectional=True)\n\n def forward(self, x):\n h0 = torch.zeros(self.num_layer * 2, x.size(0), self.hidden_size).to(self.device)\n\n # x shape is [batch_size, token_size, bert_embedding_size]\n output, _ = self.gru(x, h0)\n return output\n\n\nclass CRF_Cell(nn.Module):\n def __init__(self, config):\n super(CRF_Cell, self).__init__()\n # tag_size = num_classes + start tag and stop tag\n self.tag_size = config.num_classes + 2\n self.device = config.device\n self.f = nn.Sigmoid()\n self.target_to_index = {\"OTHERS\": 0, \"OBJ\": 1, \"SUB\": 2, \"ATTR\": 3,\n \"SENTI\": 4, \"KW\": 5, \"START\": 6, \"STOP\": 7}\n self.transitions = nn.Parameter(torch.ones(self.tag_size, self.tag_size), requires_grad=True)\n\n # change the type\n def log_sum_exp(self, vec):\n max_score = vec[0, torch.argmax(vec, dim=1)]\n n, m = vec.size(0), vec.size(1)\n max_score_broadcast = max_score.view(-1, 1).expand(n, m)\n # print(max_score + 
torch.log(torch.sum(torch.exp(vec - max_score_broadcast))))\n return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\n\n def viterbi_decode(self, word_feature, input_ids):\n index, decode_ids, each_seq_ids = 0, [], []\n forward_var = torch.full((1, self.tag_size), -1000000).to(self.device)\n\n # '[CLS]' id is 101 and '[SEP]' id is 102\n for feat in word_feature:\n step_best_ids = [] # store current step max value's last tag\n step_best_value = [] # store current step each tag max value\n\n # if feat is '[CLS]' init forward_var\n if input_ids[index] == 101:\n forward_var = torch.full((1, self.tag_size), -100000).to(self.device)\n forward_var[0][self.target_to_index['START']] = 0\n index += 1\n continue\n\n # if feature is \"[SEP]\" need get sequence tags\n if input_ids[index] == 102:\n seq_stop_var = forward_var + self.transitions[self.target_to_index['STOP']]\n current_id = torch.argmax(seq_stop_var).item()\n\n # find the best tag path\n path_ids = [current_id]\n for i in range(len(each_seq_ids) - 1, 0, -1):\n step_ids = each_seq_ids[i]\n path_ids.insert(0, step_ids[path_ids[0]])\n\n # 6, 7 denote start and stop\n path_ids.insert(0, 6)\n path_ids.append(7)\n\n # add each sequence tags to all tags\n decode_ids.extend(path_ids)\n each_seq_ids = []\n index += 1\n continue\n\n # else using viterbi algorithm\n for next_tag in range(self.tag_size):\n current_var = forward_var + self.transitions[next_tag]\n current_best_id = torch.argmax(current_var)\n\n step_best_ids.append(current_best_id)\n step_best_value.append(current_var[0][current_best_id].view(1))\n\n forward_var = (feat + torch.cat(step_best_value)).view(1, -1)\n each_seq_ids.append(step_best_ids)\n index += 1\n return torch.tensor(decode_ids, dtype=torch.long).to(self.device)\n\n def truth_path_loss(self, word_feature, input_ids, tag_col):\n # score should be loss back\n batch_score, seq_score = torch.zeros(1).to(self.device), torch.zeros(1).to(self.device)\n\n # using 6 and 7 instead of 
-1\n tag_col[input_ids == 101] = 6\n tag_col[input_ids == 102] = 7\n\n for index, feat in enumerate(word_feature):\n if tag_col[index] == 6:\n seq_score = torch.zeros(1).to(self.device)\n continue\n\n # end the sequence tag score\n if tag_col[index] == 7:\n seq_score -= self.transitions[tag_col[index], tag_col[index - 1]]\n batch_score += seq_score\n continue\n\n # seq_score = seq_score + self.f(feat[tag_col[index]]) + self.f(self.transitions[tag_col[index + 1], tag_col[index]])\n seq_score = seq_score + feat[tag_col[index]] + self.transitions[tag_col[index + 1], tag_col[index]]\n\n return batch_score\n\n def all_path_loss(self, word_feature, input_ids):\n forward_var = torch.zeros(self.tag_size, 1).to(self.device)\n all_path_score = torch.zeros(1).to(self.device)\n\n for index, feat in enumerate(word_feature):\n if input_ids[index] == 101:\n # forward_var = feat.view(self.tag_size, 1)\n forward_var = torch.zeros(self.tag_size, 1).to(self.device)\n\n # forward_var = torch.full((1, self.tag_size), -100000).to(self.device)\n # forward_var[0][self.target_to_index['START']] = 0\n continue\n\n if input_ids[index] == 102:\n forward_var = forward_var.view(1, self.tag_size)\n all_path_score += self.log_sum_exp(forward_var)\n continue\n\n # expand to n * n\n forward_var = forward_var.expand(self.tag_size, self.tag_size)\n # feat = self.f(feat.view(1, -1))\n feat = feat.view(1, -1)\n emission = feat.expand(self.tag_size, self.tag_size)\n\n # score = forward_var + emission + self.f(torch.t(self.transitions))\n score = forward_var + emission + torch.t(self.transitions)\n forward_var = self.log_sum_exp(score).view(self.tag_size, 1)\n\n return all_path_score\n\n def loss(self, word_feature, input_ids, target_col):\n truth_path_score = self.truth_path_loss(word_feature, input_ids, target_col)\n all_path_score = self.all_path_loss(word_feature, input_ids)\n return all_path_score - truth_path_score\n" }, { "alpha_fraction": 0.5853132009506226, "alphanum_fraction": 
0.593952476978302, "avg_line_length": 28, "blob_id": "6fa0c87eaaa466d064db2ec5092800b4d373121e", "content_id": "c5c55afd739c6544491023555e9a7cae3f9e5eee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 463, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/Config.py", "repo_name": "eppoha/BGC_Comparative", "src_encoding": "UTF-8", "text": "class Config(object):\n def __init__(self, args):\n self.input_size = args.input\n self.hidden_size = args.hidden\n\n self.num_layers = args.layer\n self.num_classes = 6\n\n self.epochs = args.epoch\n self.batch_size = args.batch\n self.device = args.device\n\n self.train_data_path = args.train\n self.test_data_path = args.test\n self.bert_model_path = args.bert\n self.hidden_dropout_prob = 0.05" }, { "alpha_fraction": 0.6379280686378479, "alphanum_fraction": 0.6478105187416077, "avg_line_length": 39.75694274902344, "blob_id": "351031850fd803873a736ff8f086417213e10c68", "content_id": "797b9946b663294f689328532b791c5bba17a7cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5869, "license_type": "no_license", "max_line_length": 102, "num_lines": 144, "path": "/Main.py", "repo_name": "eppoha/BGC_Comparative", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport os\nimport argparse\nimport Model\nimport MyData\nimport DataProcess\n# import PreProcess\nimport Config\nimport Evaluation\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport Bert_GRU_CRF_Model\n\nfrom pytorch_pretrained_bert import BertAdam\n\n\n# padding data and return input_ids, token_type_ids, attention_mask\ndef padding_data(data, target, maxLen):\n input_ids = [line + [0] * (maxLen - len(line)) for line in data]\n token_type_ids = [[0] * maxLen for _ in data]\n attention_mask = [[1] * len(line) + [0] * (maxLen - len(line)) for 
line in data]\n target_col = [line + [-1] * (maxLen - len(line)) for line in target]\n return input_ids, token_type_ids, attention_mask, target_col\n\n\ndef TrainModel(model, optimizer, train_loader, maxLen, device, epoch):\n epoch_loss, t = 0, 0\n for index, (data, target) in enumerate(train_loader):\n # through data to get input_ids, token_type_ids, attention_mask\n if index >= 10 and index % 10 == 0:\n print(\"index: \", index)\n input_ids, token_type_ids, attention_mask, target_col = padding_data(data, target, maxLen)\n\n input_ids = torch.tensor(input_ids).long().to(device)\n token_type_ids = torch.tensor(token_type_ids).long().to(device)\n attention_mask = torch.tensor(attention_mask).long().to(device)\n target_col = torch.tensor(target_col).to(device)\n\n loss = model(input_ids, token_type_ids, attention_mask, target_col)\n epoch_loss += loss.item()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n t = index\n\n print(\"Loss: {}\".format(epoch_loss / t))\n\n\ndef TestModel(model, test_loader, maxLen, device, epoch):\n total, correct = 0, 0\n res_eval = Evaluation.Result_Eval(6)\n with torch.no_grad():\n for index, (data, target) in enumerate(test_loader):\n # through data to get input_ids, token_type_ids, attention_mask\n input_ids, token_type_ids, attention_mask, target_col = padding_data(data, target, maxLen)\n\n input_ids = torch.tensor(input_ids).long().to(device)\n token_type_ids = torch.tensor(token_type_ids).long().to(device)\n attention_mask = torch.tensor(attention_mask).long().to(device)\n target_col = torch.tensor(target_col).to(device)\n\n target_col = target_col[attention_mask == 1]\n\n output = model(input_ids, token_type_ids, attention_mask)\n\n total += torch.sum(attention_mask == 1).item()\n correct += torch.sum(target_col == output).item()\n attention_mask = attention_mask[attention_mask == 1]\n\n res_eval.add(output, target_col, attention_mask)\n res_eval.eval_model()\n print(\"ACC: {:.2f}%, correct/total: 
{}/{}\".format(correct / total * 100, correct, total))\n\n\ndef TerminalParser():\n # define train data path and test data path\n train_data_path = \"./result/char_train.data\"\n test_data_path = \"./result/char_test.data\"\n bert_model_path = \"/home/zhliu/RTX/modeling_bert/bert-base-chinese.tar.gz\"\n # bert_model_path = \"C:\\\\Users\\\\curry\\\\Desktop\\\\modeling_bert\\\\bert-base-chinese.tar.gz\"\n\n # define parse parameters\n parser = argparse.ArgumentParser()\n parser.description = 'choose train data and test data file path'\n parser.add_argument('--train', help='train data file path', default=train_data_path)\n parser.add_argument('--test', help='test data file path', default=test_data_path)\n parser.add_argument('--bert', help='bert model file path', default=bert_model_path)\n parser.add_argument('--batch', help='input data batch size', default=5)\n parser.add_argument('--input', help='input data size', default=768)\n parser.add_argument('--hidden', help='gru hidden size', default=100)\n parser.add_argument('--layer', help='the number of gru layer', default=3)\n parser.add_argument('--epoch', help='the number of run times', default=100)\n parser.add_argument('--device', help='run program in device type', default='cuda')\n # parser.add_argument('--device', help='run program in device type', default='cpu')\n args = parser.parse_args()\n\n config_obj = Config.Config(args)\n return config_obj\n\n\ndef main():\n # get some configure and hyper-parameters\n args = TerminalParser()\n\n # get standard data file\n if not os.path.exists(args.train_data_path) or not os.path.exists(args.test_data_path):\n PreProcess.pre_process_main()\n\n train_path = \"./result/char_train.data\"\n test_path = \"./result/char_test.data\"\n\n # get vocab and max sequence length\n vocab, maxLen = DataProcess.get_vocab(train_path, test_path)\n maxLen += 10\n\n # get train data and test data BERT\n train_data, train_target = DataProcess.get_bert_data(train_path)\n test_data, test_target 
= DataProcess.get_bert_data(test_path)\n\n # define train data and test data\n train_loader = MyData.get_loader(train_data, train_target, args.batch_size)\n test_loader = MyData.get_loader(test_data, test_target, args.batch_size)\n\n # define model and optimizer\n model = Bert_GRU_CRF_Model.BGCM(args).to(args.device)\n optimizer = optim.Adam([{'params': model.fc2.parameters(), 'lr': 0.001},\n {'params': model.fc1.parameters(), 'lr': 0.001},\n {'params': model.bert.parameters(), 'lr': 2e-5},\n {'params': model.gru.parameters(), 'lr': 0.001},\n {'params': model.crf.parameters(), 'lr': 0.001}], weight_decay=0.01)\n\n # train model and test model\n for epoch in range(args.epochs):\n TrainModel(model, optimizer, train_loader, maxLen, args.device, epoch)\n TestModel(model, test_loader, maxLen, args.device, epoch)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6370157599449158, "alphanum_fraction": 0.6398852467536926, "avg_line_length": 23.821428298950195, "blob_id": "5c175d39c3aaf68daf4edf0f15fc2f4a098342dd", "content_id": "902dba4540e0ba200c0ea42193dfe18c13374c17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 697, "license_type": "no_license", "max_line_length": 86, "num_lines": 28, "path": "/MyData.py", "repo_name": "eppoha/BGC_Comparative", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass MyData(Dataset):\n def __init__(self, data, target):\n self.data = data\n self.target = target\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n data = self.data[index]\n target = self.target[index]\n return [data, target]\n\n\ndef collate_fn(data):\n input, target = list(list(zip(*data))[0]), list(list(zip(*data))[1])\n return input, target\n\n\ndef get_loader(data, target, batch_size):\n dataset = MyData(data, target)\n dataloader = DataLoader(dataset, batch_size, shuffle=False, 
collate_fn=collate_fn)\n return dataloader\n\n\n" }, { "alpha_fraction": 0.6316171288490295, "alphanum_fraction": 0.6403190493583679, "avg_line_length": 32.65853500366211, "blob_id": "806b4e816fb2533686ab5e082e5fc184ca0dc77c", "content_id": "12ae6f7ca003535e1959b5ad74ae75c95c65d11a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1379, "license_type": "no_license", "max_line_length": 82, "num_lines": 41, "path": "/Bert_GRU_CRF_Model.py", "repo_name": "eppoha/BGC_Comparative", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport Main\nimport Evaluation\nfrom Model import BERT_Cell, BiGRU_Cell, CRF_Cell\n\n\nclass BGCM(nn.Module):\n def __init__(self, config):\n super(BGCM, self).__init__()\n self.bert = BERT_Cell(config.bert_model_path)\n self.dropout = nn.Dropout(0.1)\n\n self.gru = BiGRU_Cell(config)\n self.fc1 = nn.Linear(config.input_size, 2 * config.hidden_size)\n self.fc2 = nn.Linear(2 * config.hidden_size, config.num_classes + 2)\n self.crf = CRF_Cell(config)\n\n def forward(self, input_ids, token_type_ids, attention_mask, target_col=None):\n x_word_embedding = self.bert(input_ids, token_type_ids, attention_mask)\n x_word_embedding = self.dropout(x_word_embedding)\n\n bert_feature = self.fc1(x_word_embedding)\n word_feature = self.gru(x_word_embedding)\n\n emission_prob = self.fc2(word_feature + bert_feature)\n\n features = emission_prob[attention_mask == 1]\n sequence_ids = input_ids[attention_mask == 1]\n\n if target_col is None:\n output = self.crf.viterbi_decode(features, sequence_ids)\n return output\n\n else:\n sequence_tags = target_col[attention_mask == 1]\n loss = self.crf.loss(features, sequence_ids, sequence_tags)\n return loss" }, { "alpha_fraction": 0.5480672121047974, "alphanum_fraction": 0.5579842329025269, "avg_line_length": 32.619049072265625, "blob_id": "5bcae6497b195d8a90394f76ec7affe058513e76", 
"content_id": "b294695e03ceedd2b83d25e1cb6cc940a551e728", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4945, "license_type": "no_license", "max_line_length": 85, "num_lines": 147, "path": "/PreProcess.py", "repo_name": "eppoha/BGC_Comparative", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport numpy as np\nfrom stanfordcorenlp import StanfordCoreNLP\n\n\ndef process_label(path, type, inverse_class_dict):\n label_col, last_doc = [], \"\"\n\n with open(path, 'r', encoding='gb18030', errors='ignore') as f:\n for line in f.readlines():\n word_list = list(filter(None, line.split()))\n\n # type == 0 denote train label\n if type == 0:\n doc_num = word_list[0]\n tag_col = word_list[1:]\n\n # type == 1 denote test label\n else:\n doc_num = word_list[1]\n tag_col = word_list[-5:]\n\n sentence_tag = {}\n for index, tag_word in enumerate(tag_col):\n temp_list = []\n if tag_word == \"NULL\":\n continue\n\n if tag_word.find(\"|\") != -1:\n temp_list = tag_word.split('|')\n\n elif tag_word.find(\"...\") != -1:\n temp_list = tag_word.split('...')\n\n elif tag_word.find(\"、\") != -1:\n temp_list = tag_word.split('、')\n\n else:\n temp_list = [tag_word]\n\n for token in temp_list:\n sentence_tag[token] = inverse_class_dict[index + 1]\n\n if doc_num == last_doc:\n for key, value in sentence_tag.items():\n if key not in label_col[-1]:\n label_col[-1][key] = value\n else:\n label_col.append(sentence_tag)\n\n last_doc = doc_num\n return label_col\n\n\ndef process_data(path, tag_col, class_dict):\n input_data, target = [], []\n pattern = r'<DOC[0-9]+>\\t([\\s\\S]*?)</DOC[0-9]+>'\n with open(path, \"r\", encoding='gb18030', errors='ignore') as f:\n file_data = f.read()\n data = re.findall(pattern, file_data)\n\n # error label col or file error\n if len(data) != len(tag_col):\n print(\"Please check your label collection !\")\n\n for index, doc in enumerate(data):\n label = np.zeros(len(doc), dtype=int)\n\n for key, 
value in tag_col[index].items():\n s_index = doc.find(key)\n e_index = s_index + len(key)\n\n label[s_index: e_index] = class_dict[value]\n\n input_data.append(doc)\n target.append(label.tolist())\n\n return input_data, target\n\n\ndef char_feature(input_data, target):\n return input_data, target\n\n\ndef token_feature(input_data, target):\n stanford_nlp = StanfordCoreNLP('C:\\\\stanford-corenlp-full-2018-10-05', lang='zh')\n\n final_data, final_target = [], []\n for i, doc in enumerate(input_data):\n token_list, index = stanford_nlp.word_tokenize(doc), 0\n temp_target = [0] * len(token_list)\n\n for j in range(len(token_list)):\n class_count = np.zeros(6)\n for k in range(len(token_list[j])):\n class_count[target[i][index]] += 1\n index += 1\n temp_target[j] = np.argmax(class_count).item()\n\n final_data.append(token_list)\n final_target.append(temp_target)\n return final_data, final_target\n\n\ndef store_process_data(data, target, path):\n write_str = \"\"\n for i in range(len(data)):\n for j in range(len(data[i])):\n write_str += data[i][j] + ' ' + str(target[i][j]) + '\\n'\n write_str += '\\n'\n with open(path, 'w', encoding='utf8', errors='ignore') as f:\n f.write(write_str)\n\n\ndef pre_process_main():\n train_data_path = \"./data/train_data.txt\"\n train_label_path = \"./data/train_label.txt\"\n test_data_path = \"./data/test_data.txt\"\n test_label_path = \"./data/test_label.txt\"\n\n class_dict = {\"OTHERS\": 0, \"OBJ\": 1, \"SUB\": 2, \"ATTR\": 3, \"SENTI\": 4, \"KW\": 5}\n inverse_class_dict = {v: k for k, v in class_dict.items()}\n print(inverse_class_dict)\n\n # get train data label and test data label\n train_tag = process_label(train_label_path, 0, inverse_class_dict)\n test_tag = process_label(test_label_path, 1, inverse_class_dict)\n\n # get train data and label\n train_data, train_target = process_data(train_data_path, train_tag, class_dict)\n test_data, test_target = process_data(test_data_path, test_tag, class_dict)\n\n # # get token feature\n # 
train_final_data, train_final_target = token_feature(train_data, train_target)\n # test_final_data, test_final_target = token_feature(test_data, test_target)\n\n # get char feature\n train_final_data, train_final_target = char_feature(train_data, train_target)\n test_final_data, test_final_target = char_feature(test_data, test_target)\n\n # store pre-process data and target\n store_train_path = \"./result/char_train.data\"\n store_test_path = \"./result/char_test.data\"\n\n store_process_data(train_final_data, train_final_target, store_train_path)\n store_process_data(test_final_data, test_final_target, store_test_path)" } ]
7
kennneth1/Pokemon-Data-Analysis
https://github.com/kennneth1/Pokemon-Data-Analysis
22c67fe2c42948dcc9873bd8a36191bb5d65da0a
405267d4a65dc963251e0a5f28a4c04312d8eeb5
aa4f664db8c305a2200d2eb7c42729c5c718e6a3
refs/heads/master
2020-04-03T14:38:19.301424
2018-10-30T05:52:36
2018-10-30T05:52:36
155,329,013
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6350482106208801, "alphanum_fraction": 0.651125431060791, "avg_line_length": 23.773332595825195, "blob_id": "6ca530a000c761ff7dbd428001aaa98d22ede460", "content_id": "8e2ea65dd820e282a9b4fe47d649a13742c444f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1866, "license_type": "no_license", "max_line_length": 77, "num_lines": 75, "path": "/Pokemon.py", "repo_name": "kennneth1/Pokemon-Data-Analysis", "src_encoding": "UTF-8", "text": "import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.polynomial.polynomial import polyfit\nimport seaborn as sns\n\ndataset = pd.read_csv(os.path.expanduser(\"~/Desktop/Pokemon/Pokemon.csv\"))\ndata = dataset.loc[:, \"Name\":\"Legendary\"]\nattack_by_type = {}\n\n\n#Removes all entries of Pokemon with more an irregular name\ndef data_cleaner(df):\n\n names = df['Name']\n bool_mask = ~(names.str.contains(' '))\n cleaned_data = df[bool_mask]\n return cleaned_data\n\n\ndef attack_plotter():\n\n poke_types = data['Type 1'].unique()\n\n for type in poke_types:\n selected_rows = data[data['Type 1'] == type]\n mean = selected_rows['Attack'].mean()\n attack_by_type[type] = mean\n average_attack = pd.DataFrame().append(attack_by_type, ignore_index=True)\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1)\n\n bar_positions = np.arange(18) + 0.75\n bar_heights = average_attack.iloc[0].values\n ax.bar(bar_positions, bar_heights, 0.5)\n\n tick_positions = range(1,19)\n ax.set_xticks(tick_positions)\n\n ax.set_xticklabels(list(average_attack.columns.values), rotation=90)\n plt.legend(loc=\"upper left\")\n ax.set_title(\"Pokemon Types vs. 
Attack\")\n plt.show()\n\n\n#Finding relationship between tankiness and damage output\n\ndef hp_vs_attack():\n attack = data['Attack']\n hp = data['HP']\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1)\n\n sns.regplot(attack, hp)\n ax.set_xlabel(\"Attack\")\n ax.set_ylabel(\"Health Points\")\n plt.show()\n\n\n#Explores whether speed makes you stronger\ndef speed_vs_attack():\n\n defense = data['Defense']\n attack = data['Attack']\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1)\n\n ax.scatter(defense, attack)\n ax.set_xlabel(\"Defense\")\n ax.set_ylabel(\"Attack\")\n plt.show()\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.805042028427124, "alphanum_fraction": 0.805042028427124, "avg_line_length": 44.769229888916016, "blob_id": "f48b7039303cb4ad04ae7cb0bc2c506fc77488b2", "content_id": "652b8e33f8a80b51c11cafe9f6d4bafba28266e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 595, "license_type": "no_license", "max_line_length": 114, "num_lines": 13, "path": "/README.md", "repo_name": "kennneth1/Pokemon-Data-Analysis", "src_encoding": "UTF-8", "text": "# Pokemon-Data-Analysis\n\nThis program takes the Pokemon.csv file, cleans the entry names, and explores/visualizes the dataset\n\nattack_plotter() aggregates Pokemon data by \"Type\", calculates the mean of that group, then generates a bar plot.\n\n*(Dragon type pokemon tend to have base attack stats)\n\nhp_vs_attack() and speed_vs_attack() compares important pokemon stats to visualize trends and notable correlations\n\nhp_vs_attack shows weak correlation, whereas speed_vs_attack has a strong upwards correlation\n\nThe distribution curve shows that the attack stat falls within a standard distribution\n" } ]
2
KhaoulaDER/holbertonschool-backend-user-data
https://github.com/KhaoulaDER/holbertonschool-backend-user-data
9b4d10983c2a7eb0e256b61ff0c48d3323cfecf5
ffd54178c296811573b93f194d2d9232d70b3409
3d0f70909a2aa11f6df9d3e71e7dd94176646bfc
refs/heads/main
2023-07-23T13:43:56.221636
2021-09-03T16:17:00
2021-09-03T16:17:00
402,711,137
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6667270660400391, "alphanum_fraction": 0.7013505101203918, "avg_line_length": 26.824716567993164, "blob_id": "97a39f794ee5abda6f4486227ecc75fda64cda3f", "content_id": "8d50e6f37febf9b96b19a8e9ff2aee6cc3b88a00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 22084, "license_type": "no_license", "max_line_length": 363, "num_lines": 793, "path": "/0x03-user_authentication_service/README.md", "repo_name": "KhaoulaDER/holbertonschool-backend-user-data", "src_encoding": "UTF-8", "text": "# 0x03. User authentication service\n\n![](https://holbertonintranet.s3.amazonaws.com/uploads/medias/2019/12/4cb3c8c607afc1d1582d.jpg?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIARDDGGGOUWMNL5ANN%2F20210830%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210830T083405Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=360fb042d05695164da35b0fb06bea1b053f21e8b17c9b328be1687688116aa6)\n\nIn the industry, you should **not** implement your own authentication system and use a module or framework that doing it for you (like in Python-Flask: [Flask-User](https://intranet.hbtn.io/rltoken/r1XmxzZ-clc7laax6Tm7WQ \"Flask-User\")). 
Here, for the learning purpose, we will walk through each step of this mechanism to understand it by doing.\n\n## Resources\n\n**Read or watch:**\n\n- [Flask documentation](https://intranet.hbtn.io/rltoken/LSFQS3aarknJpMZVr9Je1Q \"Flask documentation\")\n- [Requests module](https://intranet.hbtn.io/rltoken/PqxwI-hQOe4b4Hhah4fbVg \"Requests module\")\n- [HTTP status codes](https://intranet.hbtn.io/rltoken/QoqUvOM9taMBOVaeB_g06g \"HTTP status codes\")\n\n## Learning Objectives\n\nAt the end of this project, you are expected to be able to [explain to anyone](https://intranet.hbtn.io/rltoken/IAeORv21eo25XYJByjx_bg \"explain to anyone\"), **without the help of Google**:\n\n- How to declare API routes in a Flask app\n- How to get and set cookies\n- How to retrieve request form data\n- How to return various HTTP status codes\n\n\n## Setup\n\nYou will need to install `bcrypt`\n\n```\npip3 install bcrypt\n\n```\n\n## Tasks\n\n### 0. User model\n\n\nIn this task you will create a SQLAlchemy model named `User` for a database table named `users` (by using the [mapping declaration](https://intranet.hbtn.io/rltoken/IF5xw2va364LrJEntCXLlg \"mapping declaration\") of SQLAlchemy).\n\nThe model will have the following attributes:\n\n- `id`, the integer primary key\n- `email`, a non-nullable string\n- `hashed_password`, a non-nullable string\n- `session_id`, a nullable string\n- `reset_token`, a nullable string\n\n```\nbob@dylan:~$ cat main.py\n#!/usr/bin/env python3\n\"\"\"\nMain file\n\"\"\"\nfrom user import User\n\nprint(User.__tablename__)\n\nfor column in User.__table__.columns:\n print(\"{}: {}\".format(column, column.type))\n\nbob@dylan:~$ python3 main.py\nusers\nusers.id: INTEGER\nusers.email: VARCHAR(250)\nusers.hashed_password: VARCHAR(250)\nusers.session_id: VARCHAR(250)\nusers.reset_token: VARCHAR(250)\nbob@dylan:~$ \n\n```\n\n\n### 1. 
create user\n\n\nIn this task, you will complete the `DB` class provided below to implement the `add_user` method.\n\n```\n\"\"\"DB module\n\"\"\"\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm.session import Session\n\nfrom user import Base\n\n\nclass DB:\n \"\"\"DB class\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new DB instance\n \"\"\"\n self._engine = create_engine(\"sqlite:///a.db\", echo=True)\n Base.metadata.drop_all(self._engine)\n Base.metadata.create_all(self._engine)\n self.__session = None\n\n @property\n def _session(self) -> Session:\n \"\"\"Memoized session object\n \"\"\"\n if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\n return self.__session\n\n```\n\nNote that `DB._session` is a private property and hence should NEVER be used from outside the `DB` class.\n\nImplement the `add_user` method, which has two required string arguments: `email` and `hashed_password`, and returns a `User` object. The method should save the user to the database. No validations are required at this stage.\n\n```\nbob@dylan:~$ cat main.py\n#!/usr/bin/env python3\n\"\"\"\nMain file\n\"\"\"\n\nfrom db import DB\nfrom user import User\n\nmy_db = DB()\n\nuser_1 = my_db.add_user(\"[email protected]\", \"SuperHashedPwd\")\nprint(user_1.id)\n\nuser_2 = my_db.add_user(\"[email protected]\", \"SuperHashedPwd1\")\nprint(user_2.id)\n\nbob@dylan:~$ python3 main.py\n1\n2\nbob@dylan:~$\n\n```\n\n\n### 2. Find user\n\n\nIn this task you will implement the `DB.find_user_by` method. This method takes in arbitrary keyword arguments and returns the first row found in the `users` table as filtered by the method’s input arguments. 
No validation of input arguments required at this point.\n\nMake sure that SQLAlchemy’s `NoResultFound` and `InvalidRequestError` are raised when no results are found, or when wrong query arguments are passed, respectively.\n\n**Warning:**\n\n- `NoResultFound` has been moved from `sqlalchemy.orm.exc` to `sqlalchemy.exc` between the version 1.3.x and 1.4.x of SQLAchemy - please make sure you are importing it from `sqlalchemy.orm.exc`\n\n```\nbob@dylan:~$ cat main.py\n#!/usr/bin/env python3\n\"\"\"\nMain file\n\"\"\"\nfrom db import DB\nfrom user import User\n\nfrom sqlalchemy.exc import InvalidRequestError\nfrom sqlalchemy.orm.exc import NoResultFound\n\n\nmy_db = DB()\n\nuser = my_db.add_user(\"[email protected]\", \"PwdHashed\")\nprint(user.id)\n\nfind_user = my_db.find_user_by(email=\"[email protected]\")\nprint(find_user.id)\n\ntry:\n find_user = my_db.find_user_by(email=\"[email protected]\")\n print(find_user.id)\nexcept NoResultFound:\n print(\"Not found\")\n\ntry:\n find_user = my_db.find_user_by(no_email=\"[email protected]\")\n print(find_user.id)\nexcept InvalidRequestError:\n print(\"Invalid\") \n\nbob@dylan:~$ python3 main.py\n1\n1\nNot found\nInvalid\nbob@dylan:~$ \n\n```\n\n\n### 3. 
update user\n\n\nIn this task, you will implement the `DB.update_user` method that takes as argument a required `user_id` integer and arbitrary keyword arguments, and returns `None`.\n\nThe method will use `find_user_by` to locate the user to update, then will update the user’s attributes as passed in the method’s arguments then commit changes to the database.\n\nIf an argument that does not correspond to a user attribute is passed, raise a `ValueError`.\n\n```\nbob@dylan:~$ cat main.py\n#!/usr/bin/env python3\n\"\"\"\nMain file\n\"\"\"\nfrom db import DB\nfrom user import User\n\nfrom sqlalchemy.exc import InvalidRequestError\nfrom sqlalchemy.orm.exc import NoResultFound\n\n\nmy_db = DB()\n\nemail = '[email protected]'\nhashed_password = \"hashedPwd\"\n\nuser = my_db.add_user(email, hashed_password)\nprint(user.id)\n\ntry:\n my_db.update_user(user.id, hashed_password='NewPwd')\n print(\"Password updated\")\nexcept ValueError:\n print(\"Error\")\n\nbob@dylan:~$ python3 main.py\n1\nPassword updated\nbob@dylan:~$ \n\n```\n\n\n### 4. Hash password\n\n\nIn this task you will define a `_hash_password` method that takes in a `password` string arguments and returns bytes.\n\nThe returned bytes is a salted hash of the input password, hashed with `bcrypt.hashpw`.\n\n```\nbob@dylan:~$ cat main.py\n#!/usr/bin/env python3\n\"\"\"\nMain file\n\"\"\"\nfrom auth import _hash_password\n\nprint(_hash_password(\"Hello Holberton\"))\n\nbob@dylan:~$ python3 main.py\nb'$2b$12$eUDdeuBtrD41c8dXvzh95ehsWYCCAi4VH1JbESzgbgZT.eMMzi.G2'\nbob@dylan:~$\n\n```\n\n\n### 5. 
Register user\n\n\nIn this task, you will implement the `Auth.register_user` in the `Auth` class provided below:\n\n```\nfrom db import DB\n\n\nclass Auth:\n \"\"\"Auth class to interact with the authentication database.\n \"\"\"\n\n def __init__(self):\n self._db = DB()\n\n```\n\nNote that `Auth._db` is a private property and should NEVER be used from outside the class.\n\n`Auth.register_user` should take mandatory `email` and `password` string arguments and return a `User` object.\n\nIf a user already exist with the passed email, raise a `ValueError` with the message `User <user's email> already exists`.\n\nIf not, hash the password with `_hash_password`, save the user to the database using `self._db` and return the `User` object.\n\n```\nbob@dylan:~$ cat main.py\n#!/usr/bin/env python3\n\"\"\"\nMain file\n\"\"\"\nfrom auth import Auth\n\nemail = '[email protected]'\npassword = 'mySecuredPwd'\n\nauth = Auth()\n\ntry:\n user = auth.register_user(email, password)\n print(\"successfully created a new user!\")\nexcept ValueError as err:\n print(\"could not create a new user: {}\".format(err))\n\ntry:\n user = auth.register_user(email, password)\n print(\"successfully created a new user!\")\nexcept ValueError as err:\n print(\"could not create a new user: {}\".format(err)) \n\nbob@dylan:~$ python3 main.py\nsuccessfully created a new user!\ncould not create a new user: User [email protected] already exists\nbob@dylan:~$\n\n```\n\n\n### 6. Basic Flask app\n\n\nIn this task, you will set up a basic Flask app.\n\nCreate a Flask app that has a single `GET` route (`\"/\"`) and use `flask.jsonify` to return a JSON payload of the form:\n\n```\n{\"message\": \"Bienvenue\"}\n\n```\n\nAdd the following code at the end of the module:\n\n```\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=\"5000\")\n\n```\n\n\n### 7. Register user\n\n\nIn this task, you will implement the end-point to register a user. 
Define a `users` function that implements the `POST /users` route.\n\nImport the `Auth` object and instantiate it at the root of the module as such:\n\n```\nfrom auth import Auth\n\n\nAUTH = Auth()\n\n```\n\nThe end-point should expect two form data fields: `\"email\"` and `\"password\"`. If the user does not exist, the end-point should register it and respond with the following JSON payload:\n\n```\n{\"email\": \"<registered email>\", \"message\": \"user created\"}\n\n```\n\nIf the user is already registered, catch the exception and return a JSON payload of the form\n\n```\n{\"message\": \"email already registered\"}\n\n```\n\nand return a 400 status code\n\nRemember that you should only use `AUTH` in this app. `DB` is a lower abstraction that is proxied by `Auth`.\n\n_Terminal 1:_\n\n```\nbob@dylan:~$ python3 app.py \n* Serving Flask app \"app\" (lazy loading)\n * Environment: production\n WARNING: This is a development server. Do not use it in a production deployment.\n Use a production WSGI server instead.\n * Debug mode: off\n * Running on http://0.0.0.0:5000/ (Press CTRL+C to quit)\n\n\n```\n\nTerminal 2:\n\n```\nbob@dylan:~$ curl -XPOST localhost:5000/users -d '[email protected]' -d 'password=mySuperPwd' -v\nNote: Unnecessary use of -X or --request, POST is already inferred.\n* Trying 127.0.0.1...\n* TCP_NODELAY set\n* Connected to localhost (127.0.0.1) port 5000 (#0)\n> POST /users HTTP/1.1\n> Host: localhost:5000\n> User-Agent: curl/7.58.0\n> Accept: */*\n> Content-Length: 40\n> Content-Type: application/x-www-form-urlencoded\n> \n* upload completely sent off: 40 out of 40 bytes\n* HTTP 1.0, assume close after body\n< HTTP/1.0 200 OK\n< Content-Type: application/json\n< Content-Length: 52\n< Server: Werkzeug/1.0.1 Python/3.7.3\n< Date: Wed, 19 Aug 2020 00:03:18 GMT\n< \n{\"email\":\"[email protected]\",\"message\":\"user created\"}\n\nbob@dylan:~$\nbob@dylan:~$ curl -XPOST localhost:5000/users -d '[email protected]' -d 'password=mySuperPwd' -v\nNote: 
Unnecessary use of -X or --request, POST is already inferred.\n* Trying 127.0.0.1...\n* TCP_NODELAY set\n* Connected to localhost (127.0.0.1) port 5000 (#0)\n> POST /users HTTP/1.1\n> Host: localhost:5000\n> User-Agent: curl/7.58.0\n> Accept: */*\n> Content-Length: 40\n> Content-Type: application/x-www-form-urlencoded\n> \n* upload completely sent off: 40 out of 40 bytes\n* HTTP 1.0, assume close after body\n< HTTP/1.0 400 BAD REQUEST\n< Content-Type: application/json\n< Content-Length: 39\n< Server: Werkzeug/1.0.1 Python/3.7.3\n< Date: Wed, 19 Aug 2020 00:03:33 GMT\n< \n{\"message\":\"email already registered\"}\nbob@dylan:~$\n\n```\n\n\n### 8. Credentials validation\n\n\nIn this task, you will implement the `Auth.valid_login` method. It should expect `email` and `password` required arguments and return a boolean.\n\nTry locating the user by email. If it exists, check the password with `bcrypt.checkpw`. If it matches return `True`. In any other case, return `False`.\n\n```\nbob@dylan:~$ cat main.py\n#!/usr/bin/env python3\n\"\"\"\nMain file\n\"\"\"\nfrom auth import Auth\n\nemail = '[email protected]'\npassword = 'MyPwdOfBob'\nauth = Auth()\n\nauth.register_user(email, password)\n\nprint(auth.valid_login(email, password))\n\nprint(auth.valid_login(email, \"WrongPwd\"))\n\nprint(auth.valid_login(\"unknown@email\", password))\n\nbob@dylan:~$ python3 main.py\nTrue\nFalse\nFalse\nbob@dylan:~$ \n\n```\n\n\n### 9. Generate UUIDs\n\n\nIn this task you will implement a `_generate_uuid` function in the `auth` module. The function should return a string representation of a new UUID. Use the `uuid` module.\n\nNote that the method is private to the `auth` module and should **NOT** be used outside of it.\n\n\n### 10. Get session ID\n\n\nIn this task, you will implement the `Auth.create_session` method. 
It takes an `email` string argument and returns the session ID as a string.\n\nThe method should find the user corresponding to the email, generate a new UUID and store it in the database as the user’s `session_id`, then return the session ID.\n\nRemember that only public methods of `self._db` can be used.\n\n```\nbob@dylan:~$ cat main.py\n#!/usr/bin/env python3\n\"\"\"\nMain file\n\"\"\"\nfrom auth import Auth\n\nemail = '[email protected]'\npassword = 'MyPwdOfBob'\nauth = Auth()\n\nauth.register_user(email, password)\n\nprint(auth.create_session(email))\nprint(auth.create_session(\"[email protected]\"))\n\nbob@dylan:~$ python3 main.py\n5a006849-343e-4a48-ba4e-bbd523fcca58\nNone\nbob@dylan:~$ \n\n```\n\n\n### 11. Log in\n\n\nIn this task, you will implement a `login` function to respond to the `POST /sessions` route.\n\nThe request is expected to contain form data with `\"email\"` and a `\"password\"` fields.\n\nIf the login information is incorrect, use `flask.abort` to respond with a 401 HTTP status.\n\nOtherwise, create a new session for the user, store it the session ID as a cookie with key `\"session_id\"` on the response and return a JSON payload of the form\n\n```\n{\"email\": \"<user email>\", \"message\": \"logged in\"}\n\n```\n\n```\nbob@dylan:~$ curl -XPOST localhost:5000/users -d '[email protected]' -d 'password=mySuperPwd'\n{\"email\":\"[email protected]\",\"message\":\"user created\"}\nbob@dylan:~$ \nbob@dylan:~$ curl -XPOST localhost:5000/sessions -d '[email protected]' -d 'password=mySuperPwd' -v\nNote: Unnecessary use of -X or --request, POST is already inferred.\n* Trying 127.0.0.1...\n* TCP_NODELAY set\n* Connected to localhost (127.0.0.1) port 5000 (#0)\n> POST /sessions HTTP/1.1\n> Host: localhost:5000\n> User-Agent: curl/7.58.0\n> Accept: */*\n> Content-Length: 37\n> Content-Type: application/x-www-form-urlencoded\n> \n* upload completely sent off: 37 out of 37 bytes\n* HTTP 1.0, assume close after body\n< HTTP/1.0 200 OK\n< Content-Type: 
application/json\n< Content-Length: 46\n< Set-Cookie: session_id=163fe508-19a2-48ed-a7c8-d9c6e56fabd1; Path=/\n< Server: Werkzeug/1.0.1 Python/3.7.3\n< Date: Wed, 19 Aug 2020 00:12:34 GMT\n< \n{\"email\":\"[email protected]\",\"message\":\"logged in\"}\n* Closing connection 0\nbob@dylan:~$ \nbob@dylan:~$ curl -XPOST localhost:5000/sessions -d '[email protected]' -d 'password=BlaBla' -v\nNote: Unnecessary use of -X or --request, POST is already inferred.\n* Trying 127.0.0.1...\n* TCP_NODELAY set\n* Connected to localhost (127.0.0.1) port 5000 (#0)\n> POST /sessions HTTP/1.1\n> Host: localhost:5000\n> User-Agent: curl/7.58.0\n> Accept: */*\n> Content-Length: 34\n> Content-Type: application/x-www-form-urlencoded\n> \n* upload completely sent off: 34 out of 34 bytes\n* HTTP 1.0, assume close after body\n< HTTP/1.0 401 UNAUTHORIZED\n< Content-Type: text/html; charset=utf-8\n< Content-Length: 338\n< Server: Werkzeug/1.0.1 Python/3.7.3\n< Date: Wed, 19 Aug 2020 00:12:45 GMT\n< \n<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n<title>401 Unauthorized</title>\n<h1>Unauthorized</h1>\n<p>The server could not verify that you are authorized to access the URL requested. You either supplied the wrong credentials (e.g. a bad password), or your browser doesn't understand how to supply the credentials required.</p>\n* Closing connection 0\nbob@dylan:~$ \n\n```\n\n\n\n### 12. Find user by session ID\n\n\nIn this task, you will implement the `Auth.get_user_from_session_id` method. It takes a single `session_id` string argument and returns the corresponding `User` or `None`.\n\nIf the session ID is `None` or no user is found, return `None`. Otherwise return the corresponding user.\n\nRemember to only use public methods of `self._db`.\n\n\n\n### 13. Destroy session\n\n\n\nIn this task, you will implement `Auth.destroy_session`. 
The method takes a single `user_id` integer argument and returns `None`.\n\nThe method updates the corresponding user’s session ID to `None`.\n\nRemember to only use public methods of `self._db`.\n\n\n\n### 14. Log out\n\n\nIn this task, you will implement a `logout` function to respond to the `DELETE /sessions` route.\n\nThe request is expected to contain the session ID as a cookie with key `\"session_id\"`.\n\nFind the user with the requested session ID. If the user exists destroy the session and redirect the user to `GET /`. If the user does not exist, respond with a 403 HTTP status.\n\n\n\n### 15. User profile\n\n\nIn this task, you will implement a `profile` function to respond to the `GET /profile` route.\n\nThe request is expected to contain a `session_id` cookie. Use it to find the user. If the user exist, respond with a 200 HTTP status and the following JSON payload:\n\n```\n{\"email\": \"<user email>\"}\n\n```\n\nIf the session ID is invalid or the user does not exist, respond with a 403 HTTP status.\n\n```\nbob@dylan:~$ curl -XPOST localhost:5000/sessions -d '[email protected]' -d 'password=mySuperPwd' -v\nNote: Unnecessary use of -X or --request, POST is already inferred.\n* Trying 127.0.0.1...\n* TCP_NODELAY set\n* Connected to localhost (127.0.0.1) port 5000 (#0)\n> POST /sessions HTTP/1.1\n> Host: localhost:5000\n> User-Agent: curl/7.58.0\n> Accept: */*\n> Content-Length: 37\n> Content-Type: application/x-www-form-urlencoded\n> \n* upload completely sent off: 37 out of 37 bytes\n* HTTP 1.0, assume close after body\n< HTTP/1.0 200 OK\n< Content-Type: application/json\n< Content-Length: 46\n< Set-Cookie: session_id=75c89af8-1729-44d9-a592-41b5e59de9a1; Path=/\n< Server: Werkzeug/1.0.1 Python/3.7.3\n< Date: Wed, 19 Aug 2020 00:15:57 GMT\n< \n{\"email\":\"[email protected]\",\"message\":\"logged in\"}\n* Closing connection 0\nbob@dylan:~$\nbob@dylan:~$ curl -XGET localhost:5000/profile -b \"session_id=75c89af8-1729-44d9-a592-41b5e59de9a1\"\n{\"email\": 
\"[email protected]\"}\nbob@dylan:~$ \nbob@dylan:~$ curl -XGET localhost:5000/profile -b \"session_id=nope\" -v\nNote: Unnecessary use of -X or --request, GET is already inferred.\n* Trying 127.0.0.1...\n* TCP_NODELAY set\n* Connected to localhost (127.0.0.1) port 5000 (#0)\n> GET /profile HTTP/1.1\n> Host: localhost:5000\n> User-Agent: curl/7.58.0\n> Accept: */*\n> Cookie: session_id=75c89af8-1729-44d9-a592-41b5e59de9a\n> \n* HTTP 1.0, assume close after body\n< HTTP/1.0 403 FORBIDDEN\n< Content-Type: text/html; charset=utf-8\n< Content-Length: 234\n< Server: Werkzeug/1.0.1 Python/3.7.3\n< Date: Wed, 19 Aug 2020 00:16:43 GMT\n< \n<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n<title>403 Forbidden</title>\n<h1>Forbidden</h1>\n<p>You don't have the permission to access the requested resource. It is either read-protected or not readable by the server.</p>\n* Closing connection 0\n\nbob@dylan:~$ \n\n```\n\n\n\n### 16. Generate reset password token\n\n\nIn this task, you will implement the `Auth.get_reset_password_token` method. It take an `email` string argument and returns a string.\n\nFind the user corresponding to the email. If the user does not exist, raise a `ValueError` exception. If it exists, generate a UUID and update the user’s `reset_token` database field. Return the token.\n\n\n\n### 17. Get reset password token\n\n\nIn this task, you will implement a `get_reset_password_token` function to respond to the `POST /reset_password` route.\n\nThe request is expected to contain form data with the `\"email\"` field.\n\nIf the email is not registered, respond with a 403 status code. Otherwise, generate a token and respond with a 200 HTTP status and the following JSON payload:\n\n```\n{\"email\": \"<user email>\", \"reset_token\": \"<reset token>\"}\n\n```\n\n\n### 18. Update password\n\n\nIn this task, you will implement the `Auth.update_password` method. 
It takes `reset_token` string argument and a `password` string argument and returns `None`.\n\nUse the `reset_token` to find the corresponding user. If it does not exist, raise a `ValueError` exception.\n\nOtherwise, hash the password and update the user’s `hashed_password` field with the new hashed password and the `reset_token` field to `None`.\n\n\n\n### 19. Update password end-point\n\n\nIn this task you will implement the `update_password` function in the `app` module to respond to the `PUT /reset_password` route.\n\nThe request is expected to contain form data with fields `\"email\"`, `\"reset_token\"` and `\"new_password\"`.\n\nUpdate the password. If the token is invalid, catch the exception and respond with a 403 HTTP code.\n\nIf the token is valid, respond with a 200 HTTP code and the following JSON payload:\n\n```\n{\"email\": \"<user email>\", \"message\": \"Password updated\"}\n\n```\n\n\n### 20. End-to-end integration test\n\n\nStart your app. Open a new terminal window.\n\nCreate a new module called `main.py`. Create one function for each of the following tasks. Use the `requests` module to query your web server for the corresponding end-point. 
Use `assert` to validate the response’s expected status code and payload (if any) for each task.\n\n- `register_user(email: str, password: str) -> None`\n- `log_in_wrong_password(email: str, password: str) -> None`\n- `log_in(email: str, password: str) -> str`\n- `profile_unlogged() -> None`\n- `profile_logged(session_id: str) -> None`\n- `log_out(session_id: str) -> None`\n- `reset_password_token(email: str) -> str`\n- `update_password(email: str, reset_token: str, new_password: str) -> None`\n\nThen copy the following code at the end of the `main` module:\n\n```\nEMAIL = \"[email protected]\"\nPASSWD = \"b4l0u\"\nNEW_PASSWD = \"t4rt1fl3tt3\"\n\n\nif __name__ == \"__main__\":\n\n register_user(EMAIL, PASSWD)\n log_in_wrong_password(EMAIL, NEW_PASSWD)\n profile_unlogged()\n session_id = log_in(EMAIL, PASSWD)\n profile_logged(session_id)\n log_out(session_id)\n reset_token = reset_password_token(EMAIL)\n update_password(EMAIL, reset_token, NEW_PASSWD)\n log_in(EMAIL, NEW_PASSWD)\n\n```\n\nRun `python main.py`. 
If everything is correct, you should see no output.\n\n" }, { "alpha_fraction": 0.600247859954834, "alphanum_fraction": 0.6114002466201782, "avg_line_length": 27.02083396911621, "blob_id": "bf0c5a76f1df5584883b2672ebff364e9ca32a9f", "content_id": "3830d34065b48f8eb54d8e2fca1aa5bbd77c266d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4035, "license_type": "no_license", "max_line_length": 71, "num_lines": 144, "path": "/0x03-user_authentication_service/app.py", "repo_name": "KhaoulaDER/holbertonschool-backend-user-data", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\" Route module for the API \"\"\"\n\nfrom flask import Flask, jsonify, request, abort, redirect, url_for\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom auth import Auth\n\n\napp = Flask(__name__)\nAUTH = Auth()\n\n\[email protected]('/', methods=['GET'], strict_slashes=False)\ndef status() -> str:\n \"\"\" GET /status\n Return:\n - JSON payload\n \"\"\"\n return jsonify({\"message\": \"Bienvenue\"})\n\n\[email protected]('/users', methods=['POST'], strict_slashes=False)\ndef new_user() -> str:\n \"\"\" POST /users\n Registers new user with email and pswd in request form-data,\n or finds if user already registered based on email\n Return:\n - JSON payload\n \"\"\"\n\n # Get data from form request, change to request.get_json() for body\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n\n try:\n new_user = AUTH.register_user(email, password)\n if new_user is not None:\n return jsonify({\n \"email\": new_user.email,\n \"message\": \"user created\"\n })\n except ValueRrror:\n return jsonify({\n \"message\": \"email already registered\"\n }), 400\n\n\[email protected]('/sessions', methods=['POST'], strict_slashes=False)\ndef login() -> str:\n \"\"\" POST /sessions\n Creates new session for user, stores as cookie\n Email and pswd fields in x-www-form-urlencoded request\n Return:\n - JSON payload\n 
\"\"\"\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n valid_user = AUTH.valid_login(email, password)\n\n if not valid_user:\n abort(401)\n session_id = AUTH.create_session(email)\n message = {\"email\": email, \"message\": \"logged in\"}\n response = jsonify(message)\n response.set_cookie(\"session_id\", session_id)\n return response\n\n\[email protected]('/sessions', methods=['DELETE'], strict_slashes=False)\ndef logout():\n \"\"\" DELETE /sessions\n Destroys session by finding session_id (key in cookie)\n Return:\n - Redirects user to status route (GET /)\n \"\"\"\n user_cookie = request.cookies.get(\"session_id\", None)\n user = AUTH.get_user_from_session_id(user_cookie)\n if user_cookie is None or user is None:\n abort(403)\n AUTH.destroy_session(user.id)\n return redirect('/')\n\n\[email protected]('/profile', methods=['GET'], strict_slashes=False)\ndef profile() -> str:\n \"\"\" GET /profile\n Return:\n - Use session_id to find the user.\n - 403 if session ID is invalid\n \"\"\"\n user_cookie = request.cookies.get(\"session_id\", None)\n if user_cookie is None:\n abort(403)\n user = AUTH.get_user_from_session_id(user_cookie)\n if user is None:\n abort(403)\n return jsonify({\"email\": user.email}), 200\n\n\[email protected]('/reset_password', methods=['POST'], strict_slashes=False)\ndef get_reset_password_token() -> str:\n \"\"\" POST /reset_password\n - email\n Return:\n - Generate a Token\n - 403 if email not registered\n \"\"\"\n user_request = request.form\n user_email = user_request.get('email')\n is_registered = AUTH.create_session(user_email)\n\n if not is_registered:\n abort(403)\n\n token = AUTH.get_reset_password_token(user_email)\n message = {\"email\": user_email, \"reset_token\": token}\n return jsonify(message)\n\n\[email protected]('/reset_password', methods=['PUT'], strict_slashes=False)\ndef update_password() -> str:\n \"\"\" PUT /reset_password\n - email\n - reset_token\n - new_password\n Return:\n - 
Update the password\n - 403 if token is invalid\n \"\"\"\n user_email = request.form.get('email')\n reset_token = request.form.get('reset_token')\n new_password = request.form.get('new_password')\n\n try:\n AUTH.update_password(reset_token, new_password)\n except Exception:\n abort(403)\n\n message = {\"email\": user_email, \"message\": \"Password updated\"}\n return jsonify(message), 200\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=\"5000\")\n" }, { "alpha_fraction": 0.587193489074707, "alphanum_fraction": 0.5885558724403381, "avg_line_length": 31.477876663208008, "blob_id": "ebe998f2067c42d8e86196870ff2d2ad3a6b88ef", "content_id": "61e70a8e8c7a5553e423bac8566f5335c208bf76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3670, "license_type": "no_license", "max_line_length": 76, "num_lines": 113, "path": "/0x03-user_authentication_service/auth.py", "repo_name": "KhaoulaDER/holbertonschool-backend-user-data", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\" Authentication\n\"\"\"\nfrom bcrypt import hashpw, gensalt, checkpw\nfrom db import DB\nfrom user import User\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.exc import InvalidRequestError\nfrom uuid import uuid4\nfrom typing import Union\n\n\ndef _hash_password(password: str) -> str:\n \"\"\" Takes in string arg, converts to unicode\n Returns salted, hashed pswd as bytestring\n \"\"\"\n return hashpw(password.encode('utf-8'), gensalt())\n\n\ndef _generate_uuid() -> str:\n \"\"\" Generates UUID\n Returns string representation of new UUID\n \"\"\"\n return str(uuid4())\n\n\nclass Auth:\n \"\"\"Auth class to interact with the authentication database.\n \"\"\"\n\n def __init__(self):\n \"\"\" Instance \"\"\"\n self._db = DB()\n\n def register_user(self, email: str, password: str) -> User:\n \"\"\" Registers and returns a new user if email isn't listed\"\"\"\n try:\n self._db.find_user_by(email=email)\n 
raise ValueError(f\"User {email} already exists\")\n except NoResultFound:\n hashed_password = _hash_password(password)\n new_user = self._db.add_user(email, hashed_password)\n return new_user\n\n def valid_login(self, email: str, password: str) -> bool:\n \"\"\" Checks if user pswd is valid, locating by email \"\"\"\n try:\n found_user = self._db.find_user_by(email=email)\n return checkpw(\n password.encode('utf-8'),\n found_user.hashed_password\n )\n except NoResultFound:\n return False\n\n def create_session(self, email: str) -> str:\n \"\"\" Creates session ID using UUID, finds user by email \"\"\"\n try:\n found_user = self._db.find_user_by(email=email)\n except NoResultFound:\n return None\n\n session_id = _generate_uuid()\n self._db.update_user(found_user.id, session_id=session_id)\n return session_id\n\n def get_user_from_session_id(self, session_id: str) -> Union[str, None]:\n \"\"\" Finds user by session_id \"\"\"\n if session_id is None:\n return None\n try:\n found_user = self._db.find_user_by(session_id=session_id)\n return found_user\n except NoResultFound:\n return None\n\n def destroy_session(self, user_id: str) -> None:\n \"\"\" Updates user's session_id to None\"\"\"\n if user_id is None:\n return None\n try:\n found_user = self._db.find_user_by(id=user_id)\n self._db.update_user(found_user.id, session_id=None)\n except NoResultFound:\n return None\n\n def get_reset_password_token(self, email: str) -> str:\n \"\"\" Finds user by email, updates user's reset_toke with UUID \"\"\"\n try:\n found_user = self._db.find_user_by(email=email)\n except NoResultFound:\n raise ValueError\n\n reset_token = _generate_uuid()\n self._db.update_user(found_user.id, reset_token=reset_token)\n return reset_token\n\n def update_password(self, reset_token: str, password: str) -> None:\n \"\"\" Use the reset_token to find the corresponding user.\n If it does not exist, raise a ValueError exception.\n \"\"\"\n if reset_token is None or password is None:\n return None\n\n 
try:\n user = self._db.find_user_by(reset_token=reset_token)\n except NoResultFound:\n raise ValueError\n\n hashed_password = _hash_password(password)\n self._db.update_user(user.id,\n hashed_password=hashed_password,\n reset_token=None)\n" } ]
3
dsande30/COSC483_PA3
https://github.com/dsande30/COSC483_PA3
549b9d1213db5a4bba35a83b67cf032b19d1fda4
515d4d3dc9e1e96286a032b6c55ca9f7e4721afc
0b2030425df45f2b65cc9e6f063a2ecd19de1814
refs/heads/master
2021-08-24T10:02:19.218198
2017-12-09T04:58:23
2017-12-09T04:58:23
113,354,184
0
0
null
2017-12-06T18:43:15
2017-12-06T19:47:18
2017-12-09T04:59:14
Python
[ { "alpha_fraction": 0.6085432171821594, "alphanum_fraction": 0.613006055355072, "avg_line_length": 32.37234115600586, "blob_id": "7c806e1d694f8713bd699e58a8f140a3b9739d73", "content_id": "b50338a8fcd79074a6a7ba5a98065aa1c79f4355", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3137, "license_type": "no_license", "max_line_length": 121, "num_lines": 94, "path": "/unlock.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": "UTF-8", "text": "import argparse\nimport subprocess\nimport os\nimport sys\nfrom Crypto.Random import random\n\ndef getFlags():\n #parse command line args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", dest = 'directory', help=\"Enter directory to lock\", required = True)\n parser.add_argument(\"-p\", dest = 'pubKeyFile', help=\"Enter action public key file\", required = True)\n parser.add_argument(\"-r\", dest = 'privKeyFile', help= \"Enter action private key file\", required=True)\n parser.add_argument(\"-vk\", dest = 'valFile', help= \"Enter validate pubkey file\", required=True)\n args = parser.parse_args()\n return args\n\ndef pubVerify(args):\n dest = args.pubKeyFile + \"-casig\"\n command = \"python2.7 rsa-validate.py -k \" + args.valFile + \" -m \" + args.pubKeyFile + \" -s \" + dest\n returnVal = subprocess.check_output([command], shell=True)\n return returnVal.strip()\n\ndef symVerify(args):\n command = \"python2.7 rsa-validate.py -k \" + args.pubKeyFile + \" -m symManifest -s symManifest-casig\"\n returnVal = subprocess.check_output([command], shell=True)\n return returnVal.strip()\n\ndef encVerify(key, file, currentdir):\n file = file[:-4]\n command = \"python2.7 \" + currentdir + \"/cbcmac-validate-2.py -k \" + str(key) + \" -m \" + file + \" -t \" + file + \"-tag\"\n result = subprocess.check_output([command], shell=True)\n return result.strip()\n\ndef decDir(directory, key):\n currentdir = os.getcwd()\n newlist = []\n for letter in currentdir:\n 
newlist.append(letter)\n for i in range(0, len(newlist)):\n if newlist[i] == ' ':\n newlist[i] = \"\\ \"\n currentdir = ''.join(newlist)\n\n for root, dirs, files in os.walk(directory):\n os.chdir(directory)\n #tags\n for file in files:\n if file[-4:] == \"-tag\":\n encIntegrity = encVerify(key, file, currentdir)\n if encIntegrity != \"True\":\n sys.exit(\"Bad file detected: %s\" % file)\n os.remove(file)\n\n os.chdir(\"..\")\n for root, dirs, files in os.walk(directory):\n #for files\n os.chdir(directory)\n for file in files:\n decFile(file.strip(), key, currentdir)\n\ndef decFile(file, key, currentdir):\n command = \"python2.7 \" + currentdir + \"/cbc-dec.py -k \" + str(key) + \" -i \" + file + \" -o \" + file\n subprocess.call([command], shell=True)\n\ndef decManifest(args, key):\n command = \"python2.7 rsa-dec.py -k \" + args.privKeyFile + \" -i \" + str(key)\n result = subprocess.check_output([command], shell=True)\n return result.strip()\n\ndef readManifest():\n fd = open(\"symManifest\", \"rb\")\n key = fd.readline()\n fd.close()\n return key.strip()\n\ndef main():\n args = getFlags()\n lockIntegrity = pubVerify(args)\n if lockIntegrity != \"True\":\n print(\"Locking Party's Public Key Was Unverified\")\n exit()\n symIntegrity = symVerify(args)\n if symIntegrity != \"True\":\n print(\"Symmetric Key Manifest Was Unverified\")\n exit()\n\n key = readManifest()\n aesKey = decManifest(args, key)\n decDir(args.directory, aesKey)\n\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5534505844116211, "alphanum_fraction": 0.5659675002098083, "avg_line_length": 23.840335845947266, "blob_id": "e482044ed9b3adfa8433edb7cbdad9a25a91b32c", "content_id": "e4921fdda793b2ea72356c18fc523d7dfe1f7720", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2956, "license_type": "no_license", "max_line_length": 91, "num_lines": 119, "path": "/rsa-enc.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": 
"UTF-8", "text": "#RSA Encrypt\nimport sys\nimport argparse\nfrom Crypto.Util import number\nfrom Crypto.Random import random\nimport fractions\nimport binascii\n\ndef getFlags():\n #parse command line args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-k\", dest = 'keyFile', help=\"Enter Key file\", required = True)\n parser.add_argument(\"-i\", dest = 'inputFile', help=\"Enter input file\", required = True)\n args = parser.parse_args()\n return args\n\n#Encrypts the messag eafter padding\ndef Encrypt(m, contents):\n #print \"M: %d\" % m\n #rint \"N: %d\" % contents[1]\n #rint \"e: %d\" % contents[2]\n return pow(m, contents[2], contents[1])\n #return ((m**contents[2]) % contents[1])\n\n#NOTE: This will change with proper message\ndef writeOutput(outputFile, paddedM):\n o = open(outputFile, 'wb')\n o.write(str(paddedM))\n o.close()\n\n#Reads in the key file\ndef readKey(keyFile):\n key = open(keyFile, 'rb')\n numBits = key.readline()\n N = key.readline()\n e = key.readline()\n key.close()\n numBits = numBits.strip()\n N = N.strip()\n e = e.strip()\n return int(numBits), int(N), int(e)\n\n#in this instance we don't need input file\n'''\n#Reads in the input file\ndef readInput(inputFile):\n i = open(inputFile, 'rb')\n m = i.readline()\n i.close()\n m = str(m)\n return m\n'''\n\n#Function pads the message and prepares for encryption\ndef pad(message, r):\n M = \"\"\n M += str(ord(b'\\x00')) + str(ord(b'\\x02'))\n test = 0\n\n\n #Gets r random bits for padding\n while test == 0:\n test = 1\n rand = random.getrandbits(r)\n rand = str(rand)\n length = len(rand)\n randlength = 0\n list = []\n for x in rand:\n list.append(x)\n for i in range(0, len(list)):\n if list[i] == \"0\":\n string = str(random.getrandbits(30))\n if string[0:1] != 0:\n list[i] = string[0:1]\n #print(\"Replacing with %s\" % list[i])\n #randlength += int(x).bit_length()\n rand = int(''.join(list))\n randlength = rand.bit_length()\n if randlength != r:\n test = 0\n\n M += str(rand) + 
str(ord(b'\\x00'))\n message = message.strip()\n messageLen = 0\n\n M += message\n\n bitLength = 0\n for i in range(0, len(M)):\n bitLength += int(M[i]).bit_length()\n if M[i] == \"0\":\n bitLength += 1\n\n #DEBUGGING\n #print \"Rand: %s\" % rand\n #print randlength\n #print \"r - 24 = %d\" % (r - 24)\n #print \"Message after pad: %s\" % message\n #print \"messageLen: %d\" % messageLen\n #print \"What's M: %s\" % M\n #print \"bitLength: %d\" % bitLength\n\n return int(M)\n\n\n\ndef main():\n args = getFlags()\n contents = readKey(args.keyFile)\n message = args.inputFile\n paddedM = pad(message, int(contents[0]) / 2)\n c = Encrypt(paddedM, contents)\n print(c)\n return\n #writeOutput(args.outputFile, c)\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.5502625703811646, "alphanum_fraction": 0.5622655749320984, "avg_line_length": 24.634614944458008, "blob_id": "f203aeb61df2361da07512e5154db30e1a8cb116", "content_id": "9f9214e71baf418e92edf77c6e3b0fa19887139d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2666, "license_type": "no_license", "max_line_length": 104, "num_lines": 104, "path": "/rsa-keygen.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": "UTF-8", "text": "#RSA Key Generation\nimport sys\nimport argparse\nfrom Crypto.Util import number\nimport fractions\nimport subprocess\n\ndef getFlags():\n #parse command line args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", dest = 'publicFile', help=\"Enter public key file\", required = True)\n parser.add_argument(\"-s\", dest = 'secretFile', help=\"Enter private key file\", required = True)\n parser.add_argument(\"-n\", dest = 'numBits', type=int, help= \"Enter num of bits file\", required=True)\n parser.add_argument(\"-c\", dest = 'caFile', help=\"Enter CA private key\")\n args = parser.parse_args()\n\n return args\n\ndef variableGenerator(numBits):\n #to make p and q\n p = 0\n q = 0\n l = (numBits / 2)\n 
while(p ==0 and q == 0):\n p = number.getPrime(l)\n q = number.getPrime(l)\n #confirm lengths are the same, not the values\n if p.bit_length() != q.bit_length() or p == q:\n p = 0\n q = 0\n\n p = int(p)\n q = int(q)\n\n N = p * q\n order = (p-1)*(q-1)\n\n #calculate e: coprime to order\n ePrimes = [3,5,7,17,257, 65537]\n booly = 0\n for x in ePrimes:\n if(fractions.gcd(order, x) == 1):\n booly = 1\n e = x\n break\n if booly == 0:\n sys.exit(\"No coprime\")\n\n #d is inverse of e mod order\n #NOTE: if this is not allowed, let me know. I was unsure.\n d = number.inverse(e, order)\n\n #print \"P: %d\" % p\n #print \"Q: %d\" % q\n #print \"N: %d\" % N\n #print \"Order: %d\" % order\n\n return N, d, e\n\ndef writeFiles(args, keys):\n pub = open(args.publicFile, 'w')\n priv = open(args.secretFile, 'w')\n\n #Write public key\n pub.write(\"%d\\n\" % args.numBits)\n pub.write(\"%d\\n\" % keys[0])\n pub.write(\"%d\\n\" % keys[2])\n\n #Write private key\n priv.write(\"%d\\n\" % args.numBits)\n priv.write(\"%d\\n\" % keys[0])\n priv.write(\"%d\\n\" % keys[1])\n\n #close files\n pub.close()\n priv.close()\n\n #Read CA key\n if args.caFile is not None:\n CA = open(args.caFile, \"r\")\n caNumBits = CA.readline()\n caN = CA.readline()\n caD = CA.readline()\n CA.close()\n else:\n args.caFile = args.secretFile\n priv = open(args.secretFile, \"r\")\n caNumBits = priv.readline()\n caN = priv.readline()\n caD = priv.readline()\n priv.close()\n dest = args.publicFile + \"-casig\"\n\n command = \"python2.7 rsa-sign.py -k \" + args.caFile + \" -m \" + args.publicFile + \" -s \" + dest\n subprocess.call([command], shell=True)\n\n\ndef main():\n args = getFlags()\n keys = variableGenerator(args.numBits)\n writeFiles(args, keys)\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.5432525873184204, "alphanum_fraction": 0.5542099475860596, "avg_line_length": 23.08333396911621, "blob_id": "062247b54cb5baa49d9b69f16e8833d32edb7f62", "content_id": 
"9be988974c5ac22cd026078f8eed08a029a8e9f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1734, "license_type": "no_license", "max_line_length": 92, "num_lines": 72, "path": "/rsa-validate.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": "UTF-8", "text": "import argparse\nfrom Crypto.Hash import SHA256\n\ndef getFlags():\n #parse command line args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-k\", dest = 'keyFile', help=\"Enter key file\", required = True)\n parser.add_argument(\"-m\", dest = 'msgFile', help=\"Enter message file\", required = True)\n parser.add_argument(\"-s\", dest = 'sigFile', help= \"Enter signature file\", required=True)\n args = parser.parse_args()\n return args\n\ndef openFiles(args):\n #keyFile\n fd = open(args.keyFile, \"r\")\n numBits = fd.readline()\n numBits = int(numBits.strip())\n N = fd.readline()\n N = int(N.strip())\n pubKey = fd.readline()\n pubKey = int(pubKey.strip())\n keyTuple = (numBits, N, pubKey)\n fd.close()\n\n #msgFile\n msg = \"\"\n fd = open(args.msgFile, \"rb\")\n while True:\n string = fd.readline()\n if string == \"\":\n break\n msg += string\n fd.close()\n\n #sigFile\n fd = open(args.sigFile, \"r\")\n sig = \"\"\n while True:\n string = fd.readline()\n if string == \"\":\n break\n sig += string\n sig = int(sig)\n fd.close()\n\n return (keyTuple, msg, sig)\n\ndef hash(message):\n h = SHA256.new()\n h.update(message)\n return h\n\ndef verify(h, sig, N, e):\n result = pow(sig, e, N)\n #print(\"Validating %s\\n\\n%s\" % (result, (int(h.hexdigest(), 16) % N)))\n if result == (int(h.hexdigest(), 16) % N):\n return 1\n else:\n return 0\n\ndef main():\n args = getFlags()\n results = openFiles(args)\n h = hash(results[1])\n value = verify(h, results[2], results[0][1], results[0][2])\n if value == 1:\n print(\"True\")\n else:\n print(\"False\")\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5396214127540588, 
"alphanum_fraction": 0.5556625127792358, "avg_line_length": 23.93600082397461, "blob_id": "0d58b9fd3c45c0c8415ee2093e1d7e45c46dbfb7", "content_id": "5b7a7102cf4553c07672a51cc4a03b7fbe85cf7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3117, "license_type": "no_license", "max_line_length": 91, "num_lines": 125, "path": "/cbc-enc.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": "UTF-8", "text": "#CBC - ENC\n\nimport binascii\nimport getopt\nimport sys\nimport os\nfrom Crypto.Random import random\nfrom Crypto.Cipher import AES\nimport argparse\n\n#Pads the plaintext\ndef pad(message):\n if len(message) > 16:\n if len(message) % 16 != 0:\n message += \"0\" * (16 - (len(message) % 16))\n elif len(message) < 16:\n message += \"0\" * (16 - len(message))\n return message\n\n#XOR function\ndef xor(blocks, IV, key32):\n message = []\n cipher = AES.new(key32, AES.MODE_ECB)\n priorBlock = IV\n for currentBlock in blocks:\n ciphertext = cipher.encrypt(str(priorBlock))\n text = \"\"\n text += \"\".join(chr(ord(a)^ord(b)) for a,b in zip(currentBlock, ciphertext))\n priorBlock = text\n message += text\n return message\n\ndef main(argv):\n keyfile = ''\n inputfile = ''\n outputfile = ''\n ivfile = ''\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-k\", dest = 'keyfile', help=\"Enter key file\", required = True)\n parser.add_argument(\"-i\", dest = 'inputfile', help=\"Enter input file\", required = True)\n parser.add_argument(\"-o\", dest = 'outputfile', help= \"Enter ouput file\", required=True)\n args = parser.parse_args()\n\n #Opens Files\n #k = open(args.keyfile, 'rb')\n i = open(args.inputfile, 'rb')\n if ivfile != \"\":\n v = open(ivfile, 'rb')\n\n s = []\n key = \"\"\n plaintext = \"\"\n IV = \"\"\n\n #reads in key\n key = args.keyfile\n\n #read in input\n while True:\n string = i.readline()\n if string == \"\":\n break\n plaintext += string\n\n i.close()\n os.remove(args.inputfile)\n\n 
#Reads in from the input file, key file, and optional IV file\n try:\n if ivfile != \"\":\n byte3 = v.read(1)\n if ivfile != \"\":\n while byte3 != \"\":\n IV += byte3\n byte3 = v.read(1)\n finally:\n if ivfile != \"\":\n v.close()\n\n #Pads the plaintext if needed\n plaintext = pad(plaintext)\n\n blocks = []\n\n\n #Separates the plaintext into blocks of 16 bytes\n x = 0\n check = plaintext[:]\n while len(check) > 0:\n slicelen = min(len(plaintext), 16)\n blocks.append(check[0:slicelen])\n check = check[slicelen:]\n x += 1\n\n #Generates IV if no file specified\n if ivfile == \"\":\n test = 0\n while test == 0:\n IV = str(random.getrandbits(64))\n if len(IV.encode('utf-8')) == 16:\n test = 1\n\n #Gets correct key and Then XORs text\n #key32 = \"\".join([ ' ' if x >= len(s) else s[x] for x in range(32)])\n key32 = key.strip()\n\n #Gets rid of any newline chars in the IV and unhexlifies it if\n #read in from a file\n IV = IV.rstrip(\"\\r\") #FIXME \\n\n if ivfile != \"\":\n IV = binascii.unhexlify(IV)\n\n #Calls the XOR function for the ciphertext\n ciphertext = xor(blocks, IV, key32)\n\n #Only the ciphertext appears in the output file\n o = open(args.outputfile, 'wb')\n o.write(str(IV))\n o.write(\"\\n\")\n o.write(\"\".join(ciphertext))\n o.close()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" }, { "alpha_fraction": 0.6232368350028992, "alphanum_fraction": 0.6299183368682861, "avg_line_length": 32.25925827026367, "blob_id": "2140d4b5ed0b32db8fc4485f226fab7e58e3d13e", "content_id": "7e557b49882b6ea64b68846eb976106ad436b187", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2694, "license_type": "no_license", "max_line_length": 125, "num_lines": 81, "path": "/lock.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": "UTF-8", "text": "import os\nimport argparse\nfrom Crypto.Random import random\nimport subprocess\n\ndef getFlags():\n #parse command line args\n parser = argparse.ArgumentParser()\n 
parser.add_argument(\"-d\", dest = 'directory', help=\"Enter directory to lock\", required = True)\n parser.add_argument(\"-p\", dest = 'pubKeyFile', help=\"Enter action public key file\", required = True)\n parser.add_argument(\"-r\", dest = 'privKeyFile', help= \"Enter action private key file\", required=True)\n parser.add_argument(\"-vk\", dest = 'valFile', help= \"Enter validate pubkey file\", required=True)\n args = parser.parse_args()\n return args\n\ndef verifyUnlocker(args):\n command = \"python2.7 rsa-validate.py -k \" + args.valFile + \" -m \" + args.pubKeyFile + \" -s \" + args.pubKeyFile + \"-casig\"\n #print(\"Command: %s\" % command)\n result = subprocess.check_output([command], shell=True)\n if(result.strip() == \"True\"):\n #print(\"Verified\")\n return\n else:\n sys.exit(\"Unverified unlocker\")\n\ndef randAESKey():\n val = random.getrandbits(128)\n val = str(val)\n val = val[0:16]\n return int(val)\n\ndef rsaEnc(args, key):\n command = \"python2.7 rsa-enc.py -k \" + args.pubKeyFile + \" -i \" + str(key)\n result = subprocess.check_output([command], shell=True)\n return result.strip()\n\ndef printManifest(encryptedKey):\n fd = open(\"symManifest\", \"w\")\n fd.write(encryptedKey)\n fd.close()\n\ndef signManifest(lock_priv):\n command = \"python2.7 rsa-sign.py -k \" + lock_priv + \" -m symManifest -s symManifest-casig\"\n subprocess.call([command], shell=True)\n\ndef encryptDir(directory, key):\n currentdir = os.getcwd()\n newlist = []\n for letter in currentdir:\n newlist.append(letter)\n for i in range(0, len(newlist)):\n if newlist[i] == ' ':\n newlist[i] = \"\\ \"\n currentdir = ''.join(newlist)\n\n\n for root, dirs, files in os.walk(directory):\n os.chdir(directory)\n for file in files:\n encryptFile(file, key, currentdir)\n tagFile(file, key, currentdir)\n\ndef encryptFile(file, key, currentdir):\n command = \"python2.7 \" + currentdir + \"/cbc-enc.py -k \" + str(key) + \" -i \" + file + \" -o \" + file\n subprocess.call([command], 
shell=True)\n\ndef tagFile(file, key, currentdir):\n command = \"python2.7 \" + currentdir + \"/cbcmac-tag_2.py -k \" + str(key) + \" -m \" + file + \" -t \" + file + \"-tag\"\n subprocess.call([command], shell=True)\n\ndef main():\n args = getFlags()\n check = verifyUnlocker(args)\n key = randAESKey()\n encryptedKey = rsaEnc(args, key)\n printManifest(encryptedKey)\n signManifest(args.privKeyFile)\n encryptDir(args.directory, key)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5744485259056091, "alphanum_fraction": 0.5900735259056091, "avg_line_length": 25.536584854125977, "blob_id": "6ab30a3f54c3bfed40d428013eedcf2aeb7fc438", "content_id": "74b10b0aa192b904890c9c06f4c419445511e4a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2176, "license_type": "no_license", "max_line_length": 91, "num_lines": 82, "path": "/cbcmac-tag_2.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": "UTF-8", "text": "import argparse\nfrom Crypto.Cipher import AES\n\ndef getFlags():\n #parse command line args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-k\", dest = 'keyFile', help=\"Enter key file\", required = True)\n parser.add_argument(\"-m\", dest = 'msgFile', help=\"Enter message file\", required = True)\n parser.add_argument(\"-t\", dest = 'tagFile', help= \"Enter tag file\", required=True)\n args = parser.parse_args()\n return args\n\ndef getInfo(args):\n #fd = open(args.keyFile)\n key = args.keyFile\n #fd.close()\n\n fd = open(args.msgFile)\n msg = \"\"\n while True:\n string = fd.readline()\n if string == \"\":\n break\n msg += string\n fd.close()\n msg = msg.strip()\n return (key, msg)\n\n#Pads the plaintext\ndef pad(message):\n if len(message) > 16:\n if len(message) % 16 != 0:\n message += \"0\" * (16 - (len(message) % 16))\n elif len(message) < 16:\n message += \"0\" * (16 - len(message))\n return message\n\n#XOR function\ndef xor(blocks, key32):\n message = []\n cipher = 
AES.new(key32, AES.MODE_ECB)\n priorBlock = blocks[0]\n message = \"\"\n for currentBlock in blocks[1:]:\n ciphertext = cipher.encrypt(str(priorBlock))\n text = \"\"\n text += \"\".join(chr(ord(a)^ord(b)) for a,b in zip(currentBlock, ciphertext))\n priorBlock = text\n message += text\n return message\n\ndef blockify(plaintext):\n #Separates the plaintext into blocks of 16 bytes\n x = 0\n blocks = []\n length = len(plaintext)\n while(len(str(length)) < 16):\n length = \"0\" + str(length)\n blocks.append(length)\n check = plaintext[:]\n while len(check) > 0:\n slicelen = min(len(plaintext), 16)\n blocks.append(check[0:slicelen])\n check = check[slicelen:]\n x += 1\n return blocks\n\ndef writeFile(oFile, ciphertext):\n fd = open(oFile, \"wb\")\n fd.write(ciphertext)\n fd.close()\n\ndef main():\n args = getFlags()\n results = getInfo(args)\n paddedmsg = pad(results[1])\n blocks = blockify(paddedmsg)\n ciphertext = xor(blocks, results[0])\n writeFile(args.tagFile, ciphertext)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5597867369651794, "alphanum_fraction": 0.5738766193389893, "avg_line_length": 23.542055130004883, "blob_id": "34a43830106b3f2aed823511b322f755d530b1bf", "content_id": "4790755e3b19f69f3f127ba967a9eaeb82d90033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2626, "license_type": "no_license", "max_line_length": 91, "num_lines": 107, "path": "/cbcmac-validate.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": "UTF-8", "text": "import argparse\nfrom Crypto.Cipher import AES\nimport binascii\nimport difflib\n\ndef getFlags():\n #parse command line args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-k\", dest = 'keyFile', help=\"Enter key file\", required = True)\n parser.add_argument(\"-m\", dest = 'msgFile', help=\"Enter message file\", required = True)\n parser.add_argument(\"-t\", dest = 'tagFile', help= \"Enter tag file\", required=True)\n args = 
parser.parse_args()\n return args\n\ndef readTag(tagFile):\n t = open(tagFile, 'rb')\n tag = \"\"\n while True:\n string = t.readline()\n if string == \"\":\n break\n tag += string\n t.close()\n return tag\n\ndef readKey(keyFile):\n key = open(keyFile, 'rb')\n validKey = key.readline().strip()\n key.close()\n return validKey\n\ndef readInput(msgFile):\n i = open(msgFile, 'r')\n m = \"\"\n while True:\n string = i.readline()\n if string == \"\":\n break\n m += string\n m = m.strip()\n i.close()\n return m\n\n#Pads the plaintext\ndef pad(message):\n if len(message) > 16:\n if len(message) % 16 != 0:\n message += \"0\" * (16 - (len(message) % 16))\n elif len(message) < 16:\n message += \"0\" * (16 - len(message))\n return message\n\n#XOR function\ndef xor(blocks, key32):\n message = []\n cipher = AES.new(key32, AES.MODE_ECB)\n priorBlock = blocks[0]\n message = \"\"\n for currentBlock in blocks[1:]:\n ciphertext = cipher.encrypt(str(priorBlock))\n text = \"\"\n text += \"\".join(chr(ord(a)^ord(b)) for a,b in zip(currentBlock, ciphertext))\n priorBlock = text\n message += text\n return message\n\ndef blockify(plaintext):\n #Separates the plaintext into blocks of 16 bytes\n x = 0\n blocks = []\n length = len(plaintext)\n while(len(str(length)) < 16):\n length = \"0\" + str(length)\n blocks.append(length)\n check = plaintext[:]\n while len(check) > 0:\n slicelen = min(len(plaintext), 16)\n blocks.append(check[0:slicelen])\n check = check[slicelen:]\n x += 1\n return blocks\n\ndef verify(key, message, tag):\n message = pad(str(message))\n blocks = blockify(message)\n compMsg = xor(blocks, key)\n if tag == compMsg:\n return 1\n else:\n return 0\n\ndef main():\n args = getFlags()\n key = readKey(args.keyFile)\n tag = readTag(args.tagFile)\n message = readInput(args.msgFile)\n validity = verify(key, message, tag)\n if validity == 1:\n print(\"True\")\n return 1\n else:\n print(\"False\")\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 
0.5954098105430603, "alphanum_fraction": 0.6019672155380249, "avg_line_length": 24.41666603088379, "blob_id": "12cae38c1253ffab18fa94a8d06081805dd49cc5", "content_id": "a6585b892f684a552c7614a2ac7be8b03924f72f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1525, "license_type": "no_license", "max_line_length": 92, "num_lines": 60, "path": "/rsa-sign.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": "UTF-8", "text": "import argparse\nfrom Crypto.Hash import SHA256\n\ndef getFlags():\n #parse command line args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-k\", dest = 'keyFile', help=\"Enter key file\", required = True)\n parser.add_argument(\"-m\", dest = 'msgFile', help=\"Enter message file\", required = True)\n parser.add_argument(\"-s\", dest = 'sigFile', help= \"Enter signature file\", required=True)\n args = parser.parse_args()\n return args\n\ndef readKey(keyFile):\n key = open(keyFile, 'rb')\n numBits = key.readline()\n N = key.readline()\n d = key.readline()\n key.close()\n numBits = numBits.strip()\n N = N.strip()\n d = d.strip()\n return int(numBits), int(N), int(d)\n\ndef readInput(inputFile):\n i = open(inputFile, 'r')\n m = \"\"\n while True:\n string = i.readline()\n if string == \"\":\n break\n m += string\n i.close()\n return m\n\ndef writeOutput(sigFile, signature):\n s = open(sigFile, 'wb')\n s.write(str(signature))\n s.close()\n\ndef doHash(message):\n h = SHA256.new()\n h.update(message)\n #print(\"Hash: %s\" % h.hexdigest())\n return h\n\ndef sign(h, N, d):\n signature = pow(int(h.hexdigest(), 16), d, N)\n return signature\n\ndef main():\n args = getFlags()\n contents = readKey(args.keyFile)\n message = readInput(args.msgFile)\n h = doHash(message)\n signature = sign(h, contents[1], contents[2])\n #print(\"Writing: \\n%s\" % signature)\n writeOutput(args.sigFile, signature)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.567480742931366, 
"alphanum_fraction": 0.5700514316558838, "avg_line_length": 20.550724029541016, "blob_id": "e8f962b22537a12e4e05b6523b3c77732aae39f1", "content_id": "56eb1a01d46169b2af0a9f06fa3e2bf51592686a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1556, "license_type": "no_license", "max_line_length": 91, "num_lines": 69, "path": "/rsa-dec.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": "UTF-8", "text": "#RSA Decrypt\r\nimport sys\r\nimport argparse\r\nfrom Crypto.Util import number\r\nimport fractions\r\n\r\ndef getFlags():\r\n #parse command line args\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"-k\", dest = 'keyFile', help=\"Enter Key file\", required = True)\r\n parser.add_argument(\"-i\", dest = 'inputFile', help=\"Enter input file\", required = True)\r\n args = parser.parse_args()\r\n\r\n return args\r\n\r\n#Reads in key file\r\ndef readKey(keyFile):\r\n key = open(keyFile, 'rb')\r\n numBits = key.readline()\r\n numBits = numBits.strip()\r\n N = key.readline()\r\n N = N.strip()\r\n d = key.readline()\r\n d = d.strip()\r\n key.close()\r\n return numBits, N, d\r\n\r\n#Reads in Input File\r\ndef readInput(inputFile):\r\n inp = open(inputFile, 'rb')\r\n c = inp.readline()\r\n c = c.strip()\r\n return c\r\n\r\n#Decrypts the file\r\ndef Dec(key, c):\r\n c = int(c)\r\n d = int(key[2])\r\n N = int(key[1])\r\n #print \"C: %d\" % c\r\n #print(\"N: %d\" % N)\r\n #print(\"d: %d\" % d)\r\n m = pow(c, d, N)\r\n return m\r\n\r\n#Writes the output to the designated output file\r\ndef writeOutput(outputFile, m):\r\n out = open(outputFile, 'wb')\r\n out.write(\"%d\" %m)\r\n out.close()\r\n\r\n#Removes the padding to reveal the original message\r\ndef unpad(m):\r\n #print m\r\n r, M = m.split(\"0\", 1)\r\n M = M.strip()\r\n return M\r\n\r\ndef main():\r\n args = getFlags()\r\n key = readKey(args.keyFile)\r\n message = args.inputFile\r\n m = Dec(key, message)\r\n m = unpad(str(m))\r\n 
print(str(m))\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n" }, { "alpha_fraction": 0.5854460000991821, "alphanum_fraction": 0.5962441563606262, "avg_line_length": 23.482759475708008, "blob_id": "e8633f16843656ad8c8d0011817d0854a20e7c09", "content_id": "d817185c57e28bffb6b36dee19e42fc24f3d5b47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2130, "license_type": "no_license", "max_line_length": 91, "num_lines": 87, "path": "/cbc-dec.py", "repo_name": "dsande30/COSC483_PA3", "src_encoding": "UTF-8", "text": "##CBC - DEC\n\nimport binascii\nimport getopt\nimport sys\nimport os\nimport argparse\nfrom Crypto.Random import random\nfrom Crypto.Cipher import AES\n\n#XOR function\ndef xor(blocks, IV, key32):\n message = []\n cipher = AES.new(key32, AES.MODE_ECB)\n priorBlock = IV\n for currentBlock in blocks:\n ciphertext = cipher.encrypt(str(priorBlock))\n text = \"\"\n text += \"\".join(chr(ord(a)^ord(b)) for a,b in zip(currentBlock, ciphertext))\n priorBlock = currentBlock\n message += text\n return message\n\ndef main(argv):\n keyfile = ''\n inputfile = ''\n outputfile = ''\n ivfile = ''\n\n #Reads flags from command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-k\", dest = 'keyfile', help=\"Enter key file\", required = True)\n parser.add_argument(\"-i\", dest = 'inputfile', help=\"Enter input file\", required = True)\n parser.add_argument(\"-o\", dest = 'outputfile', help= \"Enter ouput file\", required=True)\n args = parser.parse_args()\n\n #Opens Files\n i = open(args.inputfile, 'rb')\n\n\n s = []\n key = \"\"\n ciphertext = \"\"\n\n #Takes the IV from the Encryption Part\n key = args.keyfile\n IV = i.readline()\n\n while True:\n string = i.readline()\n if string == \"\":\n break\n ciphertext += str(string)\n\n i.close()\n os.remove(args.inputfile)\n\n\n blocks = []\n\n #Splits up the ciphertext into blocks of 16 bytes\n x = 0\n check = ciphertext[:]\n while len(check) 
> 0:\n slicelen = min(len(ciphertext), 16)\n blocks.append(check[0:slicelen])\n check = check[slicelen:]\n x += 1\n\n #Gets correct key\n #key32 = \"\".join([ ' ' if x >= len(s) else s[x] for x in range(32)])\n key32 = key\n #print(\"Key %s\" % key32)\n\n #Strips any newline characters from the IV so that it is the right size\n IV = IV.rstrip(\"\\n\\r\")\n #print(\"IV %s\" % IV)\n\n #Calls the XOR function to get the original plaintext\n plaintext = xor(blocks, IV, key32)\n\n o = open(args.outputfile, 'wb')\n o.write(repr(\"\".join(plaintext)))\n o.close()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" } ]
11
YoonKiBum/programmers
https://github.com/YoonKiBum/programmers
edba69b9fa59ef879cfdab4e255dc51f1afddb85
f46e8387180d4ba705278d859b75a67a7978cf9e
3d71e8dcae76711526a4b76940b0a68b3700c0da
refs/heads/master
2023-03-17T15:07:30.284287
2021-02-23T10:44:43
2021-02-23T10:44:43
312,974,339
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4661017060279846, "alphanum_fraction": 0.5885122418403625, "avg_line_length": 33.25806427001953, "blob_id": "389a65eecf0760c996a72a33922d85c63e2666d0", "content_id": "6cf21376f20d0a774fb99ee560530c762543f2fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1510, "license_type": "no_license", "max_line_length": 48, "num_lines": 31, "path": "/README.md", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "# programmers\n<hr>\n\n### 코딩테스트 연습 (Python 소스코드)\n\n### Level 1\n * 크레인 인형뽑기 게임: ([Python 코드](/Level1/64061.py))\n * 두 개 뽑아서 더하기: ([Python 코드](/Level1/68644.py))\n * 완주하지 못한 선수: ([Python 코드](/Level1/42576.py))\n * 모의고사: ([Python 코드](/Level1/42840.py))\n * k번째 수: ([Python 코드](/Level1/42748.py))\n * 체육복: ([Python 코드](/Level1/42862.py))\n * 2016년: ([Python 코드](/Level1/12901.py))\n * 3진법 뒤집기: ([Python 코드](/Level1/68935.py))\n * 가운데 글자 가져오기: ([Python 코드](/Level1/12903.py))\n * 같은 숫자는 싫어: ([Python 코드](/Level1/12906.py))\n \n### Level 2\n * 다리를 지나는 트럭: ([Python 코드](/Level2/42583.py))\n * 주식가격: ([Python 코드](/Level2/42584.py))\n * 스킬트리: ([Python 코드](/Level2/49993.py))\n### 카카오공채\n * 자물쇠와 열쇠: ([Python 코드](/카카오공채/60059.py))\n * 기둥과 보 설치: ([Python 코드](/카카오공채/60061.py))\n * 문자열 압축: ([Python 코드](/카카오공채/60057.py))\n * 괄호 변환: ([Python 코드](/카카오공채/60058.py))\n * 블록 이동하기: ([Python 코드](/카카오공채/60063.py))\n * 실패율: ([Python 코드](/카카오공채/42889.py))\n * 가사 검색: ([Python 코드](/카카오공채/60060.py))\n * 무지의 먹방 라이브: ([Python 코드](/카카오공채/42891.py))\n * 외벽 점검: ([Python 코드](/카카오공채/60062.py))\n" }, { "alpha_fraction": 0.44736841320991516, "alphanum_fraction": 0.4583333432674408, "avg_line_length": 25.823530197143555, "blob_id": "10174d7e3620bd33fcf72f52c1c178c178582adc", "content_id": "500b40da1e65650180a3280566130fcd45ed1572", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 456, "license_type": "no_license", "max_line_length": 46, "num_lines": 
17, "path": "/Level2/49993.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "import copy\ndef solution(skill, skill_trees):\n count = 0\n check = [0] * len(skill)\n for word in skill_trees:\n copyskill = list(copy.deepcopy(skill))\n check = True\n for i in word:\n if i in copyskill:\n res = copyskill[0]\n if res != i:\n check = False\n break\n copyskill.pop(0)\n if check: \n count += 1\n return count\n" }, { "alpha_fraction": 0.6192052960395813, "alphanum_fraction": 0.6291390657424927, "avg_line_length": 24.16666603088379, "blob_id": "ed2837d7ad12ce569757025fbb8e849d5d9914bd", "content_id": "db5bd041bef13d865896451ff73483fc84a3e5a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 48, "num_lines": 12, "path": "/Level1/68644.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "from itertools import combinations\n\ndef solution(numbers):\n answer = []\n for combination in combinations(numbers, 2):\n x = int(combination[0])\n y = int(combination[1])\n answer.append(x+y)\n answer = set(answer)\n answer = list(answer)\n answer.sort()\n return answer\n" }, { "alpha_fraction": 0.43499043583869934, "alphanum_fraction": 0.45984703302383423, "avg_line_length": 33.86666488647461, "blob_id": "01ac47fc0ca59d5d93853ebc9ae7624507261619", "content_id": "bef4a9ab3a4b8c108198f2104bda22ecab3706c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 119, "num_lines": 30, "path": "/카카오공채/60061.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "# https://programmers.co.kr/learn/courses/30/lessons/60061\ndef check(answer):\n for i in range(len(answer)):\n x, y, a = answer[i]\n if a == 0: # 기둥 인 경우\n if y == 0 or [x-1, y, 1] in answer or [x, y, 1] in answer or [x, y-1, 0] in answer:\n continue\n 
else:\n return False\n elif a == 1: # 보 인 경우\n if [x, y-1, 0] in answer or [x+1, y-1, 0] in answer or ([x-1, y, 1] in answer and [x+1, y , 1] in answer): \n continue\n else:\n return False\n return True\n\ndef solution(n, build_frame):\n answer = []\n for i in range(len(build_frame)):\n x, y, a, b = build_frame[i]\n if b == 1: # 설치인 경우\n answer.append([x, y, a])\n if check(answer) == False: # 조건에서 어긋난 경우\n answer.pop()\n elif b == 0: # 삭제인 경우\n answer.remove([x, y, a])\n if check(answer) == False: # 조건에서 어긋난 경우\n answer.append([x, y, a])\n \n return sorted(answer)\n" }, { "alpha_fraction": 0.446153849363327, "alphanum_fraction": 0.4703296720981598, "avg_line_length": 31.5, "blob_id": "a326dbf5a7459cc4a978a862155a4376673344ff", "content_id": "121567e46450f25d6f3092ba7882f867f7c7186c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "no_license", "max_line_length": 79, "num_lines": 14, "path": "/Level1/64061.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "def solution(board, moves):\n answer = [-1]\n count = 0\n for i in range(len(moves)):\n for j in range(len(board)):\n if board[j][moves[i]-1] != 0:\n answer.append(board[j][moves[i]-1])\n board[j][moves[i]-1] = 0\n break\n if len(answer) >= 3 and answer[len(answer)-1] == answer[len(answer)-2]:\n answer.pop()\n answer.pop()\n count += 2\n return count\n" }, { "alpha_fraction": 0.4358353614807129, "alphanum_fraction": 0.4479418992996216, "avg_line_length": 23.294116973876953, "blob_id": "a11d325699728c74421ad4ec97f2ad18a42dec95", "content_id": "43d9f5b991d938615cfa8d0193fbdeba5caecb88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "no_license", "max_line_length": 36, "num_lines": 17, "path": "/카카오공채/42889.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "def solution(N, stages):\n total = len(stages)\n 
arr = []\n ans = []\n for i in range(1, N + 1):\n temp = stages.count(i)\n if total > 0:\n fail = temp / total\n total -= temp\n arr.append((fail, -(i)))\n else:\n arr.append((0, -(i)))\n arr.sort(reverse = True)\n \n for i in range(len(arr)):\n ans.append(-(arr[i][1]))\n return ans\n" }, { "alpha_fraction": 0.3840177655220032, "alphanum_fraction": 0.4694783687591553, "avg_line_length": 30.034482955932617, "blob_id": "80add779e8e57fbfb7a79531157d111e840f7207", "content_id": "df1539b6db799fc1ec199d8ab3b7ca7c67703984", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 901, "license_type": "no_license", "max_line_length": 58, "num_lines": 29, "path": "/Level1/42840.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "# https://programmers.co.kr/learn/courses/30/lessons/42840\ndef solution(answers):\n answer = []\n people_1 = [1, 2, 3, 4, 5] * 2000\n people_2 = [2, 1, 2, 3, 2, 4, 2, 5] * 2000\n people_3 = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5] * 1000\n num_of_1 = 0; num_of_2 = 0; num_of_3 = 0\n \n for i in range(len(answers)):\n if answers[i] == people_1[i]:\n num_of_1 += 1\n if answers[i] == people_2[i]:\n num_of_2 += 1\n if answers[i] == people_3[i]:\n num_of_3 += 1\n \n temp = [(num_of_1,1), (num_of_2,2), (num_of_3,3)] \n temp.sort(reverse = True)\n temp.append((-1, -1))\n \n for i in range(3):\n if temp[i][0] != temp[i+1][0]:\n answer.append(temp[i][1])\n answer.sort()\n return answer\n elif temp[i][0] == temp[i+1][0]:\n answer.append(temp[i][1])\n answer.sort()\n return answer\n\n" }, { "alpha_fraction": 0.5360000133514404, "alphanum_fraction": 0.5440000295639038, "avg_line_length": 24, "blob_id": "4d5080e45ec7385d55b86e0dc3f4b357cd04b522", "content_id": "9c485825aebefb31e69640a0bc5c232726ece56e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 39, "num_lines": 10, "path": 
"/Level1/42748.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "def solution(array, commands):\n n = len(commands)\n answer = []\n for i in range(n):\n start, end, count = commands[i]\n temp = array[start-1:end]\n temp.sort()\n answer.append(temp[count-1])\n \n return answer\n" }, { "alpha_fraction": 0.35738831758499146, "alphanum_fraction": 0.4536082446575165, "avg_line_length": 23.25, "blob_id": "a9cf860a174d6f44a9e897edaa2526a7c04f7de6", "content_id": "b49850c5d0c5a8efbea09d56c478e137ea6e5542", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 60, "num_lines": 12, "path": "/Level1/12901.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "def solution(a, b):\n month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n ans = ['FRI','SAT','SUN','MON','TUE','WED','THU']\n index = a - 1 \n day = b - 1\n \n sum = 0\n for i in range(index):\n sum += month[i]\n sum += day\n sum = sum % 7\n return ans[sum]\n" }, { "alpha_fraction": 0.43091654777526855, "alphanum_fraction": 0.45690834522247314, "avg_line_length": 31.488889694213867, "blob_id": "847ca7e2654788621c1a2914e563351241d300c3", "content_id": "da68286cc13e68f17555905be5135deb46ce0bc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1482, "license_type": "no_license", "max_line_length": 67, "num_lines": 45, "path": "/카카오공채/60063.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "from collections import deque\n\ndef can_move(pos, graph):\n next_pos = []\n pos = list(pos)\n lx, ly, rx, ry = pos[0][0], pos[0][1], pos[1][0], pos[1][1]\n dx = [-1, 1, 0, 0]\n dy = [0, 0, -1, 1]\n for i in range(4):\n nlx, nly, nrx, nry = lx+dx[i], ly+dy[i], rx+dx[i], ry+dy[i]\n if graph[nlx][nly] == 0 and graph[nrx][nry] == 0:\n next_pos.append({(nlx, nly), (nrx, nry)})\n if lx == rx: # 가로인 
경우\n for i in [-1, 1]:\n if graph[lx+i][ly] == 0 and graph[rx+i][ry] == 0:\n next_pos.append({(lx,ly), (lx+i, ly)})\n next_pos.append({(rx, ry), (rx+i, ry)})\n elif ly == ry: # 세로인 경우\n for i in [-1, 1]:\n if graph[lx][ly+i] == 0 and graph[rx][ry+i] == 0:\n next_pos.append({(lx, ly), (lx, ly+i)})\n next_pos.append({(rx, ry), (rx, ry+i)})\n return next_pos\n\ndef solution(board):\n n = len(board) + 2\n m = len(board)\n graph = [[1]*n for _ in range(n)]\n for i in range(m):\n for j in range(m):\n graph[i+1][j+1] = board[i][j]\n q = deque()\n visited = []\n pos = {(1, 1), (1, 2)}\n q.append((pos, 0))\n visited.append(pos)\n while q:\n pos, cost = q.popleft()\n if (m, m) in pos:\n return cost\n for next_pos in can_move(pos, graph):\n if next_pos not in visited:\n q.append((next_pos, cost+1))\n visited.append(next_pos)\n return 0\n" }, { "alpha_fraction": 0.49077489972114563, "alphanum_fraction": 0.5129151344299316, "avg_line_length": 26.100000381469727, "blob_id": "8b577448f1d6521cde1cab13d53b910cc7f1f024", "content_id": "57423838cee8e0ab8338166bc1ae53a64992141c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "no_license", "max_line_length": 39, "num_lines": 20, "path": "/Level1/42862.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "def solution(n, lost, reserve):\n lost.sort()\n reserve.sort()\n count = 0\n lost = set(lost)\n reserve = set(reserve)\n temp1 = lost - reserve\n temp2 = reserve - lost\n lost = list(temp1)\n reserve = list(temp2)\n for i in range(len(lost)):\n if len(reserve) <= 0:\n break\n if lost[i] - 1 in reserve:\n reserve.remove(lost[i]-1)\n count += 1\n elif lost[i] + 1 in reserve:\n reserve.remove(lost[i] + 1)\n count += 1\n return n - (len(lost) - count)\n" }, { "alpha_fraction": 0.3531745970249176, "alphanum_fraction": 0.3769841194152832, "avg_line_length": 18.384614944458008, "blob_id": "b6647bf18555789bbb087f31d0dcf2a584d857fd", 
"content_id": "bac1a4b438b884c1326c8146b1b3d8139387d74f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "no_license", "max_line_length": 47, "num_lines": 13, "path": "/Level1/68935.py", "repo_name": "YoonKiBum/programmers", "src_encoding": "UTF-8", "text": "def solution(n):\n arr = []\n while True:\n if n == 0:\n break\n i = n % 3\n arr.append(i)\n n = n // 3\n \n sum = 0\n for i in range(len(arr)):\n sum += (3 ** i) * arr[len(arr) - i - 1]\n return sum\n" } ]
12
leying95/stereopy
https://github.com/leying95/stereopy
0638be0586d056fc5a396c57359303a49f6cf639
1580a88a091a2ebc0f177ea73409e2c4b4dd4c7e
6c87cf3f76566cde6ec6d86891cae649b3fc1582
refs/heads/main
2023-03-29T22:12:40.346022
2021-04-09T03:17:09
2021-04-09T03:17:09
358,531,712
0
0
MIT
2021-04-16T08:36:07
2021-04-15T12:44:28
2021-04-15T13:25:58
null
[ { "alpha_fraction": 0.40083393454551697, "alphanum_fraction": 0.5601885318756104, "avg_line_length": 25.257143020629883, "blob_id": "7bd2f370968ac513aa8f45d4b85f38dba0a2b29f", "content_id": "e13c71464e3fbf9b2020ffb3cba140cc0bc8926e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6558, "license_type": "permissive", "max_line_length": 199, "num_lines": 210, "path": "/stereo/plots/plot_data_description.md", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "# 绘图数据需求\n\n以 AnnData 为参照获取对应的输入数据\n\n## plot_spatial_distribution\n\n### 所需 AnnData 数据:\n\n- 图像坐标信息: \n - `spatial_data = np.array(adata.obsm[\"spatial\"])`\n\n- 图像散点数值信息: \n - `color_data = np.array(adata.obs_vector(key))`\n - 其中 key 为散点所需呈现的数据种类,如 total_counts, n_genes_by_counts,可利用的数据种类都在 `adata.obs.keys()`\n\n### 实际所需数据格式:\n\n- 图像坐标信息demo:\n \n ```Text\n array([[ 256, -91],[ 185, -157],[ 257, -288],...,[ 151, -246],[ 290, -141],[ 177, -132]])\n ```\n\n- 图像散点数值信息demo:\n \n ```Text\n array([3582., 1773., 2283., ..., 4062., 4371., 1172.], dtype=float32)\n ```\n\n其中散点数值和坐标信息一一对应\n\n## plot_spatial_cluster && plot_cluster_umap\n\n这两个函数整合成了 plot_cluster_result,区别在于所用的 spatial data (pos_key) 不同, plot_spatial_cluster 使用的是 spatial 空间数据,plot_cluster_umap 使用的是 X_umap 的主成分值数据\n\n### 所需 AnnData 数据:\n\n- 图像坐标信息: \n - `spatial_data = np.array(adata.obsm[pos_key])`\n\n- 图像散点数值信息: \n - `color_data = np.array(adata.obs_vector(key))`\n - 其中 key 为散点所需呈现的数据种类,可利用的数据种类都在 adata.obs.keys(),此处用到的主要是聚类相关数据,如 [\"phenograph\", \"leiden\"]\n - 在 cluster 相关的图像中,散点数据属于 categorical 数据\n\n### 实际所需数据格式:\n\n- 图像坐标信息demo:\n \n spatial:\n\n ```Text\n array([[ 256, -91],[ 185, -157],[ 257, -288],...,[ 151, -246],[ 290, -141],[ 177, -132]])\n ```\n\n umap:\n \n ```Text\n array([[-2.234908 , 3.7087667 ], [ 3.7431364 , 2.003201 ], [ 4.849478 , -0.21215418], ..., [ 6.861894 , 1.7589074 ], [-0.4605783 , 4.0366364 ], [ 7.3391566 , -0.40746352]], dtype=float32)\n 
```\n\n- 图像散点数值信息demo:\n \n ```Text\n ['0', '3', '5', '3', '2', ..., '4', '3', '6', '1', '5']\n Length: 22726\n Categories (10, object): ['0', '1', '2', '3', ..., '6', '7', '8', '9']\n ```\n\n其中散点数值和坐标信息一一对应\n\n\n## plot_to_select_filter_value\n\n### 所需 AnnData 数据\n\n- `x = adata.obs_vector(var1)`\n- `y = adata.obs_vector(var2)`\n\nvar1 var2 可以是任意两个 obs_key 中的值\n\n### 实际所需数据格式\n\n```Text\narray([3582., 1773., 2283., ..., 4062., 4371., 1172.], dtype=float32)\n```\n\n## plot_variable_gene\n\n### 所需 AnnData 数据:\n\n- `adata.var.highly_variable`\n- `adata.var.means`\n- `adata.var.dispersions`\n- `adata.var.dispersions_norm`\n\n### 实际所需数据格式\n\npandas.core.series.Series\n\n例如: adata.var.means:\n\n```Text\nAL355102.2 0.012042\nSLC25A2 0.000596\n ... \nAC069214.1 0.009608\nAL356056.1 0.001655\nName: means, Length: 33304, dtype: float64\n```\n\n## plot_expression_difference\n\n### 所需 AnnData 数据\n\n- 聚类得到的类名: `group_names = adata.uns[\"rank_genes_groups\"]['names'].dtype.names`\n- 在各个类名(group_name in group_names)的基础上获取各个类的基因列表('names')和分数('scores'):\n - `gene_names = adata.uns[\"rank_genes_groups\"]['names'][group_name][:number_of_gene_to_show]`\n - `scores = adata.uns[\"rank_genes_groups\"]['scores'][group_name][:number_of_gene_to_show]`\n\n### 实际所需数据格式:\n\n- 聚类得到的类名:\n\n ```Text\n tuple('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')\n ```\n- 各个类名对应的基因列表和分数列表:\n \n ```Text\n numpy.ndarray\n \n array(['TUBB4B', 'SLC12A2', 'EPHB3', ..., 'TFF3', 'TIMP1', 'OLFM4'],\n dtype=object)\n \n array([ 28.094688, 27.638664, 24.523687, ..., -41.63738 , -42.894638, -48.384773], dtype=float32)\n ```\n\n## plot_violin_distribution\n\n### 所需 AnnData 数据\n\n- `adata.obs['total_counts']`\n- `adata.obs['n_genes_by_counts']`\n- `adata.obs['pct_counts_mt']`\n\n### 实际所需数据格式\n\npandas.core.series.Series\n\n例如: total_counts:\n\n```Text\n256-91 3582.0\n185-157 1773.0\n ... 
\n290-141 4371.0\n177-132 1172.0\nName: total_counts, Length: 22726, dtype: float32\n```\n\n## plot_heatmap_maker_genes\n\n### 所需 AnnData 数据:\n\n- 聚类得到的类名: `marker_clusters = adata.uns[\"rank_genes_groups\"]['names'].dtype.names`\n- 在各个类名(cluster in marker_clusters)的基础上获取各个类的基因列表('names'):\n - `genes_array = adata.uns[\"rank_genes_groups\"]['names'][group_name][:number_of_gene_to_show]`\n- 设定表达量矩阵(热图矩阵)\n - 矩阵index: `adata.obs_name`\n - 整合各个类的基因列表,获取 uniq gene list,然后获取这些uniq gene 的表达量来构建 pandas.DataFrame\n - 表达量: `exp_matrix = adata.X[tuple([slice(None), adata.var.index.get_indexer(uniq_gene_names)])]`\n - pd.DataFrame: `pd.DataFrame(exp_matrix, columns=uniq_gene_names, index=adata.obs_names)`\n - 添加 obs 列在最开头: `pd.concat([draw_df, adata.obs[cluster_method]], axis=1)`\n \n\n### 实际所需数据格式:\n\n- 聚类得到的类名:\n\n ```Text\n tuple('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')\n ```\n\n- 各个类名对应的基因列表:\n \n ```Text\n numpy.ndarray\n \n array(['TUBB4B', 'SLC12A2', 'EPHB3', ..., 'TFF3', 'TIMP1', 'OLFM4'],\n dtype=object)\n ```\n\n- 表达量矩阵最终效果:\n \n ```Text\n TUBB4B SLC12A2 EPHB3 MT-ND1 FTX NAALADL2 \n phenograph \n 0 3.758313 1.884562 2.705292 4.040148 2.498723 2.238069 \n 0 3.118562 1.224227 1.758388 3.311160 1.758388 0.000000 \n 0 2.790583 0.000000 0.000000 3.175375 0.000000 0.000000 \n 0 0.000000 1.885036 1.885036 3.821939 0.000000 0.000000 \n 0 2.596922 0.000000 2.596922 3.644590 2.596922 2.596922 \n ... ... ... ... ... ... ... 
\n 9 3.172084 1.570598 0.000000 4.186041 1.570598 0.000000 \n 9 0.000000 1.847625 0.000000 3.893912 0.000000 2.458688 \n 9 2.681648 2.681648 0.000000 3.339970 1.314215 1.314215 \n 9 1.955572 1.955572 0.000000 3.229691 1.955572 0.000000 \n 9 2.963927 1.721601 1.721601 3.176694 1.721601 0.000000 \n ```\n\n\n" }, { "alpha_fraction": 0.5407407283782959, "alphanum_fraction": 0.614814817905426, "avg_line_length": 15.875, "blob_id": "6be96570692f13225dce46515d9ca9c98fef1296", "content_id": "0a956633adf2311ac4c74e39451ffd2cbcbd1029", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "permissive", "max_line_length": 25, "num_lines": 8, "path": "/stereo/plots/_plot_basic/__init__.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Shixu He [email protected]\n@last modified by: Shixu He\n@file:__init__.py.py\n@time:2021/03/15\n\"\"\"\n" }, { "alpha_fraction": 0.8245614171028137, "alphanum_fraction": 0.8245614171028137, "avg_line_length": 27.5, "blob_id": "07e56b8199fa5930875326eee974fd0b94d0cbaa", "content_id": "a08af99a95386dcc9a13b89532990ab6cd210c1f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 57, "license_type": "permissive", "max_line_length": 45, "num_lines": 2, "path": "/README.md", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "# stereopy\nA toolkit of spatial transcriptomic analysis.\n" }, { "alpha_fraction": 0.576362669467926, "alphanum_fraction": 0.582298994064331, "avg_line_length": 30.94827651977539, "blob_id": "da5efc016b2d7c7a1cc9131a627d2377e1ae0157", "content_id": "66c073cd335fd0fa657926a18e9d780efb70e29a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1853, "license_type": "permissive", "max_line_length": 78, "num_lines": 58, "path": 
"/stereo/log_manager.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:log_manager.py\n@time:2021/03/05\n\"\"\"\n\nimport logging\nfrom .config import stereo_conf\n\n\nclass LogManager(object):\n def __init__(self, log_path=None, level=None):\n self.level_map = {'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL}\n self.format = stereo_conf.log_format\n self.formatter = logging.Formatter(self.format, \"%Y-%m-%d %H:%M:%S\")\n self.log_path = log_path\n self.level = level.lower() if level else stereo_conf.log_level.lower()\n if self.log_path:\n self.file_handler = logging.FileHandler(self.log_path)\n self.file_handler.setLevel(self.level_map[self.level])\n self.file_handler.setFormatter(self.formatter)\n else:\n self.stream_handler = logging.StreamHandler()\n self.stream_handler.setLevel(self.level_map[self.level])\n self.stream_handler.setFormatter(self.formatter)\n\n def get_logger(self, name=\"Spateo\"):\n \"\"\"\n get logger object\n :param name: logger name\n :return: logger object\n \"\"\"\n alogger = logging.getLogger(name)\n alogger.propagate = 0\n alogger.setLevel(self.level_map[self.level])\n self._add_handler(alogger)\n return alogger\n\n def _add_handler(self, alogger):\n \"\"\"\n add handler of logger\n :param logger: logger object\n :return:\n \"\"\"\n if self.log_path:\n alogger.addHandler(self.file_handler)\n else:\n alogger.addHandler(self.stream_handler)\n\n\nlogger = LogManager().get_logger(name='Spateo')\n" }, { "alpha_fraction": 0.5441176295280457, "alphanum_fraction": 0.5572336912155151, "avg_line_length": 34.42253494262695, "blob_id": "223b8ada9eb9f510c9b68f7895d4f42a65b81f8e", "content_id": "0299148833875c5536949e7ad7ae0d0f9b9dafff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 2516, "license_type": "permissive", "max_line_length": 117, "num_lines": 71, "path": "/stereo/tools/neighbors.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:neighbors.py\n@time:2021/03/23\n\"\"\"\nfrom scipy.sparse import coo_matrix\nfrom sklearn.neighbors import NearestNeighbors\nimport igraph as ig\nimport numpy as np\nfrom umap.umap_ import fuzzy_simplicial_set\n\n\nclass Neighbors(object):\n def __init__(self, x, n_neighbors):\n self.x = x\n self.n_neighbors = n_neighbors\n\n def find_n_neighbors(self):\n nbrs = NearestNeighbors(n_neighbors=self.n_neighbors + 1, algorithm='ball_tree').fit(self.x)\n dists, indices = nbrs.kneighbors(self.x)\n nn_idx = indices[:, 1:]\n nn_dist = dists[:, 1:]\n return nn_idx, nn_dist\n\n def get_igraph_from_knn(self, nn_idx, nn_dist):\n j = nn_idx.ravel().astype(int)\n dist = nn_dist.ravel()\n i = np.repeat(np.arange(nn_idx.shape[0]), self.n_neighbors)\n\n vertex = list(range(nn_dist.shape[0]))\n edges = list(tuple(zip(i, j)))\n G = ig.Graph()\n G.add_vertices(vertex)\n G.add_edges(edges)\n G.es['weight'] = dist\n return G\n\n def get_parse_distances(self, nn_idx, nn_dist):\n n_obs = self.x.shape[0]\n rows = np.zeros((n_obs * self.n_neighbors), dtype=np.int64)\n cols = np.zeros((n_obs * self.n_neighbors), dtype=np.int64)\n vals = np.zeros((n_obs * self.n_neighbors), dtype=np.float64)\n\n for i in range(nn_idx.shape[0]):\n for j in range(self.n_neighbors):\n if nn_idx[i, j] == -1:\n continue # We didn't get the full knn for i\n if nn_idx[i, j] == i:\n val = 0.0\n else:\n val = nn_dist[i, j]\n\n rows[i * self.n_neighbors + j] = i\n cols[i * self.n_neighbors + j] = nn_idx[i, j]\n vals[i * self.n_neighbors + j] = val\n\n distances = coo_matrix((vals, (rows, cols)), shape=(n_obs, n_obs))\n distances.eliminate_zeros()\n return distances.tocsr()\n\n def 
get_connectivities(self, nn_idx, nn_dist):\n n_obs = self.x.shape[0]\n x = coo_matrix(([], ([], [])), shape=(n_obs, 1))\n connectivities = fuzzy_simplicial_set(x, self.n_neighbors, None, None, knn_indices=nn_idx, knn_dists=nn_dist,\n set_op_mix_ratio=1.0, local_connectivity=1.0)\n if isinstance(connectivities, tuple):\n connectivities = connectivities[0]\n return connectivities.tocsr()\n\n" }, { "alpha_fraction": 0.5665071606636047, "alphanum_fraction": 0.5779904127120972, "avg_line_length": 18.716981887817383, "blob_id": "75c8c1af025576b41fdaa21b6c25ecd8e15afd72", "content_id": "cd7efb2f1f96eb9937d843ad9cef415c0aff5d4f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1045, "license_type": "permissive", "max_line_length": 107, "num_lines": 53, "path": "/stereo/core/stereo_data.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:stereo_data.py\n@time:2021/03/22\n\"\"\"\n\n\nclass StereoData(object):\n def __init__(self, raw_file=None, exp_matrix=None, genes=None, bins=None, position=None, partitions=1):\n self.index = None\n self.index_file = None\n self.exp_matrix = exp_matrix\n self.genes = genes\n self.bins = bins\n self.position = position\n self.raw_file = raw_file\n self.partitions = partitions\n\n def filter_genes(self):\n pass\n\n def filter_bins(self):\n pass\n\n def search(self):\n pass\n\n def combine_bins(self, bin_size, step):\n pass\n\n def select_by_genes(self, gene_list):\n pass\n\n def select_by_position(self, x_min, y_min, x_max, y_max, bin_size):\n pass\n\n def transform_matrix(self):\n pass\n\n def get_genes(self):\n pass\n\n def get_bins(self):\n pass\n\n def split_data(self):\n pass\n\n def sparse2array(self):\n pass\n" }, { "alpha_fraction": 0.6382660865783691, "alphanum_fraction": 0.6636771559715271, "avg_line_length": 29.409090042114258, 
"blob_id": "b9472f782f76c2f6c8aaa8802826cc4656afb1ba", "content_id": "ed031fae5add10b548834f898ab95a813e1f9395", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "permissive", "max_line_length": 71, "num_lines": 22, "path": "/stereo/preprocess/qc.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:qc.py\n@time:2021/03/26\n\"\"\"\nfrom scipy.sparse import issparse\nimport numpy as np\n\n\ndef cal_qc(andata):\n exp_matrix = andata.X.toarray() if issparse(andata.X) else andata.X\n total_count = exp_matrix.sum(1)\n n_gene_by_count = np.count_nonzero(exp_matrix, axis=1)\n mt_index = andata.var_names.str.startswith('MT-')\n mt_count = np.array(andata.X[:, mt_index].sum(1)).reshape(-1)\n andata.obs['total_counts'] = total_count\n andata.obs['pct_counts_mt'] = mt_count / total_count * 100\n andata.obs['n_genes_by_counts'] = n_gene_by_count\n return andata\n" }, { "alpha_fraction": 0.5326364636421204, "alphanum_fraction": 0.5358884930610657, "avg_line_length": 25.411043167114258, "blob_id": "57ec9a317b271c5001152cd0804fe0de76566362", "content_id": "5e726176ef70882e7b7c303bdb60a84d1c62f300", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4305, "license_type": "permissive", "max_line_length": 114, "num_lines": 163, "path": "/stereo/config.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:config.py\n@time:2021/03/05\n\"\"\"\nfrom typing import Union, Optional\nfrom pathlib import Path\nimport os\nfrom matplotlib import rcParams, rcParamsDefault\n\n\nclass StereoConfig(object):\n \"\"\"\n config of stereo.\n \"\"\"\n def __init__(\n self,\n 
file_format: str = \"h5ad\",\n auto_show: bool = True,\n n_jobs=1,\n log_file: Union[str, Path, None] = None,\n log_level: str = \"info\",\n log_format: str = \"%(asctime)s %(name)s %(levelname)s: %(message)s\",\n output: str = \"./output\",\n data_dir: str = None\n ):\n self._file_format = file_format\n self._auto_show = auto_show\n self._n_jobs = n_jobs\n self._log_file = log_file\n self._log_level = log_level\n self._log_format = log_format\n self.out_dir = output\n self.data_dir = data_dir if data_dir else os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')\n\n @property\n def log_file(self) -> Union[str, Path, None]:\n \"\"\"\n get the file path of log.\n :return:\n \"\"\"\n return self._log_file\n\n @log_file.setter\n def log_file(self, value):\n \"\"\"\n set file path of log.\n :param value: value of log file path\n :return:\n \"\"\"\n if value:\n dir_path = os.path.dirname(value)\n if not os.path.exists(dir_path):\n raise FileExistsError(\"folder does not exist, please check!\")\n self._log_file = value\n\n @property\n def log_format(self) -> str:\n \"\"\"\n get the format of log.\n :return:\n \"\"\"\n return self._log_format\n\n @log_format.setter\n def log_format(self, value):\n \"\"\"\n set file path of log.\n :param value: value of log format\n :return:\n \"\"\"\n self._log_format = value\n\n @property\n def log_level(self) -> str:\n \"\"\"\n get log level\n :return:\n \"\"\"\n return self._log_level\n\n @log_level.setter\n def log_level(self, value):\n \"\"\"\n set log level\n :param value: the value of log level\n :return:\n \"\"\"\n if value.lower() not in ['info', 'warning', 'debug', 'error', 'critical']:\n print('the log level is out of range, please check and it is not modified.')\n else:\n self._log_level = value\n\n @property\n def auto_show(self):\n \"\"\"\n Auto show figures if `auto_show == True` (default `True`).\n :return:\n \"\"\"\n return self._auto_show\n\n @auto_show.setter\n def auto_show(self, value):\n \"\"\"\n set value 
of auto_show\n :param value: value of auto_show\n :return:\n \"\"\"\n self._auto_show = value\n\n @property\n def file_format(self) -> str:\n \"\"\"\n file format of saving anndata object\n :return:\n \"\"\"\n return self._file_format\n\n @file_format.setter\n def file_format(self, value):\n \"\"\"\n set the value of file format\n :param value: the value of file format\n :return:\n \"\"\"\n self._file_format = value\n\n @property\n def n_jobs(self) -> int:\n return self._n_jobs\n\n @n_jobs.setter\n def n_jobs(self, value):\n self._n_jobs = value\n\n @staticmethod\n def set_plot_param(fontsize: int = 14, figsize: Optional[int] = None, color_map: Optional[str] = None,\n facecolor: Optional[str] = None, transparent: bool = False,):\n if fontsize is not None:\n rcParams['font.size'] = fontsize\n if color_map is not None:\n rcParams['image.cmap'] = color_map\n if figsize is not None:\n rcParams['figure.figsize'] = figsize\n if facecolor is not None:\n rcParams['figure.facecolor'] = facecolor\n rcParams['axes.facecolor'] = facecolor\n if transparent is not None:\n rcParams[\"savefig.transparent\"] = transparent\n\n @staticmethod\n def set_rcparams_defaults():\n \"\"\"\n reset `matplotlib.rcParams` to defaults.\n :return:\n \"\"\"\n rcParams.update(rcParamsDefault)\n\n\nstereo_conf = StereoConfig()\n" }, { "alpha_fraction": 0.552388072013855, "alphanum_fraction": 0.5629907250404358, "avg_line_length": 33.834495544433594, "blob_id": "5cf9c6e4a72c0d946056b70efefaa0185bdaeffa", "content_id": "e13b730eda2fc60a16a1c52046d3725d072fd48c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20531, "license_type": "permissive", "max_line_length": 155, "num_lines": 574, "path": "/stereo/plots/plot_utils.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Shixu He [email protected]\n@last modified by: Shixu 
He\n@file:plot_utils.py\n@time:2021/03/15\n\"\"\"\n\nfrom anndata import AnnData\nimport pandas as pd\nimport numpy as np\nimport math\n\nfrom matplotlib.colors import Normalize, ListedColormap\nfrom matplotlib import gridspec\nfrom matplotlib.cm import get_cmap\nfrom matplotlib.axes import Axes\nimport matplotlib.pyplot as plt\nimport seaborn\n\nfrom ._plot_basic.scatter_plt import scatter, plot_cluster_result\nfrom ._plot_basic.heatmap_plt import heatmap, _plot_categories_as_colorblocks, _plot_gene_groups_brackets\n\nfrom typing import Optional, Sequence, Union\n\nfrom ..log_manager import logger\n\n\ndef plot_spatial_distribution(\n adata: AnnData,\n obs_key: list = [\"total_counts\", \"n_genes_by_counts\"],\n ncols=2,\n dot_size=None,\n color_list=None,\n invert_y=False\n): # scatter plot, 表达矩阵空间分布\n \"\"\"\n Plot spatial distribution of specified obs data.\n ============ Arguments ============\n :param adata: AnnData object.\n :param obs_key: specified obs key list, for example: [\"total_counts\", \"n_genes_by_counts\"]\n :param ncols: numbr of plot columns.\n :param dot_size: marker size.\n :param cmap: Color map.\n :param invert_y: whether to invert y-axis.\n ============ Return ============\n None\n ============ Example ============\n plot_spatial_distribution(adata=adata)\n \"\"\"\n # sc.pl.embedding(adata, basis=\"spatial\", color=[\"total_counts\", \"n_genes_by_counts\"],size=30)\n\n if dot_size is None:\n dot_size = 120000 / adata.shape[0]\n\n ncols = min(ncols, len(obs_key))\n nrows = np.ceil(len(obs_key) / ncols).astype(int)\n # each panel will have the size of rcParams['figure.figsize']\n fig = plt.figure(figsize=(ncols * 10, nrows * 8))\n left = 0.2 / ncols\n bottom = 0.13 / nrows\n axs = gridspec.GridSpec(\n nrows=nrows,\n ncols=ncols,\n left=left,\n right=1 - (ncols - 1) * left - 0.01 / ncols,\n bottom=bottom,\n top=1 - (nrows - 1) * bottom - 0.1 / nrows,\n # hspace=hspace,\n # wspace=wspace,\n )\n\n if color_list is None:\n cmap = get_cmap()\n 
else:\n cmap = ListedColormap(color_list)\n # 把特定值改为 np.nan 之后,可以利用 cmap.set_bad(\"white\") 来遮盖掉这部分数据\n\n # 散点图上每个点的坐标数据来自于 adata 的 obsm[\"spatial\"],每个点的颜色(数值)数据来自于 adata 的 obs_vector()\n for i, key in enumerate(obs_key):\n # color_data = np.asarray(adata.obs_vector(key), dtype=float)\n color_data = adata.obs_vector(key)\n order = np.argsort(~pd.isnull(color_data), kind=\"stable\")\n spatial_data = np.array(adata.obsm[\"spatial\"])[:, 0: 2]\n color_data = color_data[order]\n spatial_data = spatial_data[order, :]\n\n # color_data 是图像中各个点的值,也对应了每个点的颜色。data_points则对应了各个点的坐标\n ax = fig.add_subplot(axs[i]) # ax = plt.subplot(axs[i]) || ax = fig.add_subplot(axs[1, 1]))\n ax.set_title(key)\n ax.set_yticks([])\n ax.set_xticks([])\n ax.set_xlabel(\"spatial1\")\n ax.set_ylabel(\"spatial2\")\n pathcollection = scatter(\n spatial_data[:, 0],\n spatial_data[:, 1],\n ax=ax,\n marker=\".\",\n dot_colors=color_data,\n dot_size=dot_size,\n cmap=cmap,\n )\n plt.colorbar(\n pathcollection,\n ax=ax,\n pad=0.01,\n fraction=0.08,\n aspect=30,\n )\n ax.autoscale_view()\n if invert_y:\n ax.invert_yaxis()\n\n\ndef plot_spatial_cluster(\n adata: AnnData,\n obs_key: list = [\"phenograph\"],\n plot_cluster: list = None,\n bad_color=\"lightgrey\",\n ncols=2,\n dot_size=None,\n invert_y=False,\n color_list=['violet', 'turquoise', 'tomato', 'teal',\n 'tan', 'silver', 'sienna', 'red', 'purple',\n 'plum', 'pink', 'orchid', 'orangered', 'orange',\n 'olive', 'navy', 'maroon', 'magenta', 'lime',\n 'lightgreen', 'lightblue', 'lavender', 'khaki',\n 'indigo', 'grey', 'green', 'gold', 'fuchsia',\n 'darkgreen', 'darkblue', 'cyan', 'crimson', 'coral',\n 'chocolate', 'chartreuse', 'brown', 'blue', 'black',\n 'beige', 'azure', 'aquamarine', 'aqua']): # scatter plot, 聚类后表达矩阵空间分布\n \"\"\"\n Plot spatial distribution of specified obs data.\n ============ Arguments ============\n :param adata: AnnData object.\n :param obs_key: specified obs cluster key list, for example: [\"phenograph\"].\n :param plot_cluster: 
the name list of clusters to show.\n :param ncols: numbr of plot columns.\n :param dot_size: marker size.\n :param cmap: Color map.\n :param invert_y: whether to invert y-axis.\n ============ Return ============\n None.\n ============ Example ============\n plot_spatial_cluster(adata = adata)\n \"\"\"\n # sc.pl.embedding(adata, basis=\"spatial\", color=[\"total_counts\", \"n_genes_by_counts\"],size=30)\n\n if isinstance(obs_key, str):\n obs_key = [\"obs_key\"]\n\n plot_cluster_result(adata, obs_key=obs_key, pos_key=\"spatial\", plot_cluster=plot_cluster, bad_color=bad_color,\n ncols=ncols, dot_size=dot_size, invert_y=invert_y, color_list=color_list)\n\n\ndef plot_to_select_filter_value(\n adata: AnnData,\n x=[\"total_counts\", \"total_counts\"],\n y=[\"pct_counts_mt\", \"n_genes_by_counts\"],\n ncols=1,\n **kwargs): # scatter plot, 线粒体分布图\n \"\"\"\n Plot .\n ============ Arguments ============\n :param adata: AnnData object.\n :param x, y: obs key pairs for drawing. For example, assume x=[\"a\", \"a\", \"b\"] and y=[\"c\", \"d\", \"e\"], the output plots will include \"a-c\", \"a-d\", \"b-e\".\n ============ Return ============\n None.\n ============ Example ============\n plot_spatial_cluster(adata = adata)\n \"\"\"\n # sc.pl.scatter(adata, x='total_counts', y='pct_counts_mt')\n # sc.pl.scatter(adata, x='total_counts', y='n_genes_by_counts')\n if isinstance(x, str):\n x = [x]\n if isinstance(y, str):\n y = [y]\n\n width = 20\n height = 10\n nrows = math.ceil(len(x) / ncols)\n\n doc_color = \"gray\"\n\n fig = plt.figure(figsize=(width, height))\n axs = gridspec.GridSpec(\n nrows=nrows,\n ncols=ncols,\n )\n for i, (xi, yi) in enumerate(zip(x, y)):\n draw_data = np.c_[adata.obs_vector(xi), adata.obs_vector(yi)]\n dot_size = 120000 / draw_data.shape[0]\n ax = fig.add_subplot(axs[i])\n # ax.set_title()\n # ax.set_yticks([])\n # ax.set_xticks([])\n ax.set_xlabel(xi)\n ax.set_ylabel(yi)\n scatter(\n draw_data[:, 0],\n draw_data[:, 1],\n ax=ax,\n marker=\".\",\n 
dot_colors=doc_color,\n dot_size=dot_size\n )\n\n\ndef plot_variable_gene(adata: AnnData, logarize=False): # scatter plot, 表达量差异-均值图\n \"\"\"\n Copied from scanpy and modified.\n \"\"\"\n # 该图像需要前置数据处理:sc.pp.highly_variable_genes(adata, min_mean=0.0125, max_mean=3, min_disp=0.5)\n # 再画图:sc.pl.highly_variable_genes(adata)\n\n result = adata.var\n gene_subset = result.highly_variable\n means = result.means\n var_or_disp = result.dispersions\n var_or_disp_norm = result.dispersions_norm\n width = 10\n height = 10\n\n plt.figure(figsize=(2 * width, height))\n plt.subplots_adjust(wspace=0.3)\n for idx, d in enumerate([var_or_disp_norm, var_or_disp]):\n plt.subplot(1, 2, idx + 1)\n for label, color, mask in zip(\n ['highly variable genes', 'other genes'],\n ['black', 'grey'],\n [gene_subset, ~gene_subset],\n ):\n if False:\n means_, var_or_disps_ = np.log10(means[mask]), np.log10(d[mask])\n else:\n means_, var_or_disps_ = means[mask], d[mask]\n plt.scatter(means_, var_or_disps_, label=label, c=color, s=1)\n if logarize: # there's a bug in autoscale\n plt.xscale('log')\n plt.yscale('log')\n y_min = np.min(var_or_disp)\n y_min = 0.95 * y_min if y_min > 0 else 1e-1\n plt.xlim(0.95 * np.min(means), 1.05 * np.max(means))\n plt.ylim(y_min, 1.05 * np.max(var_or_disp))\n if idx == 0:\n plt.legend()\n plt.xlabel(('$log_{10}$ ' if False else '') + 'mean expressions of genes')\n data_type = 'dispersions'\n plt.ylabel(\n ('$log_{10}$ ' if False else '')\n + '{} of genes'.format(data_type)\n + (' (normalized)' if idx == 0 else ' (not normalized)')\n )\n\n\ndef plot_cluster_umap(\n adata: AnnData,\n obs_key: list = [\"phenograph\"],\n plot_cluster: list = None,\n bad_color=\"lightgrey\",\n ncols=2,\n dot_size=None,\n invert_y=False,\n color_list=['violet', 'turquoise', 'tomato', 'teal',\n 'tan', 'silver', 'sienna', 'red', 'purple',\n 'plum', 'pink', 'orchid', 'orangered', 'orange',\n 'olive', 'navy', 'maroon', 'magenta', 'lime',\n 'lightgreen', 'lightblue', 'lavender', 'khaki',\n 
'indigo', 'grey', 'green', 'gold', 'fuchsia',\n 'darkgreen', 'darkblue', 'cyan', 'crimson', 'coral',\n 'chocolate', 'chartreuse', 'brown', 'blue', 'black',\n 'beige', 'azure', 'aquamarine', 'aqua',\n ]\n): # scatter plot,聚类结果PCA/umap图\n \"\"\"\n Plot spatial distribution of specified obs data.\n ============ Arguments ============\n :param adata: AnnData object.\n :param obs_key: specified obs cluster key list, for example: [\"phenograph\"].\n :param plot_cluster: the name list of clusters to show.\n :param ncols: numbr of plot columns.\n :param dot_size: marker size.\n :param cmap: Color map.\n :param invert_y: whether to invert y-axis.\n ============ Return ============\n None.\n ============ Example ============\n plot_cluster_umap(adata = adata)\n \"\"\"\n\n if (isinstance(obs_key, str)):\n obs_key = [obs_key]\n\n plot_cluster_result(adata, obs_key=obs_key, pos_key=\"X_umap\", plot_cluster=plot_cluster, bad_color=bad_color,\n ncols=ncols, dot_size=dot_size, invert_y=invert_y, color_list=color_list)\n\n\ndef plot_expression_difference(\n adata: AnnData,\n groups: Union[str, Sequence[str]] = None,\n n_genes: int = 20,\n key: Optional[str] = 'rank_genes_groups',\n fontsize: int = 8,\n ncols: int = 4,\n sharey: bool = True,\n show: Optional[bool] = None,\n save: Optional[bool] = None,\n ax: Optional[Axes] = None,\n **kwds,\n): # scatter plot, 差异基因显著性图,类碎石图\n \"\"\"\n Copied from scanpy and modified.\n \"\"\"\n\n # 调整图像 panel/grid 相关参数\n if 'n_panels_per_row' in kwds:\n n_panels_per_row = kwds['n_panels_per_row']\n else:\n n_panels_per_row = ncols\n group_names = adata.uns[key]['names'].dtype.names if groups is None else groups\n # one panel for each group\n # set up the figure\n n_panels_x = min(n_panels_per_row, len(group_names))\n n_panels_y = np.ceil(len(group_names) / n_panels_x).astype(int)\n\n # 初始化图像\n width = 10\n height = 10\n fig = plt.figure(\n figsize=(\n n_panels_x * width, # rcParams['figure.figsize'][0],\n n_panels_y * height, # 
rcParams['figure.figsize'][1],\n )\n )\n gs = gridspec.GridSpec(nrows=n_panels_y, ncols=n_panels_x, wspace=0.22, hspace=0.3)\n\n ax0 = None\n ymin = np.Inf\n ymax = -np.Inf\n for count, group_name in enumerate(group_names):\n gene_names = adata.uns[key]['names'][group_name][:n_genes]\n scores = adata.uns[key]['scores'][group_name][:n_genes]\n\n # Setting up axis, calculating y bounds\n if sharey:\n ymin = min(ymin, np.min(scores))\n ymax = max(ymax, np.max(scores))\n\n if ax0 is None:\n ax = fig.add_subplot(gs[count])\n ax0 = ax\n else:\n ax = fig.add_subplot(gs[count], sharey=ax0)\n else:\n ymin = np.min(scores)\n ymax = np.max(scores)\n ymax += 0.3 * (ymax - ymin)\n\n ax = fig.add_subplot(gs[count])\n ax.set_ylim(ymin, ymax)\n\n ax.set_xlim(-0.9, n_genes - 0.1)\n\n # Making labels\n for ig, gene_name in enumerate(gene_names):\n ax.text(\n ig,\n scores[ig],\n gene_name,\n rotation='vertical',\n verticalalignment='bottom',\n horizontalalignment='center',\n fontsize=fontsize,\n )\n\n ax.set_title('{} vs. 
{}'.format(group_name, \"Others\"))\n if count >= n_panels_x * (n_panels_y - 1):\n ax.set_xlabel('ranking')\n\n # print the 'score' label only on the first panel per row.\n if count % n_panels_x == 0:\n ax.set_ylabel('score')\n\n if sharey is True:\n ymax += 0.3 * (ymax - ymin)\n ax.set_ylim(ymin, ymax)\n\n\ndef plot_violin_distribution(adata): # 小提琴统计图\n \"\"\"\n 绘制数据的分布小提琴图。\n ============ Arguments ============\n :param adata: AnnData object.\n ============ Return ============\n None\n \"\"\"\n _, axs = plt.subplots(1, 3, figsize=(15, 4))\n seaborn.violinplot(y=adata.obs['total_counts'], ax=axs[0])\n seaborn.violinplot(y=adata.obs['n_genes_by_counts'], ax=axs[1])\n seaborn.violinplot(y=adata.obs['pct_counts_mt'], ax=axs[2])\n\n\ndef plot_heatmap_maker_genes(\n adata: AnnData = None,\n cluster_method=\"phenograph\",\n marker_uns_key=None,\n num_show_gene=8,\n show_labels=True,\n order_cluster=True,\n marker_clusters=None,\n cluster_colors_array=None,\n **kwargs\n): # heatmap, 差异基因热图\n \"\"\"\n 绘制 Marker gene 的热图。热图中每一行代表一个 bin 的所有基因的表达量,所有的 bin 会根据所属的 cluster 进行聚集, cluster 具体展示在热图的左侧,用颜色区分。\n ============ Arguments ============\n :param adata: AnnData object.\n :param cluster_methpd: method used in clustering. 
for example: phenograph, leiden\n :param marker_uns_key: the key of adata.uns, the default value is \"marker_genes\"\n :param num_show_gene: number of genes to show in each cluster.\n :param show_labels: show gene name on axis.\n :param order_cluster: reorder the cluster list in plot (y axis).\n :param marker_clusters: the list of clusters to show on the heatmap.\n :param cluster_colors_array: the list of colors in the color block on the left of heatmap.\n ============ Return ============\n\n ============ Example ============\n plot_heatmap_maker_genes(adata=adata, marker_uns_key = \"rank_genes_groups\", figsize = (20, 10))\n \"\"\"\n\n if marker_uns_key is None:\n marker_uns_key = 'marker_genes' # \"rank_genes_groups\" in original scanpy pipeline\n\n # if cluster_method is None:\n # cluster_method = str(adata.uns[marker_uns_key]['params']['groupby'])\n\n # cluster_colors_array = adata.uns[\"phenograph_colors\"]\n\n if marker_clusters is None:\n marker_clusters = adata.uns[marker_uns_key]['names'].dtype.names\n\n if not set(marker_clusters).issubset(set(adata.uns[marker_uns_key]['names'].dtype.names)):\n marker_clusters = adata.uns[marker_uns_key]['names'].dtype.names\n\n gene_names_dict = {} # dict in which each cluster is the keyand the num_show_gene are the values\n\n for cluster in marker_clusters:\n # get all genes that are 'non-nan'\n genes_array = adata.uns[marker_uns_key]['names'][cluster]\n genes_array = genes_array[~pd.isnull(genes_array)]\n\n if len(genes_array) == 0:\n logger.warning(\"Cluster {} has no genes.\".format(cluster))\n continue\n gene_names_dict[cluster] = list(genes_array[:num_show_gene])\n\n adata._sanitize()\n\n gene_names = []\n gene_group_labels = []\n gene_group_positions = []\n start = 0\n for label, gene_list in gene_names_dict.items():\n if isinstance(gene_list, str):\n gene_list = [gene_list]\n gene_names.extend(list(gene_list))\n gene_group_labels.append(label)\n gene_group_positions.append((start, start + len(gene_list) - 1))\n 
start += len(gene_list)\n\n # 此处获取所有绘图所需的数据 (表达量矩阵)\n draw_df = pd.DataFrame(index=adata.obs_names)\n uniq_gene_names = np.unique(gene_names)\n draw_df = pd.concat(\n [draw_df, pd.DataFrame(adata.X[tuple([slice(None), adata.var.index.get_indexer(uniq_gene_names)])],\n columns=uniq_gene_names, index=adata.obs_names)],\n axis=1\n )\n\n # add obs values\n draw_df = pd.concat([draw_df, adata.obs[cluster_method]], axis=1)\n\n # reorder columns to given order (including duplicates keys if present)\n draw_df = draw_df[list([cluster_method]) + list(uniq_gene_names)]\n draw_df = draw_df[gene_names].set_index(draw_df[cluster_method].astype('category'))\n if order_cluster:\n draw_df = draw_df.sort_index()\n\n # From scanpy\n # define a layout of 2 rows x 3 columns\n # first row is for 'brackets' (if no brackets needed, the height of this row\n # is zero) second row is for main content. This second row is divided into\n # three axes:\n # first ax is for the categories defined by `cluster_method`\n # second ax is for the heatmap\n # fourth ax is for colorbar\n\n kwargs.setdefault(\"figsize\", (10, 10))\n kwargs.setdefault(\"colorbar_width\", 0.2)\n colorbar_width = kwargs.get(\"colorbar_width\")\n figsize = kwargs.get(\"figsize\")\n\n cluster_block_width = kwargs.setdefault(\"cluster_block_width\", 0.2) if order_cluster else 0\n if figsize is None:\n height = 6\n if show_labels:\n heatmap_width = len(gene_names) * 0.3\n else:\n heatmap_width = 8\n width = heatmap_width + cluster_block_width\n else:\n width, height = figsize\n heatmap_width = width - cluster_block_width\n\n if gene_group_positions is not None and len(gene_group_positions) > 0:\n # add some space in case 'brackets' want to be plotted on top of the image\n height_ratios = [0.15, height]\n else:\n height_ratios = [0, height]\n\n width_ratios = [\n cluster_block_width,\n heatmap_width,\n colorbar_width,\n ]\n\n fig = plt.figure(figsize=(width, height))\n\n axs = gridspec.GridSpec(\n nrows=2,\n ncols=3,\n 
width_ratios=width_ratios,\n wspace=0.15 / width,\n hspace=0.13 / height,\n height_ratios=height_ratios,\n )\n\n heatmap_ax = fig.add_subplot(axs[1, 1])\n\n width, height = fig.get_size_inches()\n max_cbar_height = 4.0\n if height > max_cbar_height:\n # to make the colorbar shorter, the\n # ax is split and the lower portion is used.\n axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 2],\n height_ratios=[height - max_cbar_height, max_cbar_height],\n )\n heatmap_cbar_ax = fig.add_subplot(axs2[1])\n else:\n heatmap_cbar_ax = fig.add_subplot(axs[1, 2])\n\n heatmap(df=draw_df, ax=heatmap_ax,\n norm=Normalize(vmin=None, vmax=None), plot_colorbar=True, colorbar_ax=heatmap_cbar_ax,\n show_labels=True, plot_hline=True)\n\n if order_cluster:\n _plot_categories_as_colorblocks(\n fig.add_subplot(axs[1, 0]), draw_df, colors=cluster_colors_array, orientation='left'\n )\n\n # plot cluster legends on top of heatmap_ax (if given)\n if gene_group_positions is not None and len(gene_group_positions) > 0:\n _plot_gene_groups_brackets(\n fig.add_subplot(axs[0, 1], sharex=heatmap_ax),\n group_positions=gene_group_positions,\n group_labels=gene_group_labels,\n rotation=None,\n left_adjustment=-0.3,\n right_adjustment=0.3,\n )\n\n # plt.savefig()\n" }, { "alpha_fraction": 0.5822505354881287, "alphanum_fraction": 0.5927923321723938, "avg_line_length": 32.99166488647461, "blob_id": "a8e3e35c6156f10d95e13736868dfb21b8d5ee67", "content_id": "3526ea0c74b7e911d1ee4dc7c4cc474d0dd5b8eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4087, "license_type": "permissive", "max_line_length": 117, "num_lines": 120, "path": "/stereo/tools/cluster.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:cluster.py\n@time:2021/03/19\n\"\"\"\nimport leidenalg as la\nfrom 
sklearn.decomposition import PCA\nfrom scipy.sparse import coo_matrix\nfrom sklearn.neighbors import NearestNeighbors\nimport igraph as ig\nimport numpy as np\nfrom umap.umap_ import fuzzy_simplicial_set\n\n\nclass Neighbors(object):\n def __init__(self, x, n_neighbors):\n self.x = x\n self.n_neighbors = n_neighbors\n\n def find_n_neighbors(self):\n nbrs = NearestNeighbors(n_neighbors=self.n_neighbors + 1, algorithm='ball_tree').fit(self.x)\n dists, indices = nbrs.kneighbors(self.x)\n nn_idx = indices[:, 1:]\n nn_dist = dists[:, 1:]\n return nn_idx, nn_dist\n\n def get_igraph_from_knn(self, nn_idx, nn_dist):\n j = nn_idx.ravel().astype(int)\n dist = nn_dist.ravel()\n i = np.repeat(np.arange(nn_idx.shape[0]), self.n_neighbors)\n\n vertex = list(range(nn_dist.shape[0]))\n edges = list(tuple(zip(i, j)))\n G = ig.Graph()\n G.add_vertices(vertex)\n G.add_edges(edges)\n G.es['weight'] = dist\n return G\n\n def get_parse_distances(self, nn_idx, nn_dist):\n n_obs = self.x.shape[0]\n rows = np.zeros((n_obs * self.n_neighbors), dtype=np.int64)\n cols = np.zeros((n_obs * self.n_neighbors), dtype=np.int64)\n vals = np.zeros((n_obs * self.n_neighbors), dtype=np.float64)\n\n for i in range(nn_idx.shape[0]):\n for j in range(self.n_neighbors):\n if nn_idx[i, j] == -1:\n continue # We didn't get the full knn for i\n if nn_idx[i, j] == i:\n val = 0.0\n else:\n val = nn_dist[i, j]\n\n rows[i * self.n_neighbors + j] = i\n cols[i * self.n_neighbors + j] = nn_idx[i, j]\n vals[i * self.n_neighbors + j] = val\n\n distances = coo_matrix((vals, (rows, cols)), shape=(n_obs, n_obs))\n distances.eliminate_zeros()\n return distances.tocsr()\n\n def get_connectivities(self, nn_idx, nn_dist):\n n_obs = self.x.shape[0]\n x = coo_matrix(([], ([], [])), shape=(n_obs, 1))\n connectivities = fuzzy_simplicial_set(x, self.n_neighbors, None, None, knn_indices=nn_idx, knn_dists=nn_dist,\n set_op_mix_ratio=1.0, local_connectivity=1.0)\n if isinstance(connectivities, tuple):\n connectivities = 
connectivities[0]\n return connectivities.tocsr()\n\n\ndef run_neighbors(x, neighbors=30):\n neighbor = Neighbors(x, neighbors)\n nn_idx, nn_dist = neighbor.find_n_neighbors()\n return neighbor, nn_idx, nn_dist\n\n\ndef run_louvain(x, neighbor, nn_idx, nn_dist):\n g = neighbor.get_igraph_from_knn(nn_idx, nn_dist)\n louvain_partition = g.community_multilevel(weights=g.es['weight'], return_levels=False)\n clusters = np.arange(x.shape[0])\n for i in range(len(louvain_partition)):\n clusters[louvain_partition[i]] = str(i)\n return clusters\n\n\ndef run_knn_leiden(x, neighbor, nn_idx, nn_dist, diff=1):\n g = neighbor.get_igraph_from_knn(nn_idx, nn_dist)\n optimiser = la.Optimiser()\n leiden_partition = la.ModularityVertexPartition(g, weights=g.es['weight'])\n while diff > 0:\n diff = optimiser.optimise_partition(leiden_partition, n_iterations=10)\n clusters = np.arange(x.shape[0])\n for i in range(len(leiden_partition)):\n clusters[leiden_partition[i]] = str(i)\n return clusters\n\n\ndef run_cluster(x, method='leiden', do_pca=True, n_pcs=30):\n \"\"\"\n :param x: np.array, shape: (m, n),m个bin, n为embedding\n :param method:\n :param do_pca:\n :param n_pcs:\n :return:\n \"\"\"\n if do_pca:\n pca_obj = PCA(n_components=n_pcs)\n x = pca_obj.fit_transform(x)\n\n neighbor, nn_idx, nn_dist = run_neighbors(x)\n if method == 'leiden':\n cluster = run_knn_leiden(x, neighbor, nn_idx, nn_dist)\n else:\n cluster = run_louvain(x, neighbor, nn_idx, nn_dist)\n return cluster\n" }, { "alpha_fraction": 0.5596755146980286, "alphanum_fraction": 0.5709463357925415, "avg_line_length": 36.02822494506836, "blob_id": "e7cc0ef7656786a8185b9d2de04832ad295a73f2", "content_id": "f68ab32909c095d79527096c45e06fecb5e95b9b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18936, "license_type": "permissive", "max_line_length": 158, "num_lines": 496, "path": "/stereo/plots/plots.py", "repo_name": "leying95/stereopy", "src_encoding": 
"UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:plots.py\n@time:2021/03/31\n\"\"\"\nfrom matplotlib.cm import get_cmap\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap, to_hex, Normalize\nfrom matplotlib import gridspec\nfrom ._plot_basic.get_stereo_data import get_cluster_res, get_reduce_x, get_position_array, get_degs_res\nimport numpy as np\nimport pandas as pd\nfrom anndata import AnnData\nfrom ._plot_basic.scatter_plt import scatter\nimport seaborn\nfrom typing import Optional, Sequence, Union\nfrom matplotlib.axes import Axes\nfrom ._plot_basic.heatmap_plt import heatmap, plot_categories_as_colorblocks, plot_gene_groups_brackets\nfrom ..log_manager import logger\nfrom scipy.sparse import issparse\n\n\ndef plot_spatial_cluster(\n adata: AnnData,\n obs_key: list = [\"phenograph\"],\n pos_key: str = \"spatial\",\n plot_cluster: list = None,\n bad_color: str = \"lightgrey\",\n ncols: int = 2,\n dot_size: int = None,\n color_list=['violet', 'turquoise', 'tomato', 'teal','tan', 'silver', 'sienna', 'red', 'purple', 'plum', 'pink',\n 'orchid', 'orangered', 'orange', 'olive', 'navy', 'maroon', 'magenta', 'lime',\n 'lightgreen', 'lightblue', 'lavender', 'khaki', 'indigo', 'grey', 'green', 'gold', 'fuchsia',\n 'darkgreen', 'darkblue', 'cyan', 'crimson', 'coral', 'chocolate', 'chartreuse', 'brown', 'blue', 'black',\n 'beige', 'azure', 'aquamarine', 'aqua',\n ],\n): # scatter plot, 聚类后表达矩阵空间分布\n \"\"\"\n Plot spatial distribution of specified obs data.\n ============ Arguments ============\n :param adata: AnnData object.\n :param obs_key: specified obs cluster key list, for example: [\"phenograph\"].\n :param pos_key: the coordinates of data points for scatter plots. the data points are stored in adata.obsm[pos_key]. 
choice: \"spatial\", \"X_umap\", \"X_pca\".\n :param plot_cluster: the name list of clusters to show.\n :param bad_color: the name list of clusters to show.\n :param ncols: numbr of plot columns.\n :param dot_size: marker size.\n :param color_list: whether to invert y-axis.\n ============ Return ============\n None.\n ============ Example ============\n plot_spatial_cluster(adata = adata)\n \"\"\"\n # sc.pl.embedding(adata, basis=\"spatial\", color=[\"total_counts\", \"n_genes_by_counts\"],size=30)\n\n if dot_size is None:\n dot_size = 120000 / adata.shape[0]\n\n ncols = min(ncols, len(obs_key))\n nrows = np.ceil(len(obs_key) / ncols).astype(int)\n # each panel will have the size of rcParams['figure.figsize']\n fig = plt.figure(figsize=(ncols * 10, nrows * 8))\n left = 0.2 / ncols\n bottom = 0.13 / nrows\n axs = gridspec.GridSpec(\n nrows=nrows,\n ncols=ncols,\n left=left,\n right=1 - (ncols - 1) * left - 0.01 / ncols,\n bottom=bottom,\n top=1 - (nrows - 1) * bottom - 0.1 / nrows,\n # hspace=hspace,\n # wspace=wspace,\n )\n\n if color_list is None:\n cmap = get_cmap()\n else:\n cmap = ListedColormap(color_list)\n cmap.set_bad(bad_color)\n # 把特定值改为 np.nan 之后,可以利用 cmap.set_bad(\"white\") 来遮盖掉这部分数据\n\n for i, key in enumerate(obs_key):\n # color_data = adata.obs_vector(key) # TODO replace by get_cluster_res\n color_data = get_cluster_res(adata, data_key=key)\n pc_logic = False\n\n # color_data = np.asarray(color_data_raw, dtype=float)\n order = np.argsort(~pd.isnull(color_data), kind=\"stable\")\n# spatial_data = np.array(adata.obsm[pos_key])[:, 0: 2]\n spatial_data = get_reduce_x(data=adata, data_key=pos_key)[:, 0:2] if pos_key != 'spatial' \\\n else get_position_array(adata, pos_key)\n color_data = color_data[order]\n spatial_data = spatial_data[order, :]\n\n color_dict = {}\n has_na = False\n if pd.api.types.is_categorical_dtype(color_data):\n pc_logic = True\n if plot_cluster is None:\n plot_cluster = list(color_data.categories)\n if pc_logic:\n cluster_n = 
len(np.unique(color_data))\n if len(color_list) < cluster_n:\n color_list = color_list * cluster_n\n cmap = ListedColormap(color_list)\n cmap.set_bad(bad_color)\n if len(color_data.categories) > len(plot_cluster):\n color_data = color_data.replace(color_data.categories.difference(plot_cluster), np.nan)\n has_na = True\n color_dict = {str(k): to_hex(v) for k, v in enumerate(color_list)}\n print(color_dict)\n color_data = color_data.map(color_dict)\n if pd.api.types.is_categorical_dtype(color_data):\n color_data = pd.Categorical(color_data)\n if has_na:\n color_data = color_data.add_categories([to_hex(bad_color)])\n color_data = color_data.fillna(to_hex(bad_color))\n # color_dict[\"NA\"]\n\n # color_data 是图像中各个点的值,也对应了每个点的颜色。data_points则对应了各个点的坐标\n ax = fig.add_subplot(axs[i]) # ax = plt.subplot(axs[i]) || ax = fig.add_subplot(axs[1, 1]))\n ax.set_title(key)\n ax.set_yticks([])\n ax.set_xticks([])\n ax.set_xlabel(\"spatial1\")\n ax.set_ylabel(\"spatial2\")\n pathcollection = scatter(\n spatial_data[:, 0],\n spatial_data[:, 1],\n ax=ax,\n marker=\".\",\n dot_colors=color_data,\n dot_size=dot_size\n )\n if pc_logic:\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.91, box.height])\n # -------------modified by [email protected]\n # valid_cate = color_data.categories\n # cat_num = len(adata.obs_vector(key).categories)\n # for label in adata.obs_vector(key).categories:\n categories = get_cluster_res(adata, data_key=key).categories\n cat_num = len(categories)\n for label in categories:\n # --------modified end------------------\n ax.scatter([], [], c=color_dict[label], label=label)\n ax.legend(\n frameon=False,\n loc='center left',\n bbox_to_anchor=(1, 0.5),\n ncol=(1 if cat_num <= 14 else 2 if cat_num <= 30 else 3),\n # fontsize=legend_fontsize,\n )\n else:\n plt.colorbar(pathcollection, ax=ax, pad=0.01, fraction=0.08, aspect=30)\n ax.autoscale_view()\n\n\ndef plot_violin_distribution(adata): # 小提琴统计图\n \"\"\"\n 绘制数据的分布小提琴图。\n ============ 
Arguments ============\n :param adata: AnnData object.\n ============ Return ============\n None\n \"\"\"\n _, axs = plt.subplots(1, 3, figsize=(15, 4))\n seaborn.violinplot(y=adata.obs['total_counts'], ax=axs[0])\n seaborn.violinplot(y=adata.obs['n_genes_by_counts'], ax=axs[1])\n seaborn.violinplot(y=adata.obs['pct_counts_mt'], ax=axs[2])\n\n\ndef plot_degs(\n adata: AnnData,\n groups: Union[str, Sequence[str]] = 'all',\n n_genes: int = 20,\n key: Optional[str] = 'find_marker',\n fontsize: int = 8,\n ncols: int = 4,\n sharey: bool = True,\n ax: Optional[Axes] = None,\n **kwds,\n): # scatter plot, 差异基因显著性图,类碎石图\n \"\"\"\n Copied from scanpy and modified.\n \"\"\"\n\n # 调整图像 panel/grid 相关参数\n if 'n_panels_per_row' in kwds:\n n_panels_per_row = kwds['n_panels_per_row']\n else:\n n_panels_per_row = ncols\n # group_names = adata.uns[key]['names'].dtype.names if groups is None else groups\n if groups == 'all':\n group_names = list(adata.uns[key].keys())\n else:\n group_names = [groups] if isinstance(groups, str) else groups\n # one panel for each group\n # set up the figure\n n_panels_x = min(n_panels_per_row, len(group_names))\n n_panels_y = np.ceil(len(group_names) / n_panels_x).astype(int)\n # 初始化图像\n width = 10\n height = 10\n fig = plt.figure(\n figsize=(\n n_panels_x * width, # rcParams['figure.figsize'][0],\n n_panels_y * height, # rcParams['figure.figsize'][1],\n )\n )\n gs = gridspec.GridSpec(nrows=n_panels_y, ncols=n_panels_x, wspace=0.22, hspace=0.3)\n\n ax0 = None\n ymin = np.Inf\n ymax = -np.Inf\n for count, group_name in enumerate(group_names):\n result = get_degs_res(adata, data_key=key, group_key=group_name, top_k=n_genes)\n gene_names = result.genes.values\n scores = result.scores.values\n # Setting up axis, calculating y bounds\n if sharey:\n ymin = min(ymin, np.min(scores))\n ymax = max(ymax, np.max(scores))\n\n if ax0 is None:\n ax = fig.add_subplot(gs[count])\n ax0 = ax\n else:\n ax = fig.add_subplot(gs[count], sharey=ax0)\n else:\n ymin = 
np.min(scores)\n ymax = np.max(scores)\n ymax += 0.3 * (ymax - ymin)\n\n ax = fig.add_subplot(gs[count])\n ax.set_ylim(ymin, ymax)\n\n ax.set_xlim(-0.9, n_genes - 0.1)\n\n # Making labels\n for ig, gene_name in enumerate(gene_names):\n ax.text(\n ig,\n scores[ig],\n gene_name,\n rotation='vertical',\n verticalalignment='bottom',\n horizontalalignment='center',\n fontsize=fontsize,\n )\n\n ax.set_title(group_name)\n if count >= n_panels_x * (n_panels_y - 1):\n ax.set_xlabel('ranking')\n\n # print the 'score' label only on the first panel per row.\n if count % n_panels_x == 0:\n ax.set_ylabel('score')\n\n if sharey is True:\n ymax += 0.3 * (ymax - ymin)\n ax.set_ylim(ymin, ymax)\n\n\ndef plot_spatial_distribution(\n adata: AnnData,\n obs_key: list = [\"total_counts\", \"n_genes_by_counts\"],\n ncols=2,\n dot_size=None,\n color_list=None,\n invert_y=False\n): # scatter plot, 表达矩阵空间分布\n \"\"\"\n Plot spatial distribution of specified obs data.\n ============ Arguments ============\n :param adata: AnnData object.\n :param obs_key: specified obs key list, for example: [\"total_counts\", \"n_genes_by_counts\"]\n :param ncols: numbr of plot columns.\n :param dot_size: marker size.\n :param color_list: Color list.\n :param invert_y: whether to invert y-axis.\n ============ Return ============\n None\n ============ Example ============\n plot_spatial_distribution(adata=adata)\n \"\"\"\n # sc.pl.embedding(adata, basis=\"spatial\", color=[\"total_counts\", \"n_genes_by_counts\"],size=30)\n\n if dot_size is None:\n dot_size = 120000 / adata.shape[0]\n\n ncols = min(ncols, len(obs_key))\n nrows = np.ceil(len(obs_key) / ncols).astype(int)\n # each panel will have the size of rcParams['figure.figsize']\n fig = plt.figure(figsize=(ncols * 10, nrows * 8))\n left = 0.2 / ncols\n bottom = 0.13 / nrows\n axs = gridspec.GridSpec(\n nrows=nrows,\n ncols=ncols,\n left=left,\n right=1 - (ncols - 1) * left - 0.01 / ncols,\n bottom=bottom,\n top=1 - (nrows - 1) * bottom - 0.1 / nrows,\n # 
hspace=hspace,\n # wspace=wspace,\n )\n\n if color_list is None:\n cmap = get_cmap()\n else:\n cmap = ListedColormap(color_list)\n # 把特定值改为 np.nan 之后,可以利用 cmap.set_bad(\"white\") 来遮盖掉这部分数据\n\n # 散点图上每个点的坐标数据来自于 adata 的 obsm[\"spatial\"],每个点的颜色(数值)数据来自于 adata 的 obs_vector()\n for i, key in enumerate(obs_key):\n # color_data = np.asarray(adata.obs_vector(key), dtype=float)\n color_data = adata.obs_vector(key)\n order = np.argsort(~pd.isnull(color_data), kind=\"stable\")\n spatial_data = np.array(adata.obsm[\"spatial\"])[:, 0: 2]\n color_data = color_data[order]\n spatial_data = spatial_data[order, :]\n\n # color_data 是图像中各个点的值,也对应了每个点的颜色。data_points则对应了各个点的坐标\n ax = fig.add_subplot(axs[i]) # ax = plt.subplot(axs[i]) || ax = fig.add_subplot(axs[1, 1]))\n ax.set_title(key)\n ax.set_yticks([])\n ax.set_xticks([])\n ax.set_xlabel(\"spatial1\")\n ax.set_ylabel(\"spatial2\")\n pathcollection = scatter(\n spatial_data[:, 0],\n spatial_data[:, 1],\n ax=ax,\n marker=\".\",\n dot_colors=color_data,\n dot_size=dot_size,\n cmap=cmap,\n )\n plt.colorbar(\n pathcollection,\n ax=ax,\n pad=0.01,\n fraction=0.08,\n aspect=30,\n )\n ax.autoscale_view()\n if invert_y:\n ax.invert_yaxis()\n\n\ndef plot_heatmap_maker_genes(\n adata: AnnData = None,\n cluster_method=\"phenograph\",\n marker_uns_key=None,\n num_show_gene=8,\n show_labels=True,\n order_cluster=True,\n marker_clusters=None,\n cluster_colors_array=None,\n **kwargs\n): # heatmap, 差异基因热图\n \"\"\"\n 绘制 Marker gene 的热图。热图中每一行代表一个 bin 的所有基因的表达量,所有的 bin 会根据所属的 cluster 进行聚集, cluster 具体展示在热图的左侧,用颜色区分。\n ============ Arguments ============\n :param adata: AnnData object.\n :param cluster_method: method used in clustering. 
for example: phenograph, leiden\n :param marker_uns_key: the key of adata.uns, the default value is \"marker_genes\"\n :param num_show_gene: number of genes to show in each cluster.\n :param show_labels: show gene name on axis.\n :param order_cluster: reorder the cluster list in plot (y axis).\n :param marker_clusters: the list of clusters to show on the heatmap.\n :param cluster_colors_array: the list of colors in the color block on the left of heatmap.\n ============ Return ============\n\n ============ Example ============\n plot_heatmap_maker_genes(adata=adata, marker_uns_key = \"rank_genes_groups\", figsize = (20, 10))\n \"\"\"\n if marker_uns_key is None:\n marker_uns_key = 'marker_genes' # \"rank_genes_groups\" in original scanpy pipeline\n marker_res = adata.uns[marker_uns_key]\n default_cluster = [i for i in marker_res.keys()]\n if marker_clusters is None:\n marker_clusters = default_cluster\n if not set(marker_clusters).issubset(set(default_cluster)):\n marker_clusters = default_cluster\n\n gene_names_dict = {} # dict in which each cluster is the keyand the num_show_gene are the values\n\n for cluster in marker_clusters:\n top_marker = marker_res[cluster].top_k_marker(top_k_genes=num_show_gene, sort_key='scores')\n genes_array = top_marker['genes'].values\n if len(genes_array) == 0:\n logger.warning(\"Cluster {} has no genes.\".format(cluster))\n continue\n gene_names_dict[cluster] = genes_array\n gene_names = []\n gene_group_labels = []\n gene_group_positions = []\n start = 0\n for label, gene_list in gene_names_dict.items():\n if isinstance(gene_list, str):\n gene_list = [gene_list]\n gene_names.extend(list(gene_list))\n gene_group_labels.append(label)\n gene_group_positions.append((start, start + len(gene_list) - 1))\n start += len(gene_list)\n\n # 此处获取所有绘图所需的数据 (表达量矩阵)\n uniq_gene_names = np.unique(gene_names)\n exp_matrix = adata.X.toarray() if issparse(adata.X) else adata.X\n draw_df = pd.DataFrame(exp_matrix[:, 
adata.var.index.get_indexer(uniq_gene_names)],\n columns=uniq_gene_names, index=adata.obs_names)\n # add obs values\n cluster_data = adata.uns[cluster_method].cluster.set_index('bins')\n draw_df = pd.concat([draw_df, cluster_data], axis=1)\n draw_df = draw_df[gene_names].set_index(draw_df['cluster'].astype('category'))\n if order_cluster:\n draw_df = draw_df.sort_index()\n kwargs.setdefault(\"figsize\", (10, 10))\n kwargs.setdefault(\"colorbar_width\", 0.2)\n colorbar_width = kwargs.get(\"colorbar_width\")\n figsize = kwargs.get(\"figsize\")\n\n cluster_block_width = kwargs.setdefault(\"cluster_block_width\", 0.2) if order_cluster else 0\n if figsize is None:\n height = 6\n if show_labels:\n heatmap_width = len(gene_names) * 0.3\n else:\n heatmap_width = 8\n width = heatmap_width + cluster_block_width\n else:\n width, height = figsize\n heatmap_width = width - cluster_block_width\n\n if gene_group_positions is not None and len(gene_group_positions) > 0:\n # add some space in case 'brackets' want to be plotted on top of the image\n height_ratios = [0.15, height]\n else:\n height_ratios = [0, height]\n\n width_ratios = [\n cluster_block_width,\n heatmap_width,\n colorbar_width,\n ]\n\n fig = plt.figure(figsize=(width, height))\n\n axs = gridspec.GridSpec(\n nrows=2,\n ncols=3,\n width_ratios=width_ratios,\n wspace=0.15 / width,\n hspace=0.13 / height,\n height_ratios=height_ratios,\n )\n\n heatmap_ax = fig.add_subplot(axs[1, 1])\n\n width, height = fig.get_size_inches()\n max_cbar_height = 4.0\n if height > max_cbar_height:\n # to make the colorbar shorter, the\n # ax is split and the lower portion is used.\n axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 2],\n height_ratios=[height - max_cbar_height, max_cbar_height],\n )\n heatmap_cbar_ax = fig.add_subplot(axs2[1])\n else:\n heatmap_cbar_ax = fig.add_subplot(axs[1, 2])\n\n heatmap(df=draw_df, ax=heatmap_ax,\n norm=Normalize(vmin=None, vmax=None), plot_colorbar=True, colorbar_ax=heatmap_cbar_ax,\n 
show_labels=True, plot_hline=True)\n\n if order_cluster:\n plot_categories_as_colorblocks(\n fig.add_subplot(axs[1, 0]), draw_df, colors=cluster_colors_array, orientation='left'\n )\n\n # plot cluster legends on top of heatmap_ax (if given)\n if gene_group_positions is not None and len(gene_group_positions) > 0:\n plot_gene_groups_brackets(\n fig.add_subplot(axs[0, 1], sharex=heatmap_ax),\n group_positions=gene_group_positions,\n group_labels=gene_group_labels,\n rotation=None,\n left_adjustment=-0.3,\n right_adjustment=0.3,\n )\n" }, { "alpha_fraction": 0.6450777053833008, "alphanum_fraction": 0.6709844470024109, "avg_line_length": 19.3157901763916, "blob_id": "e0b8821e51448972842b0596d7ba29c224772cd5", "content_id": "6e56426ee185d43f05f283d62f55808b97650c56", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "permissive", "max_line_length": 53, "num_lines": 19, "path": "/stereo/utils/__init__.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:__init__.py.py\n@time:2021/03/05\n\"\"\"\nimport shutil\nimport os\nfrom .correlation import pearson_corr, spearmanr_corr\nfrom .data_helper import select_group\n\n\ndef remove_file(path):\n if os.path.isfile(path):\n os.remove(path)\n if os.path.isdir(path):\n shutil.rmtree(path)\n" }, { "alpha_fraction": 0.6289620995521545, "alphanum_fraction": 0.6656308174133301, "avg_line_length": 31.836734771728516, "blob_id": "929008472630fdf635cba821106fb912cf41f0c5", "content_id": "d99b946e6893eb24287dce6f23965fb054385454", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1609, "license_type": "permissive", "max_line_length": 112, "num_lines": 49, "path": "/stereo/utils/correlation.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", 
"text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:correlation.py\n@time:2021/03/11\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\n\ndef pearson(arr1, arr2):\n \"\"\"\n calculate pearson correlation between two numpy arrays.\n :param arr1: one array, the feature is a column. the shape is `m * n`\n :param arr2: the other array, the feature is a column. the shape is `m * k`\n :return: a pearson score np.array , the shape is `k * n`\n \"\"\"\n assert arr1.shape[0] == arr2.shape[0]\n n = arr1.shape[0]\n sums = np.multiply.outer(arr2.sum(0), arr1.sum(0))\n stds = np.multiply.outer(arr2.std(0), arr1.std(0))\n return (arr2.T.dot(arr1) - sums / n) / stds / n\n\n\ndef pearson_corr(df1, df2):\n \"\"\"\n calculate pearson correlation between two dataframes.\n :param df1: one dataframe\n :param df2: the other dataframe\n :return: a pearson score dataframe, the index is the columns of `df1`, the columns is the columns of `df2`\n \"\"\"\n v1, v2 = df1.values, df2.values\n corr_matrix = pearson(v1, v2)\n return pd.DataFrame(corr_matrix, df2.columns, df1.columns)\n\n\ndef spearmanr_corr(df1, df2):\n \"\"\"\n calculate pearson correlation between two dataframes.\n :param df1: one dataframe\n :param df2: the other dataframe\n :return: a spearmanr score dataframe, the index is the columns of `df1`, the columns is the columns of `df2`\n \"\"\"\n score, pvalue = stats.spearmanr(df1.values, df2.values)\n score = score[df1.shape[1]:, 0:df1.shape[1]]\n return pd.DataFrame(score, df2.columns, df1.columns)\n" }, { "alpha_fraction": 0.5926466584205627, "alphanum_fraction": 0.5964400172233582, "avg_line_length": 35.07368469238281, "blob_id": "88d22d27b7f2d3e25e5c1a50cc04b09fd00b9a0c", "content_id": "dd127ef9b878183a451a018bf664e3130c5144eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3427, "license_type": 
"permissive", "max_line_length": 118, "num_lines": 95, "path": "/stereo/core/stereo_result.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:stereo_result.py\n@time:2021/03/18\n\"\"\"\nfrom typing import Optional\nimport numpy as np\nimport pandas as pd\nfrom ..log_manager import logger\n\n\nclass StereoResult(object):\n def __init__(self, name: str = 'stereo', param: Optional[dict] = None):\n self.name = name\n self.params = {} if param is None else param\n\n def update_params(self, v):\n self.params = v\n\n def __str__(self):\n class_info = f'{self.__class__.__name__} of {self.name}. \\n'\n class_info += f' params: {self.params}\\n'\n return class_info\n\n def __repr__(self):\n return self.__str__()\n\n\nclass DimReduceResult(StereoResult):\n def __init__(self, name: str = 'dim_reduce', param: Optional[dict] = None, x_reduce: Optional[np.ndarray] = None,\n variance_pca: Optional[np.ndarray] = None, variance_ratio: Optional[np.ndarray] = None,\n pcs: Optional[np.ndarray] = None):\n super(DimReduceResult, self).__init__(name, param)\n self.x_reduce = x_reduce\n self.variance_pca = variance_pca\n self.variance_ratio = variance_ratio\n self.pcs = pcs\n\n\nclass FindMarkerResult(StereoResult):\n def __init__(self, name: str = 'find_marker', param: Optional[dict] = None,\n degs_data: Optional[pd.DataFrame] = None):\n super(FindMarkerResult, self).__init__(name, param)\n self.degs_data = degs_data\n\n def __str__(self):\n info = super(FindMarkerResult, self).__str__()\n if self.degs_data is not None:\n info += f' result: a DataFrame which has `genes`,`pvalues`,`pvalues_adj`, `log2fc`, `score` columns.\\n'\n info += f' the shape is: {self.degs_data.shape}'\n return info\n\n def top_k_marker(self, top_k_genes=10, sort_key='pvalues', ascend=False):\n \"\"\"\n obtain the first k significantly different genes\n :param top_k_genes: 
the number of top k\n :param sort_key: sort by the column\n :param ascend: the ascend order of sorting.\n :return:\n \"\"\"\n if self.degs_data is not None:\n top_k_data = self.degs_data.sort_values(by=sort_key, ascending=ascend).head(top_k_genes)\n return top_k_data\n else:\n logger.warning('the result of degs is None, return None.')\n return None\n\n\nclass CellTypeResult(StereoResult):\n def __init__(self, name='cell_type_anno', param=None, anno_data=None):\n super(CellTypeResult, self).__init__(name=name, param=param)\n self.anno_data = anno_data\n\n def __str__(self):\n info = super(CellTypeResult, self).__str__()\n if self.anno_data is not None:\n info += f' result: a DataFrame which has `cells`,`cell type`,`corr_score` columns.\\n'\n info += f' the shape is: {self.anno_data.shape}'\n return info\n\n\nclass ClusterResult(StereoResult):\n def __init__(self, name='cluster', param=None, cluster_info=None):\n super(ClusterResult, self).__init__(name=name, param=param)\n self.cluster = cluster_info\n\n def __str__(self):\n info = super(ClusterResult, self).__str__()\n if self.cluster is not None:\n info += f' result: a DataFrame which has `cells`,`cluster` columns.\\n'\n info += f' the shape is: {self.cluster.shape}'\n return info\n" }, { "alpha_fraction": 0.5367646813392639, "alphanum_fraction": 0.6102941036224365, "avg_line_length": 16, "blob_id": "91821cce15c5c57d0d9f7879ebe521c065656519", "content_id": "726bfa3d8ddd4c38dd355f36eb882616ac52e353", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "permissive", "max_line_length": 25, "num_lines": 8, "path": "/stereo/core/__init__.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Ping Qiu [email protected]\n@last modified by: Ping Qiu\n@file:__init__.py.py\n@time:2021/03/17\n\"\"\"\n" }, { "alpha_fraction": 0.5546501278877258, "alphanum_fraction": 
0.5627103447914124, "avg_line_length": 31.63005828857422, "blob_id": "c3432564975ba70a1548330bb225b99d38d4b4bf", "content_id": "050413ffad7b852cc1e8d0feae37c28313c3be0a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11290, "license_type": "permissive", "max_line_length": 101, "num_lines": 346, "path": "/stereo/plots/_plot_basic/heatmap_plt.py", "repo_name": "leying95/stereopy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding: utf-8\n\"\"\"\n@author: Shixu He [email protected]\n@last modified by: Shixu He\n@file:heatmap_plt.py\n@time:2021/03/15\n\"\"\"\n\nfrom matplotlib.cm import get_cmap\nfrom matplotlib.axes import Axes\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import Normalize\nfrom matplotlib import gridspec\n\nfrom anndata import AnnData\nimport numpy as np\nimport pandas as pd\n\nfrom typing import List, Iterable, Sequence, Optional, Tuple\nfrom typing_extensions import Literal\n\nfrom ...log_manager import logger\n\n\ndef heatmap(df: pd.DataFrame = None, ax: Axes = None, cmap=None, norm=None, plot_colorbar=False,\n colorbar_ax: Axes = None, show_labels=True, plot_hline=False, **kwargs):\n \"\"\"\n :param df:\n :param ax:\n :param cmap:\n :param norm:\n :param plot_colorbar:\n :param colorbar_ax:\n :param show_labels:\n :param plot_hline:\n :param kwargs:\n :return:\n \"\"\"\n\n if norm == None:\n norm = Normalize(vmin=None, vmax=None)\n if (plot_colorbar and colorbar_ax == None):\n logger.warning(\"Colorbar ax is not provided.\")\n plot_colorbar = False\n\n kwargs.setdefault('interpolation', 'nearest')\n\n im = ax.imshow(df.values, aspect='auto', norm=norm, **kwargs)\n\n ax.set_ylim(df.shape[0] - 0.5, -0.5)\n ax.set_xlim(-0.5, df.shape[1] - 0.5)\n ax.tick_params(axis='y', left=False, labelleft=False)\n ax.set_ylabel('')\n ax.grid(False)\n\n if show_labels:\n ax.tick_params(axis='x', labelsize='small')\n ax.set_xticks(np.arange(df.shape[1]))\n 
ax.set_xticklabels(list(df.columns), rotation=90)\n else:\n ax.tick_params(axis='x', labelbottom=False, bottom=False)\n\n if plot_colorbar:\n plt.colorbar(im, cax=colorbar_ax)\n\n if plot_hline:\n line_coord = (\n np.cumsum(df.index.value_counts(sort=False))[:-1] - 0.5\n )\n ax.hlines(\n line_coord,\n -0.5,\n df.shape[1] - 0.5,\n lw=1,\n color='black',\n zorder=10,\n clip_on=False,\n )\n\n\ndef plot_categories_as_colorblocks(\n groupby_ax: Axes,\n obs_tidy: pd.DataFrame,\n colors=None,\n orientation: Literal['top', 'bottom', 'left', 'right'] = 'left',\n cmap_name: str = 'tab20',\n):\n \"\"\"from scanpy\"\"\"\n\n groupby = obs_tidy.index.name\n from matplotlib.colors import ListedColormap, BoundaryNorm\n\n if colors is None:\n groupby_cmap = plt.get_cmap(cmap_name)\n else:\n groupby_cmap = ListedColormap(colors, groupby + '_cmap')\n norm = BoundaryNorm(np.arange(groupby_cmap.N + 1) - 0.5, groupby_cmap.N)\n\n # determine groupby label positions such that they appear\n # centered next/below to the color code rectangle assigned to the category\n value_sum = 0\n ticks = [] # list of centered position of the labels\n labels = []\n label2code = {} # dictionary of numerical values asigned to each label\n for code, (label, value) in enumerate(\n obs_tidy.index.value_counts(sort=False).iteritems()\n ):\n ticks.append(value_sum + (value / 2))\n labels.append(label)\n value_sum += value\n label2code[label] = code\n\n groupby_ax.grid(False)\n\n if orientation == 'left':\n groupby_ax.imshow(\n np.array([[label2code[lab] for lab in obs_tidy.index]]).T,\n aspect='auto',\n cmap=groupby_cmap,\n norm=norm,\n )\n if len(labels) > 1:\n groupby_ax.set_yticks(ticks)\n groupby_ax.set_yticklabels(labels)\n\n # remove y ticks\n groupby_ax.tick_params(axis='y', left=False, labelsize='small')\n # remove x ticks and labels\n groupby_ax.tick_params(axis='x', bottom=False, labelbottom=False)\n\n # remove surrounding lines\n groupby_ax.spines['right'].set_visible(False)\n 
groupby_ax.spines['top'].set_visible(False)\n groupby_ax.spines['left'].set_visible(False)\n groupby_ax.spines['bottom'].set_visible(False)\n\n groupby_ax.set_ylabel(groupby)\n else:\n groupby_ax.imshow(\n np.array([[label2code[lab] for lab in obs_tidy.index]]),\n aspect='auto',\n cmap=groupby_cmap,\n norm=norm,\n )\n if len(labels) > 1:\n groupby_ax.set_xticks(ticks)\n if max([len(str(x)) for x in labels]) < 3:\n # if the labels are small do not rotate them\n rotation = 0\n else:\n rotation = 90\n groupby_ax.set_xticklabels(labels, rotation=rotation)\n\n # remove x ticks\n groupby_ax.tick_params(axis='x', bottom=False, labelsize='small')\n # remove y ticks and labels\n groupby_ax.tick_params(axis='y', left=False, labelleft=False)\n\n # remove surrounding lines\n groupby_ax.spines['right'].set_visible(False)\n groupby_ax.spines['top'].set_visible(False)\n groupby_ax.spines['left'].set_visible(False)\n groupby_ax.spines['bottom'].set_visible(False)\n\n groupby_ax.set_xlabel(groupby)\n\n return label2code, ticks, labels, groupby_cmap, norm\n\n\ndef plot_gene_groups_brackets(\n gene_groups_ax: Axes,\n group_positions: Iterable[Tuple[int, int]],\n group_labels: Sequence[str],\n left_adjustment: float = -0.3,\n right_adjustment: float = 0.3,\n rotation: Optional[float] = None,\n orientation: Literal['top', 'right'] = 'top',\n):\n \"\"\"from scanpy\"\"\"\n import matplotlib.patches as patches\n from matplotlib.path import Path\n\n # get the 'brackets' coordinates as lists of start and end positions\n\n left = [x[0] + left_adjustment for x in group_positions]\n right = [x[1] + right_adjustment for x in group_positions]\n\n # verts and codes are used by PathPatch to make the brackets\n verts = []\n codes = []\n if orientation == 'top':\n # rotate labels if any of them is longer than 4 characters\n if rotation is None and group_labels:\n if max([len(x) for x in group_labels]) > 4:\n rotation = 90\n else:\n rotation = 0\n for idx in range(len(left)):\n 
verts.append((left[idx], 0)) # lower-left\n verts.append((left[idx], 0.6)) # upper-left\n verts.append((right[idx], 0.6)) # upper-right\n verts.append((right[idx], 0)) # lower-right\n\n codes.append(Path.MOVETO)\n codes.append(Path.LINETO)\n codes.append(Path.LINETO)\n codes.append(Path.LINETO)\n\n try:\n group_x_center = left[idx] + float(right[idx] - left[idx]) / 2\n gene_groups_ax.text(\n group_x_center,\n 1.1,\n group_labels[idx],\n ha='center',\n va='bottom',\n rotation=rotation,\n )\n except:\n pass\n else:\n top = left\n bottom = right\n for idx in range(len(top)):\n verts.append((0, top[idx])) # upper-left\n verts.append((0.15, top[idx])) # upper-right\n verts.append((0.15, bottom[idx])) # lower-right\n verts.append((0, bottom[idx])) # lower-left\n\n codes.append(Path.MOVETO)\n codes.append(Path.LINETO)\n codes.append(Path.LINETO)\n codes.append(Path.LINETO)\n\n try:\n diff = bottom[idx] - top[idx]\n group_y_center = top[idx] + float(diff) / 2\n if diff * 2 < len(group_labels[idx]):\n # cut label to fit available space\n group_labels[idx] = group_labels[idx][: int(diff * 2)] + \".\"\n gene_groups_ax.text(\n 0.6,\n group_y_center,\n group_labels[idx],\n ha='right',\n va='center',\n rotation=270,\n fontsize='small',\n )\n except Exception as e:\n print('problems {}'.format(e))\n pass\n\n path = Path(verts, codes)\n\n patch = patches.PathPatch(path, facecolor='none', lw=1.5)\n\n gene_groups_ax.add_patch(patch)\n gene_groups_ax.grid(False)\n gene_groups_ax.axis('off')\n # remove y ticks\n gene_groups_ax.tick_params(axis='y', left=False, labelleft=False)\n # remove x ticks and labels\n gene_groups_ax.tick_params(\n axis='x', bottom=False, labelbottom=False, labeltop=False\n )\n\n\ndef _check_indices(\n dim_df: pd.DataFrame,\n alt_index: pd.Index,\n dim: \"Literal['obs', 'var']\",\n keys: List[str],\n alias_index: pd.Index = None,\n use_raw: bool = False,\n):\n \"\"\"from scanpy\"\"\"\n if use_raw:\n alt_repr = \"adata.raw\"\n else:\n alt_repr = \"adata\"\n\n 
alt_dim = (\"obs\", \"var\")[dim == \"obs\"]\n\n alias_name = None\n if alias_index is not None:\n alt_names = pd.Series(alt_index, index=alias_index)\n alias_name = alias_index.name\n alt_search_repr = f\"{alt_dim}['{alias_name}']\"\n else:\n alt_names = pd.Series(alt_index, index=alt_index)\n alt_search_repr = f\"{alt_dim}_names\"\n\n col_keys = []\n index_keys = []\n index_aliases = []\n not_found = []\n\n # check that adata.obs does not contain duplicated columns\n # if duplicated columns names are present, they will\n # be further duplicated when selecting them.\n if not dim_df.columns.is_unique:\n dup_cols = dim_df.columns[dim_df.columns.duplicated()].tolist()\n raise ValueError(\n f\"adata.{dim} contains duplicated columns. Please rename or remove \"\n \"these columns first.\\n`\"\n f\"Duplicated columns {dup_cols}\"\n )\n\n if not alt_index.is_unique:\n raise ValueError(\n f\"{alt_repr}.{alt_dim}_names contains duplicated items\\n\"\n f\"Please rename these {alt_dim} names first for example using \"\n f\"`adata.{alt_dim}_names_make_unique()`\"\n )\n\n # use only unique keys, otherwise duplicated keys will\n # further duplicate when reordering the keys later in the function\n for key in np.unique(keys):\n if key in dim_df.columns:\n col_keys.append(key)\n if key in alt_names.index:\n raise KeyError(\n f\"The key '{key}' is found in both adata.{dim} and {alt_repr}.{alt_search_repr}.\"\n )\n elif key in alt_names.index:\n val = alt_names[key]\n if isinstance(val, pd.Series):\n # while var_names must be unique, adata.var[gene_symbols] does not\n # It's still ambiguous to refer to a duplicated entry though.\n assert alias_index is not None\n raise KeyError(\n f\"Found duplicate entries for '{key}' in {alt_repr}.{alt_search_repr}.\"\n )\n index_keys.append(val)\n index_aliases.append(key)\n else:\n not_found.append(key)\n if len(not_found) > 0:\n raise KeyError(\n f\"Could not find keys '{not_found}' in columns of `adata.{dim}` or in\"\n f\" 
{alt_repr}.{alt_search_repr}.\"\n )\n\n return col_keys, index_keys, index_aliases\n" } ]
16
5l1v3r1/gg-1
https://github.com/5l1v3r1/gg-1
cc80c441841295c81fd02ec4e3fb946c5d7fd1d3
59d6c5e2f7cae749e5c2918f6e745ab17289c2c1
24ebc32fbefaab193b2d8ad66d3736981c932ced
refs/heads/master
2022-04-20T17:54:24.132367
2020-04-21T05:02:56
2020-04-21T05:02:56
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6551724076271057, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 13.5, "blob_id": "11abda5e3fa3b52ec2a707a788d639c69d9e2244", "content_id": "7a9e5be7a71c2582b80934cf4e939b10c82f1b10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "no_license", "max_line_length": 16, "num_lines": 2, "path": "/ty.py", "repo_name": "5l1v3r1/gg-1", "src_encoding": "UTF-8", "text": "print('HW')\nprint('TESTING')\n" } ]
1
vaibhavj08/Limitingservice
https://github.com/vaibhavj08/Limitingservice
d1592899789ead7524e71f4545d14f3ac26d2a03
b383103dee5efba59394f3403de206b7ab0f4121
7bec193bcf445e3cf959b8b6c56f261a71e2a1e5
refs/heads/master
2023-08-05T20:37:45.947169
2020-07-17T11:57:33
2020-07-17T11:57:33
277,577,260
1
0
null
2020-07-06T15:18:26
2020-07-17T12:01:01
2021-09-22T19:26:13
Python
[ { "alpha_fraction": 0.4639175236225128, "alphanum_fraction": 0.6907216310501099, "avg_line_length": 15.166666984558105, "blob_id": "a93c311ef955061e185e085c0e9bca17fc303c3a", "content_id": "2647df0b927943297ea236a6b9409a4469cbb284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 97, "license_type": "no_license", "max_line_length": 16, "num_lines": 6, "path": "/requirements.txt", "repo_name": "vaibhavj08/Limitingservice", "src_encoding": "UTF-8", "text": "Django==3.0.8\nrequests==2.24.0\ngunicorn==20.0.4\nfastapi==0.59.0\nratelimit==2.2.1\nuvicorn==0.11.5\n" }, { "alpha_fraction": 0.7213114500045776, "alphanum_fraction": 0.75, "avg_line_length": 17.769229888916016, "blob_id": "7e13d0158a745124e87ee31a1bd2c5f142e1cab3", "content_id": "b8cf1ab15854fee75f3cf9ace7e6d7ffa17ded31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "no_license", "max_line_length": 44, "num_lines": 13, "path": "/number.py", "repo_name": "vaibhavj08/Limitingservice", "src_encoding": "UTF-8", "text": "from fastapi import FastAPI\nimport random\nfrom ratelimit import limits,sleep_and_retry\n\none_minute=60\napp = FastAPI()\n\n\[email protected]('/get_number')\n@sleep_and_retry\n@limits(calls=5,period=one_minute)\ndef numbers():\n return random.randint(1,100)\n" }, { "alpha_fraction": 0.6482494473457336, "alphanum_fraction": 0.662472665309906, "avg_line_length": 27.13846206665039, "blob_id": "5bc4cd0ffd6e51c47516cc76f59a953de3f5616b", "content_id": "4e8cb2cd19f978568fe87ba60c69fce253166c9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1828, "license_type": "no_license", "max_line_length": 93, "num_lines": 65, "path": "/api_call/views.py", "repo_name": "vaibhavj08/Limitingservice", "src_encoding": "UTF-8", "text": "from django.shortcuts import render,redirect\nfrom django.http import 
HttpResponse\nimport requests\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\n\n\n# Create your views here.\n#function to signup\ndef signup(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = User.objects.create_user(username = username,password = password)\n user.save()\n return redirect('/')\n else:\n return render(request,'home.html')\n\n#function to login\ndef login(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = auth.authenticate(username =username,password = password)\n\n if user is not None:\n auth.login(request,user)\n return redirect('/api_call')\n else:\n return HttpResponse(\"<h1>403 Forbidden</h1><h3>Wrong credential</h3>\")\n else:\n return render(request,'login.html')\n\n\ncount = 0\nhour_request_count=300\n\n#function to call the api\ndef api_call(request):\n if request.user.is_anonymous:\n return redirect('/')\n global count\n\n url = 'http://127.0.0.1:8000/get_number'\n try:\n response = requests.get(url,timeout=2)\n count = count+1\n print('count:',count)\n\n except requests.exceptions.ReadTimeout:\n return render(request,'api.html',{'response':'403 Call limit exausted for a minute'})\n\n return render(request,'api.html',{'response':response.text})\n\n#function to logout\ndef logout(request):\n auth.logout(request)\n return redirect('/')\n\n\n#function to check the remaining calls in an hour\ndef remaining_call(request):\n r_call = hour_request_count-count\n return HttpResponse(r_call)" }, { "alpha_fraction": 0.7444444298744202, "alphanum_fraction": 0.7444444298744202, "avg_line_length": 17, "blob_id": "d6bca9261bc7479b1cdbace49151a8517ae11ff1", "content_id": "b1d9880dd5385fa031b513be50e878336d776bc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "no_license", 
"max_line_length": 33, "num_lines": 5, "path": "/api_call/apps.py", "repo_name": "vaibhavj08/Limitingservice", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass ApiCallConfig(AppConfig):\n name = 'api_call'\n" }, { "alpha_fraction": 0.7361111044883728, "alphanum_fraction": 0.7731481194496155, "avg_line_length": 53, "blob_id": "3b2ebfa41a153b9a0751b3f8dbcf18efd1f5a6ce", "content_id": "be433755714a87dd19ca723d1b9fdec8fdf9f38c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 216, "license_type": "no_license", "max_line_length": 101, "num_lines": 4, "path": "/README.txt", "repo_name": "vaibhavj08/Limitingservice", "src_encoding": "UTF-8", "text": "# Limitingservice\n#run \"uvicorn number:app --reload\" and \"python manage.py runserver 8001\" in different terminal\n\nRUN pip install -r requirements.txt && uvicorn number:app --reload && python manage.py runserver 8001\n" }, { "alpha_fraction": 0.6545960903167725, "alphanum_fraction": 0.6545960903167725, "avg_line_length": 35, "blob_id": "ab7d7fcaf3360e245d01f1241988e3d15d4fefde", "content_id": "592f7fad370acaf7a23b82ab0fe5c73594a23c13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 83, "num_lines": 10, "path": "/api_call/urls.py", "repo_name": "vaibhavj08/Limitingservice", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.login,name = 'login'),\n path('signup/', views.signup,name = 'signup'),\n path('api_call/', views.api_call,name = 'api_call'),\n path('api_call/remaining_call/', views.remaining_call,name = 'remaining_call'),\n path('api_call/logout/',views.logout,name = 'logout')\n]" } ]
6
abdulazeeznaji/funny-learn
https://github.com/abdulazeeznaji/funny-learn
c66b38e94b02f9b1b191d460ff150708ff21bddb
35ba422bd83a69e4ec16b1edf714d45c06af3d0c
2aac3e68c7b084a956c0045bbad0062f612a717e
refs/heads/master
2018-02-08T04:59:07.083135
2017-07-10T15:49:37
2017-07-10T15:49:37
96,327,213
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7755555510520935, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 25.352941513061523, "blob_id": "90a133788ded10d3e3deaf8521f3633e403f332d", "content_id": "720ab063eab3a65269e0b259f3a30b1acb056c90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 45, "num_lines": 17, "path": "/myapp/blog/views.py", "repo_name": "abdulazeeznaji/funny-learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\n\n# Create your views here.\n\nfrom blog.models import Blog\nfrom rest_framework import generics, viewsets\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom blog.serializers import BlogSerializer\n \n \nclass Blogs(viewsets.ModelViewSet):\n queryset = Blog.objects.all()\n serializer_class = BlogSerializer\n\n\n" }, { "alpha_fraction": 0.6887755393981934, "alphanum_fraction": 0.6989796161651611, "avg_line_length": 25.200000762939453, "blob_id": "d0b5d45c1a62b212a496551a26d1af21bb36d7ed", "content_id": "fbe918cf218acb973c4290aae6b1c636a877a9a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "no_license", "max_line_length": 81, "num_lines": 15, "path": "/myapp/blog/models.py", "repo_name": "abdulazeeznaji/funny-learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom users.models import User\n \n \nclass Blog(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n name = models.CharField(max_length=100, unique=True, blank=False, null=False)\n isComplete = models.BooleanField(default=False)\n\n \n class Meta:\n ordering = ('created',)" }, { "alpha_fraction": 0.5142857432365417, "alphanum_fraction": 0.5857142806053162, 
"avg_line_length": 16.625, "blob_id": "ff67e4313ae04304520ebe9ae02857346abb26e3", "content_id": "cdc2331ad9f80dbafc2b708142b6ca56f8f01f17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 140, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/myapp/src/lib/services.js", "repo_name": "abdulazeeznaji/funny-learn", "src_encoding": "UTF-8", "text": "const baseUrl = 'http://127.0.0.1:8000/blogs/'\n\n\nexport const getBlogs = () =>{\n return fetch(baseUrl)\n .then(res => res.json() )\n\n}" }, { "alpha_fraction": 0.47457626461982727, "alphanum_fraction": 0.47457626461982727, "avg_line_length": 12.354838371276855, "blob_id": "5ae00bd08c6806d020662cdf855bbf8db7fb839c", "content_id": "c3760d27b6b76c04915deec088e3583aad7f85d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 413, "license_type": "no_license", "max_line_length": 46, "num_lines": 31, "path": "/myapp/src/Main.js", "repo_name": "abdulazeeznaji/funny-learn", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport {getBlogs} from './lib/services'\n\n\n\n class Main extends React.Component{\n constructor(){\n super()\n this.state = {\n blogs:[]\n }\n }\n componentWillMount(){\n getBlogs()\n .then(blogs => this.setState({blogs}))\n }\n\n render(){\n return (\n <div>\n </div>\n\n )\n\n\n }\n\n}\n\n\nmodule.exports = Main" }, { "alpha_fraction": 0.6880733966827393, "alphanum_fraction": 0.6880733966827393, "avg_line_length": 23.22222137451172, "blob_id": "4a47f3502938fece2a1c70101e2ec444e8ccc447", "content_id": "632f6f379df3c86118104b13654e3c0d080cd368", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 56, "num_lines": 9, "path": "/myapp/blog/serializers.py", "repo_name": "abdulazeeznaji/funny-learn", "src_encoding": "UTF-8", "text": "from 
rest_framework import serializers\n \nfrom blog.models import Blog\n \nclass BlogSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Blog\n fields = ('id', 'created', 'name', 'isComplete')\n" }, { "alpha_fraction": 0.7341463565826416, "alphanum_fraction": 0.7390244007110596, "avg_line_length": 33.16666793823242, "blob_id": "e371d03bbfd08f43e6eaaec7ca666f6b95733329", "content_id": "18d2f61b8d386ce8d01f5c55506bf57d1bf79083", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 410, "license_type": "no_license", "max_line_length": 86, "num_lines": 12, "path": "/myapp/blog/urls.py", "repo_name": "abdulazeeznaji/funny-learn", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\nfrom rest_framework import routers\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom blog import views\n\napi_router = routers.DefaultRouter()\napi_router.register(r'blogs', views.Blogs, base_name=\"blogs\")\n#api_router.register(r'^blogs/(?P<pk>[0-9]+)/$', views.Blogs, base_name=\"blog-detail\")\n \nurlpatterns = [\n url(r'^', include(api_router.urls)),\n]\n" } ]
6
owenvvv/Steam_helper
https://github.com/owenvvv/Steam_helper
97129ddc348bb699349b2dbbae9f1ddc80342bcc
7af451bf067a079ff271a0608f69f85cf0fa7f64
99a2e7ed0f8bf12db72824cfc416d79b122d8193
refs/heads/master
2021-05-18T19:14:33.166248
2020-03-31T02:59:46
2020-03-31T03:02:28
251,373,679
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6763636469841003, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 38.35714340209961, "blob_id": "070cf9b39620a91a425864d158114dc1bb2ca743", "content_id": "6eaed83edd4e2e6164d29aae6d37ce4830b811e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 550, "license_type": "no_license", "max_line_length": 122, "num_lines": 14, "path": "/steam-scraper/test.py", "repo_name": "owenvvv/Steam_helper", "src_encoding": "UTF-8", "text": "from scrapy.loader.processors import Compose, Join, MapCompose, TakeFirst\nimport pandas as pd\n\"\"\"\npipi = Compose(lambda x: x[0], str.upper)\nprint(pipi(['iss', 'nus', 'mtech', 'ebac']))\n\npipi = MapCompose(lambda x: x[0], str.upper)\nprint(pipi(['iss', 'nus', 'mtech', 'ebac']))\n\"\"\"\nsteam_id = pd.read_csv(\"D:\\\\NUS BA\\\\class\\\\nlp\\\\Project\\\\steam-scraper-master\\\\steam\\\\spiders\\\\steam_id.csv\", header=None)\nsteam_id = list(steam_id.iloc[:,0])\nprint(len(steam_id))\nprint(len(list(set(steam_id))))\n#steam_id .to_csv(\"steam_id.csv\",header=False,index=False)" }, { "alpha_fraction": 0.6060332655906677, "alphanum_fraction": 0.6141618490219116, "avg_line_length": 36.154361724853516, "blob_id": "85cf4caf09664ac2ea9b2dc2871cf2bb4e4edebe", "content_id": "d8eddcdf22b48569ec90d899ceefb2974130786e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5536, "license_type": "no_license", "max_line_length": 116, "num_lines": 149, "path": "/recommendegine.py", "repo_name": "owenvvv/Steam_helper", "src_encoding": "UTF-8", "text": "import pickle as pk\nimport pandas as pd\nimport pandas as pd\nimport numpy as np\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk import pos_tag\nfrom pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM\nfrom scipy.spatial.distance import cosine\nimport torch\n\nmystopwords = 
stopwords.words(\"English\") + ['game', 'play', 'steam']\nWNlemma = nltk.WordNetLemmatizer()\nnn = ['NN', 'NNS', 'NNP', 'NNPS', 'CD']\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmodel = BertModel.from_pretrained('bert-base-uncased')\nmodel.eval()\n\nDoc2vec = pk.load(open('./data/des2vec.pkl', 'rb'))\nAspect = pd.read_csv('./data/Ratewithaspect.csv', index_col=0)\nAspect = Aspect.reset_index()\nTagSmall = pd.read_csv('./data/Tagsmall.csv')\nDatasmall = pd.read_csv('./data/steam_small.csv', index_col=0)\ndescrip1 = pk.load(open('./data/short_descrip.pkl', 'rb'))\nkeywords = pd.read_excel('./data/keywords.xlsx')\n\nkeywords_class = {'Gameplay': list(keywords[keywords['Gameplay'].isnull() == False]['Gameplay']),\n 'Market': list(keywords[keywords['Market'].isnull() == False]['Market']),\n 'Narrative': list(keywords[keywords['Narrative'].isnull() == False]['Narrative']),\n 'Social': list(keywords[keywords['Social'].isnull() == False]['Social']),\n 'Graphics': list(keywords[keywords['Graphics'].isnull() == False]['Graphics']),\n 'Technical': list(keywords[keywords['Technical'].isnull() == False]['Technical']),\n 'Audio': list(keywords[keywords['Audio'].isnull() == False]['Audio']),\n 'Content': list(keywords[keywords['Content'].isnull() == False]['Content'])}\nTagnames = []\nDatasmall['avgscore'] = Datasmall.apply(\n lambda row: row.positive_ratings / (row.positive_ratings + row.negative_ratings), axis=1)\n\napplist = Datasmall['appid']\nfor tag in list(TagSmall.columns):\n Tagnames.append(tag.replace('_', ' '))\n\n\ndef recommend(query, tags):\n query = query.lower()\n #print(query)\n selectaspect = []\n for key in keywords_class.keys():\n for word in keywords_class[key]:\n if word.lower() in query.split(' '):\n selectaspect.append(key)\n print(key)\n\n genre = tags.get('genre')\n for g in genre:\n query=query + ' '+ str(g)\n characters = tags.get('characters')\n for c in characters:\n query = query + ' '+ str(c)\n print(query)\n selecttag = []\n 
for tags in Tagnames:\n if tags in query:\n selecttag.append(tags)\n print(tags)\n status = []\n finalids = applist\n if len(selecttag) > 0:\n for tag in selecttag:\n finalids = TagSmall[(TagSmall[tag.replace(' ', '_')] > 5) & (TagSmall['appid'].isin(finalids))]['appid']\n else:\n finalids = []\n\n # 1 dont have aspect\n # 2 have aspect\n # 3 dont match\n\n if len(finalids) > 5:\n if len(selectaspect) == 0:\n status.append(1)\n status.append(selecttag[0])\n return list(\n Datasmall[Datasmall['appid'].isin(finalids)].sort_values('avgscore', ascending=False)['appid'][\n 0:5]), status\n else:\n status.append(2)\n status.append(selecttag[0])\n status.append(selectaspect[0])\n return list(\n Aspect[Aspect['gameid'].isin(finalids)].sort_values(selectaspect[0], ascending=False)['gameid'][\n 0:5]), status\n else:\n status.append(3)\n gameids = recomend_by_keyword(demand=query, dataframe=descrip1, n=5)\n if gameids!= '':\n return gameids,status\n return list(recomend_by_description(demand=query, dataframe=Doc2vec, n=5)), status\n\n\ndef recomend_by_description(demand, dataframe, n):\n print('use similar result')\n marked_text = \"[CLS] \" + demand + \" [SEP]\"\n tokenized_text = tokenizer.tokenize(marked_text)\n if len(tokenized_text) > 512:\n tokenized_text = tokenized_text[:512]\n indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n segments_ids = [1] * len(tokenized_text)\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segments_ids])\n with torch.no_grad():\n encoded_layers, _ = model(tokens_tensor, segments_tensors)\n token_vecs = encoded_layers[11][0]\n sentence_embedding = torch.mean(token_vecs, dim=0)\n cos = []\n for i in range(len(dataframe)):\n tmp = cosine(sentence_embedding, dataframe.iloc[i][1])\n cos.append(tmp)\n dataframe['cos'] = cos\n dataframe.sort_values(by=['cos'], inplace=True, ascending=False, )\n return dataframe[:n]['appid'].values\n\n\ndef pre_process(text):\n try:\n tokens = 
nltk.word_tokenize(text)\n tokens = [t[0] for t in pos_tag(tokens) if t[1] in nn]\n tokens = [WNlemma.lemmatize(t.lower()) for t in tokens]\n tokens = [t for t in tokens if t not in mystopwords]\n return tokens\n except Exception:\n return ('')\n\n\ndef recomend_by_keyword(demand, dataframe, n):\n demand = list(set(pre_process(demand)))\n nums = []\n for i in range(len(dataframe)):\n num = 0\n for j in range(len(demand)):\n num += dataframe.iloc[i][1].count(demand[j])\n nums.append(num)\n dataframe['nums'] = nums\n dataframe.sort_values(by=['nums'], ascending=False, inplace=True)\n if dataframe.iloc[n]['nums'] != 0:\n return list(dataframe[:n]['appid'])\n else:\n return ''\n" }, { "alpha_fraction": 0.5998193025588989, "alphanum_fraction": 0.6097561120986938, "avg_line_length": 23.511110305786133, "blob_id": "522b7e2c3c44750c89e8ad52e066c0cec744952d", "content_id": "ba0fae03291061eec5d2a4bb2ae61f76783c02da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1107, "license_type": "no_license", "max_line_length": 86, "num_lines": 45, "path": "/main.py", "repo_name": "owenvvv/Steam_helper", "src_encoding": "UTF-8", "text": "import json\n\nfrom flask import Flask, request,render_template\nfrom geventwebsocket.handler import WebSocketHandler\nfrom gevent.pywsgi import WSGIServer\nimport intention\n\n\nhelper_session = []\n\n\napp = Flask(__name__)\n\n\[email protected]('/msg')\ndef msg():\n global helper_session\n\t# \n user_socker = request.environ.get('wsgi.websocket')\n # \n while 1:\n \t# \n msg = user_socker.receive()\n result={}\n result['message']=msg\n print(msg)\n\n r_text, new_session = intention.response(result, helper_session)\n # If only one sentence return, change it into a list.\n r_text_return=[]\n if not isinstance(r_text, list):\n r_text_return.append(r_text)\n else:\n r_text_return=r_text\n\n helper_session.extend(new_session)\n\t\t# Packed in a dict\n res = {\"msg\" : r_text_return}\n # Sent to 
client\n user_socker.send(json.dumps(res))\n \nif __name__ == '__main__':\t\n http_server = WSGIServer(('127.0.0.1', 5000), app, handler_class=WebSocketHandler)\n # Start Listening:\n http_server.serve_forever()\n " }, { "alpha_fraction": 0.7573735117912292, "alphanum_fraction": 0.7696447968482971, "avg_line_length": 62.6301383972168, "blob_id": "11804dd9a9c2840722082ded5a8339069a46004c", "content_id": "0b92f979c9abad125e28886eec9a29975eaa6357", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4647, "license_type": "no_license", "max_line_length": 879, "num_lines": 73, "path": "/README.md", "repo_name": "owenvvv/Steam_helper", "src_encoding": "UTF-8", "text": "# SECTION 1 : PROJECT TITLE\n### Steam Helper\n\n# SECTION 2 : EXECUTIVE SUMMARY / PAPER ABSTRACT\nSteam Helper is a Chatbot that can recommend steam games for users in two ways. For users who have forgotten the game name, the helper will find out the most similar games according to the paragraphs they provided. For users who are looking for new games, the helper will recommend games according to their preferences in category, price, age and others.\n\nThere are two parts in the web chat interface based on Flask Socket, Language Understanding and Language Generation. In the first part, we applied Support Vector Machines to detect user intentions and Conditional Random Field to extract the key aspect of the user requirements. If the user is asking for recommendation or matching, the aspects or the game description will be passed to the backend recommendation engine. The response for other types of intentions will be collected from the preset corpus following a set of rules.\n\nThe recommend engine provides three solutions in different levels. In the document level, we classified untagged user reviews into two general categories (recommended and unrecommended). 
Normally, each review has 10 sentences, although the tone of the entire document is positive, there are still negative complaints about certain aspects in some sentences. Therefore, in the sentence level, Entity & Aspect Mining and Sentiment Mining will be used to further discover the user’s attitudes toward different perspectives of the game. These sentiments will be quantified as scores for evaluating games. The games with high rating in the aspects emphasized by user will be recommended. If user a paragraph of game description for matching, in this level, deep learning model will be used for sentence embedding. The recommended games will be those with the highest similarity score.\n\n\n# SECTION 3 : CREDITS / PROJECT CONTRIBUTION\n| Official Full Name | Student ID (MTech Applicable)| Work Items (Who Did What) | Email (Optional) |\n| :---: | :---: | :---: | :---: |\n| Guanlan Jiang | A0198451W | Language Understanding and Generation | [email protected] |\n| Li Tiancheng | A0198530Y | Recommendation Engine | [email protected] |\n| Ng Siew Pheng | A0198525R | Language Understanding and Generation | [email protected] |\n| Ruowen Li | A0198423X | UI, Recommendation Engine | [email protected] |\n\n\n# SECTION 4 : VIDEO OF SYSTEM MODELLING & USE CASE DEMO\nhttps://drive.google.com/file/d/1Vb8sBB_PotAl4JL2sbHBVcyx84CDhVRv/view\n\n# SECTION 5 : INSTRUCTION GUIDE\n\n## Conda Environment\nTo set up the environment\n1. git clone https://github.com/owenvvv/Steam_helper.git\n2. cd PLP-CA10-Steam_Helper\n3. conda env create --file environment.yaml\n4. conda activate Steam_Helper\n5. python -m spacy download en_core_web_sm\n6. python -m nltk.downloader \n7. conda install pytorch -c pytorch\n\n## Start Chatbot\n1. python main.py \n2. 
Use index.html in Front-end folder to chat with chatbox\n# SECTION 6 : Models\n\n## Intent Detection\n\n### Data\nFilename: Code/data/intent_queries.json\n- This fields contains a list of questions (column - Query) with its intent label (column - Intent)\n- A total of 13 intents namely:\n\t- commonQ (.how, .assist, .name) > general chitchat questions to the chatbot\n\t- recommend (including .price, .age) > ask chatbot to recommend games. .price and .age is to detect user asking for aspect of the game recommended\n\t- response.abusive > abusive comments from user. this will trigger a positive response\n\t- response.negative > negative sentiment feedback from user about the chat\n\t\n- If new questions with its intent label is added, we need to re-train the intent detection model. \n\n### Train Model\n- Please use model/build_intent_model_json.ipynb to train the model\n- The model will be saved to intent_SGDClassifier_v2.pkl file to be used by the chatbot app.\n\n## Slot Detection\n\n### Data\nFilename: Code/data/intent_queries.json\n- This fields contains a list of questions (column - Query) with its intent label (column - Intent)\n- A total of 9 intents namely:\n\t- commonQ (.how, .assist, .name) > general chitchat questions to the chatbot\n\t- recommend (including .price, .age) > ask chatbot to recommend games. .price and .age is to detect user asking for aspect of the game recommended\n\t- response.abusive > abusive comments from user. this will trigger a positive response\n\t- response.negative > negative sentiment feedback from user about the chat\n\t\n- If new questions with its intent label is added, we need to re-train the intent detection model. 
\n\n### Train model\n- Please use Code/model/SlotFillerCRF.ipynb to train the model\n- The model will be saved to recommend_game.crfsuite file to be used by the chatbot app.\n" }, { "alpha_fraction": 0.7133382558822632, "alphanum_fraction": 0.7397445440292358, "avg_line_length": 41.40625, "blob_id": "772ae11249aa5e774395b89a9d3f6128bfb9f2a8", "content_id": "6b9fc65581ce8eb05444924a15073789b33ee3a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8146, "license_type": "no_license", "max_line_length": 374, "num_lines": 192, "path": "/steam-scraper/README.md", "repo_name": "owenvvv/Steam_helper", "src_encoding": "UTF-8", "text": "# Steam Scraper\n\nThis repository contains [Scrapy](https://github.com/scrapy/scrapy) spiders for **crawling products** and **scraping all user-submitted reviews** from the [Steam game store](https://steampowered.com).\nA few scripts for more easily managing and deploying the spiders are included as well.\n\nThis repository contains code accompanying the *Scraping the Steam Game Store* article published on the [Scrapinghub blog](https://blog.scrapinghub.com/2017/07/07/scraping-the-steam-game-store-with-scrapy/) and the [Intoli blog](https://intoli.com/blog/steam-scraper/).\n\n## Installation\n\nAfter cloning the repository with\n```bash\ngit clone [email protected]:prncc/steam-scraper.git\n```\nstart and activate a Python 3.6+ virtualenv with\n```bash\ncd steam-scraper\nvirtualenv -p python3.6 env\n. 
env/bin/activate\n```\nInstall Python requirements via:\n```bash\npip install -r requirements.txt\n```\n\nBy the way, on macOS you can install Python 3.6 via [homebrew](https://brew.sh):\n ```bash\n brew install python3\n```\nOn Ubuntu you can use [instructions posted on askubuntu.com](https://askubuntu.com/questions/865554/how-do-i-install-python-3-6-using-apt-get).\n\n## Crawling the Products\n\nThe purpose of `ProductSpider` is to discover product pages on the [Steam product listing](http://store.steampowered.com/search/?sort_by=Released_DESC) and extract useful metadata from them.\nA neat feature of this spider is that it automatically navigates through Steam's age verification checkpoints.\nYou can initiate the multi-hour crawl with\n```bash\nscrapy crawl products -o output/products_all.jl --logfile=output/products_all.log --loglevel=INFO -s JOBDIR=output/products_all_job -s HTTPCACHE_ENABLED=False\n```\nWhen it completes you should have metadata for all games on Steam in `output/products_all.jl`.\nHere's some example output:\n```python\n{\n 'app_name': 'Cold Fear™',\n 'developer': 'Darkworks',\n 'early_access': False,\n 'genres': ['Action'],\n 'id': '15270',\n 'metascore': 66,\n 'n_reviews': 172,\n 'price': 9.99,\n 'publisher': 'Ubisoft',\n 'release_date': '2005-03-28',\n 'reviews_url': 'http://steamcommunity.com/app/15270/reviews/?browsefilter=mostrecent&p=1',\n 'sentiment': 'Very Positive',\n 'specs': ['Single-player'],\n 'tags': ['Horror', 'Action', 'Survival Horror', 'Zombies', 'Third Person', 'Third-Person Shooter'],\n 'title': 'Cold Fear™',\n 'url': 'http://store.steampowered.com/app/15270/Cold_Fear/'\n }\n```\n\n## Extracting the Reviews\n\nThe purpose of `ReviewSpider` is to scrape all user-submitted reviews of a particular product from the [Steam community portal](http://steamcommunity.com/). 
\nBy default, it starts from URLs listed in its `test_urls` parameter:\n```python\nclass ReviewSpider(scrapy.Spider):\n name = 'reviews'\n test_urls = [\n \"http://steamcommunity.com/app/316790/reviews/?browsefilter=mostrecent&p=1\", # Grim Fandango\n \"http://steamcommunity.com/app/207610/reviews/?browsefilter=mostrecent&p=1\", # The Walking Dead\n \"http://steamcommunity.com/app/414700/reviews/?browsefilter=mostrecent&p=1\" # Outlast 2\n ]\n```\nIt can alternatively ingest a text file containing URLs such as\n```\nhttp://steamcommunity.com/app/316790/reviews/?browsefilter=mostrecent&p=1\nhttp://steamcommunity.com/app/207610/reviews/?browsefilter=mostrecent&p=1\nhttp://steamcommunity.com/app/414700/reviews/?browsefilter=mostrecent&p=1\n```\nvia the `url_file` command line argument:\n```bash\nscrapy crawl reviews -o reviews.jl -a url_file=url_file.txt -s JOBDIR=output/reviews\n```\nAn output sample:\n```python\n{\n 'date': '2017-06-04',\n 'early_access': False,\n 'found_funny': 5,\n 'found_helpful': 0,\n 'found_unhelpful': 1,\n 'hours': 9.8,\n 'page': 3,\n 'page_order': 7,\n 'product_id': '414700',\n 'products': 179,\n 'recommended': True,\n 'text': '3 spooky 5 me',\n 'user_id': '76561198116659822',\n 'username': 'Fowler'\n}\n```\n\nIf you want to get all the reviews for all products, `split_review_urls.py` will remove duplicate entries from `products_all.jl` and shuffle `review_url`s into several text files.\nThis provides a convenient way to split up your crawl into manageable pieces.\nThe whole job takes a few days with Steam's generous rate limits.\n\n## Deploying to a Remote Server\n\nThis section briefly explains how to run the crawl on one or more t1.micro AWS instances.\n\nFirst, create an Ubuntu 16.04 t1.micro instance and name it `scrapy-runner-01` in your `~/.ssh/config` file:\n```\nHost scrapy-runner-01\n User ubuntu\n HostName <server's IP>\n IdentityFile ~/.ssh/id_rsa\n```\nA hostname of this form is expected by the `scrapydee.sh` helper script 
included in this repository.\nMake sure you can connect with `ssh scrappy-runner-01`.\n\n### Remote Server Setup\n\nThe tool that will actually run the crawl is [scrapyd](http://scrapyd.readthedocs.io/en/stable/) running on the remote server.\nTo set things up first install Python 3.6:\n```bash\nsudo add-apt-repository ppa:jonathonf/python-3.6\nsudo apt update\nsudo apt install python3.6 python3.6-dev virtualenv python-pip\n```\nThen, install scrapyd and the remaining requirements in a dedicated `run` directory on the remote server: \n```bash\nmkdir run && cd run\nvirtualenv -p python3.6 env\n. env/bin/activate\npip install scrapy scrapyd botocore smart_getenv \n```\nYou can run `scrapyd` from the virtual environment with\n```bash\nscrapyd --logfile /home/ubuntu/run/scrapyd.log &\n```\nYou may wish to use something like [screen](https://www.gnu.org/software/screen/) to keep the process alive if you disconnect from the server.\n\n### Controlling the Job\n\nYou can issue commands to the scrapyd process running on the remote machine using a simple [HTTP JSON API](http://scrapyd.readthedocs.io/en/stable/index.html).\nFirst, create an egg for this project:\n```bash\npython setup.py bdist_egg\n```\nCopy the egg and your review url file to `scrapy-runner-01` via\n```bash\nscp output/review_urls_01.txt scrapy-runner-01:/home/ubuntu/run/\nscp dist/steam_scraper-1.0-py3.6.egg scrapy-runner-01:/home/ubuntu/run\n```\nand add it to scrapyd's job directory via \n```bash\nssh -f scrapy-runner-01 'cd /home/ubuntu/run && curl http://localhost:6800/addversion.json -F project=steam -F egg=@steam_scraper-1.0-py3.6.egg'\n```\nOpening port 6800 to TCP traffic coming from your home IP would allow you to issue this command without going through SSH.\nIf this command doesn't work, you may need to edit `scrapyd.conf` to contain\n```\nbind_address = 0.0.0.0\n```\nin the `[scrapyd]` section.\nThis is a good time to mention that there exists a 
[scrapyd-client](https://github.com/scrapy/scrapyd-client) project for deploying eggs to scrapyd equipped servers.\nI chose not to use it because it doesn't know about servers already set up in `~/.ssh/config` and so requires repetitive configuration.\n\nFinally, start the job with something like\n```bash\nssh scrapy-runner-01 'curl http://localhost:6800/schedule.json -d project=steam -d spider=reviews -d url_file=\"/home/ubuntu/run/review_urls_01.txt\" -d jobid=part_01 -d setting=FEED_URI=\"s3://'$STEAM_S3_BUCKET'/%(name)s/part_01/%(time)s.jl\" -d setting=AWS_ACCESS_KEY_ID='$AWS_ACCESS_KEY_ID' -d setting=AWS_SECRET_ACCESS_KEY='$AWS_SECRET_ACCESS_KEY' -d setting=LOG_LEVEL=INFO'\n```\nThis command assumes you have set up an S3 bucket and the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.\nIt should be pretty easy to customize it for non-S3 output, however.\n\nThe `scrapydee.sh` helper script included in the `scripts` directory of this repository has some shortcuts for issuing commands to scrapyd-equipped servers with hostnames of the form `scrapy-runner-01`.\nFor example, the command\n```bash\n./scripts/scrapydee.sh status 1\n# Executing status()...\n# On server(s): 1.\n```\nwill run the `status()` function defined in `scrapydee.sh` on `scrapy-runner-01`.\nSee that file for more command examples.\nYou can also run each of the included commands on multiple servers:\nFirst, change the `all()` function within `scrapydee.sh` to match the number of servers you have configured.\nThen, issue a command such as\n```bash\n./scripts/scrapydee.sh status all\n```\nThe output is a bit messy, but it's a quick and easy way to run this job.\n" }, { "alpha_fraction": 0.5150588154792786, "alphanum_fraction": 0.5317646861076355, "avg_line_length": 30.71641731262207, "blob_id": "0df52da082c9e342e3693867fefc1901162a9a3e", "content_id": "abe8619c41a52cd2e40a99213e12bb1f29e8e8d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 4250, "license_type": "no_license", "max_line_length": 124, "num_lines": 134, "path": "/slotfiller.py", "repo_name": "owenvvv/Steam_helper", "src_encoding": "UTF-8", "text": "import pycrfsuite\nimport en_core_web_sm\nimport nltk\nwnl = nltk.WordNetLemmatizer()\nnlp = en_core_web_sm.load()\n\ndef input_prep(text):\n data_List = []\n\n for sequence in text:\n wordList=[]\n posList=[]\n tagList = []\n sentlist=[]\n\n text = sequence.strip().lower()\n tokens = nltk.word_tokenize(text)\n tokens = [wnl.lemmatize(t.lower(), pos='v') for t in tokens]\n text = \" \".join(tokens)\n tokenList = text.split()\n\n for tok in tokenList:\n wordList.append(tok)\n tagList.append('O')\n\n sent = ' '.join(wordList)\n sent_nlp = nlp(sent) #POS tag\n\n for token in sent_nlp:\n posList.append(token.tag_) #retrieve tag\n\n for idx,word in enumerate(wordList):\n sentlist.append((word,posList[idx],tagList[idx]))\n\n data_List.append(sentlist)\n return data_List\n\n\ndef word2features(sent, i): # function to create feature vector to represent each word\n word = sent[i][0]\n postag = sent[i][1]\n features = [ # for all words\n 'bias',\n 'word.lower=' + word.lower(),\n # 'word[-3:]=' + word[-3:],\n 'word.isupper=%s' % word.isupper(),\n 'word.istitle=%s' % word.istitle(),\n 'word.isdigit=%s' % word.isdigit(),\n 'postag=' + postag,\n 'postag[:2]=' + postag[:2], # what is the POS tag for the next 2 word token\n ]\n if i > 0: # if not <S>\n word1 = sent[i - 1][0]\n postag1 = sent[i - 1][1]\n features.extend([\n '-1:word.lower=' + word1.lower(),\n '-1:word.istitle=%s' % word1.istitle(),\n '-1:word.isupper=%s' % word1.isupper(),\n '-1:word.isdigit=%s' % word1.isdigit(),\n '-1:postag=' + postag1,\n '-1:postag[:2]=' + postag1[:2],\n ])\n else:\n features.append('BOS') # beginning of statement\n\n if i < len(sent) - 1: # if not <\\S>\n word1 = sent[i + 1][0]\n postag1 = sent[i + 1][1]\n features.extend([\n '+1:word.lower=' + word1.lower(),\n '+1:word.istitle=%s' % 
word1.istitle(),\n '+1:word.isupper=%s' % word1.isupper(),\n '+1:word.isdigit=%s' % word1.isdigit(),\n '+1:postag=' + postag1,\n '+1:postag[:2]=' + postag1[:2],\n ])\n else:\n features.append('EOS')\n\n return features\n\n\ndef sent2features(sent):\n return [word2features(sent, i) for i in range(len(sent))]\n\n\ndef sent2labels(sent):\n return [label for token, postag, label in sent]\n\n\ndef sent2tokens(sent):\n return [token for token, postag, label in sent]\n\ndef extract(text):\n tagger = pycrfsuite.Tagger()\n tagger.open('model/recommend_game.crfsuite')\n text_split = text.replace(' and', '.').split('.')\n sentence = input_prep(text_split)\n features = [sent2features(s) for s in sentence]\n tagList = [tagger.tag(s) for s in features]\n print(tagList)\n for idx_sent, sent in enumerate(tagList):\n for idx_word, word in enumerate(sent):\n if word != 'O':\n words = sentence[idx_sent][idx_word]\n words_new = (words[0], words[2], word)\n sentence[idx_sent][idx_word] = words_new\n #print(sentence)\n ratingList = []\n genreList = []\n priceList = []\n ageList = []\n characterList = []\n for idx_sent, sent in enumerate(sentence):\n for idx_word, word in enumerate(sent):\n if 'genre' in word[2]:\n genreList.append(word[0])\n elif 'age' in word[2]:\n if word[0].isdigit():\n ageList.append(word[0])\n elif 'price' in word[2]:\n if 'free' in word[0]:\n priceList.append('0')\n else:\n if word[0].replace('$','').isdigit():\n priceList.append(word[0].replace('$',''))\n elif 'rating' in word[2]:\n ratingList.append(word[0])\n elif 'character' in word[2]:\n characterList.append(word[0])\n\n entitylist = {'genre': genreList, 'age': ageList, 'price': priceList, 'rating': ratingList, 'characters': characterList}\n #print(f\"entitylist: {entitylist}\")\n return sentence, entitylist\n" }, { "alpha_fraction": 0.5550034046173096, "alphanum_fraction": 0.5632368922233582, "avg_line_length": 41.23456954956055, "blob_id": "fc1ea2e5e9124f6f666c36a0afeb0694430757e7", "content_id": 
"c33c4d03358c7b16bee9121f3422bfbbd7f92b08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20526, "license_type": "no_license", "max_line_length": 141, "num_lines": 486, "path": "/intention.py", "repo_name": "owenvvv/Steam_helper", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport pickle as pk\nimport re\nimport random\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nimport slotfiller as sf\nimport nltk\n\nwnl = nltk.WordNetLemmatizer()\nfrom nltk.corpus import stopwords\n\nmystopwords = stopwords.words(\"english\")\nimport recommendegine\n\nmodel_filename = 'model/intent_SGDClassifier_v2.pkl'\nclassifier_probability_threshold = 0.35\n\nprice_words = ['cheap', 'cheaper', 'cheapest']\nother_words = ['other', 'another', 'different']\n\nintent_enc = {\n 'commonQ.assist': 0,\n 'commonQ.how': 1,\n 'commonQ.name': 2,\n 'commonQ.wait': 3,\n 'recommend.game': 4,\n 'game.age': 5,\n 'game.price': 6,\n 'response.abusive': 7,\n 'response.negative': 8,\n 'response.incorrect': 9,\n 'game.release_date': 10,\n 'game.platforms\"': 11,\n 'response.positive': 12,\n 'game.details': 13\n}\n\nintent_dec = {\n -1: 'unknown',\n 0: 'commonQ.assist',\n 1: 'commonQ.how',\n 2: 'commonQ.name',\n 3: 'commonQ.wait',\n 4: 'recommend.game',\n 5: 'game.age',\n 6: 'game.price',\n 7: 'response.abusive',\n 8: 'response.negative',\n 9: 'response.incorrect',\n 10: 'game.release_date',\n 11: 'game.platforms',\n 12: 'response.positive',\n 13: 'game.details'\n}\n\ngamesDF = pd.read_csv(\"./data/steam_small.csv\", encoding=\"utf-8\")\n\n\ndef retrieve_last_session(session):\n last_session = ''\n if len(session) > 0:\n last_session = session[len(session) - 1] # retrieve last session details\n\n return last_session\n\n\ndef clean_text(text, lemma=True):\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"can not \", 
text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\'scuse\", \" excuse \", text)\n text = re.sub('&quot;', '', text)\n text = re.sub('\\<br \\/\\>', '', text)\n text = re.sub('etc.', 'etc', text)\n # text = re.sub('\\W', ' ', text)\n text = re.sub('\\s+', ' ', text)\n text = re.sub('\\<br\\>', ' ', text)\n text = re.sub('\\<strong\\>', '', text)\n text = re.sub('\\<\\/strong\\>', '', text)\n text = text.strip(' ')\n if lemma:\n tokens = word_tokenize(text)\n tokens = [wnl.lemmatize(t.lower(), pos='v') for t in tokens]\n text = \" \".join(tokens)\n return text\n\n\ndef detect_intent(query):\n text = [str(query['message'])]\n queryDF = pd.DataFrame(text, columns=['Query'])\n # Load trained Intent Detection Model\n intent_model = pk.load(open(model_filename, 'rb'))\n result = intent_model.predict(queryDF.Query)\n result_proba = intent_model.predict_proba(queryDF.Query)\n classes = list(intent_model.classes_)\n class_proba = result_proba[0][classes.index(result[0])]\n # print(f\"intent: {result[0]}; probability: {class_proba}\")\n if result[0] == 4:\n if class_proba >= classifier_probability_threshold:\n intent = result[0]\n else:\n intent = -1\n else:\n intent = result[0]\n return intent\n\n\ndef response(query, helper_session):\n name_part1 = ['Hi, my name is Stella.', 'Hello, my name is Stella.']\n wait_part1 = ['Sure!', 'Of course!', 'No problem!', 'Okay.']\n wait_part2 = ['I will wait for you.', 'Whenever you are ready.', 'Write back when you are ready.',\n 'Just write back when you are ready.']\n assist_part1 = ['How can I help you?', 'What can I do for you today?', 'How can I assist you?',\n 'Do you need help finding games?', 'Would you like me to recommend you a game?']\n hru = ['Feeling great!', 'I am feeling awesome.', 'Feeling Good!', 'I am doing great']\n 
recmd_part1 = ['I found this game - ', 'You might be interested in this game - ',\n 'I can suggest this game - ', 'Maybe you will be interested in - ']\n recmd_part2 = ['I found this game about your requirement on <<reason>> -']\n recmd_part3 = ['You may like this <<genre>> game which is good on its <<aspect>> aspect -']\n recmd_part4 = ['I found this game - ',\n 'I would recommend the game because you like <<genre>> game - ']\n abusive_resp = ['Please refrain from using such language',\n 'Let''s be nice to each other and refrain from using such strong words']\n negative_part1 = ['I am sorry.', 'My apologise.']\n negative_part2 = ['Can you tell me what is wrong?', 'What did I get wrong?', 'How can I correct myself?',\n 'How can I fix this?']\n price_part1 = ['The price of the game is $<<price>>', 'It costs $<<price>>', '$<<price>>']\n ask4more = ['Is there anything else you would like to know?',\n 'Would you like me to know more details about this game?']\n age_part1 = ['This game is suitable for gamers age above <<age>> years old',\n 'This is suitable for gamers age <<age>> and above', 'This is for gamers above <<age>> years old.']\n date_part = ['The release date is <<release_date>>', 'It was released on <<release_date>>', '<<release_date>>']\n platform_part = ['This game supports <<platform>>', 'You can play the game on <<platform>>']\n positive_resp = ['You are welcome :)']\n unknown_part1 = ['Unfortunately,', 'Sorry,', 'Pardon me,']\n unknown_part2 = ['I did not understand.', 'I did not get it.']\n unknown_part3 = ['Can you repeat?', 'Can we try again?', 'Can you say it again?']\n\n last_session = retrieve_last_session(helper_session) # retrieve the last session details\n session_tags = {}\n session_game = {}\n session = {}\n game = {}\n resp_text = ''\n genre = ''\n if last_session != '':\n if last_session.get(\"tags\") is not None:\n session_tags.update(last_session['tags'])\n if last_session.get(\"game\") is not None:\n 
session_game.update(last_session['game'])\n\n query_words = str(query['message']).lower().split(' ')\n yeswords = ['yes', 'ok', 'sure']\n if 'yes' in query_words or 'ok' in query_words or 'sure' in query_words:\n last_intent = last_session['intent']\n intent = last_intent\n session.update(last_session)\n if last_intent == 'commonQ.assist':\n resp_text = 'What kind of games are you looking for? Any particular genre or price?'\n elif last_intent == 'recommend.game':\n session.update({'intent': 'game.details'})\n game = last_session['game']\n resp_text = f\"{game['Title']} is released on {game['release']} by {game['publisher']}.\"\n if game['Price'] == 0:\n resp_text = resp_text + \" It is free to play and \"\n else:\n resp_text = resp_text + f\" It costs ${game['Price']} and \"\n if game['Age'] == '0':\n resp_text = resp_text + \" suitable for all ages.\"\n elif game['Age'] < 12:\n resp_text = resp_text + f\" suitable for kids age {game['Age']} and above.\"\n else:\n resp_text = resp_text + f\" suitable for teenager age {game['Age']} and above.\"\n\n resp_temp = resp_text\n\n resp_text = []\n resp_text.append(resp_temp)\n resp_text.append('Would you like me to recommend you other similar games?')\n\n elif last_intent == 'game.details':\n try:\n session.update({'intent': 'recommend.game'})\n last_gameid = last_session['game']\n # print(last_gameid)\n gameids = last_session.get('gameids')\n print(gameids)\n gameids.remove(last_gameid['id'])\n gameid = random.choice(gameids)\n gameTitle, gameSummary, gameURL, gamePrice, gameAge, gameRelease, gamePlatform, gamePublisher, gameImage = extract_game_summ(\n gameid)\n resp_text = []\n resp_text.append(random.choice(recmd_part1) + gameTitle + '.')\n resp_text.append(f'<img src=\"{gameImage}\" target=\"_blank\" style=\"width:100%\">' + gameSummary)\n resp_text.append(f'<a href=\"{gameURL}\" target=\"_blank\">{gameURL}</a>')\n resp_text.append(random.choice(ask4more))\n game = {'id': gameid, 'Title': gameTitle, 'URL': gameURL, 
'Price': gamePrice, 'Age': gameAge,\n 'release': gameRelease, 'platform': gamePlatform, 'publisher': gamePublisher}\n session.update({'game': game})\n except Exception as e:\n resp_text = random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(\n unknown_part3)\n else:\n resp_text = random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(\n unknown_part3)\n else:\n intent = intent_dec[detect_intent(query)]\n print(intent)\n session = {'intent': intent, 'query': str(query['message'])}\n session.update({'tags': session_tags})\n session.update({'game': session_game})\n\n if intent == 'commonQ.how':\n resp_text = random.choice(hru)\n elif intent == 'commonQ.assist':\n resp_text = random.choice(assist_part1)\n elif intent == 'commonQ.wait':\n resp_text = random.choice(wait_part1) + ' ' + random.choice(wait_part2)\n elif intent == 'commonQ.name':\n resp_text = random.choice(name_part1) + ' ' + random.choice(assist_part1)\n elif intent == 'recommend.game':\n sent_tag, tags = sf.extract(str(query['message']))\n # manual set gameid for testing purpose. 
Remove once recommendation model is available\n # tags = {'genre':[], 'price':[], 'age':[], 'rating':[]}\n print(tags)\n if tags.get('genre') is not None:\n if tags['genre'] != '':\n genre = ' and '.join(str(x) for x in tags['genre'])\n for tags_word in tags['genre']:\n if tags_word == 'cheaper':\n price = session_game['Price']\n tags.update({'price': [str(price)]})\n new_tags = update_tags(tags, session_tags)\n print(f\"new tags: {new_tags}\")\n session.update({'tags': new_tags})\n\n gameids, status = recommend_game(str(query['message']), tags)\n\n session.update({'gameids': gameids})\n\n resp_text = []\n if len(gameids) == 0:\n gameids = random.sample(list(gamesDF['appid']), 5)\n status[0] = 0 # random result\n\n gameid = random.choice(gameids)\n\n gameTitle, gameSummary, gameURL, gamePrice, gameAge, gameRelease, gamePlatform, gamePublisher, gameImage = extract_game_summ(\n gameid)\n\n if status[0] == 1:\n print(status[1])\n resp_text.append((random.choice(recmd_part4)).replace('<<genre>>', status[1]) + gameTitle + '.')\n elif status[0] == -1:\n resp_text.append((random.choice(recmd_part2)).replace('<<reason>>', status[1]) + gameTitle + '.')\n elif status[0] == 2:\n resp_text.append((random.choice(recmd_part3)).replace('<<genre>>', status[1]).replace('<<aspect>>',\n status[\n 2]) + gameTitle + '.')\n else:\n resp_text.append((random.choice(recmd_part1)) + gameTitle + '.')\n\n resp_text.append((f'<img src=\"{gameImage}\" target=\"_blank\" style=\"width:100%\">' + gameSummary))\n resp_text.append(f'<a href=\"{gameURL}\" target=\"_blank\">{gameURL}</a>')\n resp_text.append(random.choice(ask4more))\n game = {'id': gameid, 'Title': gameTitle, 'URL': gameURL, 'Price': gamePrice, 'Age': gameAge,\n 'release': gameRelease, 'platform': gamePlatform, 'publisher': gamePublisher}\n session.update({'game': game})\n elif intent == 'game.age':\n resp_text = []\n if session_game != '':\n age = extract_game_age(session_game['id'])\n # print(age)\n 
resp_text.append((random.choice(age_part1)).replace('<<age>>', str(age)))\n else:\n resp_text.append(\n random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(\n unknown_part3))\n\n resp_text.append(random.choice(ask4more))\n elif intent == 'game.price':\n resp_text = []\n if session_game != '':\n price = extract_game_price(session_game['id'])\n if price == 0.0:\n resp_text.append('This is a free to play game.')\n else:\n resp_text.append((random.choice(price_part1)).replace('<<price>>', str(price)))\n else:\n resp_text.append(\n random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(\n unknown_part3))\n\n resp_text.append(random.choice(ask4more))\n elif intent == 'response.abusive':\n resp_text = random.choice(abusive_resp)\n elif intent == 'response.negative':\n resp_text = random.choice(negative_part1) + ' ' + random.choice(negative_part2)\n elif intent == 'response.incorrect':\n last_intent = last_session['intent']\n last_query = last_session['query']\n if last_intent == 'response.incorrect' and 'no' in last_query.lower() and 'no' in str(query['message']):\n resp_text = 'Thank you for using Steam Helper. 
Have a nice day'\n else:\n resp_text = random.choice(assist_part1)\n elif intent == 'game.release_date':\n resp_text = []\n if session_game != '':\n date = extract_game_date(session_game['id'])\n resp_text.append((random.choice(date_part)).replace('<<release_date>>', str(date)))\n else:\n resp_text.append(\n random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(\n unknown_part3))\n resp_text.append(random.choice(ask4more))\n elif intent == 'game.platforms':\n resp_text = []\n if session_game != '':\n plateforms = extract_game_platform(session_game['id'])\n resp_text.append((random.choice(platform_part)).replace('<<platform>>', str(plateforms)))\n else:\n resp_text.append(\n random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(\n unknown_part3))\n resp_text.append(random.choice(ask4more))\n elif intent == 'response.positive':\n resp_text = random.choice(positive_resp)\n else:\n resp_text = random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(\n unknown_part3)\n\n # Change the response to a list for seperate the response\n # print(f\"new >> session: {session}; intent: {intent}; resp_text: {resp_text}\")\n return resp_text, [session]\n\n\ndef extract_about_game(text):\n text_cleansed = clean_text(text, lemma=False)\n sentences = sent_tokenize(text_cleansed)\n text_sent = ' '.join(sentences[:2])\n return text_sent\n\n\ndef recommend_game(query, tags):\n status = []\n\n # gamesDF[\"steamspy_tags\"] = gamesDF[\"steamspy_tags\"].str.lower()\n gameslist = gamesDF\n gameids = []\n '''\n if tags.get('genre') != None:\n genre = tags.get('genre')\n genre = '|'.join(genre)\n gamelist_tmp = gamesDF[gamesDF[\"steamspy_tags\"].str.contains(genre, na=False)]\n gameids_tmp = gamelist_tmp['appid'].head(50).tolist()\n if len(gameids_tmp) > 0:\n gamelist = gamelist_tmp\n gameids = gameids_tmp\n else:\n gameids = gamelist['appid'].head(50).tolist()\n '''\n\n if tags.get('price') != 
None and tags['price'] != []:\n pricelimit = ' '.join(tags.get('price'))\n gameslist_tmp = gameslist[gameslist.price < int(pricelimit)]\n gameids_tmp = gameslist_tmp['appid'].head(10).tolist()\n if len(gameids_tmp) > 0:\n status.append(-1)\n status.append('price')\n gameslist = gameslist_tmp\n gameids = gameids_tmp\n\n if tags.get('age') != None and tags['age'] != []:\n agelimit = ' '.join(tags.get('age'))\n gameslist_tmp = gameslist[gameslist.required_age < int(agelimit)]\n gameids_tmp = gameslist_tmp['appid'].head(10).tolist()\n if len(gameids_tmp) > 0:\n status.append(-1)\n status.append('age')\n gameslist = gameslist_tmp\n gameids = gameids_tmp\n\n if len(gameids) > 0:\n return gameids, status\n\n try:\n gameids, status = recommendegine.recommend(query, tags)\n except Exception as e:\n print(e)\n gameids = []\n\n print(gameids)\n return gameids, status\n\n\n# Function to extract a short summary of the game\ndef extract_game_summ(gameid):\n # Game Info Columns:\n # 'appid', 'name', 'release_date', 'english', 'developer', 'publisher', 'platforms', 'required_age', 'categories', 'genres',\n # 'steamspy_tags', 'achievements', 'positive_ratings', 'negative_ratings',\n # 'average_playtime', 'median_playtime', 'owners', 'price', 'totalrating', 'about_the_game'\n\n # gamesDF = pd.read_csv(\"./data/steam_small.csv\", encoding=\"utf-8\")\n gameInfo = gamesDF[gamesDF['appid'] == gameid]\n gameTitle = gameInfo.iloc[0]['name']\n gameSummary = gameInfo.iloc[0]['short_description']\n # gameSummary = extract_about_game(aboutgame)\n gameURL = f'https://store.steampowered.com/app/{gameid}'\n gamePrice = gameInfo.iloc[0]['price']\n gameAge = gameInfo.iloc[0]['required_age']\n gameRelease = gameInfo.iloc[0]['release_date']\n gamePlatform = gameInfo.iloc[0]['platforms']\n gamePublisher = gameInfo.iloc[0]['publisher']\n gameimage = gameInfo.iloc[0]['header_image']\n return gameTitle, gameSummary, gameURL, gamePrice, gameAge, gameRelease, gamePlatform, gamePublisher, gameimage\n\n\n# 
Function to extract price of game last recommended\ndef extract_game_price(gameid):\n gamesDF = pd.read_csv(\"./data/steam_small.csv\", encoding=\"utf-8\")\n gameInfo = gamesDF[gamesDF['appid'] == gameid]\n gamePrice = gameInfo.iloc[0]['price']\n return gamePrice\n\n\ndef extract_game_age(gameid):\n gamesDF = pd.read_csv(\"./data/steam_small.csv\", encoding=\"utf-8\")\n gameInfo = gamesDF[gamesDF['appid'] == gameid]\n gameAge = gameInfo.iloc[0]['required_age']\n return gameAge\n\n\ndef extract_game_date(gameid):\n gamesDF = pd.read_csv(\"./data/steam_small.csv\", encoding=\"utf-8\")\n gameInfo = gamesDF[gamesDF['appid'] == gameid]\n gameDate = gameInfo.iloc[0]['release_date']\n return gameDate\n\n\ndef extract_game_platform(gameid):\n # gamesDF = pd.read_csv(\"./data/steam_small.csv\", encoding=\"utf-8\")\n gameInfo = gamesDF[gamesDF['appid'] == gameid]\n gamePlatform = gameInfo.iloc[0]['platforms']\n return gamePlatform\n\n\ndef update_tags(tags, session_tags):\n new_tags = session_tags\n\n if session_tags.get('genre') != None:\n if tags.get('genre') != None:\n new_tags['genre'].extend(tags['genre'])\n else:\n new_tags.update({'genre': tags.get('genre')})\n\n if session_tags.get('price') != None:\n if tags.get('price') != None:\n new_tags.update({'price': tags.get('price')})\n else:\n new_tags.update({'price': tags.get('price')})\n\n if session_tags.get('age') != None:\n if tags.get('age') != None:\n new_tags['age'].extend(tags['age'])\n else:\n new_tags.update({'age': tags.get('age')})\n\n if session_tags.get('rating') != None:\n if tags.get('rating') != None:\n new_tags['rating'].extend(tags['rating'])\n else:\n new_tags.update({'rating': tags.get('rating')})\n\n if session_tags.get('characters') != None:\n if tags.get('characters') != None:\n new_tags['characters'].extend(tags['characters'])\n else:\n new_tags.update({'characters': tags.get('characters')})\n\n return new_tags\n" } ]
7
darksuney/MathProblem
https://github.com/darksuney/MathProblem
22884ab9fc0f20b371ac8c665faef1af971b6513
3a730349239afe96bf9772346da2ba09a9a70b81
ee8c6e6ba4650c8f1cc64be00f1fe1b711cee62f
refs/heads/master
2021-01-21T14:04:29.520508
2016-05-23T01:45:34
2016-05-23T01:45:34
50,146,735
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.75, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 21.66666603088379, "blob_id": "c0a08468e733b8c6301ad28270cdd39082804702", "content_id": "d31d87607e84a211b846f468cb501742b98ab9aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 68, "license_type": "no_license", "max_line_length": 55, "num_lines": 3, "path": "/README.md", "repo_name": "darksuney/MathProblem", "src_encoding": "UTF-8", "text": "# Create random math problem for kids in primary school\n\n## class1A\n" }, { "alpha_fraction": 0.43143230676651, "alphanum_fraction": 0.4662603437900543, "avg_line_length": 28.44871711730957, "blob_id": "5a19be93285f187d14f0d30ccfa852d59a3b46e5", "content_id": "4a88c40c581e9139957fd5cd066d2409c824eed6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2373, "license_type": "no_license", "max_line_length": 75, "num_lines": 78, "path": "/class1A.py", "repo_name": "darksuney/MathProblem", "src_encoding": "GB18030", "text": "# -*- coding: cp936 -*-\nimport math\nimport random\n\n#生成7-9加其它数,结果在10到20之间的算式\n\nspan= ' ';\n\ndef getRandomChar(str1):\n pos = int(math.floor(random.random()*len(str1)));\n return str1[pos];\n\ndef paddingWithSpace(str1, length):\n while(len(str1) < length):\n equation += \" \";\n\ndef getTitle():\n first = int(math.ceil(random.random()*10));\n second = int(math.ceil(random.random()*10));\n third = int(math.ceil(random.random()*10));\n op1 = getRandomChar(\"+-\");\n op2 = getRandomChar(\"+-\");\n equation = \"{0}{1}{2}{3}{4}\".format(first, op1, second, op2, third);\n if(int(eval(equation)) < 0):\n return getTitle();\n else:\n equation+=\"=\";\n return equation.ljust(12, ' ');\n\ndef printTitle(number):\n col = 0;\n line = ''\n print(\"写出得数\");\n for i in range(0, number):\n line = line + getTitle() + span\n col += 1\n if(col % 5 == 0):\n print(line)\n line = \"\"\n \n print(\"在()中填写合适的数,使等式成立\");\n for 
i in range(0, number):\n first = int(math.ceil(random.random()*10));\n second = first + int(math.ceil(random.random()*10));\n equation = \"{0}+( )={1}\".format(first, second).ljust(12, ' ');\n line = line + equation + span\n col += 1\n if(col % 5 == 0):\n print(line)\n line = \"\"\n\n def getTitle1():\n first = int(math.ceil(random.random()*10));\n second = int(math.ceil(random.random()*10));\n op1 = getRandomChar(\"+-\");\n equation = \"{0}{1}{2}\".format(first, op1, second);\n if(int(eval(equation)) < 1):\n return getTitle1();\n else:\n return equation;\n\n print(\"在()中填写> < =\");\n for i in range(0, number):\n eq = getTitle1();\n op2 = getRandomChar(\"+-\");\n diff = getRandomChar(\"012\");\n result = int(eval(eq+op2+diff));\n equation = \"{0}( ){1}\".format(eq, second).ljust(12, ' ');\n line = line + equation + span\n col += 1\n if(col % 5 == 0):\n print(line)\n line = \"\"\n\n print(\"\");\n\nfor i in range(1,11):\n printTitle(20);\n" }, { "alpha_fraction": 0.47472527623176575, "alphanum_fraction": 0.5186813473701477, "avg_line_length": 21.19512176513672, "blob_id": "5e33975099882c873bd2350fa8975eb4fc6a786b", "content_id": "81c4b31f49b16af4578b7bce9df1978ee894fc24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "no_license", "max_line_length": 54, "num_lines": 41, "path": "/class1B.py", "repo_name": "darksuney/MathProblem", "src_encoding": "GB18030", "text": "# -*- coding: cp936 -*-\nimport math\nimport random\n\n#生成7-9加其它数,结果在10到20之间的算式\n\nspan= ' ';\n\ndef getRandomChar(str1):\n pos = int(math.floor(random.random()*len(str1)));\n return str1[pos];\n\ndef paddingWithSpace(str1, length):\n while(len(str1) < length):\n equation += \" \";\n\ndef getTitle():\n first = 10 + int(math.ceil(random.random()*10));\n second = int(getRandomChar('6789'));\n op1 = '-';\n equation = \"{0}{1}{2}\".format(first, op1, second);\n if(int(eval(equation)) < 0):\n return getTitle();\n else:\n 
equation+=\"=\";\n return equation.ljust(12, ' ');\n\n\ndef printTitle(number):\n col = 0;\n line = ''\n print(\"写出得数\");\n for i in range(0, number):\n line = line + getTitle() + span\n col += 1\n if(col % 5 == 0):\n print(line)\n line = \"\"\n\nfor i in range(0,10):\n printTitle(20)\n" } ]
3
mostafa133/9.8.2019
https://github.com/mostafa133/9.8.2019
13c832ca9db34b9f535fe1773c7fea2a3ba19dba
b47e3c045039d3938cea69af9036372ba809d6c6
882530d0bf478fe81f84b8db83f3ec269c4914db
refs/heads/master
2020-07-02T04:29:16.363553
2019-08-09T09:57:38
2019-08-09T09:57:38
201,416,420
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6253185868263245, "alphanum_fraction": 0.6304163336753845, "avg_line_length": 33.55882263183594, "blob_id": "3b9689e2b683df2274ecc11dc51d847d38b6aa4f", "content_id": "a42d30a3e56e57f9f0ff32a94d1c4afd94c34a76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1177, "license_type": "no_license", "max_line_length": 72, "num_lines": 34, "path": "/myClass.py", "repo_name": "mostafa133/9.8.2019", "src_encoding": "UTF-8", "text": "# exercise:\n# create Employee class\n# __init__ (self, name, address, salary)\n# create salary getter/setter to avoid negative salary\n# implement __str__\n#\n# create Manager class\n# __init__ (self, name, address, salary, numberOfEmployeesBenieth)\n# call super() ...\n# implement __str__ and call super()__str__\n# create a manager and chnage it salary to something invalid and print\n# then change it to something valid and print\nclass Employee:\n def __init__(self, name, address, salary):\n self.name = name\n self.adress = address\n self.salary = salary\n def __str__(self):\n return f'name : {self.name},' \\\n f' adress : {self.adress},' \\\n f' salary: {self.salary}'\n\nclass Manager(Employee):\n def __init__(self,name, address, salary, numberOfEmployeesBenieth ):\n #self.name = name\n #self.address = address\n #self.salary = salary\n super().__init__(name, address, salary)\n self.numberOfEmployeesBenieth = numberOfEmployeesBenieth\n def __str__(self):\n return super().__str__() + f' {self.numberOfEmployeesBenieth}'\n\nmoste = Manager('moste', 'kawkab', 8500, 98)\nprint(moste)\n\n\n" }, { "alpha_fraction": 0.5189144611358643, "alphanum_fraction": 0.5287829041481018, "avg_line_length": 24.87234115600586, "blob_id": "1796b7177513ef964a4eb82cca2a6f2637f4da4a", "content_id": "dd4f350eb2a5bd8d9513b7699cbf0a71e7ecd06b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1244, "license_type": "no_license", 
"max_line_length": 49, "num_lines": 47, "path": "/seeter\\getter.py", "repo_name": "mostafa133/9.8.2019", "src_encoding": "UTF-8", "text": "class Animal:\n def __init__(self, name, age):\n self.name = name\n self.__age = age\n def __str__(self):\n return f'Animal {self.name} {self.__age}'\n '''\n def getAge(self): # getter\n return self.__age\n def setAge(self, value):\n if value >= 0: # setter\n self.__age = value\n '''\n @property\n def age(self): # getter\n return self.__age\n @age.setter\n def age(self, value):\n if value >= 0: # setter\n self.__age = value\ndolphine = Animal('Lucky', 1.5)\ndolphine.age = -200\nprint(dolphine)\nלהדפיס את המילים שגדולות מ- 2 אותיות\\\\\nclass Animal:\n def __init__(self, name, age):\n self.__name = name\n self.age = age\n def __str__(self):\n return f'Animal {self.__name} {self.age}'\n '''\n def getAge(self): # getter\n return self.__age\n def setAge(self, value):\n if value >= 0: # setter\n self.__age = value\n '''\n @property\n def name(self): # getter\n return self.__name\n @name.setter\n def name(self, letter):\n if len(letter) > 2: # setter\n self.__name = letter\ndolphine = Animal('most', 1.5)\ndolphine.name = \"mo\"\nprint(dolphine.name)\n" } ]
2
gutch/OSTacct
https://github.com/gutch/OSTacct
6751f086e9e6d600aa1227266c5f2e1430b39f28
0f14347b991559d15c9ec9768eaa27111703b14e
d9788d8146c8bb125dc8109e57feb6faab42c96f
refs/heads/master
2021-01-01T19:50:40.722905
2011-10-06T19:02:14
2011-10-06T19:02:14
1,203,318
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6574074029922485, "alphanum_fraction": 0.6759259104728699, "avg_line_length": 25.75, "blob_id": "09f4c6388251e6734e90b956dd80efdd53e3adc4", "content_id": "231d5d232add71490c68bb804d1af381ff224268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 216, "license_type": "no_license", "max_line_length": 61, "num_lines": 8, "path": "/ostacct/ostacct/templates/SamAFR.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<%inherit file=\"local:templates.master\"/>\n\n<%def name=\"title()\">\nLearning TurboGears 2.1: Quick guide to the Quickstart pages.\n</%def>\n\n <h2>SAMPLE AFR REPORT</h2>\n <img src=\"images/samples/SampleAFR.pdf\" />\n\n\n" }, { "alpha_fraction": 0.5849405527114868, "alphanum_fraction": 0.6007925868034363, "avg_line_length": 34.02777862548828, "blob_id": "13d8eefad07ee65873f658f28750cf12591d06cd", "content_id": "d46fb25271ceb415e88dddf32aea701251d8473d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3785, "license_type": "no_license", "max_line_length": 177, "num_lines": 108, "path": "/ostacct/data/templates/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/new.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 1317668151.8132091\n_template_filename='/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/new.mak'\n_template_uri='/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/new.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['header', 'body_class', 'title']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return 
context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n # SOURCE LINE 2\n ns = runtime.TemplateNamespace(u'menu_items', context._clean_inheritance_tokens(), templateuri=u'tgext.crud.templates.menu_items', callables=None, calling_uri=_template_uri)\n context.namespaces[(__name__, u'menu_items')] = ns\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n value = context.get('value', UNDEFINED)\n model = context.get('model', UNDEFINED)\n tmpl_context = context.get('tmpl_context', UNDEFINED)\n menu_items = _mako_get_namespace(context, 'menu_items')\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n')\n # SOURCE LINE 2\n __M_writer(u'\\n\\n')\n # SOURCE LINE 6\n __M_writer(u'\\n')\n # SOURCE LINE 10\n __M_writer(u'\\n\\n')\n # SOURCE LINE 12\n __M_writer(u'\\n\\n<div id=\"main_content\">\\n ')\n # SOURCE LINE 15\n __M_writer(escape(menu_items.menu_items('../')))\n __M_writer(u'\\n<div style=\"float:left;\" class=\"crud_add\">\\n <h2 style=\"margin:5px 0px; 4px; 0px;\">New ')\n # SOURCE LINE 17\n __M_writer(escape(model))\n __M_writer(u'</h2>\\n ')\n # SOURCE LINE 18\n __M_writer(tmpl_context.widget(value=value, action='./') )\n __M_writer(u'\\n</div>\\n<div style=\"height:0px; clear:both;\"> &nbsp; </div>\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_header(context):\n context.caller_stack._push_frame()\n try:\n parent = context.get('parent', UNDEFINED)\n menu_items = _mako_get_namespace(context, 'menu_items')\n __M_writer = context.writer()\n # SOURCE LINE 7\n __M_writer(u'\\n ')\n # SOURCE LINE 8\n __M_writer(escape(menu_items.menu_style()))\n 
__M_writer(u'\\n ')\n # SOURCE LINE 9\n __M_writer(escape(parent.header()))\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_body_class(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 12\n __M_writer(u'tundra')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context):\n context.caller_stack._push_frame()\n try:\n model = context.get('model', UNDEFINED)\n tmpl_context = context.get('tmpl_context', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 4\n __M_writer(u'\\n')\n # SOURCE LINE 5\n __M_writer(escape(tmpl_context.title))\n __M_writer(u' - New ')\n __M_writer(escape(model))\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.6634213924407959, "alphanum_fraction": 0.7357441186904907, "avg_line_length": 54.230770111083984, "blob_id": "e979a854671b5c86e3d7dad927f326f8c38047f2", "content_id": "65cb5077abf13244e01e89a5ab120b859bb838e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 719, "license_type": "no_license", "max_line_length": 111, "num_lines": 13, "path": "/ostacct/ostacct/templates/workshops.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<%inherit file=\"local:templates.master\"/>\n\n<%def name=\"title()\">\nLinks 2011 Spring Workshop Powerpoints\n</%def>\n <FORM><INPUT onclick=history.go(-1) type=button value=Back></FORM>\n\n <h2>Links 2011 Spring Workshop Powerpoints</h2>\n<p> <a href=\"/man/PICKWICK LANDING STATE PARK 2011 revised(3).pdf\">2012 Pickwick spring workshop agenda</a></p>\n<p> <a href=\"/TCRSPlanUpdate2011.pdf\">Tennessee Consolidated Retirement System Powerpoint</a></p>\n<p> <a href=\"/NCLB%20Updates-2011Spring%20Fiscal%20Workshops-revised.pptx\">NCLB Fiscal Powerpoint</a></p>\n<p> <a href=\"/2011%20Local%20Finance%20Workshops.pptx\">Local Finance Fiscal 
Powerpoint</a></p>\n<p> <a href=\"/2011BOEGASB54.pptx\">County Audit GASB 54 Powerpoint</a></p>\n\n" }, { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 17.962963104248047, "blob_id": "aa7a1f379e8df887eb4db1c4d1813a0121f827e8", "content_id": "0a283bb073715ae3a4e78ecd9feb16b918bb6750", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 513, "license_type": "no_license", "max_line_length": 45, "num_lines": 27, "path": "/ostacct/ostacct/templates/schools.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<%inherit file=\"local:templates.master\"/>\n\n<script type=\"text/javascript\">\nfunction eclub(me){\nvar schools_id=me.id;\nvar ThePath='/Chart/?schools_id='+schools_id;\nwindow.location =ThePath\n}\nfunction eact(me){\nvar schools_id=me.id;\nvar ThePath='/Chart/?schools_id='+schools_id;\nwindow.location =ThePath\n}\nfunction eacct(me){\nvar schools_id=me.id;\nvar ThePath='/Chart/?schools_id='+schools_id;\nwindow.location =ThePath\n}\n</script>\n\n<%def name=\"title()\">\n In School Accounting\n</%def>\n\n\n\n ${c.form().display() | n} \n" }, { "alpha_fraction": 0.5974185466766357, "alphanum_fraction": 0.6124769449234009, "avg_line_length": 38.65853500366211, "blob_id": "4b3233992472b46b21f89d20d1f5a11817e6e28e", "content_id": "ff772ecf8457354664aada974e996b72988e2553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3254, "license_type": "no_license", "max_line_length": 506, "num_lines": 82, "path": "/ostacct/data/templates/home/gutch/ost/ostacct/ostacct/templates/data.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 
1317650335.730118\n_template_filename='/home/gutch/ost/ostacct/ostacct/templates/data.mak'\n_template_uri='/home/gutch/ost/ostacct/ostacct/templates/data.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['sidebar_bottom', 'title']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n tg = context.get('tg', UNDEFINED)\n params = context.get('params', UNDEFINED)\n parent = context.get('parent', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n\\n')\n # SOURCE LINE 5\n __M_writer(u'\\n\\n')\n # SOURCE LINE 7\n __M_writer(escape(parent.sidebar_top()))\n __M_writer(u'\\n\\n<h2>Content Type Dispatch</h2>\\n<p>\\nThis page shows how you can provide multiple pages\\ndirectly from the same controller method. This page is generated \\nfrom the expose decorator with the template defintion provided.\\nYou can provide a url with parameters and this page will display\\nthe parameters as html, and the json version will express\\nthe entries as JSON. 
Here, try it out: <a href=\"/data.html?a=1&b=2\">/data.html?a=1&b=2</a>\\n</p>\\n\\n<p>Click here for the <a href=\"')\n # SOURCE LINE 19\n __M_writer(escape(tg.url('/data.json', params=params)))\n __M_writer(u'\">JSON Version of this page.</a></p>\\n<p>The data provided in the template call is: \\n <table>\\n')\n # SOURCE LINE 22\n for key, value in params.iteritems():\n # SOURCE LINE 23\n __M_writer(u' <tr>\\n <td>')\n # SOURCE LINE 24\n __M_writer(escape(key))\n __M_writer(u'</td>\\n <td>')\n # SOURCE LINE 25\n __M_writer(escape(value))\n __M_writer(u'</td>\\n </tr>\\n')\n pass\n # SOURCE LINE 28\n __M_writer(u' </table>\\n\\n\\n')\n # SOURCE LINE 31\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_sidebar_bottom(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 3\n __M_writer(u'\\n Welcome to TurboGears 2.1, standing on the shoulders of giants, since 2007\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.6293546557426453, "alphanum_fraction": 0.643061101436615, "avg_line_length": 32, "blob_id": "550fb371154217590b3919fef873d69c25a0f1f7", "content_id": "8a7d804f8429dc80ebc3f30979f97e5512324823", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1751, "license_type": "no_license", "max_line_length": 114, "num_lines": 53, "path": "/ostacct/data/templates/home/gutch/ost/ostacct/ostacct/templates/eventedit.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 
1317668520.514703\n_template_filename='/home/gutch/ost/ostacct/ostacct/templates/eventedit.mak'\n_template_uri='/home/gutch/ost/ostacct/ostacct/templates/eventedit.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['title']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n c = context.get('c', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n')\n # SOURCE LINE 4\n __M_writer(u'\\n<FORM><INPUT TYPE=\"button\" VALUE=\"Back\" onClick=\"history.go(-1);return true;\"></FORM> \\n ')\n # SOURCE LINE 6\n __M_writer(c.form.display())\n __M_writer(u' \\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 2\n __M_writer(u'\\nWork with Calendar events\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.6592575907707214, "alphanum_fraction": 0.6644515991210938, "avg_line_length": 43.682594299316406, "blob_id": "f736f3b7c22c4c35784eeb5af17c87c95f54f32a", "content_id": "65c46fecb488e51aa54fa8457c5a89f82bbd6cc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13092, "license_type": "no_license", "max_line_length": 180, "num_lines": 293, "path": "/ostacct/ostacct/model/schoolmodel.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "from tg 
import config\nfrom sqlalchemy import Table, ForeignKey, Column, types, Column, Integer, String,Date, Text, MetaData\nfrom sqlalchemy.types import String, Unicode, UnicodeText, Integer, DateTime, \\\n Boolean, Float, Numeric\nfrom sqlalchemy.orm import mapper, relation, backref, join\nfrom ostacct.model import DeclarativeBase, metadata, DBSession, auth\nfrom auth import Schools\nfrom datetime import datetime\n\nclass LastRef(DeclarativeBase):\n __tablename__=\"lastref\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n lastrecpt = Column(Integer, nullable=True)\n lastdisburse = Column(Integer, nullable=True)\n lastje = Column(Integer, nullable=True)\n def __repr__(self):\n return '<LastRef = lastref:lastrecpt = %i, lastdisburse = %i, lastje - %i>' %(self.lastrecpt,self.lastdisburse,self.lastje)\n\nclass Bank(DeclarativeBase):\n __tablename__=\"bank\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n BankName = Column(String(50), nullable =False)\n accounts_id=Column(Integer,ForeignKey('accounts.id'))\n accounts = relation('Account', backref=backref('bank'),order_by=id)\n school = relation('Schools', backref=backref('bank'))\n school_id = Column(Integer, ForeignKey('schools.id'))\n def __repr__(self):\n return self.id\n\nclass Account(DeclarativeBase):\n __tablename__=\"accounts\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n AcctNo = Column(String(6), nullable=False)\n Description = Column(String(75), nullable=False)\n SchoolWide = Column(Boolean, nullable=False)\n SchoolActivity= Column(Boolean, nullable=False)\n def __repr__(self):\n return str(self.id)\n\nclass Sub(DeclarativeBase):\n __tablename__=\"subs\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n SubNo = Column(String(5), nullable=False)\n Description = Column(String(75), nullable=False)\n \n #schools_id = relation('Schools', secondary=schools_subs, backref='subs')\n def __repr__(self):\n return str(self.id)\n\nclass TempC(DeclarativeBase):\n 
__tablename__=\"tempc\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n aNo = Column(String(6), nullable=False)\n Description = Column(String(75), nullable=False)\n\nclass Activities(DeclarativeBase):\n __tablename__=\"activities\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n SubNo = Column(String(5), nullable=False)\n schools_id=Column(Integer,ForeignKey('schools.id'))\n school = relation('Schools', backref=backref('activities'),order_by=id)\n subs_id=Column(Integer,ForeignKey('subs.id'))\n subs = relation('Sub', backref=backref('activities'),order_by=id)\n def __repr__(self):\n return self.SubNo\n\nclass Clubs(DeclarativeBase):\n __tablename__=\"clubs\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n SubNo = Column(String(5), nullable=False)\n schools_id=Column(Integer,ForeignKey('schools.id'))\n school = relation('Schools', backref=backref('clubs'),order_by=id)\n subs_id=Column(Integer,ForeignKey('subs.id'))\n subs = relation('Sub', backref=backref('clubs'),order_by=id)\n def __repr__(self):\n return self.SubNo \n\nclass Obj(DeclarativeBase):\n __tablename__=\"objs\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n ObjNo = Column(String(5), nullable=False)\n Description = Column(String(75), nullable=False)\n def __repr__(self):\n return self.ObjNo\n\n \nclass Chart(DeclarativeBase): \n __tablename__=\"charts\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n AcctNo=Column(String(6), nullable=False)\n fund = relation('Fund', backref=backref('charts'),order_by=id)\n fundid = Column(Integer, ForeignKey('funds.id'))\n #bankid = Column(Integer, ForeignKey('bank.id'))\n #bank = relation('Bank', backref='charts')\n schools_id=Column(Integer,ForeignKey('schools.id'))\n school = relation('Schools', backref=backref('charts'),order_by=id)\n accounts_id=Column(Integer,ForeignKey('accounts.id'))\n accounts = relation('Account', backref=backref('accounts'),order_by=id)\n\n def __repr__(self):\n return 
self.AcctNo\n #return '<Chart(\"%s\")>' %(sacct + \" \" + self.acctdescription)\n #return sacct + \" \" + self.acctdescription\n\nclass Typ(DeclarativeBase): \n __tablename__=\"typs\"\n id = Column(Integer, primary_key=True)\n typDescription = Column(String(50), nullable=False)\n \n def __repr__(self):\n return '<typDescription = \"%s\">' %(self.typDescription)\n\nclass Fund(DeclarativeBase): \n __tablename__=\"funds\"\n id=Column(Integer,primary_key=True)\n fundDescription = Column(String(30))\n def __repr__(self):\n #return '<Fund(\"%s\")>' % (self.fundDescription)\n return str(self.id)\n\nclass Payoree(DeclarativeBase): \n __tablename__=\"payoree\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n service=Column(Boolean,nullable=False)\n fedid=Column(String,nullable=True)\n compname= Column(String(70), nullable=False)\n contname= Column(String(70), nullable=True)\n address= Column(String(70), nullable=True)\n city= Column(String(70), nullable=True)\n state= Column(String(2), nullable=True)\n zip= Column(String(10), nullable=True)\n tel=Column(String(20), nullable=True) \n def __repr__(self):\n #return \"<Payoree('%s')>\" %(self.payoreename)\n return '<payoree: id= \"%s\", service = \"%s\", fedid=\"%s\", compname=\"%s\", contname=\"%s\", address=\"%s\", city=\"%s\", state=\"%s\", zip=\"%s\", tel=\"%s\">' \\\n % (self.id, self.service, self.fedid, self.compname, self.contname, self.address, self.city, self.state, self.zip, self.tel)\n\nclass Po(DeclarativeBase):\n __tablename__= \"po\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n payoreeid = Column(ForeignKey('payoree.id'), nullable=True)\n payoreename = relation('Payoree', order_by = id, backref = 'po')\n fund = relation('Fund', order_by=id, backref='po')\n fundid = Column(Integer, ForeignKey('funds.id'))\n ref = Column(Integer, nullable=False) \n transdate = Column(DateTime, nullable=False) \n chartid = Column(ForeignKey('charts.id'), nullable=True)\n chart = 
relation('Chart',order_by=Chart.id, backref='po') \n subs_id=Column(Integer,ForeignKey('subs.id'))\n subs = relation('Sub', backref=backref('po'),order_by=id)\n amount = Column(Numeric(precision=2, scale=2, asdecimal=True))\n transactid= Column(ForeignKey('transact.id'), nullable=True)\n transact = relation('Transact',order_by=Chart.id, backref='po') \n podate = Column(DateTime, nullable=False)\n #itemid= Column(ForeignKey('poitem'), nullable=True)\n # item = relation('Poitem',order_by=Poitem.id, backref='po') \n \n \n\n \n \n \nclass Deposit(DeclarativeBase):\n __tablename__=\"deposit\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n total = Column(Numeric(precision=2, scale=2, asdecimal=True))\n #def __init__(self,receipt,fund,chart,transdate,payoreeid,itemdesc,total):\n #self.receipt=receipt\n #self.fund=fund.fundDescription\n #self.chart=str(chart.acct) + \" \" + chart.acctdescription\n #self.transdate=transdate\n #self.payoree_id=payoree_id\n #self.itemdesc=itemdesc\n #self.total=total\n def __repr__(self):\n return \"<Deposit('%-d')>\" %(self.total)\n \nclass Receipt(DeclarativeBase):\n __tablename__=\"receipts\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n receipt = Column(Integer, nullable=False)\n fundDescription = relation('Fund', order_by = Fund.id, backref='deposit')\n fundid = Column(Integer, ForeignKey('funds.id'))\n chartid = Column(ForeignKey('charts.id'), nullable=True)\n chart = relation('Chart',order_by=Chart.id, backref='receipts')\n transdate = Column(DateTime, nullable=False)\n payoreeid = Column(ForeignKey('payoree.id'), nullable=True)\n payoreename = relation('Payoree', order_by = Payoree.id, backref = 'deposit')\n itemdesc = Column(String(100), nullable=True)\n #splitid = Column(ForeignKey('splits.id'), nullable=True)\n #split = relation('Split',order_by=Chart.id, backref='receipts')\n depositid = Column(ForeignKey('deposit.id'), nullable=True)\n deposit = relation('Deposit',order_by=Deposit.id, 
backref='receipts')\n amount = Column(Numeric(precision=2, scale=2, asdecimal=True))\n def __repr__(self):\n return \"<Receipt(%i,'%s','%s',%date,%i,'%s','%-d')>\" %(self.receipt,self.fundDescription,self.chart,self.transdate,self.payoreeid,self.itemdesc,self.deposit,self.total)\n\n\nclas_table = Table(\"clas\", metadata,\n Column(\"id\", types.Integer,autoincrement=True, primary_key=True),\n Column(\"clasyear\", types.Integer, nullable=True),\n Column(\"sponsor\", types.String(100), nullable=True),\n )\n\nclass clas(object):\n pass\n\n\nclass Transtype(DeclarativeBase): \n __tablename__= \"transtypes\"\n id = Column(Integer, primary_key=True)\n transtypeDescription = Column(String(50), nullable=False)\n \n def __repr__(self):\n return '<Transtype( = \"%s\")>' %(self.transtypeDescription)\n\nclass Poitem(DeclarativeBase): \n __tablename__=\"poitem\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n itemDescription=Column(String(150), nullable=True)\n \n\nclass Transact(DeclarativeBase):\n __tablename__=\"transact\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n transtypeid = Column(Integer, ForeignKey('transtypes.id'), nullable=False)\n transtype = relation('Transtype', order_by=id, backref = 'transact')\n fund = relation('Fund', order_by=id, backref='transact')\n fundid = Column(Integer, ForeignKey('funds.id'))\n ref = Column(Integer, nullable=False) \n transdate = Column(DateTime, nullable=False)\n #po_id = Column(ForeignKey('po.po_id'), nullable=True)\n payoreeid = Column(ForeignKey('payoree.id'), nullable=True)\n payoreename = relation('Payoree', order_by = id, backref = 'transact')\n itemdesc = Column(String(100), nullable=True)\n chart = relation('Chart', order_by = id, backref = 'transact')\n chartid = Column(ForeignKey('charts.id'), nullable=True)\n #schart_id = Column(ForeignKey('charts.id'), nullable=True)\n #account = relation('Account', order_by = id, backref = 'transact')\n #accountid = Column(Integer, 
ForeignKey('accounts.id'), nullable=True)\n obj = relation('Obj', order_by = Clubs.id, backref = 'transact')\n objid = Column(Integer, ForeignKey('objs.id'), nullable=True)\n schoolid = Column(Integer, ForeignKey('schools.id'), nullable=True)\n club = relation('Clubs', order_by = Clubs.id, backref = 'transact')\n clubid = Column(Integer, ForeignKey('clubs.id'), nullable=True)\n school = relation('Schools',order_by = id, backref='transact')\n bankid = Column(Integer, ForeignKey('bank.id'))\n bank = relation('Bank', order_by = Bank.id, backref='transact')\n depositid=Column(ForeignKey('deposit.id'), nullable=True)\n deposit = relation('Deposit', order_by = Deposit.id, backref='transact')\n amount = Column(Numeric(precision=2, scale=2, asdecimal=True))\n balance = Column(Numeric(precision=2, scale=2, asdecimal=True))\n #originator = Column(ForeignKey('tg_user.user.id'))\n #splitid = Column(ForeignKey('splits.id'), nullable=True)\n #split = relation('Split',order_by=Chart.id, backref='Transact')\n \n def __repr__(self):\n return \"<Transact(%i,'%s','%s',%date,%i,'%s','%-d','%-d')>\" %(self.ref,self.fundDescription,self.chart,self.transdate,self.payoreeid,self.itemdesc,self.amount,self.balance)\n \nclass TheEvent(DeclarativeBase):\n __tablename__= 'events'\n id=Column(Integer,autoincrement=True, primary_key=True)\n month= Column(Integer, nullable=False)\n day= Column(Integer, nullable=False)\n year= Column(Integer, nullable=False)\n typ= Column(String(50), nullable=False)\n Description= Column(String(50), nullable=False)\n hyperlink=Column(String(50), nullable=True)\n doc_id=Column(Integer,ForeignKey('docs.id'))\n doc = relation('Doc', backref=backref('events'),order_by=id) \n user_id=Column(Integer, ForeignKey('tg_user.user_id'))\n user = relation('User',backref=backref('events'))\n Date_entered = Column(DateTime, default=datetime.now) \n #def __repr__(self):\n #return \"<TheEvent(%i,%i,%i,'%s','%s')>\" %(self.month, self.day, self.year,self.typ, self.Description) \n 
\n\nclass Doc(DeclarativeBase):\n __tablename__= \"docs\"\n id = Column(Integer,autoincrement=True, primary_key=True)\n name=Column(Text(50), nullable=False)\n description=Column(String(50), nullable=True)\n def __repr__(self):\n return \"<Doc(%i,'%s','%s')>\" %(self.id, self.name, self.description)\n \n\n\nmapper(clas, clas_table)\n#mapper(club, club_table)\n#mapper(vendor, vendor_table)\n#mapper(po, po_table)\n#mapper(transtype, transtype_table)\n#mapper(trans, trans_table)\n" }, { "alpha_fraction": 0.5520069003105164, "alphanum_fraction": 0.574018120765686, "avg_line_length": 34.61538314819336, "blob_id": "600301bf6368defe92b9a1bcc18fedd60e44b5eb", "content_id": "5a2988ae43f18fd1cb47264fc721735a5e0b80e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2317, "license_type": "no_license", "max_line_length": 155, "num_lines": 65, "path": "/ostacct/data/templates/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/menu_items.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 1317650509.9355121\n_template_filename=u'/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/menu_items.mak'\n_template_uri=u'/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/menu_items.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['menu_style', 'menu_items']\n\n\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n __M_writer = context.writer()\n # SOURCE LINE 10\n __M_writer(u'\\n\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_menu_style(context):\n 
context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n<style>\\n#menu_items {\\n padding:0px 12px 0px 2px;\\n list-style-type:None;\\n float:left; \\n padding-left:0px;\\n }\\n</style>\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_menu_items(context,path='../'):\n context.caller_stack._push_frame()\n try:\n sorted = context.get('sorted', UNDEFINED)\n tmpl_context = context.get('tmpl_context', UNDEFINED)\n hasattr = context.get('hasattr', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 12\n __M_writer(u'\\n <div id=\"menu_items\">\\n <ul>\\n')\n # SOURCE LINE 15\n if hasattr(tmpl_context, 'menu_items'):\n # SOURCE LINE 16\n for lower, item in sorted(tmpl_context.menu_items.iteritems()):\n # SOURCE LINE 17\n __M_writer(u' <li><a href=\"')\n __M_writer(escape(path))\n __M_writer(escape(lower))\n __M_writer(u's\">')\n __M_writer(escape(item.__name__))\n __M_writer(u'</a></li>\\n')\n pass\n pass\n # SOURCE LINE 20\n __M_writer(u' </ul>\\n </div>\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.6650717854499817, "alphanum_fraction": 0.6698564887046814, "avg_line_length": 33.66666793823242, "blob_id": "1dd71f882be63b6886871cd464d9c39306039f3d", "content_id": "8e53d32266fccc23bd112bd28f7a81ad913e265b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 209, "license_type": "no_license", "max_line_length": 86, "num_lines": 6, "path": "/ostacct/ostacct/templates/eventedit.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<%inherit file=\"local:templates.master\"/>\n<%def name=\"title()\">\nWork with Calendar events\n</%def>\n<FORM><INPUT TYPE=\"button\" VALUE=\"Back\" onClick=\"history.go(-1);return true;\"></FORM> \n ${c.form.display()|n} \n" }, { "alpha_fraction": 0.599194347858429, "alphanum_fraction": 0.6122860312461853, "avg_line_length": 
36.68354415893555, "blob_id": "eb9cf76a6fd6637c5b53135db3e47701a2bbcaa3", "content_id": "be07873dd425be18a84db2390d034a12e9569946", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2979, "license_type": "no_license", "max_line_length": 434, "num_lines": 79, "path": "/ostacct/data/templates/home/gutch/ost/ostacct/ostacct/templates/environ.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 1317650335.959218\n_template_filename='/home/gutch/ost/ostacct/ostacct/templates/environ.mak'\n_template_uri='/home/gutch/ost/ostacct/ostacct/templates/environ.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['sidebar_bottom', 'title']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n environment = context.get('environment', UNDEFINED)\n sorted = context.get('sorted', UNDEFINED)\n parent = context.get('parent', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n\\n')\n # SOURCE LINE 5\n __M_writer(u'\\n\\n')\n # SOURCE LINE 7\n __M_writer(escape(parent.sidebar_top()))\n __M_writer(u'\\n<h2>The WSGI nature of the framework</h2>\\n <p>In this page you can see all the WSGI variables your request object has, \\n the ones in 
capital letters are required by the spec, then a sorted by\\n component list of variables provided by the Components, and at last\\n the \"wsgi.\" namespace with very useful information about your WSGI Server</p>\\n <p>The keys in the environment are: \\n <table>\\n')\n # SOURCE LINE 15\n for key in sorted(environment):\n # SOURCE LINE 16\n __M_writer(u' <tr>\\n <td>')\n # SOURCE LINE 17\n __M_writer(escape(key))\n __M_writer(u'</td>\\n <td>')\n # SOURCE LINE 18\n __M_writer(escape(environment[key]))\n __M_writer(u'</td>\\n </tr>\\n')\n pass\n # SOURCE LINE 21\n __M_writer(u' </table>\\n\\n </p>\\n\\n\\n')\n # SOURCE LINE 26\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_sidebar_bottom(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 3\n __M_writer(u'\\n Learning TurboGears 2.1: Information about TG and WSGI\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.5790266990661621, "alphanum_fraction": 0.5852712988853455, "avg_line_length": 31.47552490234375, "blob_id": "b74a8844b388e29e63481ea05d5a17a40fb4a6a2", "content_id": "2fa37f938bdc5e87d4a866e7f94767011a14b5e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 4644, "license_type": "no_license", "max_line_length": 223, "num_lines": 143, "path": "/ostacct/ostacct/templates/master.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html>\n<head>\n \n \n ${self.meta()}\n <title>${self.title()}</title>\n <link rel=\"stylesheet\" type=\"text/css\" media=\"screen\" href=\"${tg.url('/css/style.css')}\" />\n 
<link rel=\"stylesheet\" type=\"text/css\" media=\"screen\" href=\"${tg.url('/css/admin.css')}\" />\n\n\n\n<meta name=\"description\" content=\"Printable reports from Tennessee DOE ereporting\" />\n<meta name=\"keywords\" content=\"DOE,Tennessee,ereporting, e reporting, database eventcalendar, Ron Adelman, Fiscal Consultant,\" />\n<meta name=\"author\" content=\"Ron Adelman\" />\n<meta http-equiv=\"content-type\" content=\"text/html;charset=UTF-8\" />\n\n\t</head>\n\n\n<body class=\"${self.body_class()}\">\n\n${self.header()}\n<ul id=\"nav\">\n\t<li><a href=\"#\"><a href=\"/index\">Home</a></a></li>\n\n\t\n\n\t<li><a href=>E reporting</a>\n\t\t<ul>\n <li><a href=\"/SampleAFR.pdf\">Sample - EXCEL AFR </a></li> \n <li><a href=\"/SampleSubAFR.pdf\">Sample - EXCEL AFR(Sub Fund)</a></li> \n <li><a href=\"/graphs.docx\">Sample of some graphs</a></li> \n <li><a href=\"/WxPyETn\">More WxPyETn</a></li> \n\t\t </ul>\n\t</li>\n\t<li><a href=>Local Finance</a>\n\t\t<ul>\n <li><a href=\"/accessit\">Application Access </a></li> \n <li><a href=\"/cal\">Event Calendar</a></li> \n <li><a href=\"/manuals\">Manuals & Misc</a></li> \n <li><a href=\"/workshops\">Spring workshops</a></li> \n\t\t </ul>\n\t</li>\n\n\n\n</ul> \n\n ${self.content_wrapper()}\n\n \n ${self.footer()}\n</body>\n \n<%def name=\"content_wrapper()\">\n\n <div id=\"content\">\n <div>\n % if page:\n <div class=\"currentpage\">\n Now Viewing: <span>${page}</page>\n </div>\n % endif\n\n <%\n flash=tg.flash_obj.render('flash', use_js=False)\n %>\n % if flash:\n ${flash | n}\n % endif\n ${self.body()}\n </div>\n</%def>\n\n<%def name=\"body_class()\">\n</%def>\n<%def name=\"meta()\">\n <meta content=\"text/html; charset=UTF-8\" http-equiv=\"content-type\"/>\n</%def>\n\n<%def name=\"title()\"> </%def>\n<%def name=\"sidebar_top()\">\n <div id=\"sb_top\" class=\"sidebar\">\n <h2>My Social network links</h2>\n <ul class=\"links\">\n <li><a href=\"http://www.toddfarmtn.com/\">Bed and Breakfast and More</a> - Pat Todds(from my 
office) business website</li>\n <li><a href=\"http://www.thelifechurch.com/\">The Life Church</a> Peggy and I changed to here Sept 2011 </li>\n <li><a href=\"http://lisabrooksphotos.com/\">Lisa Brooks Photography</a> Lisa Brooks(from my office) business website </li> \n </ul>\n I probably need to spend more time socializing and less time on the computer and working out.\n </div>\n</%def>\n\n<%def name=\"sidebar_bottom()\">\n <div id=\"sb_bottom\" class=\"sidebar\">\n <h2>Developing TG2</h2>\n <ul class=\"links\">\n <li><a href=\"http://trac.turbogears.org/query?status=new&amp;status=assigned&amp;status=reopened&amp;group=type&amp;milestone=2.1&amp;order=priority\">TG2 Trac tickets</a> What's happening now in TG2 development</li>\n <li><a href=\"http://trac.turbogears.org/timeline\">TG Dev timeline</a> (recent ticket updates, svn checkins, wiki changes)</li>\n <li><a href=\"http://svn.turbogears.org/trunk\">TG2 SVN repository</a> For checking out a copy</li>\n <li><a href=\"http://turbogears.org/2.1/docs/main/Contributing.html#installing-the-development-version-of-turbogears-2-from-source\">Follow these instructions</a> For installing your copy</li>\n <li><a href=\"http://trac.turbogears.org/browser/trunk\">TG2 Trac's svn view</a> In case you need a quick look</li>\n <li><a href=\"http://groups.google.com/group/turbogears-trunk\"> Join the TG-Trunk Mail List</a> for TG2 discuss/dev </li>\n </ul>\n </div>\n</%def>\n\n<%def name=\"header()\">\n <div id=\"header\">\n \t<h1>\n \t\tWelcome OSTacct\n\t\t<span class=\"subtitle\">The Inschool Accounting Application</span>\n\t</h1>\n </div>\n</%def>\n<%def name=\"footer()\">\n \n <div class=\"foottext\">\n <p>School finance folks are super heros</p>\n </div>\n <div class=\"clearingdiv\"></div>\n</div>\n</%def>\n\n\n\n\n % if tg.auth_stack_enabled:\n <span>\n % if not request.identity:\n <li id=\"login\" class=\"loginlogout\"><a href=\"${tg.url('/login')}\">Login</a></li>\n % else:\n <li id=\"login\" class=\"loginlogout\"><a 
href=\"${tg.url('/logout_handler')}\">Logout</a></li>\n <li id=\"admin\" class=\"loginlogout\"><a href=\"${tg.url('/admin')}\">Admin</a></li>\n % endif\n </span>\n % endif\n\n\n\n</html>\n" }, { "alpha_fraction": 0.6448332071304321, "alphanum_fraction": 0.7066720724105835, "avg_line_length": 90.03704071044922, "blob_id": "080d884a4b64f8de6a777c5d20c8ca7cbaff5dd7", "content_id": "c7b9a72c02518174dce9521c24fe9c88f04e1148", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2458, "license_type": "no_license", "max_line_length": 195, "num_lines": 27, "path": "/ostacct/ostacct/templates/manuals.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<%inherit file=\"local:templates.master\"/>\n\n<%def name=\"title()\">\nLinks to Reference Manuals, Tables, and Miscellaneous\n</%def>\n <FORM><INPUT onclick=history.go(-1) type=button value=Back></FORM>\n\n <h2>Links to Manuals, Tables and other Reference Materials for new Finance Directors(let me know if link is bad)</h2>\n<h2><center> Manuals</center></h2>\n <p> <a href=\"/man/schacctman.pdf\">LEA Accounting Manual</a></p>\n <p> <a href=\"/man/CTASbudget2011.pdf\">CTAS 2011 Budget Workshop Manual</a></p>\n<p> <a href=\"/man/July 2011TISUAPM-MarkUp.pdf\">The new 2011 Internal School Uniform Accounting Policy Manual</a><font size= 1 color=\"red\" > Changes Highlighted --Added 7/23/2011</font></p>\n<p> <a href=\"/man/Summary of Revisions to TN Internal SchoolManual_1.doc\">Summary of Revisions to the Internal School Uniform Accounting Policy Manual</a></p>\n<h2><center> Reference Materials</center></h2>\n <p> <a href=\"http://www.comptroller1.state.tn.us/ca/chart.asp\">County Chart of Accounts</a></p>\n <p> <a href=\"/man/GASB54.pdf\">GASB 54 Crosswalk</a></p>\n <p> <a href=\"/man/IND COST RATES 2011-2012.pdf\">20011-2012 Indirect Cost Rates</a></p>\n <p> <a href=\"/man/FundBalRes.doc\">Fund Balance Resolution Sample needs to be passed every 
year</a></p>\n\n<p> <a href=\"/man/ARRA cheat sheet.xls\">spread sheet that Wesley created gives ARRA SSMS CFDA Account and sub fund info</a><font size= 1 color=\"red\" > Added 6/9/2011</font></p>\n<p> <a href=\"/man/2011-2012.xls\">2011-2012 salary schedule</a><font size= 1 color=\"red\" > Added 6/9/2011</font></p>\n<p> <a href=\"/man/2ndtier template.xls\">MOE template for testing 2012 budget includes formulas for 2nd tier</a><font size= 1 color=\"red\" > Added 6/9/2011</font></p>\n<p> <a href=\"/man/FTTTBUDGET_20CHANGE_20REQUEST_20FOR_20YEAR_20I_20Form[1]-1.docx\">FTTT Budget amendment request pre 2011 for summer 2011</a><font size= 1 color=\"red\" > Added 6/10/2011</font></p>\n<p> <a href=\"/man/FTTTBUDGETAMENDMENTWORKSHEET(1).xls\">FTTT Amendment worksheet sample</a><font size= 1 color=\"red\" > Added 6/10/2011</font></p>\n<p> <a href=\"/man/County_Fiscal_Year_End.pdf\">Local Government Closing Instructions</a><font size= 1 color=\"red\" > Added 6/13/2011</font></p>\n<p> <a href=\"/man/Combined_FYE.pptx\">Local Government Closing Power Point</a><font size= 1 color=\"red\" > Added 6/13/2011</font></p>\n<p> <a href=\"http://www.michie.com/tennessee/lpext.dll?f=templates&fn=main-h.htm&cp=tncode\">Lexis Nexis Tennessee Code</a></p>\n" }, { "alpha_fraction": 0.5995106101036072, "alphanum_fraction": 0.6178629398345947, "avg_line_length": 38.51612854003906, "blob_id": "ae053bf27d37c49c0b13fea275cb70e92bf17417", "content_id": "30c06036be5ad49f7c0bedd4b6cd787b270dbe98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2452, "license_type": "no_license", "max_line_length": 342, "num_lines": 62, "path": "/ostacct/data/templates/home/gutch/.python-eggs/tgext.admin-0.3.11-py2.6.egg-tmp/tgext/admin/templates/index.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = 
dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 1317650504.174423\n_template_filename='/home/gutch/.python-eggs/tgext.admin-0.3.11-py2.6.egg-tmp/tgext/admin/templates/index.mak'\n_template_uri='/home/gutch/.python-eggs/tgext.admin-0.3.11-py2.6.egg-tmp/tgext/admin/templates/index.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['title']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n models = context.get('models', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n\\n')\n # SOURCE LINE 5\n __M_writer(u'\\n\\n<div style=\"height:0px;\"> &nbsp; </div>\\n <h2>TurboGears Admin</h2>\\n This is a fully-configurable administrative tool to help you administer your website.\\n Below is links to all of your models.<br/> They will bring you to a listing of the objects\\n in your database.\\n\\n<table class=\"admin_grid\">\\n')\n # SOURCE LINE 14\n for model in models:\n # SOURCE LINE 15\n __M_writer(u' <tr py:for=\"model in models\">\\n <td>\\n <a href=\\'')\n # SOURCE LINE 17\n __M_writer(escape(model.lower()))\n __M_writer(u's/\\' class=\"edit_link\">')\n __M_writer(escape(model))\n __M_writer(u'</a>\\n </td>\\n </tr>\\n')\n pass\n # SOURCE LINE 21\n __M_writer(u'</table>\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 
3\n __M_writer(u'\\nTurbogears Administration System\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.5072386264801025, "alphanum_fraction": 0.5166849493980408, "avg_line_length": 42.52797317504883, "blob_id": "011f6ce7e233bfc9044258264dd2e63a61e08698", "content_id": "0986c75234fe7dd6140701bfa7d0c3fd5ceb04c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37375, "license_type": "no_license", "max_line_length": 556, "num_lines": 858, "path": "/ostacct/ostacct/controllers/root.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"Main Controller\"\"\"\nfrom tg import expose, flash, require, url, request, redirect,validate\nfrom pylons.i18n import ugettext as _, lazy_ugettext as l_\nfrom tgext.admin.tgadminconfig import TGAdminConfig\nfrom tgext.admin.controller import AdminController\nfrom repoze.what import predicates\nfrom ostacct.lib.base import BaseController\nfrom ostacct.model import Schools, Bank, Fund, Account, Obj,Sub,TempC,Activities, Typ, TheEvent, Receipt, Chart,User,Deposit, Payoree,LastRef,Transtype,Transact, Clubs, Doc, DBSession, metadata\nfrom ostacct import model\nfrom ostacct.controllers.secure import SecureController\n#from tgext.menu import navbar, sidebar, menu\nfrom pylons import tmpl_context as c\nfrom ostacct.controllers.error import ErrorController\nimport tw2.core as twc\nimport tw2.forms as twf\nimport tw2.dynforms as twd\nfrom sqlalchemy.orm import eagerload\nimport transaction\nfrom tw2.jqplugins.jqgrid.widgets.core import jqGridWidget\n#import tw2.jqplugins.jqgrid\n#from tw2.jquery.plugins.jqgrid.base import word_wrap_css\nimport re, datetime, calendar\nfrom datetime import datetime\nimport os\nimport shutil\nfrom pkg_resources import resource_filename\nimport simplejson as json\nfrom webob import Request\n\n__all__ = ['RootController']\ndef urlgetit():\n try:\n theurl=request.url\n 
usertest=int(theurl[theurl.find('=')+1:])\n except:\n usertest=0\n return usertest\n#*****************************************************************\n#NEW VENDOR FORM ****\n#******************************************************************\nclass Newvendor(twf.TableForm):\n title =\"New vendor\"\n class namesfs(twf.FieldSet):\n class child(twf.TableLayout):\n id='names'\n Vendor_name=twf.TextField(size=50, validator=twc.Required)\n Contact_name=twf.TextField(size=50)\n class addressfs(twf.FieldSet): \n class child(twf.TableLayout):\n id='address'\n #extra_reps = 1\n Address=twf.TextField(size=75)\n class citystatefs(twf.FieldSet):\n class child(twf.GridLayout):\n id='city_state'\n extra_reps = 1\n City=twf.TextField(size= 37)\n State=twf.TextField(size= 37)\n Zip=twf.TextField(size= 20)\n class For_feds(twf.FieldSet):\n class child(twd.HidingTableLayout):\n Type=twd.HidingRadioButtonList(label_text='Vendor type', options=('Service Only','Other'), \n mapping={'Service Only':[ 'FedId']} )\n FedId=twf.TextField(size=20)\n\n#*****************************************************************\n#PO FORM ****\n#******************************************************************\nclass PO(twf.TableForm):\n title =\"Purchase Order\"\n class venddate(twf.FieldSet):\n class child(twf.GridLayout):\n id='Po_number_and_date'\n extra_reps = 1\n Po_Date=twd.CalendarDatePicker()\n Po_number=twf.TextField(size= 20)\n class Vend(twf.FieldSet):\n class child(twf.TableLayout):\n id='Payee'\n options=DBSession.query(Payoree).all()#payoree.compname#.order_by(payoree.compname).all()\n opt=[]\n for r in options:\n opt.append(r.compname)\n Payee=twf.SingleSelectField(options=opt, label_text='Vendor')\n class Purchase(twf.FieldSet):\n class child(twd.GrowingGridLayout):\n Description = twf.TextField( size=45)\n Price = twf.TextField(size=20)\n Quantity = twf.TextField(size=20)\n Amount = twf.TextField(size=20)\n class TotalPurchase(twf.TextField):\n size=20\n class Charge_to(twf.FieldSet):\n 
class child(twd.GrowingGridLayout):\n options=DBSession.query(Account).all()#payoree.compname#.order_by(payoree.compname).all()\n opt1=[]\n for r in options:\n opt1.append(r.AcctNo + \" - \"+ r.Description)\n Account = twf.SingleSelectField(options=opt1, label_text='Accounts')\n Amount = twf.TextField(size=20)\n class Total_chart_to(twf.TextField):\n size=20 \n \n#*****************************************************************\n#Accountl setup ****\n#******************************************************************\nclass curS():\n curSchool=\"\"\n\nclass chartsel( twf.TableForm):\n\n class child(twd.Shuttle):\n id='chartsel'\n label=\"testing\"\n default_selected=[] \n options=[]\n #size=250\n #*****************************************************************\n#school jqgrid setup ****\n#****************************************************************** \nclass school(jqGridWidget): \n entity=DBSession.query(Schools).order_by(Schools.name)\n id='grid' \n rows=15#kwargs.get('rows')\n page=1#kwargs.get('page')\n sord='desc'#kwargs.get('sord')\n name='schoolname'#kwargs.get('sidx')\n offset = (int(page)-1) * int(rows)\n #if (query):\n #d = {qtype:query}\n # allschools = DBSession.query(Schools).filter_by(**d)\n #else:\n allschools = DBSession.query(Schools).order_by(Schools.name)\n records = allschools.count()\n total=int(records)/int(rows)\n totaltest=total*rows\n if totaltest>total:total=total+1\n total=-1*-int(total)\n pageschools=allschools\n rows=[{'id':schools.id,'schoolname':schools.name, 'address':schools.address, 'city':schools.city, 'state':schools.state,\n 'zip':schools.zipcode, 'gradespan':schools.gradespan, \n 'accounts':\"<input style='height:22px;width:35px;' type='button' value='Acct' id=%s\" %\"t\"+str(schools.id)+ \" onclick=\"'\"return eacct(this)\"'\" />\",\n 'activities': \"<input style='height:22px;width:35px;' type='button' value='Club' id=%s\" %\"c\"+str(schools.id)+ \" onclick=\"'\"return eact(this)\"'\" />\",\n 'clubs': \"<input 
style='height:22px;width:35px;' type='button' value='Ath' id=%s\" %\"a\"+str(schools.id)+ \" onclick=\"'\"return eclub(this)\"'\" />\"\n }for schools in pageschools]\n \n # rows = [{'id' : str(schools.id),\n # 'cell':\t[schools.name,schools.address,schools.city,schools.state,\n # schools.zipcode,schools.gradespan,\n # ]}for schools in pageschools] \n options = {\n 'pager' : 'module-0-demo_pager', \n 'caption' : 'Schools',\n 'data' : rows,\n 'datatype' : 'local',\n 'colNames':[ 'School name','Address','City','State','Zip','Gradespan', 'Accounts', \"Activities\", \"Clubs\"],\n 'colModel' : [\n {'name':'schoolname', 'index':'schoolname', 'width':190, 'align':'left'},\n {'name':'address', 'index':'address', 'width':190, 'align':'left'},\n {'name':'city', 'index':'city', 'width':160, 'align':'left'},\n {'name':'state', 'index':'state', 'width':40, 'align':'left'},\n {'name':'zip', 'index':'zip', 'width':60, 'align':'left'},\n {'name':'gradespan', 'index':'gradespan', 'width':85, 'align':'center'},\n {'name':'accounts', 'index':'accounts', 'width':85, 'align':'center', 'sort':False},\n {'name':'activities', 'index':'activities', 'width':85, 'align':'center', 'sort':False},\n {'name':'clubs', 'index':'clubs', 'width':85, 'align':'center', 'sort':False},\n ],\n 'rowNum':15,\n 'rowList':[10,20,50],\n 'viewrecords':True,\n 'imgpath': 'scripts/jqGrid/themes/green/images',\n 'width': 900,\n 'height': 'auto',\n\n }\n\n\nclass Event(twf.TableForm):\n action='advent'\n Event_Date=twf.CalendarDatePicker(validator=twc.DateValidator)\n Event_Type=twf.RadioButtonList(options= (\"E_Report Due\", \"FACTS report due\", \"Printed Report Due\",\"Emailed report due\", \"Local Finance Event\", \"Federal Programs Event\", \"Application Due\", \"Other Event\"),value=\"Application Due\",validator=twc.Required )\n Description=twf.TextField(size=45,validator=twc.Required)\n class Link_a_Doc(twf.FieldSet):\n class child(twf.GridLayout):\n id=\"Link_a_Document\"\n extra_reps = 1\n 
upbut=twf.Button(value= \"Upload Doc\", attrs = {'onclick': 'window.location =\"/uploadeventdoc\"'})\n model.DBSession.flush()\n options=None \n options=DBSession.query(Doc).all()\n opt=[]\n for r in options:\n opt.append(str(r.name))\n Select_a_Document_for_Event=twf.SingleSelectField(options=opt, label_text='Select a document to link to the event') \n\nclass EditEvent(twf.TableForm):\n action='edvent'\n event_date=None\n Event_Type=None\n Description=None\n thisevent=None\n #valevent= request.environ['QUERY_STRING']\n valevent=urlgetit()\n try: \n\t #if valevent:valevent=int(valevent[valevent.find('=')+1:]) \n\t if valevent: thisevent=DBSession.query(TheEvent).filter_by(id=valevent).one()\n\t Event_Date=twf.CalendarDatePicker(validator=twc.Required,value=str(thisevent.month)+\"/\"+str(thisevent.day)+\"/\"+str(thisevent.year))\n\t Event_Type=twf.RadioButtonList(options= (\"E_Report Due\", \"FACTS report due\", \"Printed Report Due\",\"Emailed report due\", \"Local Finance Event\", \t \"Federal Programs Event\", \"Application Due\", \"Other Event\"),validator=twc.Required,value=thisevent.typ )\n\t Description=twf.TextField(size=45,validator=twc.Required, value=thisevent.Description)\n except:\n pass \n\tclass Link_a_Doc(twf.FieldSet):\n\t class child(twf.GridLayout):\n\t\t id=\"Link_a_Document\"\n\t\t extra_reps = 1\n valevent=0\n valevent=urlgetit()\n print valevent\n upbut=twf.Button(value= \"Upload Doc\", attrs = {'onclick': 'window.location =\"/uploadeventdoc\"'})\n model.DBSession.flush()\n options=None \n options=DBSession.query(Doc).all()\n opt=[]\n if valevent>0:\n thisevent=DBSession.query(TheEvent).filter_by(id=int(valevent)).one() \n for r in options:\n opt.append(str(r.name))\n Select_a_Document_for_Event=twf.SingleSelectField(options=opt, label_text='Select a document to link to the event',value=thisevent.doc) \n else:\n Select_a_Document_for_Event=twf.SingleSelectField(options=[], label_text='Select a document to link to the event')\n \nclass 
uploaddoc(twf.TableForm):\n action='uploadget'\n title = 'Doc_upload'\n class doc(twf.TableLayout):\n backbut=twf.Button(value= \"Back to Add Event\", attrs = {'onclick': 'window.location =\"/AddEvent\"'})\n Description=twf.TextField(size=100)\n FileLbl=twf.Label(text=\"_ will be replaced in file name with spaces\")\n filename=twf.FileField()\n\n\n#*****************************************************************\n#event calendar jqgrid setup ****\n#****************************************************************** \nclass eventgrid(jqGridWidget): \n #entity=DBSession.query(TheEvent).order_by(TheEvent.year,TheEvent.month,TheEvent.day)\n id='grid'\n # if kwargs: \n # rows=kwargs.get('rows')\n # page=kwargs.get('page')\n # sord=kwargs.get('sord')\n # name=kwargs.get('sidx')\n #else:\n rows=15\n page=1\n sord='Date'\n name='Date'\n offset = (int(page)-1) * int(rows)\n #if (query):\n #d = {qtype:query}\n # allschools = DBSession.query(Schools).filter_by(**d)\n #else:\n model.DBSession.flush()\n allevents=None\n allevents = DBSession.query(TheEvent).order_by(TheEvent.year,TheEvent.month,TheEvent.day)\n #url=\"\"\n records = allevents.count()\n total=int(records)/int(rows)\n totaltest=total*rows\n if totaltest>total:total=total+1\n total=-1*-int(total)\n pageevents=allevents\n rows=[{'id':events.id,'Date':str(events.month) +\"/\" + str(events.day) + \"/\" + str(events.year), 'Type':events.typ, 'Description':events.Description,'Document':\"<a href= /eventdoc/\" + (json.dumps(str(events.doc))[json.dumps(str(events.doc)).find(\",\")+1:json.dumps(str(events.doc)).rfind(\",\")].strip(\"'\"))+ \"/\" +json.dumps(str(events.doc))[json.dumps(str(events.doc)).find(\",\")+1:json.dumps(str(events.doc)).rfind(\",\")].strip(\"'\") +\">\"+json.dumps(str(events.doc))[json.dumps(str(events.doc)).find(\",\")+1:json.dumps(str(events.doc)).rfind(\",\")].strip(\"'\")\n+\"</a>\",\n'Edit':\"<input style='height:22px;width:35px;' type='button' value='Edit' id=%s\" %str(events.id)+ \" 
onclick=\"'\"return edit(this)\"'\" />\",\n'Delete':\"<input style='height:22px;width:45px;' type='button' value='Delete' id=%s\" %str(events.id)+ \" onclick=\"'\"return delevent(this)\"'\" />\"\n }for events in pageevents]\n \n\n options = {\n 'pager' : 'module-0-demo_pager', \n 'caption' : 'Events',\n \n 'data' : rows,\n 'datatype' : 'local',\n 'colNames':[ 'Date','Event Type','Event Desc','Events Documents','Edit','Delete'],#, '’showlink’Accounts', \"Activities\", \"Clubs\"],\n 'colModel' : [\n # {'name':'Month', 'index':'Month', 'width':40, 'align':'left'},\n {'name':'Date', 'index':'Date', 'width':60, 'align':'left'},\n #{'name':'Year', 'index':'Year', 'width':60, 'align':'left'},\n {'name':'Type', 'index':'Type', 'width':150, 'align':'left'},\n {'name':'Description', 'index':'Description', 'width':350, 'align':'left'},\n {'name':'Document', 'index':'Document', 'width':200, 'align':'left'},\n {'name':'Edit', 'index':'Edit', 'width':85, 'align':'center', 'sort':False},\n {'name':'Delete', 'index':'Delete', 'width':85, 'align':'center', 'sort':False},\n ],\n 'rowNum':15,\n 'rowList':[10,20,50],\n 'viewrecords':True,\n 'imgpath': 'scripts/jqGrid/themes/green/images',\n 'width': 900,\n 'height': 'auto',\n \n }\n \n \nclass RootController(BaseController):\n \"\"\"\n The root controller for the ostacct application.\n\n All the other controllers and WSGI applications should be mounted on this\n controller. 
For example::\n\n panel = ControlPanelController()\n another_app = AnotherWSGIApplication()\n\n Keep in mind that WSGI applications shouldn't be mounted directly: They\n must be wrapped around with :class:`tg.controllers.WSGIAppController`.\n\n \"\"\"\n secc = SecureController()\n\n admin = AdminController(model, DBSession, config_type=TGAdminConfig)\n\n error = ErrorController()\n \n #@navbar('index',sortorder=0 )\n @expose('ostacct.templates.index')\n def index(self):\n \"\"\"Handle the front-page.\"\"\"\n return dict(page='index')\n \n # @navbar('WxPyETn')\n @expose('ostacct.templates.WxPyETn')\n def WxPyETn(self):\n \"\"\"Handle the 'about' page.\"\"\"\n return dict(page='about')\n \n @expose('ostacct.templates.accessit')\n def accessit(self):\n \"\"\"Links to access forms.\"\"\"\n return dict(page='forms')\n\n @expose('ostacct.templates.SamAFR')\n def SamAFR(self):\n \"\"\"link to sample AFR's\"\"\"\n return dict(page='Sample AFR')\n\n\n @expose('ostacct.templates.manuals')\n def manuals(self):\n \"\"\"Link to manuals for new finance directors.\"\"\"\n return dict(page='Manuals')\n\n\n @expose('ostacct.templates.workshops')\n def workshops(self):\n \"\"\"Link to manuals for new finance directors.\"\"\"\n return dict(page='Workshops')\n\n @expose('ostacct.templates.data')\n @expose('json')\n def data(self, **kw):\n \"\"\"This method showcases how you can use the same controller for a data page and a display page\"\"\"\n return dict(params=kw)\n\n @expose('ostacct.templates.authentication')\n def auth(self):\n \"\"\"Display some information about auth* on this application.\"\"\"\n return dict(page='auth')\n\n @expose('ostacct.templates.index')\n @require(predicates.has_permission('manage', msg=l_('Only for managers')))\n def manage_permission_only(self, **kw):\n \"\"\"Illustrate how a page for managers only works.\"\"\"\n return dict(page='managers stuff')\n\n @expose('ostacct.templates.index')\n @require(predicates.is_user('editor', msg=l_('Only for the 
editor')))\n def editor_user_only(self, **kw):\n \"\"\"Illustrate how a page exclusive for the editor works.\"\"\"\n return dict(page='editor stuff')\n\n @expose('ostacct.templates.login')\n def login(self, came_from=url('/')):\n \"\"\"Start the user login.\"\"\"\n login_counter = request.environ['repoze.who.logins']\n if login_counter > 0:\n flash(_('Wrong credentials'), 'warning')\n return dict(page='login', login_counter=str(login_counter),\n came_from=came_from)\n\n @expose()\n def post_login(self, came_from='/'):\n \"\"\"\n Redirect the user to the initially requested page on successful\n authentication or redirect her back to the login page if login failed.\n\n \"\"\"\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)\n\n @expose()\n def post_logout(self, came_from=url('/')):\n \"\"\"\n Redirect the user to the initially requested page on logout and say\n goodbye as well.\n\n \"\"\"\n flash(_('We hope to see you soon!'))\n redirect(came_from)\n#++++++++START MY CON@TROLLERS+++++++++++++\n #@navbar('local Finance || calendar event')\n @expose('ostacct.templates.eventcal')\n def cal(self, *args, **kw):\n calendar.setfirstweekday(6)\n year = ['January', \n 'February', \n 'March', \n 'April', \n 'May', \n 'June', \n 'July', \n 'August', \n 'September', \n 'October', \n 'November', \n 'December'] \n \n today = datetime.date(datetime.now()) \n current = re.split('-', str(today)) \n current_no = int(current[1]) \n current_month = year[current_no-1] \n current_day = int(re.sub('\\A0', '', current[2])) \n current_yr = int(current[0]) \n month = calendar.monthcalendar(current_yr, current_no) \n nweeks=len(month)\n weeklist=[]\n for w in range(0,nweeks): \n\t\t\tweeklist.append(month[w])\n #event = 
DBSession.query(Event).filter_by(month=int(current_month)).all()\n if kw:\n\n if int(kw['month'])>current_no:\n current_no=int(kw['month'])\n current_yr=int(kw['year'])\n current_day=50\n month= calendar.monthcalendar(current_yr, current_no) \n nweeks=len(month)\n current_month = year[current_no-1] \n weeklist=[]\n for w in range(0,nweeks): \n weeklist.append(month[w])\n else:\n if int(kw['year'])>current_yr:\n\t\t current_no=int(kw['month'])\n\t\t current_yr=int(kw['year'])\n\t\t current_day=50\n\t\t month= calendar.monthcalendar(current_yr, current_no) \n\t\t nweeks=len(month)\n\t\t current_month = year[current_no-1] \n\t\t weeklist=[]\n\t\t for w in range(0,nweeks): \n\t\t weeklist.append(month[w])\n\t\t \n even = DBSession.query(TheEvent).filter_by(month=int(current_no)).all()\n events=[]\n rawevents=[]\n doneday=[]\n dontdouble=False\n for row in even:\n if row:\n if row.doc:\n doclink=row.doc.name\n doclinkdesc=row.doc.description\n public_dirname = os.path.join(os.path.abspath(resource_filename('ostacct', 'public')))\n doc_dirname = os.path.join(public_dirname, 'eventdoc')\n else: doclink=None\n #doclink = os.path.join(doc_dirname, str(row.docs))\n rawevents.append([row.month,row.day,row.year,row.typ,row.Description,doclink]) \n else:\n doc=\"something\"\n doclink=\"\"\n doclinkdesc=\"\"\n rawevents.append([row.month,row.day,row.year,row.typ,row.Description,doclink])\n doneit=False\n dontdouble=[] \n for m, d, y,typ,Desc,doc in rawevents:\n Thisday=d\n devent=[] \n for m, d, y,typ,Desc,doc in rawevents: \n if d==Thisday:\n devent.append((typ,Desc,doc))\n devent.append((\"-----------------------\",\"\",\"\"))\n for row in dontdouble:\n if row==Thisday:\n doneit=True\n dontdouble.append(Thisday)\n if doneit==False: \n events.append((m,Thisday,y,devent))\n #events.append((\"--\",\"--\",\"----\",\"-----\"))\n doneit=False \n #events.append([row.month.day,row.year,row.typ,row.Description,doclink]) \n return dict(page='Calendar', year=year, today=today, 
current=current, current_no=current_no, \n current_month=current_month, current_day=current_day, current_yr=current_yr, \n month = month, nweeks = nweeks , weeklist=weeklist,events=events)\n\n @require(predicates.has_permission('calendar', msg=l_('Only for Calendar Maintainers'))) \n @expose('ostacct.templates.eventedit')\n #@validate(Event, error_handler=AddEvent) \n def AddEvent(self, *args, **kwargs):\n\n \n c.form=Event()\n \n\n return dict(page = \"Add a New Event\")\n \n @require(predicates.has_permission('calendar', msg=l_('Only for Calendar Maintainers')))\n @expose('ostacct.templates.eventedit')\n def eventedit(self, *args, **kwargs):\n\n\n EditEvent.valevent=urlgetit()\n c.form=EditEvent()\n return dict(page = \"Edit an Event\")\n\n @expose('ostacct.templates.eventedit.mak')\n #@validate(Event, error_handler=AddEvent) \n def advent(self, *args, **kw):\n if kw:\n edate=re.split('/', str(kw['Event_Date']))\n emonth=int(edate[0])\n eday=int(edate[1])\n eyear=int(edate[2])\n C=model.TheEvent()\n C.month=emonth\n C.day=eday\n C.year=eyear\n if kw['Event_Type']: C.typ=kw['Event_Type']\n C.Description=kw['Description']\n try:\n adddoc=DBSession.query(Doc).filter_by(name=kw[\"Link_a_Document:0:Select_a_Document_for_Event\"]).one()\n except:\n adddoc=None\n C.doc=adddoc\n model.DBSession.add(C)\n transaction.commit()\n model.DBSession.flush()\n flash(kw['Description'] +\" was successfully added\")\n redirect(\"eventlist\")\n else: \n flash(\"some sort of problem\")\n redirect(\"AddEvent\")\n\n\n @expose('ostacct.templates.simple_mako.mak')\n def delevent(self, *args, **kw):\n try:\n valevent= urlgetit()\n \t if valevent:\n for d in DBSession.query(TheEvent).filter_by(id=int(valevent)):\n DBSession.delete(d)\n transaction.commit()\n model.DBSession.flush()\n msg= name + \"was deleted\"\n flash(msg)\n redirect(\"eventlist\")\n except:\n msg= \" Oh shit\"\n flash(msg)\n redirect('eventlist')\n return dict( page='Deleting an Event',msg=msg)\n \n\n 
@expose('ostacct.templates.eventedit.mak')\n #@validate(Event, error_handler=eventedit) \n def edvent(self, *args, **kw):\n try:\n valevent= urlgetit()\n \t #if valevent:valevent=int(valevent[valevent.find('=')+1:]) \n \t if valevent: thisevent=DBSession.query(TheEvent).filter_by(id=valevent).one()\n for d in DBSession.query(TheEvent).filter_by(id=int(ThisEvent)):\n DBSession.delete(d)\n transaction.commit()\n model.DBSession.flush()\n edate=re.split('/', str(kw['Event_Date']))\n emonth=int(edate[0])\n eday=int(edate[1])\n eyear=int(edate[2])\n C=model.TheEvent()\n C.month=emonth\n C.day=eday\n C.year=eyear\n if kw['Event_Type']: C.typ=kw['Event_Type']\n C.Description=kw['Description']\n try:\n adddoc=DBSession.query(Doc).filter_by(name=kw[\"Link_a_Document:0:Select_a_Document_for_Event\"]).one()\n except:\n adddoc=None\n C.doc=adddoc\n model.DBSession.add(C)\n transaction.commit()\n model.DBSession.flush()\n flash(kw['Description'] +\" was successfully added.--Add another.\")\n redirect(\"AddEvent\")\n except: \n flash(\"some sort of problem\")\n redirect(\"AddEvent\") \n\n\n @expose('ostacct.templates.ostacct')\n def uploadeventdoc(self, *args, **kw):\n c.form =uploaddoc()\n return dict(page = \"Upload a file to link from event calendar\")\n\n @expose('ostacct.templates.ostacct')\n def uploadget(self, *args, **kw):\n if kw:\n public_dirname = os.path.join(os.path.abspath(resource_filename('ostacct', 'public')))\n doc_dirname = os.path.join(public_dirname, 'eventdoc')\n fn=kw['doc:filename'].filename\n fn=str(fn).replace(\" \",\"\")\n C= model.Doc()\n C.name=fn \n C.description=kw['doc:Description']\n model.DBSession.add(C)\n transaction.commit() \n model.DBSession.flush()\n doc=DBSession.query(Doc).filter_by(name=str(fn)).one()\n doc_path = os.path.join(doc_dirname, str(doc.name))\n try:\n os.makedirs(doc_path)\n except OSError:\n #ignore if the folder already exists\n pass\n doc_path = os.path.join(doc_path, str(fn))\n f = file(doc_path, \"w\")\n 
f.write(kw['doc:filename'].value)\n f.close()\n newdoc_path=os.path.join(public_dirname, 'eventdoc')\n newdoc_path=os.path.join(newdoc_path, str(fn))\n shutil.copyfile(doc_path, fn)\n ostacct.remove_value('AddEvent')\n \n flash(\"Dccument was successfully created.\")\n redirect(\"AddEvent\")\n else:\n flash(\"some sort of problem\")\n redirect(\"upload\")\n\n @expose('ostacct.templates.eventlist')\n def eventlist(self, *args, **kwargs):\n c.form = eventgrid()\n return dict( page='Event List')\n\n \n #@navbar('Vendor' )\n @expose('ostacct.templates.ostacct')\n def Vendor(self, *args, **kw):\n c.form = Newvendor#().req()\n return dict( page='vendor')\n\n\n #@navbar('PO' )\n @expose('ostacct.templates.ostacct.mak')\n def PO(self, *args, **kw):\n c.form = PO#().req()\n return dict( page='Purchase order')\n \n @expose('ostacct.templates.chart')\n def Chart(self, *args, **kwargs):\n curS.curSchools=kwargs\n self.data=kwargs\n tempsubAll=[]\n subfundsel=[]\n subfundAll=[]\n if kwargs:\n if 'schools_id' in kwargs:\n curSchool=kwargs['schools_id'][1:]\n subtype=kwargs[\"schools_id\"][0]\n if subtype==\"c\":\n flabel=\"Club Sub Funds\"\n selSchool = DBSession.query(Clubs).filter_by(schools_id=int(curSchool)).order_by(Clubs.SubNo).all()\n if subtype==\"a\":\n flabel=\"Athletic Sub Funds\"\n selSchool = DBSession.query(Activities).filter_by(schools_id=int(curSchool)).order_by(Activities.SubNo).all()\n if subtype==\"t\":\n flabel=\"Accounts\"\n selSchool = DBSession.query(Chart).filter_by(schools_id=int(curSchool)).order_by(Chart.AcctNo).all() \n for a in selSchool:\n subfundsel.append(str(a.accounts.AcctNo)+\" \" +str(a.accounts.Description))\n for r in DBSession.query(Account).order_by(Account.AcctNo).all():\n includeit=True\n for w in subfundsel:\n if int(w[:4])==int(r.AcctNo):\n includeit=False\n if includeit==True and int(r.AcctNo)>1159: subfundAll.append(str(r.AcctNo)+\" \" +str(r.Description))\n \n if subtype==\"c\" or subtype==\"a\": \n for a in selSchool:\n 
subfundsel.append(str(a.SubNo)+\" \" +str(a.subs.Description))\n for r in DBSession.query(Sub).order_by(Sub.SubNo).all():\n if subtype==\"c\" and int(r.SubNo)>799 and int(r.SubNo)<900:\n includeit=True\n for w in subfundsel:\n if int(w[:4])==int(r.SubNo):\n includeit=False\n if includeit==True: subfundAll.append(str(r.SubNo)+\" \" +str(r.Description)) \n if subtype==\"a\" and int(r.SubNo)>599 and int(r.SubNo)<700:\n includeit=True\n for w in subfundsel:\n if int(w[:4])==int(r.SubNo):\n includeit=False\n if includeit==True: subfundAll.append(str(r.SubNo)+\" \" +str(r.Description))\n if 'chartsel' in kwargs:\n forFlash=[]\n NEWClubs=[]\n tClub=[]\n #nSchool=[]\n nSchool=int(curS.curSchools['schools_id'][1:])#int(curSchool)\n subtype=subtype=curS.curSchools[\"schools_id\"][0]#str(subtype)\n nSchool1=DBSession.query(Schools).filter_by(id=int(nSchool)).one()\n if subtype==\"c\" or subtype==\"a\":\n for r in DBSession.query(Sub).order_by(Sub.SubNo).all():\n if subtype==\"c\" and int(r.SubNo)>799 and int(r.SubNo)<900:\n tClub.append(r)\n if subtype==\"a\" and int(r.SubNo)>599 and int(r.SubNo)<700:\n tClub.append(r)\n if subtype==\"t\":\n for r in DBSession.query(Account).order_by(Account.AcctNo).all():\n tClub.append(r)\n select=kwargs['chartsel'][0:-1].split(',')\n Items=()\n lkw=len(select)\n #Items=pickle.dumps(kw['itemselector'])\n Items=(str(kwargs['chartsel'])[1:lkw])\n if lkw>0:\n i=0\n while i <lkw:\n if subtype==\"c\" or subtype==\"a\":\n if select[i][0:3] !=\"\":\n nClub=select[i][0:3]\n else:\n i=i+1\n nClub=select[i][0:3]\n if subtype==\"t\":\n if select[i][0:4] !=\"\":\n nClub=select[i][0:4]\n else:\n nClub=select[i][0:4]\n NEWClubs.append(nClub)\n if subtype==\"c\" or subtype==\"a\" and nClub != \"\":Club1=DBSession.query(Sub).filter_by(SubNo=str(nClub)).one()\n if subtype==\"t\":\n Acct1=DBSession.query(Account).filter_by(AcctNo=str(nClub)).one()\n if subtype==\"c\":\n 
schclubs=DBSession.query(Schools).options(eagerload('clubs')).filter_by(id=int(nSchool)).one()\n schclubClubs=schclubs.clubs\n if subtype==\"a\":\n schclubs=DBSession.query(Schools).options(eagerload('activities')).filter_by(id=int(nSchool)).one()\n schclubClubs=schclubs.activities\n if subtype==\"t\":\n schclubs=DBSession.query(Schools).options(eagerload('charts')).filter_by(id=int(nSchool)).one()\n schclubClubs=schclubs.charts\n doit=True\n for club in schclubClubs:\n if str(club)==str(nClub):\n doit=False\n if doit==True:\n if subtype==\"c\": C=model.Clubs()\n if subtype==\"a\": C=model.Activities()\n if subtype==\"t\": C=model.Chart()\n if subtype==\"c\" or subtype==\"a\":\n C.SubNo=str(nClub)\n C.subs_id=int(Club1.id)\n C.schools_id=nSchool1.id\n if subtype==\"t\":\n #C.AcctNo=str(nClub)\n if Acct1.SchoolWide==True and Acct1.SchoolActivity==False:C.fundid=1\n if Acct1.SchoolWide==False and Acct1.SchoolActivity==True:C.fundid=2\n if Acct1.SchoolWide==True and Acct1.SchoolActivity==True:C.fundid=3\n C.AcctNo=str(nClub)\n C.accounts_id=int(Acct1.id)\n C.schools_id=int(nSchool1.id)\n model.DBSession.add(C)\n i=i+1\n transaction.commit()\n model.DBSession.flush()\n if subtype==\"c\":\n for d in DBSession.query(Clubs).filter_by(schools_id=int(nSchool)):\n deleteit=True\n if len(NEWClubs)>0:\n for row in NEWClubs:\n if row <>None:\n if int(d.SubNo)==int(row):\n deleteit=False\n if deleteit==True:\n DBSession.delete(d)\n transaction.commit()\n model.DBSession.flush() \n\n if subtype==\"a\":\n for d in DBSession.query(Activities).filter_by(schools_id=int(nSchool)):\n deleteit=True\n if len(NEWClubs)>0:\n for row in NEWClubs:\n if row <>None:\n if int(d.SubNo)==int(row):\n deleteit=False\n if deleteit==True:\n DBSession.delete(d)\n transaction.commit()\n model.DBSession.flush()\n if subtype==\"t\":\n always=[1,20,15,16,17,18,19,6,7,8,9,10,11,12,13,14]\n forFlash=[1100,1170,1410,1420,1430,1810,1910,2100,2300,2600,3200,3300,3400,3500,3600,3999]\n for d in 
DBSession.query(Chart).filter_by(schools_id=int(nSchool)):\n \n deleteit=True\n if len(NEWClubs)>0:\n for row in NEWClubs:\n if row <>None:\n if int(d.accounts.AcctNo)==int(row):\n deleteit=False\n else:\n for z in always:\n if d.accounts.id==z:\n deleteit=False\n for tt in forFlash:\n if int(row)==int(tt):forFlash.remove(tt)\n if deleteit==True:\n DBSession.delete(d)\n transaction.commit()\n model.DBSession.flush()\n if len(forFlash)>0:\n strFlash=\"\"\n for row in forFlash:\n strFlash=strFlash+str(row)\n flash(\"You tried to delete the following accounts that cannot be deleted!--- \" + strFlash)\n\n chartsel.child.default_selected=subfundsel \n chartsel.child.options=subfundAll\n c.form = chartsel()\n schoolnam=DBSession.query(Schools).filter('id=%i' %int(curSchool)).one()\n return dict( page=schoolnam.name)\n \n \n #@navbar('schools' ) \n @expose('ostacct.templates.schools')\n def schools(self, *args, **kw):\n c.form = school()\n \n return dict( page='chart')\n \n \n \n" }, { "alpha_fraction": 0.5827450752258301, "alphanum_fraction": 0.5973856449127197, "avg_line_length": 34.39814758300781, "blob_id": "81fedec866d29d5c8bd37d4b38d87d73af646a6d", "content_id": "46ff43df8e83ab7c2b86d58a9c4a2a14dc14e8c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3825, "license_type": "no_license", "max_line_length": 177, "num_lines": 108, "path": "/ostacct/data/templates/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/edit.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 
1317668287.035789\n_template_filename='/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/edit.mak'\n_template_uri='/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/edit.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['header', 'body_class', 'title']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n # SOURCE LINE 2\n ns = runtime.TemplateNamespace(u'menu_items', context._clean_inheritance_tokens(), templateuri=u'tgext.crud.templates.menu_items', callables=None, calling_uri=_template_uri)\n context.namespaces[(__name__, u'menu_items')] = ns\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n value = context.get('value', UNDEFINED)\n model = context.get('model', UNDEFINED)\n tmpl_context = context.get('tmpl_context', UNDEFINED)\n menu_items = _mako_get_namespace(context, 'menu_items')\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n')\n # SOURCE LINE 2\n __M_writer(u'\\n\\n')\n # SOURCE LINE 6\n __M_writer(u'\\n\\n')\n # SOURCE LINE 8\n __M_writer(u'\\n')\n # SOURCE LINE 12\n __M_writer(u'\\n <div id=\"main_content\">\\n ')\n # SOURCE LINE 14\n __M_writer(escape(menu_items.menu_items('../../')))\n __M_writer(u'\\n <div style=\"float:left;\" class=\"crud_edit\">\\n <h2 style=\"margin-top:1px;\">Edit ')\n # SOURCE LINE 16\n __M_writer(escape(model))\n __M_writer(u'</h2>\\n ')\n # SOURCE LINE 17\n __M_writer(tmpl_context.widget(value=value, action='./') )\n __M_writer(u'\\n 
</div>\\n <div style=\"height:0px; clear:both;\"> &nbsp; </div>\\n </div> <!-- end main_content -->\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_header(context):\n context.caller_stack._push_frame()\n try:\n parent = context.get('parent', UNDEFINED)\n menu_items = _mako_get_namespace(context, 'menu_items')\n __M_writer = context.writer()\n # SOURCE LINE 9\n __M_writer(u'\\n ')\n # SOURCE LINE 10\n __M_writer(escape(menu_items.menu_style()))\n __M_writer(u'\\n ')\n # SOURCE LINE 11\n __M_writer(escape(parent.header()))\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_body_class(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 8\n __M_writer(u'tundra')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context):\n context.caller_stack._push_frame()\n try:\n model = context.get('model', UNDEFINED)\n tmpl_context = context.get('tmpl_context', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 4\n __M_writer(u'\\n')\n # SOURCE LINE 5\n __M_writer(escape(tmpl_context.title))\n __M_writer(u' - ')\n __M_writer(escape(model))\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.5199999809265137, "alphanum_fraction": 0.5400000214576721, "avg_line_length": 24, "blob_id": "428cae5b32b8087a47a7d6c5ea2efe6fd0dd4a43", "content_id": "009d7be4d74b62cf06fd93daba4366214dee7717", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 25, "num_lines": 2, "path": "/ostacct/ostacct/__init__.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"The ostacct package\"\"\"\n" }, { "alpha_fraction": 0.687252938747406, "alphanum_fraction": 0.7139328122138977, "avg_line_length": 56.82857131958008, "blob_id": 
"f31e3bc86bf5927908213d31a906035797dd3a9d", "content_id": "3f19b75785a5c157df38776d07cae9204d71397e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2024, "license_type": "no_license", "max_line_length": 477, "num_lines": 35, "path": "/ostacct/ostacct/templates/index.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<%inherit file=\"local:templates.master\"/>\n\n<%def name=\"title()\">\n Welcome to OpenSchoolTech.com: A worthy effort at greatness\n</%def>\n\n${parent.sidebar_top()}\n\n<div id=\"getting_started\">\n <h2>openschooltech (what is it?)</h2>\n <p>Just a name -- A Tennessee DOE Fiscal Consultant Picked for his web-site. </p>\n <ol id=\"what_is_it\">\n <li class=\"getting_started\">\n <h3>Why does he need a web-site?</h3>\n <p> Its me; Ron Adelman and my hobby is developing computer applications and I wanted to make them available even if they aren't ever used. I know they are useful and until you try them you won't ever know how useful they really are. Also, the DOE web-site seems to have been neglected for Local Finance stuff so I have some links to some reference material. If you can think of some vague hard to find links or reference material I have omitted, then please email me. </p>\n </li>\n\n <li class=\"getting_started\">\n <h3>What else?</h3>\n <p> That is all for now, but work is ongoing for an opensource webbased school accounting application\n\t as-well-as a church web-site that will highlight the various ministries within the church. <br>\n\nAs of 09/16/2011 I haven't done any work on the school accounting application, but am picking up some knowledge that could be used in that endeavor when I get back to it.<br><br> Will probably get back to it now. I just applied for Medicare and want this done when I retire, but first I need to set up some pages for Grandchildren pictures. 
This is the youngest below.\n </p>\n<img src=\"pictures/jack/JK439.jpg\" height=150 width= 120 border=\"0\"> </a>\n<img src=\"pictures/jack/JK474.jpg\" height=150 width= 120 border=\"0\"> </a>\n<img src=\"pictures/jack/JK497.jpg\" height=150 width= 120 border=\"0\"> </a>\n<img src=\"pictures/jack/JK501.jpg\" height=150 width= 120 border=\"0\"> </a>\n </ol>\n</div>\n<div class=\"clearingdiv\" />\n<div class=\"notice\"> My mission for retirement is to give my wants priority over my needs.\n</div>\n\n<%def name=\"sidebar_bottom()\"></%def>\n" }, { "alpha_fraction": 0.5729741454124451, "alphanum_fraction": 0.5904173254966736, "avg_line_length": 36.41322326660156, "blob_id": "5a996a95fd428946813e2a4b9da31f1404d6a59a", "content_id": "42b62378f03b07b9670ea5071c0d0af5464156f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4529, "license_type": "no_license", "max_line_length": 177, "num_lines": 121, "path": "/ostacct/data/templates/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/get_all.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 1317650509.9260261\n_template_filename='/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/get_all.mak'\n_template_uri='/home/gutch/.python-eggs/tgext.crud-0.3.12-py2.6.egg-tmp/tgext/crud/templates/get_all.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['header', 'body_class', 'title']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n # SOURCE 
LINE 2\n ns = runtime.TemplateNamespace(u'menu_items', context._clean_inheritance_tokens(), templateuri=u'tgext.crud.templates.menu_items', callables=None, calling_uri=_template_uri)\n context.namespaces[(__name__, u'menu_items')] = ns\n\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n value_list = context.get('value_list', UNDEFINED)\n model = context.get('model', UNDEFINED)\n dict = context.get('dict', UNDEFINED)\n tmpl_context = context.get('tmpl_context', UNDEFINED)\n menu_items = _mako_get_namespace(context, 'menu_items')\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n')\n # SOURCE LINE 2\n __M_writer(u'\\n\\n')\n # SOURCE LINE 6\n __M_writer(u'\\n')\n # SOURCE LINE 10\n __M_writer(u'\\n')\n # SOURCE LINE 11\n __M_writer(u'\\n<div id=\"main_content\">\\n ')\n # SOURCE LINE 13\n __M_writer(escape(menu_items.menu_items()))\n __M_writer(u'\\n <div style=\"float:left; width:80%\">\\n <h1 style=\"margin-top:1px;\">')\n # SOURCE LINE 15\n __M_writer(escape(model))\n __M_writer(u' Listing</h1>\\n <div style=\"margin:1ex 0; width:90%\">\\n <a href=\\'new\\' class=\"add_link\">New ')\n # SOURCE LINE 17\n __M_writer(escape(model))\n __M_writer(u'</a>\\n')\n # SOURCE LINE 18\n if tmpl_context.paginators:\n # SOURCE LINE 19\n __M_writer(u' <span style=\"margin-left:2em\">')\n __M_writer(escape(tmpl_context.paginators.value_list.pager(link='../%ss'%model.lower())))\n __M_writer(u'</span>\\n')\n pass\n # SOURCE LINE 21\n __M_writer(u' </div>\\n <div class=\"crud_table\" style=\"height:50%; width:90%\">\\n ')\n # SOURCE LINE 23\n __M_writer(tmpl_context.widget(value=value_list, action='../'+model.lower()+'s.json', attrs=dict(style=\"height:200px; border:solid black 3px;\")) )\n __M_writer(u'\\n </div>\\n 
</div>\\n</div>\\n<div style=\"clear:both;\"/>\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_header(context):\n context.caller_stack._push_frame()\n try:\n parent = context.get('parent', UNDEFINED)\n menu_items = _mako_get_namespace(context, 'menu_items')\n __M_writer = context.writer()\n # SOURCE LINE 7\n __M_writer(u'\\n')\n # SOURCE LINE 8\n __M_writer(escape(menu_items.menu_style()))\n __M_writer(u'\\n')\n # SOURCE LINE 9\n __M_writer(escape(parent.header()))\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_body_class(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 11\n __M_writer(u'tundra')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context):\n context.caller_stack._push_frame()\n try:\n model = context.get('model', UNDEFINED)\n tmpl_context = context.get('tmpl_context', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 4\n __M_writer(u'\\n')\n # SOURCE LINE 5\n __M_writer(escape(tmpl_context.title))\n __M_writer(u' - ')\n __M_writer(escape(model))\n __M_writer(u' Listing\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.631147563457489, "alphanum_fraction": 0.631147563457489, "avg_line_length": 23.200000762939453, "blob_id": "184780e2f99039563afcb4471c66b5f65a6251db", "content_id": "5b4cad6d8071a738624f8ca5cd4ccae6449a8d69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 122, "license_type": "no_license", "max_line_length": 41, "num_lines": 5, "path": "/ostacct/ostacct/templates/ostacct.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<%inherit file=\"local:templates.master\"/>\n<%def name=\"title()\">\n In School Accounting\n</%def>\n ${c.form().display() | n} \n" }, { "alpha_fraction": 0.3370024859905243, "alphanum_fraction": 0.3404286205768585, 
"avg_line_length": 35.28571319580078, "blob_id": "ea32e1aeafec0f812f63234ac0578ae727dbe508", "content_id": "1056b4e67fd05af2cd4982050eebb308247df2da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 13718, "license_type": "no_license", "max_line_length": 132, "num_lines": 378, "path": "/ostacct/ostacct/templates/eventcal.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "\n<%inherit file=\"local:templates.master\"/>\t \n\n<%def name=\"title()\">\n Welcome DOE Local Finance Event and Due Date Calendar\n</%def>\n\n<link rel=\"stylesheet\" type=\"text/css\" media=\"screen\" href=\"${tg.url('/css/calstyles.css')}\" /> \t \n<HTML>\n\n<script type=\"text/javascript\">\nfunction update(){\nvar ThePath='/eventlist';\nwindow.location =ThePath}\nfunction winprint(){\nwindow.print()\n}\n\nfunction next(m,y){\nif (m==12){\n m=0;\n y=y+1;}\nvar ThePath='/cal?month='+(m+1)+\"&year=\"+ y;\nwindow.location =ThePath}\n\nfunction prior(m,y){\nif (m==1){\n m=13;\n y=y-1;}\nvar ThePath='/cal?month='+(m-1)+\"&year=\"+ y;\nwindow.location =ThePath}\n\nfunction back(){\nvar ThePath='/index';\nwindow.location=ThePath\n}\t \n\n</script>\n<head>\n\n\n<title>February 2007</title>\n\n</head>\n\n<body>\n<div id=\"container\">\n<b class=\"rtop\"><b class=\"r1\"></b> <b class=\"r2\"></b><b class=\"r3\"></b> <b class=\"r4\"></b></b>\n<table border=\"0\">\n<tr>\n\n<td><FORM METHOD = \"LINK\" ACTION=\"index\"><INPUT TYPE=\"SUBMIT\" VALUE=\"Back to Index\"></FORM></td>\n<td><INPUT TYPE=\"button\" VALUE=\"PRIOR<<--MONTH\" HEIGHT=\"30\" WIDTH=\"150\" BORDER=\"0\" ONCLICK=\"prior(${current_no},${current_yr})\"</td>\n<td><INPUT TYPE=\"button\" VALUE=\"NEXT-->>MONTH\" HEIGHT=\"30\" WIDTH=\"150\" BORDER=\"0\" ONCLICK=\"next(${current_no},${current_yr})\"</td>\n<td><FORM METHOD = \"LINK\" ACTION= \"eventlist\"><INPUT TYPE=\"SUBMIT\" VALUE=\"EDIT CALENDAR EVENTS\" ></FORM></td>\n\n</tr></table><br>\n <h1>${current_month}, 
${current_yr}</h1 >\n<table id=\"month\">\n\t<thead>\n\t\t<tr>\n\t\t\t<center><th class=\"weekend\"><center>Sunday</center></th>\n\t\t\t<th><center>Monday</center></th>\n\t\t\t<th><center>Tuesday</center></th>\n\t\t\t<th><center>Wednesday</center></th>\n\n\t\t\t<th><center>Thursday</center></th>\n\t\t\t<th><center>Friday</center></th>\n\t\t\t<th class=\"weekend\"><center>Saturday</center></th>\n\t\t</tr>\n\t</thead>\n\t<tbody>\n\n %for sun, mon, tue, wed, thu, fri, sat in weeklist:\n\t\t <tr>\n\t\t\t%if sun ==0:\n\t\t\t <td class=previous></span><div class=previous></div></td>\n\t\t\t %endif\n\t\t\t %if sun>0 and sun <> current_day:\n <td class=weekend></span>${sun}<div class=weekend><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==sun: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=weekend></div></td>\n\t\t\t %endif \n\t\t\t %if sun==current_day:\n <td class=weekend></span><strong>${sun}</strong?<div class=weekend><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==sun: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=weekend></div></td>\n\t\t\t %endif \n\t\t \n\t\t\t%if mon ==0:\n\t\t\t <td class=previous></span><div class=previous></div></td>\n\t\t\t %endif\n\t\t\t %if mon>0 and mon <> current_day:\n <td class=day></span>${mon}<div class=day><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==mon: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t 
%else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=day></div></td>\n\t\t\t %endif \n\t\t\t %if mon==current_day:\n <td class=current></span><strong>${mon}</strong?<div class=day><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==mon: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=day></div></td>\n\t\t\t %endif \n\t\t\t\n\t\t\t%if tue ==0:\n\t\t\t <td class=previous></span><div class=previous></div></td>\n\t\t\t %endif\n\t\t\t %if tue>0 and tue <> current_day:\n <td class=day></span>${tue}<div class=day><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==tue: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=day></div></td>\n\t\t\t %endif \n\t\t\t %if tue==current_day:\n <td class=current></span><strong>${tue}</strong?<div class=day><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==tue: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=day></div></td>\n\t\t\t %endif \n\t\t\n\t\t\t%if wed ==0:\n\t\t\t <td class=previous></span><div class=previous></div></td>\n\t\t\t %endif\n\t\t\t %if wed>0 and wed <> current_day:\n <td class=day></span>${wed}<div class=day><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==wed: \n\t\t\t %if devent:\n %for typ,Desc,doc in 
devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=day></div></td>\n\t\t\t %endif \n\t\t\t %if wed==current_day:\n <td class=current></span><strong>${wed}</strong?<div class=day><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==wed: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=day></div></td>\n\t\t\t %endif \n\n\n\t\t\t%if thu ==0:\n\t\t\t <td class=previous></span><div class=previous></div></td>\n\t\t\t %endif\n\t\t\t %if thu>0 and thu <> current_day:\n <td class=day></span>${thu}<div class=day><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==thu: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=day></div></td>\n\t\t\t %endif \n\t\t\t %if thu==current_day:\n <td class=current></span><strong>${thu}</strong?<div class=day><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==thu: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=day></div></td>\n\t\t\t %endif\n\n\t\t\t%if fri ==0:\n\t\t\t <td class=previous></span><div class=previous></div></td>\n\t\t\t %endif\n\t\t\t %if fri>0 and fri <> current_day:\n <td class=day></span>${fri}<div class=day><br>\n\t\t\t %if 
events:\n %for m, d, y,devent in events: \n %if d==fri: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=day></div></td>\n\t\t\t %endif \n\t\t\t %if fri==current_day:\n <td class=current></span><strong>${fri}</strong?<div class=day><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==fri: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=day></div></td>\n\t\t\t %endif \n \n\t\t\n\t\t\t%if sat ==0:\n\t\t\t <td class=previous></span><div class=previous></div></td>\n\t\t\t %endif\n\t\t\t %if sat>0 and sat <> current_day:\n <td class=weekend></span>${sat}<div class=weekend><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==sat: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=weekend></div></td>\n\t\t\t %endif \n\t\t\t %if sat==current_day:\n <td class=weekend></span><strong>${sat}</strong?<div class=weekend><br>\n\t\t\t %if events:\n %for m, d, y,devent in events: \n %if d==sat: \n\t\t\t %if devent:\n %for typ,Desc,doc in devent:\n %if doc: \t\t\t\t\t\t\t\t\t \n\t\t\t\t${typ}<br><a href=\"/eventdoc/${doc}/${doc}\">${Desc}</a> \n\t\t\t %else:\n\t\t\t\t${typ}<br> ${Desc}\n %endif\n %endfor\n\t\t\t %endif\n\t\t\t %endif\n %endfor\n %endif\t\n <div class=weekend></div></td>\n\t\t\t %endif\n \t\t </tr>\n\t\t %endfor\n\t\t 
\n\n\t</tbody>\n</table>\n</div>\n</body>\n</html>\n\n" }, { "alpha_fraction": 0.6407093405723572, "alphanum_fraction": 0.6511179804801941, "avg_line_length": 47.90565872192383, "blob_id": "de5020ca6a219e52b064a7d0bb4c0b1c2192b9b3", "content_id": "4bdc8eac5b26411c7eec40cbd7e1391b8061cef0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2594, "license_type": "no_license", "max_line_length": 690, "num_lines": 53, "path": "/ostacct/data/templates/home/gutch/ost/ostacct/ostacct/templates/eventlist.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 1317674649.511318\n_template_filename='/home/gutch/ost/ostacct/ostacct/templates/eventlist.mak'\n_template_uri='/home/gutch/ost/ostacct/ostacct/templates/eventlist.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['title']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n c = context.get('c', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n\\n<script type=\"text/javascript\">\\nfunction edit(me){\\nvar event=me.id;\\nvar ThePath=\\'/eventedit?id=\\'+event;\\nwindow.location =ThePath\\n}\\n</script>\\n<script type=\"text/javascript\">\\nfunction delevent(me){\\nvar 
event=me.id;\\nvar ThePath=\\'/delevent?id=\\'+event;\\nwindow.location =ThePath\\n}\\n</script>\\n<script type=\"text/javascript\">\\n(document).ready(\\n function(){\\njQuery(\"#grid\").jqGrid(\\'navGrid\\',\\'#pager\\',\\n { edit:false,view:false,add:false,del:false,search:false,\\n beforeRefresh: function(){\\n alert(\\'In beforeRefresh\\');\\n grid.jqGrid(\\'setGridParam\\',{datatype:\\'json\\'}).trigger(\\'reloadGrid\\');\\n }\\n })\\n\\n</script>\\n\\n')\n # SOURCE LINE 32\n __M_writer(u'\\n<table border=\"0\">\\n<tr>\\n<td><FORM METHOD = \"LINK\" ACTION= \"AddEvent\"><INPUT TYPE=\"SUBMIT\" VALUE=\"ADD CALENDAR EVENTS\" ></FORM></td>\\n\\n\\n<td><FORM METHOD = \"LINK\" ACTION= \"cal\"><INPUT TYPE=\"SUBMIT\" VALUE=\"BACK TO CALENDAR VIEW\" ></FORM></td>\\n</tr></table><br>\\n\\n ')\n # SOURCE LINE 41\n __M_writer(c.form().display() )\n __M_writer(u' \\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 30\n __M_writer(u'\\n Calendar of Events\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.5985105037689209, "alphanum_fraction": 0.6222071647644043, "avg_line_length": 39.97222137451172, "blob_id": "b514d4bdd452f50da43ac97e6af74060700d18fe", "content_id": "1cc58ca0a566f198c7d3b02d58571504643c01dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1477, "license_type": "no_license", "max_line_length": 288, "num_lines": 36, "path": "/ostacct/data/templates/home/gutch/ost/ostacct/ostacct/templates/error.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 
1317651236.2653069\n_template_filename='/home/gutch/ost/ostacct/ostacct/templates/error.mak'\n_template_uri='/home/gutch/ost/ostacct/ostacct/templates/error.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = []\n\n\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n message = context.get('message', UNDEFINED)\n code = context.get('code', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\\n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\\n<html>\\n\\n<head>\\n <meta content=\"text/html; charset=UTF-8\" http-equiv=\"content-type\" py:replace=\"\\'\\'\"/>\\n <title>A ')\n # SOURCE LINE 7\n __M_writer(escape(code))\n __M_writer(u' Error has Occurred </title>\\n</head>\\n\\n<body>\\n<h1>Error ')\n # SOURCE LINE 11\n __M_writer(escape(code))\n __M_writer(u'</h1>\\n\\n<div>')\n # SOURCE LINE 13\n __M_writer(message )\n __M_writer(u'</div>\\n</body>\\n</html>')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.7094017267227173, "alphanum_fraction": 0.7163947224617004, "avg_line_length": 57.45454406738281, "blob_id": "32e721a05ad2888dd6408b6d517bb9e6f899e9bd", "content_id": "fdcbc7c99e125960fe058049bf46fbe88407c0c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1287, "license_type": "no_license", "max_line_length": 109, "num_lines": 22, "path": "/ostacct/ostacct/templates/accessit.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<%inherit file=\"local:templates.master\"/>\n\n<%def name=\"title()\">\n Local Finance Access Forms\n</%def>\n <FORM><INPUT onclick=history.go(-1) type=button value=Back></FORM>\n \n<h2>Payroll Information and Accounting Systems(PIRS)</h2>\n <p><a 
href=\"/accessforms/PIRS Read-Only Access Form.doc\">PIRS read-Only Access Form</a></p>\n <p><a href=\"/accessforms/PIRS User Access Form.doc\">PIRS User Access Form</a></p>\n<h2>Ereporting</h2>\n<p><a href=\"/accessforms/form - District Inquiry.doc\">Ereporting Inquiry only</a></p>\n<p><a href=\"/accessforms/District_User.doc\">Ereporting User</a></p>\n<p><a href=\"/accessforms/Director_of_Schools.Doc\">Director Access Form</a></p>\n<p><a href=\"/accessforms/form - Chairperson of BOE.doc\">Chair person of Board Access Form</a></p>\n<p><a href=\"/accessforms/form - County Clerk,City or SSD Recorder.doc\">County Court Clerk Access Form</a></p>\n<p><a href=\"/accessforms/form - County Trustee,City Treasurer\">County Trustee Access Form</a></p>\n<h2>FACTS</h2>\n<p><a href=\"/accessforms/FACTS View Only Access Form.doc\">FACTS read-Only Access Form</a></p>\n<p><a href=\"/accessforms/FACTS Update Access Form.doc\">FACTs Payment Requester, User Access Form</a></p>\n<h2>ATTENDANCE</h2>\n<p><a href=\"/accessforms/form - District User.doc\">Attendance User Access Form</a></p>\n\n" }, { "alpha_fraction": 0.6646646857261658, "alphanum_fraction": 0.6656656861305237, "avg_line_length": 23.341463088989258, "blob_id": "7ae6d861bdc52cfdb6eb95229b275ba5d314cd6a", "content_id": "bcd49f191c30d77ce6dd6f1daafcbbe4147372de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 999, "license_type": "no_license", "max_line_length": 107, "num_lines": 41, "path": "/ostacct/ostacct/templates/eventlist.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<%inherit file=\"local:templates.master\"/>\n\n<script type=\"text/javascript\">\nfunction edit(me){\nvar event=me.id;\nvar ThePath='/eventedit?id='+event;\nwindow.location =ThePath\n}\n</script>\n<script type=\"text/javascript\">\nfunction delevent(me){\nvar event=me.id;\nvar ThePath='/delevent?id='+event;\nwindow.location =ThePath\n}\n</script>\n<script 
type=\"text/javascript\">\n(document).ready(\n function(){\njQuery(\"#grid\").jqGrid('navGrid','#pager',\n { edit:false,view:false,add:false,del:false,search:false,\n beforeRefresh: function(){\n alert('In beforeRefresh');\n grid.jqGrid('setGridParam',{datatype:'json'}).trigger('reloadGrid');\n }\n })\n\n</script>\n\n<%def name=\"title()\">\n Calendar of Events\n</%def>\n<table border=\"0\">\n<tr>\n<td><FORM METHOD = \"LINK\" ACTION= \"AddEvent\"><INPUT TYPE=\"SUBMIT\" VALUE=\"ADD CALENDAR EVENTS\" ></FORM></td>\n\n\n<td><FORM METHOD = \"LINK\" ACTION= \"cal\"><INPUT TYPE=\"SUBMIT\" VALUE=\"BACK TO CALENDAR VIEW\" ></FORM></td>\n</tr></table><br>\n\n ${c.form().display() | n} \n" }, { "alpha_fraction": 0.6453744769096375, "alphanum_fraction": 0.6568281650543213, "avg_line_length": 50.59090805053711, "blob_id": "49d626533379bc1ef26bc17cc017ca3923eb8a50", "content_id": "4c3f271461928efa5794803cd55d0fe559b7b693", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2270, "license_type": "no_license", "max_line_length": 240, "num_lines": 44, "path": "/ostacct/ostacct/templates/WxPyETn.mak", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "<%inherit file=\"local:templates.master\"/>\n\n<%def name=\"title()\">\nWxPyETn: Print reports from ereporting .\n</%def>\n\n\n <div id=\"getting_started\">\n <h2>Getting started with WxPyETn.</h2>\n <p>You have to Download it first.</p>\n <ol id=\"what_is_it\">\n <li class=\"getting_started\">\n <h3>Download links below</h3>\n <p> <a href=\"http://sourceforge.net/projects/wxpyetn/files\">sourceforge</a></p>\n <p> <a href=\"/installWxPyETn3.4.exe\">Easy to Download WxPyETn3.4(updated 8/16/2011) with new chart of accounts now</a><font size= 1 color=\"red\" > The sub fund total bug in excel has been fixed</font></p>\n\t\t<hr>\n </li>\n <li class=\"getting_started\">\n <h3>Install it if--></h3>\n <p> Your \"<span class=\"code\">Operating System\" </span> is Microsoft. 
I apologize for not making it\n\t\tlinux and apple capable(time is precious). It would also be nice if you have excel, but it is not required. The most useful reports WxPyETn produces are sent to an excel spreadsheet. Other than Administrative privileges, that is all of\n\t\tthe requirements you will need.</p></li>\n\t\t<li class=\"getting_started\">\n <p><h3>and then--></h3>\n\t\t<p>Once it is downloaded just click on the file and go through the installation wizard.</p>\n </li>\n <li class=\"getting_started\">\n <h3>OK its installed -->You are ready to download the csv files from ereporting--click on \"Get File\"</h3>\n\t\t<p><span class=\"code\">Select and download all years available --one year at a time.</span></p>\n\t\t<hr>\n <p><img src=\"images/Screenshot1.jpg\"/> </p>\n\t\t<hr>\n\t\t\t<hr>\n\t\t<p><h3>Next to the last step open the csv file- with WxPyETn-></h3>\n\t\t<p><img src=\"images/Screenshot2.jpg\"/> </p>\n\t\t<p>If you have gotten this far, then all you have to do now is go through the menus.\n\t\tReports will get you reports, Graphs will get you graphs and Tables will allow you to update the chart of accounts.</p>\n\t\t<p>One other note. This should work on the new quarter csv files. The quarter will be listed. 
If not I will come out with an update </p> \n </li>\n </ol>\n\t<p></p>\n <p><a href=\"/SampleAFR.pdf\">Sample of an EXCEL AFR report</a></p>\n <p><a href=\"/SampleSubAFR.pdf\">Sample of an EXCEL AFR by Sub Fund report</a></p>\n</div\n" }, { "alpha_fraction": 0.6444017291069031, "alphanum_fraction": 0.6573762893676758, "avg_line_length": 38.22641372680664, "blob_id": "c73f4dcce63223f9d963804cecd83e8c07afade6", "content_id": "7949bca718ef19bf4ce87b24bde08964e78be658", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2081, "license_type": "no_license", "max_line_length": 438, "num_lines": 53, "path": "/ostacct/data/templates/home/gutch/ost/ostacct/ostacct/templates/schools.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 1317677102.5855179\n_template_filename='/home/gutch/ost/ostacct/ostacct/templates/schools.mak'\n_template_uri='/home/gutch/ost/ostacct/ostacct/templates/schools.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['title']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n c = context.get('c', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n\\n<script type=\"text/javascript\">\\nfunction 
eclub(me){\\nvar schools_id=me.id;\\nvar ThePath=\\'/Chart/?schools_id=\\'+schools_id;\\nwindow.location =ThePath\\n}\\nfunction eact(me){\\nvar schools_id=me.id;\\nvar ThePath=\\'/Chart/?schools_id=\\'+schools_id;\\nwindow.location =ThePath\\n}\\nfunction eacct(me){\\nvar schools_id=me.id;\\nvar ThePath=\\'/Chart/?schools_id=\\'+schools_id;\\nwindow.location =ThePath\\n}\\n</script>\\n\\n')\n # SOURCE LINE 23\n __M_writer(u'\\n\\n\\n\\n ')\n # SOURCE LINE 27\n __M_writer(c.form().display() )\n __M_writer(u' \\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 21\n __M_writer(u'\\n In School Accounting\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" }, { "alpha_fraction": 0.2716255486011505, "alphanum_fraction": 0.2876913249492645, "avg_line_length": 50.867454528808594, "blob_id": "7e386b9e2b4088fefd885d4df462d4f643ba4055", "content_id": "24e49b9f12debecc42c0651a7920fc8a6c591632", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39525, "license_type": "no_license", "max_line_length": 958, "num_lines": 762, "path": "/ostacct/data/templates/home/gutch/ost/ostacct/ostacct/templates/eventcal.mak.py", "repo_name": "gutch/OSTacct", "src_encoding": "UTF-8", "text": "# -*- encoding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 6\n_modified_time = 1317658015.372879\n_template_filename='/home/gutch/ost/ostacct/ostacct/templates/eventcal.mak'\n_template_uri='/home/gutch/ost/ostacct/ostacct/templates/eventcal.mak'\n_template_cache=cache.Cache(__name__, _modified_time)\n_source_encoding='utf-8'\nfrom webhelpers.html import escape\n_exports = ['title']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except 
KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'local:templates.master', _template_uri)\ndef render_body(context,**pageargs):\n context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n weeklist = context.get('weeklist', UNDEFINED)\n current_month = context.get('current_month', UNDEFINED)\n events = context.get('events', UNDEFINED)\n tg = context.get('tg', UNDEFINED)\n current_day = context.get('current_day', UNDEFINED)\n current_yr = context.get('current_yr', UNDEFINED)\n current_no = context.get('current_no', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 1\n __M_writer(u'\\n')\n # SOURCE LINE 2\n __M_writer(u'\\t \\n\\n')\n # SOURCE LINE 6\n __M_writer(u'\\n\\n<link rel=\"stylesheet\" type=\"text/css\" media=\"screen\" href=\"')\n # SOURCE LINE 8\n __M_writer(escape(tg.url('/css/calstyles.css')))\n __M_writer(u'\" /> \\t \\n<HTML>\\n\\n<script type=\"text/javascript\">\\nfunction update(){\\nvar ThePath=\\'/eventlist\\';\\nwindow.location =ThePath}\\nfunction winprint(){\\nwindow.print()\\n}\\n\\nfunction next(m,y){\\nif (m==12){\\n m=0;\\n y=y+1;}\\nvar ThePath=\\'/cal?month=\\'+(m+1)+\"&year=\"+ y;\\nwindow.location =ThePath}\\n\\nfunction prior(m,y){\\nif (m==1){\\n m=13;\\n y=y-1;}\\nvar ThePath=\\'/cal?month=\\'+(m-1)+\"&year=\"+ y;\\nwindow.location =ThePath}\\n\\nfunction back(){\\nvar ThePath=\\'/index\\';\\nwindow.location=ThePath\\n}\\t \\n\\n</script>\\n<head>\\n\\n\\n<title>February 2007</title>\\n\\n</head>\\n\\n<body>\\n<div id=\"container\">\\n<b class=\"rtop\"><b class=\"r1\"></b> <b class=\"r2\"></b><b class=\"r3\"></b> <b class=\"r4\"></b></b>\\n<table border=\"0\">\\n<tr>\\n\\n<td><FORM METHOD = \"LINK\" ACTION=\"index\"><INPUT TYPE=\"SUBMIT\" VALUE=\"Back to 
Index\"></FORM></td>\\n<td><INPUT TYPE=\"button\" VALUE=\"PRIOR<<--MONTH\" HEIGHT=\"30\" WIDTH=\"150\" BORDER=\"0\" ONCLICK=\"prior(')\n # SOURCE LINE 53\n __M_writer(escape(current_no))\n __M_writer(u',')\n __M_writer(escape(current_yr))\n __M_writer(u')\"</td>\\n<td><INPUT TYPE=\"button\" VALUE=\"NEXT-->>MONTH\" HEIGHT=\"30\" WIDTH=\"150\" BORDER=\"0\" ONCLICK=\"next(')\n # SOURCE LINE 54\n __M_writer(escape(current_no))\n __M_writer(u',')\n __M_writer(escape(current_yr))\n __M_writer(u')\"</td>\\n<td><FORM METHOD = \"LINK\" ACTION= \"eventlist\"><INPUT TYPE=\"SUBMIT\" VALUE=\"EDIT CALENDAR EVENTS\" ></FORM></td>\\n\\n</tr></table><br>\\n <h1>')\n # SOURCE LINE 58\n __M_writer(escape(current_month))\n __M_writer(u', ')\n __M_writer(escape(current_yr))\n __M_writer(u'</h1 >\\n<table id=\"month\">\\n\\t<thead>\\n\\t\\t<tr>\\n\\t\\t\\t<center><th class=\"weekend\"><center>Sunday</center></th>\\n\\t\\t\\t<th><center>Monday</center></th>\\n\\t\\t\\t<th><center>Tuesday</center></th>\\n\\t\\t\\t<th><center>Wednesday</center></th>\\n\\n\\t\\t\\t<th><center>Thursday</center></th>\\n\\t\\t\\t<th><center>Friday</center></th>\\n\\t\\t\\t<th class=\"weekend\"><center>Saturday</center></th>\\n\\t\\t</tr>\\n\\t</thead>\\n\\t<tbody>\\n\\n')\n # SOURCE LINE 74\n for sun, mon, tue, wed, thu, fri, sat in weeklist:\n # SOURCE LINE 75\n __M_writer(u'\\t\\t <tr>\\n')\n # SOURCE LINE 76\n if sun ==0:\n # SOURCE LINE 77\n __M_writer(u'\\t\\t\\t <td class=previous></span><div class=previous></div></td>\\n')\n pass\n # SOURCE LINE 79\n if sun>0 and sun <> current_day:\n # SOURCE LINE 80\n __M_writer(u' <td class=weekend></span>')\n __M_writer(escape(sun))\n __M_writer(u'<div class=weekend><br>\\n')\n # SOURCE LINE 81\n if events:\n # SOURCE LINE 82\n for m, d, y,devent in events: \n # SOURCE LINE 83\n if d==sun: \n # SOURCE LINE 84\n if devent:\n # SOURCE LINE 85\n for typ,Desc,doc in devent:\n # SOURCE LINE 86\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 87\n 
__M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 88\n else:\n # SOURCE LINE 89\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 96\n __M_writer(u' <div class=weekend></div></td>\\n')\n pass\n # SOURCE LINE 98\n if sun==current_day:\n # SOURCE LINE 99\n __M_writer(u' <td class=weekend></span><strong>')\n __M_writer(escape(sun))\n __M_writer(u'</strong?<div class=weekend><br>\\n')\n # SOURCE LINE 100\n if events:\n # SOURCE LINE 101\n for m, d, y,devent in events: \n # SOURCE LINE 102\n if d==sun: \n # SOURCE LINE 103\n if devent:\n # SOURCE LINE 104\n for typ,Desc,doc in devent:\n # SOURCE LINE 105\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 106\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 107\n else:\n # SOURCE LINE 108\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 115\n __M_writer(u' <div class=weekend></div></td>\\n')\n pass\n # SOURCE LINE 117\n __M_writer(u'\\t\\t \\n')\n # SOURCE LINE 118\n if mon ==0:\n # SOURCE LINE 119\n __M_writer(u'\\t\\t\\t <td class=previous></span><div class=previous></div></td>\\n')\n pass\n # SOURCE LINE 121\n if mon>0 and mon <> current_day:\n # SOURCE LINE 122\n __M_writer(u' <td class=day></span>')\n __M_writer(escape(mon))\n __M_writer(u'<div class=day><br>\\n')\n # SOURCE LINE 123\n if events:\n # SOURCE LINE 124\n for m, d, y,devent in 
events: \n # SOURCE LINE 125\n if d==mon: \n # SOURCE LINE 126\n if devent:\n # SOURCE LINE 127\n for typ,Desc,doc in devent:\n # SOURCE LINE 128\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 129\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 130\n else:\n # SOURCE LINE 131\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 138\n __M_writer(u' <div class=day></div></td>\\n')\n pass\n # SOURCE LINE 140\n if mon==current_day:\n # SOURCE LINE 141\n __M_writer(u' <td class=current></span><strong>')\n __M_writer(escape(mon))\n __M_writer(u'</strong?<div class=day><br>\\n')\n # SOURCE LINE 142\n if events:\n # SOURCE LINE 143\n for m, d, y,devent in events: \n # SOURCE LINE 144\n if d==mon: \n # SOURCE LINE 145\n if devent:\n # SOURCE LINE 146\n for typ,Desc,doc in devent:\n # SOURCE LINE 147\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 148\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 149\n else:\n # SOURCE LINE 150\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 157\n __M_writer(u' <div class=day></div></td>\\n')\n pass\n # SOURCE LINE 159\n __M_writer(u'\\t\\t\\t\\n')\n # SOURCE LINE 160\n if tue ==0:\n # SOURCE LINE 161\n __M_writer(u'\\t\\t\\t <td class=previous></span><div class=previous></div></td>\\n')\n pass\n # SOURCE LINE 163\n if tue>0 and tue <> current_day:\n # SOURCE LINE 
164\n __M_writer(u' <td class=day></span>')\n __M_writer(escape(tue))\n __M_writer(u'<div class=day><br>\\n')\n # SOURCE LINE 165\n if events:\n # SOURCE LINE 166\n for m, d, y,devent in events: \n # SOURCE LINE 167\n if d==tue: \n # SOURCE LINE 168\n if devent:\n # SOURCE LINE 169\n for typ,Desc,doc in devent:\n # SOURCE LINE 170\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 171\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 172\n else:\n # SOURCE LINE 173\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 180\n __M_writer(u' <div class=day></div></td>\\n')\n pass\n # SOURCE LINE 182\n if tue==current_day:\n # SOURCE LINE 183\n __M_writer(u' <td class=current></span><strong>')\n __M_writer(escape(tue))\n __M_writer(u'</strong?<div class=day><br>\\n')\n # SOURCE LINE 184\n if events:\n # SOURCE LINE 185\n for m, d, y,devent in events: \n # SOURCE LINE 186\n if d==tue: \n # SOURCE LINE 187\n if devent:\n # SOURCE LINE 188\n for typ,Desc,doc in devent:\n # SOURCE LINE 189\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 190\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 191\n else:\n # SOURCE LINE 192\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 199\n __M_writer(u' <div class=day></div></td>\\n')\n pass\n # SOURCE LINE 201\n __M_writer(u'\\t\\t\\n')\n # SOURCE LINE 202\n if wed ==0:\n 
# SOURCE LINE 203\n __M_writer(u'\\t\\t\\t <td class=previous></span><div class=previous></div></td>\\n')\n pass\n # SOURCE LINE 205\n if wed>0 and wed <> current_day:\n # SOURCE LINE 206\n __M_writer(u' <td class=day></span>')\n __M_writer(escape(wed))\n __M_writer(u'<div class=day><br>\\n')\n # SOURCE LINE 207\n if events:\n # SOURCE LINE 208\n for m, d, y,devent in events: \n # SOURCE LINE 209\n if d==wed: \n # SOURCE LINE 210\n if devent:\n # SOURCE LINE 211\n for typ,Desc,doc in devent:\n # SOURCE LINE 212\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 213\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 214\n else:\n # SOURCE LINE 215\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 222\n __M_writer(u' <div class=day></div></td>\\n')\n pass\n # SOURCE LINE 224\n if wed==current_day:\n # SOURCE LINE 225\n __M_writer(u' <td class=current></span><strong>')\n __M_writer(escape(wed))\n __M_writer(u'</strong?<div class=day><br>\\n')\n # SOURCE LINE 226\n if events:\n # SOURCE LINE 227\n for m, d, y,devent in events: \n # SOURCE LINE 228\n if d==wed: \n # SOURCE LINE 229\n if devent:\n # SOURCE LINE 230\n for typ,Desc,doc in devent:\n # SOURCE LINE 231\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 232\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 233\n else:\n # SOURCE LINE 234\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n 
pass\n pass\n pass\n pass\n # SOURCE LINE 241\n __M_writer(u' <div class=day></div></td>\\n')\n pass\n # SOURCE LINE 243\n __M_writer(u'\\n\\n')\n # SOURCE LINE 245\n if thu ==0:\n # SOURCE LINE 246\n __M_writer(u'\\t\\t\\t <td class=previous></span><div class=previous></div></td>\\n')\n pass\n # SOURCE LINE 248\n if thu>0 and thu <> current_day:\n # SOURCE LINE 249\n __M_writer(u' <td class=day></span>')\n __M_writer(escape(thu))\n __M_writer(u'<div class=day><br>\\n')\n # SOURCE LINE 250\n if events:\n # SOURCE LINE 251\n for m, d, y,devent in events: \n # SOURCE LINE 252\n if d==thu: \n # SOURCE LINE 253\n if devent:\n # SOURCE LINE 254\n for typ,Desc,doc in devent:\n # SOURCE LINE 255\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 256\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 257\n else:\n # SOURCE LINE 258\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 265\n __M_writer(u' <div class=day></div></td>\\n')\n pass\n # SOURCE LINE 267\n if thu==current_day:\n # SOURCE LINE 268\n __M_writer(u' <td class=current></span><strong>')\n __M_writer(escape(thu))\n __M_writer(u'</strong?<div class=day><br>\\n')\n # SOURCE LINE 269\n if events:\n # SOURCE LINE 270\n for m, d, y,devent in events: \n # SOURCE LINE 271\n if d==thu: \n # SOURCE LINE 272\n if devent:\n # SOURCE LINE 273\n for typ,Desc,doc in devent:\n # SOURCE LINE 274\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 275\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE 
LINE 276\n else:\n # SOURCE LINE 277\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 284\n __M_writer(u' <div class=day></div></td>\\n')\n pass\n # SOURCE LINE 286\n __M_writer(u'\\n')\n # SOURCE LINE 287\n if fri ==0:\n # SOURCE LINE 288\n __M_writer(u'\\t\\t\\t <td class=previous></span><div class=previous></div></td>\\n')\n pass\n # SOURCE LINE 290\n if fri>0 and fri <> current_day:\n # SOURCE LINE 291\n __M_writer(u' <td class=day></span>')\n __M_writer(escape(fri))\n __M_writer(u'<div class=day><br>\\n')\n # SOURCE LINE 292\n if events:\n # SOURCE LINE 293\n for m, d, y,devent in events: \n # SOURCE LINE 294\n if d==fri: \n # SOURCE LINE 295\n if devent:\n # SOURCE LINE 296\n for typ,Desc,doc in devent:\n # SOURCE LINE 297\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 298\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 299\n else:\n # SOURCE LINE 300\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 307\n __M_writer(u' <div class=day></div></td>\\n')\n pass\n # SOURCE LINE 309\n if fri==current_day:\n # SOURCE LINE 310\n __M_writer(u' <td class=current></span><strong>')\n __M_writer(escape(fri))\n __M_writer(u'</strong?<div class=day><br>\\n')\n # SOURCE LINE 311\n if events:\n # SOURCE LINE 312\n for m, d, y,devent in events: \n # SOURCE LINE 313\n if d==fri: \n # SOURCE LINE 314\n if devent:\n # SOURCE LINE 315\n for typ,Desc,doc in devent:\n # SOURCE LINE 316\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 317\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n 
__M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 318\n else:\n # SOURCE LINE 319\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 326\n __M_writer(u' <div class=day></div></td>\\n')\n pass\n # SOURCE LINE 328\n __M_writer(u' \\n\\t\\t\\n')\n # SOURCE LINE 330\n if sat ==0:\n # SOURCE LINE 331\n __M_writer(u'\\t\\t\\t <td class=previous></span><div class=previous></div></td>\\n')\n pass\n # SOURCE LINE 333\n if sat>0 and sat <> current_day:\n # SOURCE LINE 334\n __M_writer(u' <td class=weekend></span>')\n __M_writer(escape(sat))\n __M_writer(u'<div class=weekend><br>\\n')\n # SOURCE LINE 335\n if events:\n # SOURCE LINE 336\n for m, d, y,devent in events: \n # SOURCE LINE 337\n if d==sat: \n # SOURCE LINE 338\n if devent:\n # SOURCE LINE 339\n for typ,Desc,doc in devent:\n # SOURCE LINE 340\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 341\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 342\n else:\n # SOURCE LINE 343\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 350\n __M_writer(u' <div class=weekend></div></td>\\n')\n pass\n # SOURCE LINE 352\n if sat==current_day:\n # SOURCE LINE 353\n __M_writer(u' <td class=weekend></span><strong>')\n __M_writer(escape(sat))\n __M_writer(u'</strong?<div class=weekend><br>\\n')\n # SOURCE LINE 354\n if events:\n # SOURCE LINE 355\n for m, d, y,devent in events: \n # SOURCE LINE 356\n if d==sat: \n # 
SOURCE LINE 357\n if devent:\n # SOURCE LINE 358\n for typ,Desc,doc in devent:\n # SOURCE LINE 359\n if doc: \t\t\t\t\t\t\t\t\t \n # SOURCE LINE 360\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br><a href=\"/eventdoc/')\n __M_writer(escape(doc))\n __M_writer(u'/')\n __M_writer(escape(doc))\n __M_writer(u'\">')\n __M_writer(escape(Desc))\n __M_writer(u'</a> \\n')\n # SOURCE LINE 361\n else:\n # SOURCE LINE 362\n __M_writer(u'\\t\\t\\t\\t')\n __M_writer(escape(typ))\n __M_writer(u'<br> ')\n __M_writer(escape(Desc))\n __M_writer(u'\\n')\n pass\n pass\n pass\n pass\n pass\n pass\n # SOURCE LINE 369\n __M_writer(u' <div class=weekend></div></td>\\n')\n pass\n # SOURCE LINE 371\n __M_writer(u' \\t\\t </tr>\\n')\n pass\n # SOURCE LINE 373\n __M_writer(u'\\t\\t \\n\\n\\t</tbody>\\n</table>\\n</div>\\n</body>\\n</html>\\n\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context):\n context.caller_stack._push_frame()\n try:\n __M_writer = context.writer()\n # SOURCE LINE 4\n __M_writer(u'\\n Welcome DOE Local Finance Event and Due Date Calendar\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n" } ]
27
kumarabie/HttppostEs
https://github.com/kumarabie/HttppostEs
db4f7d60d080fa0f97ddb2c5521551d32de2539c
8fa3d3ac6b25876ef2d4592fc97baa3fa171bf8c
5f1998a252df5463cae501fa2fed568ef645125a
refs/heads/main
2023-04-01T06:47:14.676734
2021-04-09T12:34:33
2021-04-09T12:34:33
356,245,180
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7235772609710693, "alphanum_fraction": 0.7479674816131592, "avg_line_length": 25.33333396911621, "blob_id": "c110f4ba0cddbf9bdc487318415d9d197f59b347", "content_id": "9068cee420340d32a42479f86b5bca416dd83646", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 246, "license_type": "no_license", "max_line_length": 89, "num_lines": 9, "path": "/Dockerfile", "repo_name": "kumarabie/HttppostEs", "src_encoding": "UTF-8", "text": "FROM ubuntu:latest\r\n\r\nRUN apt-get update && apt-get upgrade -y\r\nRUN apt-get -y install build-essential python3.6 python3.6-dev python3-pip libssl-dev git\r\n\r\nWORKDIR /home/elastalert\r\n\r\nADD requirement.txt ./\r\nRUN pip3 install -r requirement.txt\r\n" }, { "alpha_fraction": 0.5888111591339111, "alphanum_fraction": 0.590809166431427, "avg_line_length": 44.53953552246094, "blob_id": "30360510b58c76f5a047ff914f5e26152a26a1c2", "content_id": "21acd687652dc6880789ac5856ed6137878f9fe5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10010, "license_type": "no_license", "max_line_length": 122, "num_lines": 215, "path": "/HttpPostAlert.py", "repo_name": "kumarabie/HttppostEs", "src_encoding": "UTF-8", "text": "import copy\r\nimport json\r\nimport os\r\nimport requests\r\n\r\nfrom requests.exceptions import RequestException\r\nfrom staticconf.loader import yaml_loader\r\nfrom texttable import Texttable\r\nfrom .util import EAException\r\nfrom .util import elastalert_logger\r\nfrom .util import lookup_es_key\r\n\r\n\r\nclass DateTimeEncoder(json.JSONEncoder):\r\n def default(self, obj):\r\n if hasattr(obj, 'isoformat'):\r\n return obj.isoformat()\r\n else:\r\n return json.JSONEncoder.default(self, obj)\r\n\r\nclass HTTPPostAlerter(Alerter):\r\n \"\"\" Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. 
\"\"\"\r\n\r\n def __init__(self, rule):\r\n super(HTTPPostAlerter, self).__init__(rule)\r\n post_url = self.rule.get('http_post_url')\r\n if isinstance(post_url, str):\r\n post_url = [post_url]\r\n self.post_url = post_url\r\n self.post_proxy = self.rule.get('http_post_proxy')\r\n self.post_payload = self.rule.get('http_post_payload', {})\r\n self.post_static_payload = self.rule.get('http_post_static_payload', {})\r\n self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload)\r\n self.post_http_headers = self.rule.get('http_post_headers', {})\r\n self.timeout = self.rule.get('http_post_timeout', 10)\r\n\r\n def alert(self, matches):\r\n \"\"\" Each match will trigger a POST to the specified endpoint(s). \"\"\"\r\n for match in matches:\r\n payload = match if self.post_all_values else {}\r\n payload.update(self.post_static_payload)\r\n for post_key, es_key in list(self.post_payload.items()):\r\n payload[post_key] = lookup_es_key(match, es_key)\r\n headers = {\r\n \"Content-Type\": \"application/json\",\r\n \"Accept\": \"application/json;charset=utf-8\"\r\n }\r\n headers.update(self.post_http_headers)\r\n proxies = {'https': self.post_proxy} if self.post_proxy else None\r\n for url in self.post_url:\r\n try:\r\n response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder),\r\n headers=headers, proxies=proxies, timeout=self.timeout)\r\n response.raise_for_status()\r\n except RequestException as e:\r\n raise EAException(\"Error posting HTTP Post alert: %s\" % e)\r\n elastalert_logger.info(\"HTTP Post alert sent.\")\r\n\r\n def get_info(self):\r\n return {'type': 'http_post',\r\n 'http_post_webhook_url': self.post_url}\r\n\r\n\r\n class Alerter(object):\r\n \"\"\" Base class for types of alerts.\r\n\r\n :param rule: The rule configuration.\r\n \"\"\"\r\n required_options = frozenset([])\r\n\r\n def __init__(self, rule):\r\n self.rule = rule\r\n # pipeline object is created by ElastAlerter.send_alert()\r\n # and attached to each 
alerters used by a rule before calling alert()\r\n self.pipeline = None\r\n self.resolve_rule_references(self.rule)\r\n\r\n def resolve_rule_references(self, root):\r\n # Support referencing other top-level rule properties to avoid redundant copy/paste\r\n if type(root) == list:\r\n # Make a copy since we may be modifying the contents of the structure we're walking\r\n for i, item in enumerate(copy.copy(root)):\r\n if type(item) == dict or type(item) == list:\r\n self.resolve_rule_references(root[i])\r\n else:\r\n root[i] = self.resolve_rule_reference(item)\r\n elif type(root) == dict:\r\n # Make a copy since we may be modifying the contents of the structure we're walking\r\n for key, value in root.copy().items():\r\n if type(value) == dict or type(value) == list:\r\n self.resolve_rule_references(root[key])\r\n else:\r\n root[key] = self.resolve_rule_reference(value)\r\n\r\n def resolve_rule_reference(self, value):\r\n strValue = str(value)\r\n if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule:\r\n if type(value) == int:\r\n return int(self.rule[strValue[1:-1]])\r\n else:\r\n return self.rule[strValue[1:-1]]\r\n else:\r\n return value\r\n\r\n def alert(self, match):\r\n \"\"\" Send an alert. Match is a dictionary of information about the alert.\r\n\r\n :param match: A dictionary of relevant information to the alert.\r\n \"\"\"\r\n raise NotImplementedError()\r\n\r\n def get_info(self):\r\n \"\"\" Returns a dictionary of data related to this alert. At minimum, this should contain\r\n a field type corresponding to the type of Alerter. \"\"\"\r\n return {'type': 'Unknown'}\r\n\r\n def create_title(self, matches):\r\n \"\"\" Creates custom alert title to be used, e.g. 
as an e-mail subject or JIRA issue summary.\r\n\r\n :param matches: A list of dictionaries of relevant information to the alert.\r\n \"\"\"\r\n if 'alert_subject' in self.rule:\r\n return self.create_custom_title(matches)\r\n\r\n return self.create_default_title(matches)\r\n\r\n def create_custom_title(self, matches):\r\n alert_subject = str(self.rule['alert_subject'])\r\n alert_subject_max_len = int(self.rule.get('alert_subject_max_len', 2048))\r\n\r\n if 'alert_subject_args' in self.rule:\r\n alert_subject_args = self.rule['alert_subject_args']\r\n alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args]\r\n\r\n # Support referencing other top-level rule properties\r\n # This technically may not work if there is a top-level rule property with the same name\r\n # as an es result key, since it would have been matched in the lookup_es_key call above\r\n for i, subject_value in enumerate(alert_subject_values):\r\n if subject_value is None:\r\n alert_value = self.rule.get(alert_subject_args[i])\r\n if alert_value:\r\n alert_subject_values[i] = alert_value\r\n\r\n missing = self.rule.get('alert_missing_value', '<MISSING VALUE>')\r\n alert_subject_values = [missing if val is None else val for val in alert_subject_values]\r\n alert_subject = alert_subject.format(*alert_subject_values)\r\n\r\n if len(alert_subject) > alert_subject_max_len:\r\n alert_subject = alert_subject[:alert_subject_max_len]\r\n\r\n return alert_subject\r\n\r\n def create_alert_body(self, matches):\r\n body = self.get_aggregation_summary_text(matches)\r\n if self.rule.get('alert_text_type') != 'aggregation_summary_only':\r\n for match in matches:\r\n body += str(BasicMatchString(self.rule, match))\r\n # Separate text of aggregated alerts with dashes\r\n if len(matches) > 1:\r\n body += '\\n----------------------------------------\\n'\r\n return body\r\n\r\n def get_aggregation_summary_text__maximum_width(self):\r\n \"\"\"Get maximum width allowed for summary 
text.\"\"\"\r\n return 80\r\n\r\n def get_aggregation_summary_text(self, matches):\r\n text = ''\r\n if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:\r\n text = self.rule.get('summary_prefix', '')\r\n summary_table_fields = self.rule['summary_table_fields']\r\n if not isinstance(summary_table_fields, list):\r\n summary_table_fields = [summary_table_fields]\r\n # Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered\r\n summary_table_fields_with_count = summary_table_fields + ['count']\r\n text += \"Aggregation resulted in the following data for summary_table_fields ==> {0}:\\n\\n\".format(\r\n summary_table_fields_with_count\r\n )\r\n text_table = Texttable(max_width=self.get_aggregation_summary_text__maximum_width())\r\n text_table.header(summary_table_fields_with_count)\r\n # Format all fields as 'text' to avoid long numbers being shown as scientific notation\r\n text_table.set_cols_dtype(['t' for i in summary_table_fields_with_count])\r\n match_aggregation = {}\r\n\r\n # Maintain an aggregate count for each unique key encountered in the aggregation period\r\n for match in matches:\r\n key_tuple = tuple([str(lookup_es_key(match, key)) for key in summary_table_fields])\r\n if key_tuple not in match_aggregation:\r\n match_aggregation[key_tuple] = 1\r\n else:\r\n match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1\r\n for keys, count in match_aggregation.items():\r\n text_table.add_row([key for key in keys] + [count])\r\n text += text_table.draw() + '\\n\\n'\r\n text += self.rule.get('summary_prefix', '')\r\n return str(text)\r\n\r\n def create_default_title(self, matches):\r\n return self.rule['name']\r\n\r\n def get_account(self, account_file):\r\n \"\"\" Gets the username and password from an account file.\r\n\r\n :param account_file: Path to the file which contains user and password information.\r\n It can be either an absolute file path or one that is relative to the given 
rule.\r\n \"\"\"\r\n if os.path.isabs(account_file):\r\n account_file_path = account_file\r\n else:\r\n account_file_path = os.path.join(os.path.dirname(self.rule['rule_file']), account_file)\r\n account_conf = yaml_loader(account_file_path)\r\n if 'user' not in account_conf or 'password' not in account_conf:\r\n raise EAException('Account file must have user and password fields')\r\n self.user = account_conf['user']\r\n self.password = account_conf['password']\r\n\r\n\r\n" }, { "alpha_fraction": 0.45985400676727295, "alphanum_fraction": 0.6496350169181824, "avg_line_length": 16.68181800842285, "blob_id": "25c6b53f0247be650b0a1fbb8d2833916eb7ce0a", "content_id": "305f6815e332f76cdfcb8be0d76ee493eaa8fbf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 411, "license_type": "no_license", "max_line_length": 29, "num_lines": 22, "path": "/requirement.txt", "repo_name": "kumarabie/HttppostEs", "src_encoding": "UTF-8", "text": "apscheduler>=3.3.0\r\naws-requests-auth>=0.3.0\r\nblist>=1.3.6\r\nboto3>=1.4.4\r\ncffi>=1.11.5\r\nconfigparser>=3.5.0\r\ncroniter>=0.3.16\r\nelasticsearch>=7.0.0\r\nenvparse>=0.2.0\r\nexotel>=0.1.3\r\njira>=1.0.10,<1.0.15\r\njsonschema>=3.0.2\r\nmock>=2.0.0\r\nprison>=0.1.2\r\npy-zabbix==1.1.3\r\nPyStaticConfiguration>=0.10.3\r\npython-dateutil>=2.6.0,<2.7.0\r\nPyYAML>=5.1\r\nrequests>=2.0.0\r\nstomp.py>=4.1.17\r\ntexttable>=0.8.8\r\ntwilio==6.0.0\r\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 13, "blob_id": "9f3cbdaad5f110d2e6365eee3d12a3b0f3e7f5c3", "content_id": "30a7d7139534000e39a6c331089cb47bcf6127c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 56, "license_type": "no_license", "max_line_length": 40, "num_lines": 4, "path": "/README.md", "repo_name": "kumarabie/HttppostEs", "src_encoding": "UTF-8", "text": "# HttppostEs\n\n\nFor HTTP post Alerting in Elastic 
search\n" } ]
4
danielkag2000/ML_voice
https://github.com/danielkag2000/ML_voice
1fdd8a58b7a21e74d1ac15d70e129919ff869362
d5eb888a4f60d7bb11a2e2a2a582eedce800a024
2dbf8a91ee40b737bc55a7e91ceec8438db1f958
refs/heads/master
2020-07-03T05:11:49.989800
2019-08-11T17:29:45
2019-08-11T17:29:45
201,794,646
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7900552749633789, "alphanum_fraction": 0.7955800890922546, "avg_line_length": 59.33333206176758, "blob_id": "d40704ba9c6e4fd71cb0399874e62326c730cc99", "content_id": "b26104498aa62df2d2f7f6426ef5c2ba779d1c34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 181, "license_type": "no_license", "max_line_length": 87, "num_lines": 3, "path": "/README.md", "repo_name": "danielkag2000/ML_voice", "src_encoding": "UTF-8", "text": "# ML_voice\nby using voice image, the model learn to identify the diffrent voice image by using CNN\nthe data_set can be downloaded from https://github.com/orsanawwad/ML4_dataset.git\n" }, { "alpha_fraction": 0.6624037027359009, "alphanum_fraction": 0.677517831325531, "avg_line_length": 24.391143798828125, "blob_id": "4061e8acdfe64f6db5f48428dee7e23fa1ad5609", "content_id": "e7c2ea1d8074634f73506bbf6c50c8203c91f2e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6881, "license_type": "no_license", "max_line_length": 82, "num_lines": 271, "path": "/main.py", "repo_name": "danielkag2000/ML_voice", "src_encoding": "UTF-8", "text": "from gcommand_loader import GCommandLoader\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nfrom sys import argv\n\nlabel_num = 30\nwindow_size = 3\nepochs = 100\nseed = 19\nmodule_file = \"cnn_data\"\ndebug = True\n\n\ndef create_conv2d(size_in, ch_in, ch_out, ker, stride=1, device=None):\n\t\"\"\"\n\tCreates a convolution instance with the given parameters and\n\treturns the new output size based on the parameters\n\t:param size_in: the input size\n\t:param ch_in: the number of input channels\n\t:param ch_out: the number of output channels\n\t:param ker: the kernel size\n\t:param stride: the stride\n\t:param device: the device\n\t:return:\n\t\t\tconv: the matrix of the conv\n\t\t\tsize_new: the new 
size of the output\n\t\"\"\"\n\t# make the conv\n\tconv = nn.Conv2d(ch_in, ch_out, kernel_size=ker, stride=stride)\n\t# to run on a device\n\tif device:\n\t\tconv = conv.to(device)\n\n\t# calculate the new size\n\tsize_new = [(size - ker) // stride + 1 for size in size_in]\n\n\treturn conv, size_new\n\n\nclass CNN(nn.Module):\n\tdef __init__(self, vid_size, device):\n\t\t\"\"\"\n\t\t:param vid_size: the input size\n\t\t:param device: the device to run on\n\t\t\"\"\"\n\t\tsuper(CNN, self).__init__()\n\n\t\t# create conv1\n\t\tself.conv1, size = create_conv2d(vid_size, 1, 6, 10, device=device)\n\n\t\t# for the pooling\n\t\tsize = [s // window_size for s in size]\n\t\t# create conv2\n\t\tself.conv2, size = create_conv2d(size, 6, 16, 5, device=device)\n\n\t\t# for the pooling\n\t\tsize = [s // window_size for s in size]\n\n\t\t# define the layers (to be linear)\n\t\t# first (the size after conv2 and second pooling X 120)\n\t\tself.fc0 = nn.Linear(size[0] * size[1] * 16, 120).to(device)\n\n\t\t# second (120 X 84)\n\t\tself.fc1 = nn.Linear(120, 84).to(device)\n\n\t\t# third (84 X label_num=30)\n\t\tself.fc2 = nn.Linear(84, label_num).to(device)\n\n\tdef forward(self, x):\n\t\t\"\"\"\n\t\tDo forward propagation\n\t\t:param x the example\n\t\t:return: the output of the last layer (with softmax)\n\t\t\"\"\"\n\t\t# Max pooling over a (2, 2) window\n\t\tx = F.max_pool2d(F.relu(self.conv1(x)), window_size)\n\t\t# If the size is a square you can only specify a single number\n\t\tx = F.max_pool2d(F.relu(self.conv2(x)), window_size)\n\t\tx = x.view(-1, self.get_vector_size(x))\n\t\tx = F.relu(self.fc0(x))\n\t\tx = F.relu(self.fc1(x))\n\t\tx = self.fc2(x)\n\t\t# return F.log_softmax(x, dim=1)\n\t\treturn x\n\n\t@staticmethod\n\tdef get_vector_size(x):\n\t\t\"\"\"\n\t\tGet the vector size\n\t\t:param x the example\n\t\t:return: the size of the flat vector (the size of the 1D array of the vector)\n\t\t\"\"\"\n\t\tnumber_of_features = 1\n\t\tsize = x.size()[1:] # all dimensions 
except the batch dimension\n\t\tfor s in size:\n\t\t\tnumber_of_features *= s\n\t\treturn number_of_features\n\n\ndef train(n_epochs, model, train_loader, optimizer, device):\n\t\"\"\"\n\tTrain the model\n\t:param n_epochs the number of epochs to train with the dataset\n\t:param model the model\n\t:param train_loader the train loader\n\t:param optimizer the optimizer\n\t:param device the device\n\t\"\"\"\n\tif debug:\n\t\tprint(\"Started Training\")\n\t# run epochs times\n\tfor epoch in range(n_epochs):\n\t\tif debug:\n\t\t\tprint(\"epoch:\", epoch)\n\t\t# train the model\n\t\tmodel.train()\n\t\tfor batch_idx, (data, labels) in enumerate(train_loader):\n\t\t\tdata = data.to(device)\n\t\t\tlabels = labels.to(device)\n\t\t\toptimizer.zero_grad()\n\t\t\toutput = model(data)\n\t\t\t# loss = F.nll_loss(output, labels)\n\t\t\tloss = F.cross_entropy(output, labels)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\tif debug:\n\t\tprint(\"Finished Training\")\n\n\ndef load_module(model):\n\t\"\"\"\n\tLoads a model from the file\n\t:param model: te model itself\n\t:return: nothing, void\n\t\"\"\"\n\tif os.path.isfile(module_file):\n\t\tif debug:\n\t\t\tprint(\"Started Loading Module\")\n\t\tmodel.load_state_dict(torch.load(module_file))\n\t\tif debug:\n\t\t\tprint(\"Finished Loading Module\")\n\telse:\n\t\tprint(\"Couldn't Load Module, File '{}' Doesn't Exists\".format(load_module))\n\t\texit()\n\n\ndef validate(model, val_loader, device):\n\t\"\"\"\n\tValidate the model\n\t:param model the model\n\t:param val_loader the validation loader\n\t:param device the device\n\t\"\"\"\n\tif debug:\n\t\tprint(\"Started Validating\")\n\n\tcorrect = 0\n\ttotal = 0\n\n\tfor i, (inputs, labels) in enumerate(val_loader):\n\t\tinputs, labels = inputs.to(device), labels.to(device)\n\t\toutput = model(inputs)\n\t\tcorrect += output.argmax(dim=1).eq(labels).sum().item()\n\t\ttotal += len(output)\n\n\tif debug:\n\t\tprint(\"Finished Validating\")\n\tprint('Accuracy: {}/{} 
({:.0f}%)'.format(correct, total, 100. * correct / total))\n\n\treturn 100. * correct / total\n\n\ndef test(model, test_loader, output_file):\n\t\"\"\"\n\tValidate the model\n\t:param model: the model\n\t:param test_loader: the testing loader\n\t:param output_file: the output file to write all the classifications\n\t\"\"\"\n\tif debug:\n\t\tprint(\"Started Testing\")\n\tmodel.to(\"cpu\")\n\tmodel.eval()\n\t# get the paths to all the testing files\n\tpaths = test_loader.dataset.spects\n\tpaths = [os.path.basename(p[0]) for p in paths]\n\t# get the files themselves\n\tinputs = [x[0] for x in test_loader]\n\t# if it's the first one to test\n\tfirst = True\n\n\t# open the test output file\n\twith open(output_file, \"w\") as f:\n\t\t# go over each input file\n\t\tfor x, path in zip(inputs, paths):\n\t\t\t# do classification\n\t\t\tvalue, classification = torch.max(model.forward(x), 1)\n\t\t\t# write the classification into the output file\n\t\t\tif not first:\n\t\t\t\tf.write(\"\\n\")\n\t\t\tf.write(path + \", \" + str(classification.item()))\n\t\t\tfirst = False\n\n\tif debug:\n\t\tprint(\"Finished Testing\")\n\n\ndef main():\n\tnp.random.seed(seed)\n\ttorch.manual_seed(seed)\n\n\t# get the data set\n\tdataset = GCommandLoader('./data/train')\n\n\t# create the train loader\n\ttrain_loader = torch.utils.data.DataLoader(\n\t\tdataset, batch_size=100, shuffle=True,\n\t\tnum_workers=8, pin_memory=True, sampler=None)\n\n\t# get the validation data set\n\tvalid_set = GCommandLoader('./data/valid')\n\n\t# create the validation loader\n\tvalid_loader = torch.utils.data.DataLoader(\n\t\tvalid_set, batch_size=100, shuffle=False,\n\t\tnum_workers=8, pin_memory=True, sampler=None)\n\n\t# get the validation data set\n\ttest_set = GCommandLoader('./data/test')\n\n\t# create the validation loader\n\ttest_loader = torch.utils.data.DataLoader(\n\t\ttest_set, batch_size=1, shuffle=False,\n\t\tnum_workers=8, pin_memory=True, sampler=None)\n\n\t# use cuda (for running on the gpu)\n\tcuda 
= torch.cuda.is_available()\n\tdevice = torch.device(\"cuda:0\" if cuda else \"cpu\")\n\n\t# create the model\n\tmodel = CNN([161, 101], device)\n\t# model = CNN(device)\n\n\t# create the optimizer\n\t# optimizer = optim.SGD(model.parameters(), lr=learning_rate)\n\toptimizer = optim.Adam(model.parameters())\n\n\t# -- TRAIN -- #\n\tif len(argv) <= 1 or argv[1] == \"train\":\n\t\ttrain(epochs, model, train_loader, optimizer, device)\n\t\t# -- SAVE -- #\n\t\ttorch.save(model.state_dict(), module_file)\n\telse:\n\t\tload_module(model)\n\n\t# -- VALIDATION -- #\n\tvalidate(model, valid_loader, device)\n\n\t# -- TEST -- #\n\tif len(argv) <= 1 or argv[1] == \"test\":\n\t\ttest(model, test_loader, \"test_y\")\n\n\nif __name__ == \"__main__\":\n\tprint(\"Started\")\n\tmain()\n\tprint(\"Fnished\")\n" } ]
2
YuPo13/py_fcsv
https://github.com/YuPo13/py_fcsv
bf16dff9cb0e586a75ac26d37e68100a745f364b
4f7d3e9d0a4ba051fa8dcb47f532cbf5cb40a3aa
96364864ee715363cc2ea12ce573ec6da891f997
refs/heads/master
2020-09-22T23:42:02.647257
2019-12-03T15:18:51
2019-12-03T15:18:51
225,345,799
0
0
null
2019-12-02T10:19:18
2019-12-02T05:50:53
2019-12-02T10:17:53
null
[ { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 27, "blob_id": "1d76bf29d73c824af7887bcc357db49a1364e444", "content_id": "fcc8253bce44b23e9436ea729c7305cf99dec059", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "no_license", "max_line_length": 64, "num_lines": 15, "path": "/fcsv.py", "repo_name": "YuPo13/py_fcsv", "src_encoding": "UTF-8", "text": "\"\"\"This program collects quantitative product data (price, q-ty)\n and calculates the total value\"\"\"\nimport csv\n\n\ndef calc_price(filename, open_=open):\n \"\"\"Find the total value of all products\"\"\"\n result = 0\n csv_file = open_(filename, \"r\")\n csv_reader = csv.reader(csv_file, delimiter=',')\n for line in csv_reader:\n result += float(line[1]) * float(line[2])\n filename.close()\n\n return result\n" } ]
1
TDL3/country-flags-helper
https://github.com/TDL3/country-flags-helper
a77dd28cb9e8b74e357d2135a95410455d64ec31
22c607b5298e8638ea092b8b1b1095c7f0b3e0cc
e8409a29d34f62f35a976654ac75be906cb3d276
refs/heads/main
2023-02-18T14:38:35.186520
2021-01-04T13:11:11
2021-01-04T13:11:11
326,686,238
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5917207598686218, "alphanum_fraction": 0.5949675440788269, "avg_line_length": 31.3157901763916, "blob_id": "a721436335456124f65892e065c322647ad45a90", "content_id": "65d654b6b9763a3f12be2beed00f91ad0fd88600", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1232, "license_type": "no_license", "max_line_length": 88, "num_lines": 38, "path": "/rename.py", "repo_name": "TDL3/country-flags-helper", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n\nimport re\nimport os\n\n# parse ISO country codes table\ncodes_dict = {}\nwith open(\"iso.txt\", \"r\") as codes:\n country_name_regex = r\"^.*\\t\\t\"\n country_code_regex = r\"\\t\\w{2}\"\n for line in codes.readlines():\n country_name = re.findall(country_name_regex, line)[0].strip()\n country_code = re.findall(country_code_regex, line)[0].strip()\n codes_dict[country_code] = country_name\n\n# Convert country names to capitalized names\n# For instance, UNITED STATES OF AMERICA --> United States of America\nfor key, value in codes_dict.items():\n elements = value.split()\n capitalized_name = \"\"\n for e in elements:\n if e == \"AND\":\n e = \"and\"\n elif e == \"OF\":\n e = \"of\"\n else:\n e = e.capitalize()\n capitalized_name += e + \" \"\n codes_dict[key] = capitalized_name\n\n# Rename\nflag_path = \"./svg/\"\nflags = [f for f in os.listdir(flag_path) if os.path.isfile(os.path.join(flag_path, f))]\nfor flag in flags:\n country_code = flag.split(\".\")[0].upper()\n new_name = codes_dict[country_code] + \" \" + country_code + \".svg\"\n os.rename(flag_path + flag, flag_path + new_name)\n print(\"Renamed: \" + flag + \" --> \" + new_name)\n " }, { "alpha_fraction": 0.8040540814399719, "alphanum_fraction": 0.8040540814399719, "avg_line_length": 48.66666793823242, "blob_id": "b388dd9d82f902223306213d2b85da62fecb97b7", "content_id": "afa0c6079be8e6471c452d351680a99758c4c648", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 148, "license_type": "no_license", "max_line_length": 133, "num_lines": 3, "path": "/README.md", "repo_name": "TDL3/country-flags-helper", "src_encoding": "UTF-8", "text": "# Flag Helper\n\nThis helper convert ISO country codes to human-friendly country names for [country-flags](https://github.com/hjnilsson/country-flags)" } ]
2
krezreb/openapi-client-clevercloud
https://github.com/krezreb/openapi-client-clevercloud
b8e8454e571d0e08eae6ea5a83a129b1928c085c
e42e5380c6fd4003cdfbdccda89f9dd5a3c76754
61d888900947129abd1cbb1a7d99296ca6c31dcf
refs/heads/master
2023-05-06T19:28:16.547835
2021-05-28T08:31:35
2021-05-28T08:31:35
371,631,524
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5275715589523315, "alphanum_fraction": 0.5275715589523315, "avg_line_length": 44.95121765136719, "blob_id": "a6b83fdee5eea8bd442c4cd8145e9db976da9147", "content_id": "7e044a27d6a1ce8a4df19acb9ad3378440811967", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1886, "license_type": "no_license", "max_line_length": 168, "num_lines": 41, "path": "/docs/InvoiceRendering.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# InvoiceRendering\n\n\n## Properties\nName | Type | Description | Notes\n------------ | ------------- | ------------- | -------------\n**id** | **str** | | [optional] \n**number** | **str** | | [optional] \n**status** | **str** | | [optional] \n**emission_date** | **str** | | [optional] \n**payment_date** | **str** | | [optional] \n**name** | **str** | | [optional] \n**company** | **str** | | [optional] \n**target** | **str** | | [optional] \n**address** | **str** | | [optional] \n**zip_code** | **str** | | [optional] \n**city** | **str** | | [optional] \n**country** | **str** | | [optional] \n**country_code** | **str** | | [optional] \n**vat_number** | **str** | | [optional] \n**from_subscription** | **bool** | | [optional] \n**lines** | [**[InvoiceLineRendering]**](InvoiceLineRendering.md) | | [optional] \n**original_total** | **float** | | [optional] \n**total_ht** | **float** | | [optional] \n**total_vat** | **float** | | [optional] \n**total_ttc** | **float** | | [optional] \n**type** | **str** | | [optional] \n**paying_user** | [**OrganisationMemberView**](OrganisationMemberView.md) | | [optional] \n**error_code** | **str** | | [optional] \n**error_short_msg** | **str** | | [optional] \n**error_long_msg** | **str** | | [optional] \n**token** | **str** | | [optional] \n**target_id** | **str** | | [optional] \n**for_id** | **str** | | [optional] \n**customer_order_id** | **str** | | [optional] \n**customer_cost_center** | **str** 
| | [optional] \n**vat_rate** | **float** | | [optional] \n**pay_when** | **str** | | [optional] \n**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]\n\n[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)\n\n\n" }, { "alpha_fraction": 0.6620630621910095, "alphanum_fraction": 0.6647564172744751, "avg_line_length": 28.3260498046875, "blob_id": "7d9cc9c7e0fb914c78662dd2969d9ffe774f20a1", "content_id": "4becc2f7deec42b2deef29d9be37293b4fc8a504", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 17450, "license_type": "no_license", "max_line_length": 180, "num_lines": 595, "path": "/docs/PaymentApi.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# openapi_client.PaymentApi\n\nAll URIs are relative to *https://api.clever-cloud.com/v2*\n\nMethod | HTTP request | Description\n------------- | ------------- | -------------\n[**check_vat**](PaymentApi.md#check_vat) | **GET** /vat_check | \n[**end_payment_with_stripe**](PaymentApi.md#end_payment_with_stripe) | **POST** /payments/{bid}/end/stripe | \n[**get_available_payment_providers**](PaymentApi.md#get_available_payment_providers) | **GET** /payments/providers | \n[**get_coupon**](PaymentApi.md#get_coupon) | **GET** /payments/coupons/{name} | \n[**get_invoice_status_button**](PaymentApi.md#get_invoice_status_button) | **GET** /payments/assets/pay_button/{token}/button.png | \n[**get_stripe_token**](PaymentApi.md#get_stripe_token) | **GET** /payments/tokens/stripe | \n[**stripe_sepa_webhook**](PaymentApi.md#stripe_sepa_webhook) | **POST** /payments/webhooks/stripe/sepa | \n[**update_stripe_payment**](PaymentApi.md#update_stripe_payment) | **PUT** /payments/{bid}/end/stripe | 
\n[**validate**](PaymentApi.md#validate) | **GET** /validation/vat/{key} | \n\n\n# **check_vat**\n> VatResult check_vat()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import payment_api\nfrom openapi_client.model.vat_result import VatResult\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = payment_api.PaymentApi(api_client)\n country = \"country_example\" # str | (optional)\n vat = \"vat_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.check_vat(country=country, vat=vat)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling PaymentApi->check_vat: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **country** | **str**| | [optional]\n **vat** | **str**| | [optional]\n\n### Return type\n\n[**VatResult**](VatResult.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **end_payment_with_stripe**\n> InvoiceRendering end_payment_with_stripe(bid, 
payment_data)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import payment_api\nfrom openapi_client.model.payment_data import PaymentData\nfrom openapi_client.model.stripe_confirmation_error_message import StripeConfirmationErrorMessage\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = payment_api.PaymentApi(api_client)\n bid = \"bid_example\" # str | \n payment_data = PaymentData(\n type=\"NEW_CARD\",\n token=\"token_example\",\n device_data=\"device_data_example\",\n ) # PaymentData | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.end_payment_with_stripe(bid, payment_data)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling PaymentApi->end_payment_with_stripe: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **bid** | **str**| |\n **payment_data** | [**PaymentData**](PaymentData.md)| |\n\n### Return type\n\n[**InvoiceRendering**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n**402** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_available_payment_providers**\n> [PaymentProviderView] get_available_payment_providers()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import payment_api\nfrom openapi_client.model.payment_provider_view import PaymentProviderView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = payment_api.PaymentApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_available_payment_providers()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling PaymentApi->get_available_payment_providers: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[PaymentProviderView]**](PaymentProviderView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_coupon**\n> CouponView get_coupon(name)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import payment_api\nfrom openapi_client.model.coupon_view import CouponView\nfrom pprint 
import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = payment_api.PaymentApi(api_client)\n name = \"name_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_coupon(name)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling PaymentApi->get_coupon: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **name** | **str**| |\n\n### Return type\n\n[**CouponView**](CouponView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_invoice_status_button**\n> get_invoice_status_button(token)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import payment_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith 
openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = payment_api.PaymentApi(api_client)\n token = \"token_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.get_invoice_status_button(token)\n except openapi_client.ApiException as e:\n print(\"Exception when calling PaymentApi->get_invoice_status_button: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **token** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: image/png\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_stripe_token**\n> BraintreeToken get_stripe_token()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import payment_api\nfrom openapi_client.model.braintree_token import BraintreeToken\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = payment_api.PaymentApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_stripe_token()\n pprint(api_response)\n except 
openapi_client.ApiException as e:\n print(\"Exception when calling PaymentApi->get_stripe_token: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**BraintreeToken**](BraintreeToken.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **stripe_sepa_webhook**\n> stripe_sepa_webhook()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import payment_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = payment_api.PaymentApi(api_client)\n stripe_signature = \"Stripe-Signature_example\" # str | (optional)\n body = \"body_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.stripe_sepa_webhook(stripe_signature=stripe_signature, body=body)\n except openapi_client.ApiException as e:\n print(\"Exception when calling PaymentApi->stripe_sepa_webhook: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **stripe_signature** | **str**| | [optional]\n **body** | **str**| | [optional]\n\n### 
Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **update_stripe_payment**\n> InvoiceRendering update_stripe_payment(bid, setup_intent_view)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import payment_api\nfrom openapi_client.model.setup_intent_view import SetupIntentView\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = payment_api.PaymentApi(api_client)\n bid = \"bid_example\" # str | \n setup_intent_view = SetupIntentView(\n owner_id=\"owner_id_example\",\n id=\"id_example\",\n client_secret=\"client_secret_example\",\n customer=\"customer_example\",\n ) # SetupIntentView | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.update_stripe_payment(bid, setup_intent_view)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling PaymentApi->update_stripe_payment: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | 
------------- | -------------\n **bid** | **str**| |\n **setup_intent_view** | [**SetupIntentView**](SetupIntentView.md)| |\n\n### Return type\n\n[**InvoiceRendering**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **validate**\n> Message validate(key)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import payment_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = payment_api.PaymentApi(api_client)\n key = \"key_example\" # str | \n action = \"action_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.validate(key)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling PaymentApi->validate: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.validate(key, action=action)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling PaymentApi->validate: %s\\n\" % 
e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **key** | **str**| |\n **action** | **str**| | [optional]\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n" }, { "alpha_fraction": 0.7127862572669983, "alphanum_fraction": 0.7213740348815918, "avg_line_length": 25.200000762939453, "blob_id": "3d4b62e74c4953103f250eee307ba7e12aadb4e7", "content_id": "9169fda09d5df934e67acc39c2f9958d1d4d8cdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 80, "num_lines": 40, "path": "/test/test_invoice_rendering.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.invoice_line_rendering import InvoiceLineRendering\nfrom openapi_client.model.organisation_member_view import OrganisationMemberView\nglobals()['InvoiceLineRendering'] = InvoiceLineRendering\nglobals()['OrganisationMemberView'] = OrganisationMemberView\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\n\n\nclass TestInvoiceRendering(unittest.TestCase):\n \"\"\"InvoiceRendering unit test 
stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testInvoiceRendering(self):\n \"\"\"Test InvoiceRendering\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = InvoiceRendering() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6649178266525269, "alphanum_fraction": 0.6675153374671936, "avg_line_length": 28.635671615600586, "blob_id": "d30b41e74c7abc7491aadb89071018c816d5ca36", "content_id": "c8f4bd8a20f8f9b89e2a1ba8f7f246adc9c0225c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 38883, "license_type": "no_license", "max_line_length": 180, "num_lines": 1312, "path": "/docs/ProductsApi.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# openapi_client.ProductsApi\n\nAll URIs are relative to *https://api.clever-cloud.com/v2*\n\nMethod | HTTP request | Description\n------------- | ------------- | -------------\n[**bill_owner**](ProductsApi.md#bill_owner) | **POST** /vendor/apps/{addonId}/consumptions | \n[**edit_application_configuration**](ProductsApi.md#edit_application_configuration) | **PUT** /vendor/apps/{addonId} | \n[**end_addon_migration**](ProductsApi.md#end_addon_migration) | **PUT** /vendor/apps/{addonId}/migration_callback | \n[**get_addon_provider**](ProductsApi.md#get_addon_provider) | **GET** /products/addonproviders/{provider_id} | \n[**get_addon_provider_infos**](ProductsApi.md#get_addon_provider_infos) | **GET** /products/addonproviders/{provider_id}/informations | \n[**get_addon_provider_versions**](ProductsApi.md#get_addon_provider_versions) | **GET** /products/addonproviders/{provider_id}/versions | \n[**get_addon_providers**](ProductsApi.md#get_addon_providers) | **GET** /products/addonproviders | \n[**get_application_info**](ProductsApi.md#get_application_info) | **GET** /vendor/apps/{addonId} | 
\n[**get_available_instances**](ProductsApi.md#get_available_instances) | **GET** /products/instances | \n[**get_available_packages**](ProductsApi.md#get_available_packages) | **GET** /products/packages | \n[**get_countries**](ProductsApi.md#get_countries) | **GET** /products/countries | \n[**get_country_codes**](ProductsApi.md#get_country_codes) | **GET** /products/countrycodes | \n[**get_excahnge_rates**](ProductsApi.md#get_excahnge_rates) | **GET** /products/prices | \n[**get_flavors**](ProductsApi.md#get_flavors) | **GET** /products/flavors | \n[**get_instance**](ProductsApi.md#get_instance) | **GET** /products/instances/{type}-{version} | \n[**get_mfa_kinds**](ProductsApi.md#get_mfa_kinds) | **GET** /products/mfa_kinds | \n[**get_zones**](ProductsApi.md#get_zones) | **GET** /products/zones | \n[**list_apps**](ProductsApi.md#list_apps) | **GET** /vendor/apps | \n[**logscollector**](ProductsApi.md#logscollector) | **GET** /vendor/apps/{addonId}/logscollector | \n[**provision_other_addon**](ProductsApi.md#provision_other_addon) | **POST** /vendor/addons | \n\n\n# **bill_owner**\n> bill_owner(addon_id, wannabe_addon_billing)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.wannabe_addon_billing import WannabeAddonBilling\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n addon_id = \"addonId_example\" # str | \n wannabe_addon_billing = WannabeAddonBilling(\n cost=3.14,\n ) # WannabeAddonBilling | \n\n # example passing only required values which don't 
have defaults set\n try:\n api_instance.bill_owner(addon_id, wannabe_addon_billing)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->bill_owner: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n **wannabe_addon_billing** | [**WannabeAddonBilling**](WannabeAddonBilling.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_application_configuration**\n> AddonView edit_application_configuration(addon_id, wannabe_addon_config)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.wannabe_addon_config import WannabeAddonConfig\nfrom openapi_client.model.addon_view import AddonView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n addon_id = \"addonId_example\" # str | \n wannabe_addon_config = WannabeAddonConfig(\n config={\n \"key\": \"key_example\",\n },\n ) # WannabeAddonConfig | \n\n # example 
passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_application_configuration(addon_id, wannabe_addon_config)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->edit_application_configuration: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n **wannabe_addon_config** | [**WannabeAddonConfig**](WannabeAddonConfig.md)| |\n\n### Return type\n\n[**AddonView**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **end_addon_migration**\n> AddonView end_addon_migration(addon_id, wannabe_addon_config)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.wannabe_addon_config import WannabeAddonConfig\nfrom openapi_client.model.addon_view import AddonView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n addon_id = \"addonId_example\" # str | \n wannabe_addon_config = WannabeAddonConfig(\n 
config={\n \"key\": \"key_example\",\n },\n ) # WannabeAddonConfig | \n plan_id = \"plan_id_example\" # str | (optional)\n region = \"region_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.end_addon_migration(addon_id, wannabe_addon_config)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->end_addon_migration: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.end_addon_migration(addon_id, wannabe_addon_config, plan_id=plan_id, region=region)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->end_addon_migration: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n **wannabe_addon_config** | [**WannabeAddonConfig**](WannabeAddonConfig.md)| |\n **plan_id** | **str**| | [optional]\n **region** | **str**| | [optional]\n\n### Return type\n\n[**AddonView**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_provider**\n> AddonProviderInfoFullView get_addon_provider(provider_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.addon_provider_info_full_view import AddonProviderInfoFullView\nfrom pprint import 
pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n provider_id = \"provider_id_example\" # str | \n orga_id = \"orgaId_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addon_provider(provider_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_addon_provider: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_addon_provider(provider_id, orga_id=orga_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_addon_provider: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **provider_id** | **str**| |\n **orga_id** | **str**| | [optional]\n\n### Return type\n\n[**AddonProviderInfoFullView**](AddonProviderInfoFullView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_provider_infos**\n> str 
get_addon_provider_infos(provider_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n provider_id = \"provider_id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addon_provider_infos(provider_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_addon_provider_infos: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **provider_id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_provider_versions**\n> str get_addon_provider_versions(provider_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all 
supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n provider_id = \"provider_id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addon_provider_versions(provider_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_addon_provider_versions: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **provider_id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_providers**\n> [AddonProviderInfoFullView] get_addon_providers()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.addon_provider_info_full_view import AddonProviderInfoFullView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith 
openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n orga_id = \"orgaId_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_addon_providers(orga_id=orga_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_addon_providers: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **orga_id** | **str**| | [optional]\n\n### Return type\n\n[**[AddonProviderInfoFullView]**](AddonProviderInfoFullView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_info**\n> AddonApplicationInfo get_application_info(addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.addon_application_info import AddonApplicationInfo\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n addon_id 
= \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_info(addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_application_info: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n\n### Return type\n\n[**AddonApplicationInfo**](AddonApplicationInfo.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_available_instances**\n> [AvailableInstanceView] get_available_instances()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.available_instance_view import AvailableInstanceView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n _for = \"for_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_available_instances(_for=_for)\n 
pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_available_instances: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **_for** | **str**| | [optional]\n\n### Return type\n\n[**[AvailableInstanceView]**](AvailableInstanceView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_available_packages**\n> [PackageView] get_available_packages()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.package_view import PackageView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n coupon = \"coupon_example\" # str | (optional)\n orga_id = \"orgaId_example\" # str | (optional)\n currency = \"currency_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_available_packages(coupon=coupon, orga_id=orga_id, currency=currency)\n pprint(api_response)\n except 
openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_available_packages: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **coupon** | **str**| | [optional]\n **orga_id** | **str**| | [optional]\n **currency** | **str**| | [optional]\n\n### Return type\n\n[**[PackageView]**](PackageView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_countries**\n> str get_countries()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_countries()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_countries: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not 
defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_country_codes**\n> str get_country_codes()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_country_codes()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_country_codes: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_excahnge_rates**\n> [DropPriceView] get_excahnge_rates()\n\n\n\n### Example\n\n```python\nimport time\nimport 
openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.drop_price_view import DropPriceView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_excahnge_rates()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_excahnge_rates: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[DropPriceView]**](DropPriceView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_flavors**\n> [FlavorView] get_flavors()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.flavor_view import FlavorView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# 
Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n context = \"context_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_flavors(context=context)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_flavors: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **context** | **str**| | [optional]\n\n### Return type\n\n[**[FlavorView]**](FlavorView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_instance**\n> AvailableInstanceView get_instance(type, version)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.available_instance_view import AvailableInstanceView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n type 
= \"type_example\" # str | \n version = \"version_example\" # str | \n _for = \"for_example\" # str | (optional)\n app = \"app_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_instance(type, version)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_instance: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_instance(type, version, _for=_for, app=app)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_instance: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **type** | **str**| |\n **version** | **str**| |\n **_for** | **str**| | [optional]\n **app** | **str**| | [optional]\n\n### Return type\n\n[**AvailableInstanceView**](AvailableInstanceView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_mfa_kinds**\n> [str] get_mfa_kinds()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = 
\"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_mfa_kinds()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->get_mfa_kinds: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_zones**\n> [ZoneView] get_zones()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.zone_view import ZoneView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_zones()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling 
ProductsApi->get_zones: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[ZoneView]**](ZoneView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **list_apps**\n> [AddonApplicationSummary] list_apps()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.addon_application_summary import AddonApplicationSummary\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n offset = 1 # int | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.list_apps(offset=offset)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->list_apps: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **offset** | **int**| | [optional]\n\n### Return type\n\n[**[AddonApplicationSummary]**](AddonApplicationSummary.md)\n\n### Authorization\n\nNo authorization 
required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **logscollector**\n> logscollector(addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.logscollector(addon_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->logscollector: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **provision_other_addon**\n> provision_other_addon(wannabe_inter_addon_provision)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import products_api\nfrom openapi_client.model.wannabe_inter_addon_provision import WannabeInterAddonProvision\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = products_api.ProductsApi(api_client)\n wannabe_inter_addon_provision = WannabeInterAddonProvision(\n organisation_id=\"organisation_id_example\",\n user_id=\"user_id_example\",\n provider_id=\"provider_id_example\",\n addon_id=\"addon_id_example\",\n plan=\"plan_example\",\n name=\"name_example\",\n region=\"region_example\",\n options={\n \"key\": \"key_example\",\n },\n ) # WannabeInterAddonProvision | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.provision_other_addon(wannabe_inter_addon_provision)\n except openapi_client.ApiException as e:\n print(\"Exception when calling ProductsApi->provision_other_addon: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wannabe_inter_addon_provision** | [**WannabeInterAddonProvision**](WannabeInterAddonProvision.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response 
headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n" }, { "alpha_fraction": 0.4329165816307068, "alphanum_fraction": 0.43402454257011414, "avg_line_length": 35.25539016723633, "blob_id": "bdaf91b25779ae23f7e41d1989f818037424b53c", "content_id": "89751f73c082d177f8e0c9db2127199e5c29291c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85744, "license_type": "no_license", "max_line_length": 107, "num_lines": 2365, "path": "/openapi_client/api/products_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom openapi_client.api_client import ApiClient, Endpoint as _Endpoint\nfrom openapi_client.model_utils import ( # noqa: F401\n check_allowed_values,\n check_validations,\n date,\n datetime,\n file_type,\n none_type,\n validate_and_convert_types\n)\nfrom openapi_client.model.addon_application_info import AddonApplicationInfo\nfrom openapi_client.model.addon_application_summary import AddonApplicationSummary\nfrom openapi_client.model.addon_provider_info_full_view import AddonProviderInfoFullView\nfrom openapi_client.model.addon_view import AddonView\nfrom openapi_client.model.available_instance_view import AvailableInstanceView\nfrom openapi_client.model.drop_price_view import DropPriceView\nfrom openapi_client.model.flavor_view import FlavorView\nfrom openapi_client.model.package_view import PackageView\nfrom openapi_client.model.wannabe_addon_billing import 
WannabeAddonBilling\nfrom openapi_client.model.wannabe_addon_config import WannabeAddonConfig\nfrom openapi_client.model.wannabe_inter_addon_provision import WannabeInterAddonProvision\nfrom openapi_client.model.zone_view import ZoneView\n\n\nclass ProductsApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def __bill_owner(\n self,\n addon_id,\n wannabe_addon_billing,\n **kwargs\n ):\n \"\"\"bill_owner # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.bill_owner(addon_id, wannabe_addon_billing, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n wannabe_addon_billing (WannabeAddonBilling):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n kwargs['wannabe_addon_billing'] = \\\n wannabe_addon_billing\n return self.call_with_http_info(**kwargs)\n\n self.bill_owner = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/vendor/apps/{addonId}/consumptions',\n 'operation_id': 'bill_owner',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n 'wannabe_addon_billing',\n ],\n 'required': [\n 'addon_id',\n 'wannabe_addon_billing',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n 'wannabe_addon_billing':\n (WannabeAddonBilling,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n 'wannabe_addon_billing': 'body',\n },\n 
'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__bill_owner\n )\n\n def __edit_application_configuration(\n self,\n addon_id,\n wannabe_addon_config,\n **kwargs\n ):\n \"\"\"edit_application_configuration # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_application_configuration(addon_id, wannabe_addon_config, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n wannabe_addon_config (WannabeAddonConfig):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n kwargs['wannabe_addon_config'] = \\\n wannabe_addon_config\n return self.call_with_http_info(**kwargs)\n\n self.edit_application_configuration = _Endpoint(\n settings={\n 'response_type': (AddonView,),\n 'auth': [],\n 'endpoint_path': '/vendor/apps/{addonId}',\n 'operation_id': 'edit_application_configuration',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n 'wannabe_addon_config',\n ],\n 'required': [\n 'addon_id',\n 'wannabe_addon_config',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n 'wannabe_addon_config':\n (WannabeAddonConfig,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n 
'wannabe_addon_config': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_application_configuration\n )\n\n def __end_addon_migration(\n self,\n addon_id,\n wannabe_addon_config,\n **kwargs\n ):\n \"\"\"end_addon_migration # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.end_addon_migration(addon_id, wannabe_addon_config, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n wannabe_addon_config (WannabeAddonConfig):\n\n Keyword Args:\n plan_id (str): [optional]\n region (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n kwargs['wannabe_addon_config'] = \\\n wannabe_addon_config\n return self.call_with_http_info(**kwargs)\n\n self.end_addon_migration = _Endpoint(\n settings={\n 'response_type': (AddonView,),\n 'auth': [],\n 'endpoint_path': '/vendor/apps/{addonId}/migration_callback',\n 'operation_id': 'end_addon_migration',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n 'wannabe_addon_config',\n 'plan_id',\n 'region',\n ],\n 'required': [\n 'addon_id',\n 'wannabe_addon_config',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n 'wannabe_addon_config':\n (WannabeAddonConfig,),\n 'plan_id':\n (str,),\n 'region':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n 
'plan_id': 'plan_id',\n 'region': 'region',\n },\n 'location_map': {\n 'addon_id': 'path',\n 'wannabe_addon_config': 'body',\n 'plan_id': 'query',\n 'region': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__end_addon_migration\n )\n\n def __get_addon_provider(\n self,\n provider_id,\n **kwargs\n ):\n \"\"\"get_addon_provider # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_provider(provider_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n provider_id (str):\n\n Keyword Args:\n orga_id (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonProviderInfoFullView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['provider_id'] = \\\n provider_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_provider = _Endpoint(\n settings={\n 'response_type': (AddonProviderInfoFullView,),\n 'auth': [],\n 'endpoint_path': '/products/addonproviders/{provider_id}',\n 'operation_id': 'get_addon_provider',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'provider_id',\n 'orga_id',\n ],\n 'required': [\n 'provider_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'provider_id':\n (str,),\n 'orga_id':\n (str,),\n },\n 'attribute_map': {\n 'provider_id': 'provider_id',\n 'orga_id': 'orgaId',\n },\n 'location_map': {\n 'provider_id': 'path',\n 'orga_id': 'query',\n },\n 'collection_format_map': {\n }\n },\n 
headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_provider\n )\n\n def __get_addon_provider_infos(\n self,\n provider_id,\n **kwargs\n ):\n \"\"\"get_addon_provider_infos # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_provider_infos(provider_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n provider_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n 
kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['provider_id'] = \\\n provider_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_provider_infos = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/products/addonproviders/{provider_id}/informations',\n 'operation_id': 'get_addon_provider_infos',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'provider_id',\n ],\n 'required': [\n 'provider_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'provider_id':\n (str,),\n },\n 'attribute_map': {\n 'provider_id': 'provider_id',\n },\n 'location_map': {\n 'provider_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_provider_infos\n )\n\n def __get_addon_provider_versions(\n self,\n provider_id,\n **kwargs\n ):\n \"\"\"get_addon_provider_versions # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_provider_versions(provider_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n provider_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['provider_id'] = \\\n provider_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_provider_versions = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/products/addonproviders/{provider_id}/versions',\n 'operation_id': 'get_addon_provider_versions',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'provider_id',\n ],\n 'required': [\n 'provider_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'provider_id':\n (str,),\n },\n 'attribute_map': {\n 'provider_id': 'provider_id',\n },\n 'location_map': {\n 'provider_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__get_addon_provider_versions\n )\n\n def __get_addon_providers(\n self,\n **kwargs\n ):\n \"\"\"get_addon_providers # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_providers(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n orga_id (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonProviderInfoFullView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_providers = 
_Endpoint(\n settings={\n 'response_type': ([AddonProviderInfoFullView],),\n 'auth': [],\n 'endpoint_path': '/products/addonproviders',\n 'operation_id': 'get_addon_providers',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'orga_id',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'orga_id':\n (str,),\n },\n 'attribute_map': {\n 'orga_id': 'orgaId',\n },\n 'location_map': {\n 'orga_id': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_providers\n )\n\n def __get_application_info(\n self,\n addon_id,\n **kwargs\n ):\n \"\"\"get_application_info # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_info(addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonApplicationInfo\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_info = _Endpoint(\n settings={\n 'response_type': (AddonApplicationInfo,),\n 'auth': [],\n 'endpoint_path': '/vendor/apps/{addonId}',\n 'operation_id': 'get_application_info',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n ],\n 'required': [\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__get_application_info\n )\n\n def __get_available_instances(\n self,\n **kwargs\n ):\n \"\"\"get_available_instances # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_available_instances(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _for (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AvailableInstanceView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_available_instances = 
_Endpoint(\n settings={\n 'response_type': ([AvailableInstanceView],),\n 'auth': [],\n 'endpoint_path': '/products/instances',\n 'operation_id': 'get_available_instances',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n '_for',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n '_for':\n (str,),\n },\n 'attribute_map': {\n '_for': 'for',\n },\n 'location_map': {\n '_for': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_available_instances\n )\n\n def __get_available_packages(\n self,\n **kwargs\n ):\n \"\"\"get_available_packages # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_available_packages(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n coupon (str): [optional]\n orga_id (str): [optional]\n currency (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [PackageView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_available_packages = _Endpoint(\n settings={\n 'response_type': ([PackageView],),\n 'auth': [],\n 'endpoint_path': '/products/packages',\n 'operation_id': 'get_available_packages',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'coupon',\n 'orga_id',\n 'currency',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'coupon':\n (str,),\n 'orga_id':\n (str,),\n 'currency':\n (str,),\n },\n 'attribute_map': {\n 'coupon': 'coupon',\n 'orga_id': 'orgaId',\n 'currency': 'currency',\n },\n 'location_map': {\n 'coupon': 'query',\n 'orga_id': 'query',\n 'currency': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_available_packages\n )\n\n def __get_countries(\n self,\n **kwargs\n ):\n \"\"\"get_countries # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_countries(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_countries = 
_Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/products/countries',\n 'operation_id': 'get_countries',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_countries\n )\n\n def __get_country_codes(\n self,\n **kwargs\n ):\n \"\"\"get_country_codes # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_country_codes(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_country_codes = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/products/countrycodes',\n 'operation_id': 'get_country_codes',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_country_codes\n )\n\n def __get_excahnge_rates(\n self,\n **kwargs\n ):\n \"\"\"get_excahnge_rates # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_excahnge_rates(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [DropPriceView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_excahnge_rates = _Endpoint(\n settings={\n 'response_type': ([DropPriceView],),\n 'auth': [],\n 'endpoint_path': '/products/prices',\n 'operation_id': 'get_excahnge_rates',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': 
[],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_excahnge_rates\n )\n\n def __get_flavors(\n self,\n **kwargs\n ):\n \"\"\"get_flavors # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_flavors(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n context (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [FlavorView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_flavors = _Endpoint(\n settings={\n 'response_type': ([FlavorView],),\n 'auth': [],\n 'endpoint_path': '/products/flavors',\n 'operation_id': 'get_flavors',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'context',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'context':\n (str,),\n },\n 'attribute_map': {\n 'context': 'context',\n },\n 'location_map': {\n 'context': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_flavors\n )\n\n def __get_instance(\n self,\n type,\n version,\n **kwargs\n ):\n \"\"\"get_instance # noqa: 
E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_instance(type, version, async_req=True)\n >>> result = thread.get()\n\n Args:\n type (str):\n version (str):\n\n Keyword Args:\n _for (str): [optional]\n app (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AvailableInstanceView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['type'] = \\\n type\n kwargs['version'] = \\\n version\n return self.call_with_http_info(**kwargs)\n\n self.get_instance = _Endpoint(\n 
settings={\n 'response_type': (AvailableInstanceView,),\n 'auth': [],\n 'endpoint_path': '/products/instances/{type}-{version}',\n 'operation_id': 'get_instance',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'type',\n 'version',\n '_for',\n 'app',\n ],\n 'required': [\n 'type',\n 'version',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'type':\n (str,),\n 'version':\n (str,),\n '_for':\n (str,),\n 'app':\n (str,),\n },\n 'attribute_map': {\n 'type': 'type',\n 'version': 'version',\n '_for': 'for',\n 'app': 'app',\n },\n 'location_map': {\n 'type': 'path',\n 'version': 'path',\n '_for': 'query',\n 'app': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_instance\n )\n\n def __get_mfa_kinds(\n self,\n **kwargs\n ):\n \"\"\"get_mfa_kinds # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_mfa_kinds(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_mfa_kinds = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/products/mfa_kinds',\n 'operation_id': 'get_mfa_kinds',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_mfa_kinds\n )\n\n def __get_zones(\n self,\n **kwargs\n ):\n \"\"\"get_zones # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_zones(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [ZoneView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_zones = _Endpoint(\n settings={\n 'response_type': ([ZoneView],),\n 'auth': [],\n 'endpoint_path': '/products/zones',\n 'operation_id': 'get_zones',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': 
[\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_zones\n )\n\n def __list_apps(\n self,\n **kwargs\n ):\n \"\"\"list_apps # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.list_apps(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n offset (int): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonApplicationSummary]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.list_apps = _Endpoint(\n settings={\n 'response_type': ([AddonApplicationSummary],),\n 'auth': [],\n 'endpoint_path': '/vendor/apps',\n 'operation_id': 'list_apps',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'offset',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'offset':\n (int,),\n },\n 'attribute_map': {\n 'offset': 'offset',\n },\n 'location_map': {\n 'offset': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__list_apps\n )\n\n def __logscollector(\n self,\n addon_id,\n **kwargs\n ):\n \"\"\"logscollector # noqa: 
E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.logscollector(addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.logscollector = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/vendor/apps/{addonId}/logscollector',\n 
'operation_id': 'logscollector',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n ],\n 'required': [\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__logscollector\n )\n\n def __provision_other_addon(\n self,\n wannabe_inter_addon_provision,\n **kwargs\n ):\n \"\"\"provision_other_addon # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.provision_other_addon(wannabe_inter_addon_provision, async_req=True)\n >>> result = thread.get()\n\n Args:\n wannabe_inter_addon_provision (WannabeInterAddonProvision):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['wannabe_inter_addon_provision'] = \\\n wannabe_inter_addon_provision\n return self.call_with_http_info(**kwargs)\n\n self.provision_other_addon = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/vendor/addons',\n 'operation_id': 'provision_other_addon',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'wannabe_inter_addon_provision',\n ],\n 'required': [\n 'wannabe_inter_addon_provision',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'wannabe_inter_addon_provision':\n (WannabeInterAddonProvision,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'wannabe_inter_addon_provision': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 
'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__provision_other_addon\n )\n" }, { "alpha_fraction": 0.6572912335395813, "alphanum_fraction": 0.6597599387168884, "avg_line_length": 30.248626708984375, "blob_id": "70835287dc709d52da08c96ed44177b715dbbcfe", "content_id": "3122089c83f4e5a5bda31cfef3a1d8cd9e4943d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 261677, "license_type": "no_license", "max_line_length": 219, "num_lines": 8374, "path": "/docs/OrganisationApi.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# openapi_client.OrganisationApi\n\nAll URIs are relative to *https://api.clever-cloud.com/v2*\n\nMethod | HTTP request | Description\n------------- | ------------- | -------------\n[**abort_addon_migration**](OrganisationApi.md#abort_addon_migration) | **DELETE** /organisations/{id}/addons/{addonId}/migrations/{migrationId} | \n[**add_addon_tag_by_orga_and_addon_id**](OrganisationApi.md#add_addon_tag_by_orga_and_addon_id) | **PUT** /organisations/{id}/addons/{addonId}/tags/{tag} | \n[**add_application_by_orga**](OrganisationApi.md#add_application_by_orga) | **POST** /organisations/{id}/applications | \n[**add_application_dependency_by_orga_and_app_id**](OrganisationApi.md#add_application_dependency_by_orga_and_app_id) | **PUT** /organisations/{id}/applications/{appId}/dependencies/{dependencyId} | \n[**add_application_tag_by_orga_and_app_id**](OrganisationApi.md#add_application_tag_by_orga_and_app_id) | **PUT** /organisations/{id}/applications/{appId}/tags/{tag} | \n[**add_beta_tester**](OrganisationApi.md#add_beta_tester) | **POST** /organisations/{id}/addonproviders/{providerId}/testers | \n[**add_organisation_member**](OrganisationApi.md#add_organisation_member) | **POST** /organisations/{id}/members | \n[**add_payment_method_by_orga**](OrganisationApi.md#add_payment_method_by_orga) | **POST** 
/organisations/{id}/payments/methods | \n[**add_provider_feature**](OrganisationApi.md#add_provider_feature) | **POST** /organisations/{id}/addonproviders/{providerId}/features | \n[**add_provider_plan**](OrganisationApi.md#add_provider_plan) | **POST** /organisations/{id}/addonproviders/{providerId}/plans | \n[**add_tcp_redir**](OrganisationApi.md#add_tcp_redir) | **POST** /organisations/{id}/applications/{appId}/tcpRedirs | \n[**add_vhosts_by_orga_and_app_id**](OrganisationApi.md#add_vhosts_by_orga_and_app_id) | **PUT** /organisations/{id}/applications/{appId}/vhosts/{domain} | \n[**buy_drops_by_orga**](OrganisationApi.md#buy_drops_by_orga) | **POST** /organisations/{id}/payments/billings | \n[**cancel_application_deployment_for_orga**](OrganisationApi.md#cancel_application_deployment_for_orga) | **DELETE** /organisations/{id}/applications/{appId}/deployments/{deploymentId}/instances | \n[**change_plan_by_orga_and_addon_id**](OrganisationApi.md#change_plan_by_orga_and_addon_id) | **POST** /organisations/{id}/addons/{addonId}/migrations | \n[**choose_payment_provider_by_orga**](OrganisationApi.md#choose_payment_provider_by_orga) | **PUT** /organisations/{id}/payments/billings/{bid} | \n[**create_consumer_by_orga**](OrganisationApi.md#create_consumer_by_orga) | **POST** /organisations/{id}/consumers | \n[**create_organisation**](OrganisationApi.md#create_organisation) | **POST** /organisations | \n[**create_provider**](OrganisationApi.md#create_provider) | **POST** /organisations/{id}/addonproviders | \n[**delete_addon_tag_by_orga_and_addon_id**](OrganisationApi.md#delete_addon_tag_by_orga_and_addon_id) | **DELETE** /organisations/{id}/addons/{addonId}/tags/{tag} | \n[**delete_application_by_orga_and_app_id**](OrganisationApi.md#delete_application_by_orga_and_app_id) | **DELETE** /organisations/{id}/applications/{appId} | \n[**delete_application_dependency_by_orga_and_app_id**](OrganisationApi.md#delete_application_dependency_by_orga_and_app_id) | **DELETE** 
/organisations/{id}/applications/{appId}/dependencies/{dependencyId} | \n[**delete_application_tag_by_orga_and_app_id**](OrganisationApi.md#delete_application_tag_by_orga_and_app_id) | **DELETE** /organisations/{id}/applications/{appId}/tags/{tag} | \n[**delete_consumer_by_orga**](OrganisationApi.md#delete_consumer_by_orga) | **DELETE** /organisations/{id}/consumers/{key} | \n[**delete_organisation**](OrganisationApi.md#delete_organisation) | **DELETE** /organisations/{id} | \n[**delete_payment_method_by_orga**](OrganisationApi.md#delete_payment_method_by_orga) | **DELETE** /organisations/{id}/payments/methods/{mId} | \n[**delete_provider**](OrganisationApi.md#delete_provider) | **DELETE** /organisations/{id}/addonproviders/{providerId} | \n[**delete_provider_feature**](OrganisationApi.md#delete_provider_feature) | **DELETE** /organisations/{id}/addonproviders/{providerId}/features/{featureId} | \n[**delete_provider_plan**](OrganisationApi.md#delete_provider_plan) | **DELETE** /organisations/{id}/addonproviders/{providerId}/plans/{planId} | \n[**delete_provider_plan_feature**](OrganisationApi.md#delete_provider_plan_feature) | **DELETE** /organisations/{id}/addonproviders/{providerId}/plans/{planId}/features/{featureName} | \n[**delete_purchase_order_by_orga**](OrganisationApi.md#delete_purchase_order_by_orga) | **DELETE** /organisations/{id}/payments/billings/{bid} | \n[**delete_recurrent_payment_by_orga**](OrganisationApi.md#delete_recurrent_payment_by_orga) | **DELETE** /organisations/{id}/payments/recurring | \n[**deprovision_addon_by_orga_and_addon_id**](OrganisationApi.md#deprovision_addon_by_orga_and_addon_id) | **DELETE** /organisations/{id}/addons/{addonId} | \n[**edit_application_by_orga_and_app_id**](OrganisationApi.md#edit_application_by_orga_and_app_id) | **PUT** /organisations/{id}/applications/{appId} | \n[**edit_application_env_by_orga_and_app_id_and_env_name**](OrganisationApi.md#edit_application_env_by_orga_and_app_id_and_env_name) | **PUT** 
/organisations/{id}/applications/{appId}/env/{envName} | \n[**edit_application_environment_by_orga_and_app_id**](OrganisationApi.md#edit_application_environment_by_orga_and_app_id) | **PUT** /organisations/{id}/applications/{appId}/env | \n[**edit_organisation**](OrganisationApi.md#edit_organisation) | **PUT** /organisations/{id} | \n[**edit_organisation_member**](OrganisationApi.md#edit_organisation_member) | **PUT** /organisations/{id}/members/{userId} | \n[**edit_provider_plan**](OrganisationApi.md#edit_provider_plan) | **PUT** /organisations/{id}/addonproviders/{providerId}/plans/{planId} | \n[**edit_provider_plan_feature**](OrganisationApi.md#edit_provider_plan_feature) | **PUT** /organisations/{id}/addonproviders/{providerId}/plans/{planId}/features/{featureName} | \n[**get_addon_by_orga_and_addon_id**](OrganisationApi.md#get_addon_by_orga_and_addon_id) | **GET** /organisations/{id}/addons/{addonId} | \n[**get_addon_env_by_orga_and_addon_id**](OrganisationApi.md#get_addon_env_by_orga_and_addon_id) | **GET** /organisations/{id}/addons/{addonId}/env | \n[**get_addon_instance**](OrganisationApi.md#get_addon_instance) | **GET** /organisations/{id}/addons/{addonId}/instances/{instanceId} | \n[**get_addon_instances**](OrganisationApi.md#get_addon_instances) | **GET** /organisations/{id}/addons/{addonId}/instances | \n[**get_addon_migration**](OrganisationApi.md#get_addon_migration) | **GET** /organisations/{id}/addons/{addonId}/migrations/{migrationId} | \n[**get_addon_migrations**](OrganisationApi.md#get_addon_migrations) | **GET** /organisations/{id}/addons/{addonId}/migrations | \n[**get_addon_sso_data_for_orga**](OrganisationApi.md#get_addon_sso_data_for_orga) | **GET** /organisations/{id}/addons/{addonId}/sso | \n[**get_addon_tags_by_orga_id_and_addon_id**](OrganisationApi.md#get_addon_tags_by_orga_id_and_addon_id) | **GET** /organisations/{id}/addons/{addonId}/tags | \n[**get_addons_by_orga_id**](OrganisationApi.md#get_addons_by_orga_id) | **GET** 
/organisations/{id}/addons | \n[**get_addons_linked_to_application_by_orga_and_app_id**](OrganisationApi.md#get_addons_linked_to_application_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/addons | \n[**get_all_applications_by_orga**](OrganisationApi.md#get_all_applications_by_orga) | **GET** /organisations/{id}/applications | \n[**get_amount_for_orga**](OrganisationApi.md#get_amount_for_orga) | **GET** /organisations/{id}/credits | \n[**get_application_branches_by_orga_and_app_id**](OrganisationApi.md#get_application_branches_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/branches | \n[**get_application_by_orga_and_app_id**](OrganisationApi.md#get_application_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId} | \n[**get_application_dependencies_by_orga_and_app_id**](OrganisationApi.md#get_application_dependencies_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/dependencies | \n[**get_application_dependencies_env_by_orga_and_app_id**](OrganisationApi.md#get_application_dependencies_env_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/dependencies/env | \n[**get_application_dependents_by_orga_and_app_id**](OrganisationApi.md#get_application_dependents_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/dependents | \n[**get_application_deployment_for_orga**](OrganisationApi.md#get_application_deployment_for_orga) | **GET** /organisations/{id}/applications/{appId}/deployments/{deploymentId} | \n[**get_application_deployments_for_orga**](OrganisationApi.md#get_application_deployments_for_orga) | **GET** /organisations/{id}/applications/{appId}/deployments | \n[**get_application_env_by_orga_and_app_id**](OrganisationApi.md#get_application_env_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/env | 
\n[**get_application_instance_by_orga_and_app_and_instance_id**](OrganisationApi.md#get_application_instance_by_orga_and_app_and_instance_id) | **GET** /organisations/{id}/applications/{appId}/instances/{instanceId} | \n[**get_application_instances_by_orga_and_app_id**](OrganisationApi.md#get_application_instances_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/instances | \n[**get_application_tags_by_orga_and_app_id**](OrganisationApi.md#get_application_tags_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/tags | \n[**get_applications_linked_to_addon_by_orga_and_addon_id**](OrganisationApi.md#get_applications_linked_to_addon_by_orga_and_addon_id) | **GET** /organisations/{id}/addons/{addonId}/applications | \n[**get_consumer_by_orga**](OrganisationApi.md#get_consumer_by_orga) | **GET** /organisations/{id}/consumers/{key} | \n[**get_consumer_secret_by_orga**](OrganisationApi.md#get_consumer_secret_by_orga) | **GET** /organisations/{id}/consumers/{key}/secret | \n[**get_consumers_by_orga**](OrganisationApi.md#get_consumers_by_orga) | **GET** /organisations/{id}/consumers | \n[**get_consumptions_for_orga**](OrganisationApi.md#get_consumptions_for_orga) | **GET** /organisations/{id}/consumptions | \n[**get_default_method_by_orga**](OrganisationApi.md#get_default_method_by_orga) | **GET** /organisations/{id}/payments/methods/default | \n[**get_deployments_for_all_apps**](OrganisationApi.md#get_deployments_for_all_apps) | **GET** /organisations/{id}/deployments | \n[**get_env_of_addons_linked_to_application_by_orga_and_app_id**](OrganisationApi.md#get_env_of_addons_linked_to_application_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/addons/env | \n[**get_exposed_env_by_orga_and_app_id**](OrganisationApi.md#get_exposed_env_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/exposed_env | 
\n[**get_favourite_vhost_by_orga_and_app_id**](OrganisationApi.md#get_favourite_vhost_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/vhosts/favourite | \n[**get_instances_for_all_apps_for_orga**](OrganisationApi.md#get_instances_for_all_apps_for_orga) | **GET** /organisations/{id}/instances | \n[**get_invoice_by_orga**](OrganisationApi.md#get_invoice_by_orga) | **GET** /organisations/{id}/payments/billings/{bid} | \n[**get_invoices_by_orga**](OrganisationApi.md#get_invoices_by_orga) | **GET** /organisations/{id}/payments/billings | \n[**get_monthly_invoice_by_orga**](OrganisationApi.md#get_monthly_invoice_by_orga) | **GET** /organisations/{id}/payments/monthlyinvoice | \n[**get_namespaces**](OrganisationApi.md#get_namespaces) | **GET** /organisations/{id}/namespaces | \n[**get_new_setup_intent_by_orga**](OrganisationApi.md#get_new_setup_intent_by_orga) | **GET** /organisations/{id}/payments/methods/newintent | \n[**get_organisation**](OrganisationApi.md#get_organisation) | **GET** /organisations/{id} | \n[**get_organisation_members**](OrganisationApi.md#get_organisation_members) | **GET** /organisations/{id}/members | \n[**get_payment_info_for_orga**](OrganisationApi.md#get_payment_info_for_orga) | **GET** /organisations/{id}/payment-info | \n[**get_pdf_invoice_by_orga**](OrganisationApi.md#get_pdf_invoice_by_orga) | **GET** /organisations/{id}/payments/billings/{bid}.pdf | \n[**get_price_with_tax_by_orga**](OrganisationApi.md#get_price_with_tax_by_orga) | **GET** /organisations/{id}/payments/fullprice/{price} | \n[**get_provider_features**](OrganisationApi.md#get_provider_features) | **GET** /organisations/{id}/addonproviders/{providerId}/features | \n[**get_provider_info**](OrganisationApi.md#get_provider_info) | **GET** /organisations/{id}/addonproviders/{providerId} | \n[**get_provider_plan**](OrganisationApi.md#get_provider_plan) | **GET** /organisations/{id}/addonproviders/{providerId}/plans/{planId} | 
\n[**get_provider_plans**](OrganisationApi.md#get_provider_plans) | **GET** /organisations/{id}/addonproviders/{providerId}/plans | \n[**get_provider_tags**](OrganisationApi.md#get_provider_tags) | **GET** /organisations/{id}/addonproviders/{providerId}/tags | \n[**get_providers_info**](OrganisationApi.md#get_providers_info) | **GET** /organisations/{id}/addonproviders | \n[**get_recurrent_payment_by_orga**](OrganisationApi.md#get_recurrent_payment_by_orga) | **GET** /organisations/{id}/payments/recurring | \n[**get_sso_data_for_orga**](OrganisationApi.md#get_sso_data_for_orga) | **GET** /organisations/{id}/addonproviders/{providerId}/sso | \n[**get_stripe_token_by_orga**](OrganisationApi.md#get_stripe_token_by_orga) | **GET** /organisations/{id}/payments/tokens/stripe | \n[**get_tcp_redirs**](OrganisationApi.md#get_tcp_redirs) | **GET** /organisations/{id}/applications/{appId}/tcpRedirs | \n[**get_unpaid_invoices_by_orga**](OrganisationApi.md#get_unpaid_invoices_by_orga) | **GET** /organisations/{id}/payments/billings/unpaid | \n[**get_unpaid_invoices_by_orga1**](OrganisationApi.md#get_unpaid_invoices_by_orga1) | **GET** /organisations/{id}/payments/methods | \n[**get_user_organisationss**](OrganisationApi.md#get_user_organisationss) | **GET** /organisations | \n[**get_vhosts_by_orga_and_app_id**](OrganisationApi.md#get_vhosts_by_orga_and_app_id) | **GET** /organisations/{id}/applications/{appId}/vhosts | \n[**link_addon_to_application_by_orga_and_app_id**](OrganisationApi.md#link_addon_to_application_by_orga_and_app_id) | **POST** /organisations/{id}/applications/{appId}/addons | \n[**mark_favourite_vhost_by_orga_and_app_id**](OrganisationApi.md#mark_favourite_vhost_by_orga_and_app_id) | **PUT** /organisations/{id}/applications/{appId}/vhosts/favourite | \n[**preorder_addon_by_orga_id**](OrganisationApi.md#preorder_addon_by_orga_id) | **POST** /organisations/{id}/addons/preorders | \n[**preorder_migration**](OrganisationApi.md#preorder_migration) | **GET** 
/organisations/{id}/addons/{addonId}/migrations/preorders | \n[**provision_addon_by_orga_id**](OrganisationApi.md#provision_addon_by_orga_id) | **POST** /organisations/{id}/addons | \n[**redeploy_application_by_orga_and_app_id**](OrganisationApi.md#redeploy_application_by_orga_and_app_id) | **POST** /organisations/{id}/applications/{appId}/instances | \n[**remove_application_env_by_orga_and_app_id_and_env_name**](OrganisationApi.md#remove_application_env_by_orga_and_app_id_and_env_name) | **DELETE** /organisations/{id}/applications/{appId}/env/{envName} | \n[**remove_organisation_member**](OrganisationApi.md#remove_organisation_member) | **DELETE** /organisations/{id}/members/{userId} | \n[**remove_tcp_redir**](OrganisationApi.md#remove_tcp_redir) | **DELETE** /organisations/{id}/applications/{appId}/tcpRedirs/{sourcePort} | \n[**remove_vhosts_by_orga_and_app_id**](OrganisationApi.md#remove_vhosts_by_orga_and_app_id) | **DELETE** /organisations/{id}/applications/{appId}/vhosts/{domain} | \n[**replace_addon_tags**](OrganisationApi.md#replace_addon_tags) | **PUT** /organisations/{id}/addons/{addonId}/tags | \n[**replace_application_tags**](OrganisationApi.md#replace_application_tags) | **PUT** /organisations/{id}/applications/{appId}/tags | \n[**set_application_branch_by_orga_and_app_id**](OrganisationApi.md#set_application_branch_by_orga_and_app_id) | **PUT** /organisations/{id}/applications/{appId}/branch | \n[**set_build_instance_flavor_by_orga_and_app_id**](OrganisationApi.md#set_build_instance_flavor_by_orga_and_app_id) | **PUT** /organisations/{id}/applications/{appId}/buildflavor | \n[**set_default_method_by_orga**](OrganisationApi.md#set_default_method_by_orga) | **PUT** /organisations/{id}/payments/methods/default | \n[**set_max_credits_per_month_by_orga**](OrganisationApi.md#set_max_credits_per_month_by_orga) | **PUT** /organisations/{id}/payments/monthlyinvoice/maxcredit | \n[**set_orga_avatar**](OrganisationApi.md#set_orga_avatar) | **PUT** 
/organisations/{id}/avatar | \n[**undeploy_application_by_orga_and_app_id**](OrganisationApi.md#undeploy_application_by_orga_and_app_id) | **DELETE** /organisations/{id}/applications/{appId}/instances | \n[**unlink_addon_from_application_by_orga_and_app_andd_addon_id**](OrganisationApi.md#unlink_addon_from_application_by_orga_and_app_andd_addon_id) | **DELETE** /organisations/{id}/applications/{appId}/addons/{addonId} | \n[**unmark_favourite_vhost_by_orga_and_app_id**](OrganisationApi.md#unmark_favourite_vhost_by_orga_and_app_id) | **DELETE** /organisations/{id}/applications/{appId}/vhosts/favourite | \n[**update_addon_info**](OrganisationApi.md#update_addon_info) | **PUT** /organisations/{id}/addons/{addonId} | \n[**update_consumer_by_orga**](OrganisationApi.md#update_consumer_by_orga) | **PUT** /organisations/{id}/consumers/{key} | \n[**update_exposed_env_by_orga_and_app_id**](OrganisationApi.md#update_exposed_env_by_orga_and_app_id) | **PUT** /organisations/{id}/applications/{appId}/exposed_env | \n[**update_provider_infos**](OrganisationApi.md#update_provider_infos) | **PUT** /organisations/{id}/addonproviders/{providerId} | \n\n\n# **abort_addon_migration**\n> str abort_addon_migration(id, addon_id, migration_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n migration_id = \"migrationId_example\" # str | \n\n # example passing only 
required values which don't have defaults set\n try:\n api_response = api_instance.abort_addon_migration(id, addon_id, migration_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->abort_addon_migration: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n **migration_id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_addon_tag_by_orga_and_addon_id**\n> [str] add_addon_tag_by_orga_and_addon_id(id, addon_id, tag)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n tag = \"tag_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_addon_tag_by_orga_and_addon_id(id, addon_id, tag)\n 
pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_addon_tag_by_orga_and_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n **tag** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_application_by_orga**\n> ApplicationView add_application_by_orga(id, wannabe_application)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_application import WannabeApplication\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n wannabe_application = WannabeApplication(\n name=\"name_example\",\n description=\"description_example\",\n zone=\"zone_example\",\n deploy=\"deploy_example\",\n shutdownable=True,\n instance_type=\"instance_type_example\",\n 
instance_version=\"instance_version_example\",\n instance_variant=\"instance_variant_example\",\n instance_lifetime=\"REGULAR\",\n min_instances=1,\n max_instances=1,\n min_flavor=\"min_flavor_example\",\n max_flavor=\"max_flavor_example\",\n tags=[\n \"tags_example\",\n ],\n archived=True,\n sticky_sessions=True,\n homogeneous=True,\n favourite=True,\n cancel_on_push=True,\n separate_build=True,\n build_flavor=\"build_flavor_example\",\n oauth_service=\"oauth_service_example\",\n oauth_app_id=\"oauth_app_id_example\",\n oauth_app=WannabeOauthApp(\n owner=\"owner_example\",\n name=\"name_example\",\n ),\n appliance_id=\"appliance_id_example\",\n branch=\"branch_example\",\n force_https=\"ENABLED\",\n ) # WannabeApplication | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_application_by_orga(id, wannabe_application)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_application_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **wannabe_application** | [**WannabeApplication**](WannabeApplication.md)| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_application_dependency_by_orga_and_app_id**\n> Message add_application_dependency_by_orga_and_app_id(id, app_id, dependency_id)\n\n\n\n### 
Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n dependency_id = \"dependencyId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_application_dependency_by_orga_and_app_id(id, app_id, dependency_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_application_dependency_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **dependency_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_application_tag_by_orga_and_app_id**\n> [str] add_application_tag_by_orga_and_app_id(id, app_id, tag)\n\n\n\n### 
Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n tag = \"tag_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_application_tag_by_orga_and_app_id(id, app_id, tag)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_application_tag_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **tag** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_beta_tester**\n> Message add_beta_tester(id, provider_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import 
pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_beta_tester(id, provider_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_beta_tester: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_organisation_member**\n> Message add_organisation_member(id, wannabe_member)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_member import WannabeMember\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a 
list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n wannabe_member = WannabeMember(\n role=\"role_example\",\n job=\"job_example\",\n email=\"email_example\",\n ) # WannabeMember | \n invitation_key = \"invitationKey_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_organisation_member(id, wannabe_member)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_organisation_member: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.add_organisation_member(id, wannabe_member, invitation_key=invitation_key)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_organisation_member: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **wannabe_member** | [**WannabeMember**](WannabeMember.md)| |\n **invitation_key** | **str**| | [optional]\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_payment_method_by_orga**\n> PaymentMethodView add_payment_method_by_orga(id, payment_data)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.payment_data import PaymentData\nfrom openapi_client.model.payment_method_view import PaymentMethodView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n payment_data = PaymentData(\n type=\"NEW_CARD\",\n token=\"token_example\",\n device_data=\"device_data_example\",\n ) # PaymentData | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_payment_method_by_orga(id, payment_data)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_payment_method_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **payment_data** | [**PaymentData**](PaymentData.md)| |\n\n### Return type\n\n[**PaymentMethodView**](PaymentMethodView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) 
[[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_provider_feature**\n> AddonFeatureView add_provider_feature(id, provider_id, wannabe_addon_feature)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_addon_feature import WannabeAddonFeature\nfrom openapi_client.model.addon_feature_view import AddonFeatureView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n wannabe_addon_feature = WannabeAddonFeature(\n name=\"name_example\",\n type=\"BOOLEAN\",\n ) # WannabeAddonFeature | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_provider_feature(id, provider_id, wannabe_addon_feature)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_provider_feature: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n **wannabe_addon_feature** | [**WannabeAddonFeature**](WannabeAddonFeature.md)| |\n\n### Return type\n\n[**AddonFeatureView**](AddonFeatureView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - 
**Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_provider_plan**\n> AddonPlanView add_provider_plan(id, provider_id, wannabe_addon_plan)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_plan_view import AddonPlanView\nfrom openapi_client.model.wannabe_addon_plan import WannabeAddonPlan\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n wannabe_addon_plan = WannabeAddonPlan(\n name=\"name_example\",\n slug=\"slug_example\",\n price=3.14,\n features=[\n AddonFeatureInstanceView(\n name=\"name_example\",\n type=\"BOOLEAN\",\n value=\"value_example\",\n computable_value=\"computable_value_example\",\n name_code=\"name_code_example\",\n ),\n ],\n ) # WannabeAddonPlan | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_provider_plan(id, provider_id, wannabe_addon_plan)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_provider_plan: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | 
Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n **wannabe_addon_plan** | [**WannabeAddonPlan**](WannabeAddonPlan.md)| |\n\n### Return type\n\n[**AddonPlanView**](AddonPlanView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_tcp_redir**\n> TcpRedirView add_tcp_redir(id, app_id, wannabe_namespace)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.tcp_redir_view import TcpRedirView\nfrom openapi_client.model.wannabe_namespace import WannabeNamespace\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n wannabe_namespace = WannabeNamespace(\n namespace=\"namespace_example\",\n min_port=1,\n max_port=1,\n ) # WannabeNamespace | \n payment = \"payment_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_tcp_redir(id, app_id, wannabe_namespace)\n pprint(api_response)\n except 
openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_tcp_redir: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.add_tcp_redir(id, app_id, wannabe_namespace, payment=payment)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_tcp_redir: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **wannabe_namespace** | [**WannabeNamespace**](WannabeNamespace.md)| |\n **payment** | **str**| | [optional]\n\n### Return type\n\n[**TcpRedirView**](TcpRedirView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_vhosts_by_orga_and_app_id**\n> Message add_vhosts_by_orga_and_app_id(id, app_id, domain)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = 
organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n domain = \"domain_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_vhosts_by_orga_and_app_id(id, app_id, domain)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->add_vhosts_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **domain** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **buy_drops_by_orga**\n> InvoiceRendering buy_drops_by_orga(id, wanna_buy_package)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wanna_buy_package import WannaBuyPackage\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = 
organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n wanna_buy_package = WannaBuyPackage(\n package_id=1,\n currency=\"currency_example\",\n coupon=\"coupon_example\",\n drop_quantity=3.14,\n ) # WannaBuyPackage | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.buy_drops_by_orga(id, wanna_buy_package)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->buy_drops_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **wanna_buy_package** | [**WannaBuyPackage**](WannaBuyPackage.md)| |\n\n### Return type\n\n[**InvoiceRendering**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **cancel_application_deployment_for_orga**\n> Message cancel_application_deployment_for_orga(id, app_id, deployment_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an 
instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n deployment_id = \"deploymentId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.cancel_application_deployment_for_orga(id, app_id, deployment_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->cancel_application_deployment_for_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **deployment_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **change_plan_by_orga_and_addon_id**\n> str change_plan_by_orga_and_addon_id(id, addon_id, wannabe_plan_change)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_plan_change import WannabePlanChange\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an 
instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n wannabe_plan_change = WannabePlanChange(\n plan_id=\"plan_id_example\",\n region=\"region_example\",\n version=\"version_example\",\n ) # WannabePlanChange | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.change_plan_by_orga_and_addon_id(id, addon_id, wannabe_plan_change)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->change_plan_by_orga_and_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n **wannabe_plan_change** | [**WannabePlanChange**](WannabePlanChange.md)| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **choose_payment_provider_by_orga**\n> NextInPaymentFlow choose_payment_provider_by_orga(id, bid, payment_provider_selection)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.next_in_payment_flow import NextInPaymentFlow\nfrom openapi_client.model.payment_provider_selection import PaymentProviderSelection\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported 
configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n bid = \"bid_example\" # str | \n payment_provider_selection = PaymentProviderSelection(\n provider=\"PAYPAL\",\n ) # PaymentProviderSelection | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.choose_payment_provider_by_orga(id, bid, payment_provider_selection)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->choose_payment_provider_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **bid** | **str**| |\n **payment_provider_selection** | [**PaymentProviderSelection**](PaymentProviderSelection.md)| |\n\n### Return type\n\n[**NextInPaymentFlow**](NextInPaymentFlow.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **create_consumer_by_orga**\n> OAuth1ConsumerView create_consumer_by_orga(id, wannabe_o_auth1_consumer)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom 
openapi_client.model.wannabe_o_auth1_consumer import WannabeOAuth1Consumer\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n wannabe_o_auth1_consumer = WannabeOAuth1Consumer(\n name=\"name_example\",\n description=\"description_example\",\n url=\"url_example\",\n picture=\"picture_example\",\n base_url=\"base_url_example\",\n rights={\n \"key\": True,\n },\n ) # WannabeOAuth1Consumer | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.create_consumer_by_orga(id, wannabe_o_auth1_consumer)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->create_consumer_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **wannabe_o_auth1_consumer** | [**WannabeOAuth1Consumer**](WannabeOAuth1Consumer.md)| |\n\n### Return type\n\n[**OAuth1ConsumerView**](OAuth1ConsumerView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# 
**create_organisation**\n> OrganisationView create_organisation(wannabe_organisation)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.organisation_view import OrganisationView\nfrom openapi_client.model.wannabe_organisation import WannabeOrganisation\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n wannabe_organisation = WannabeOrganisation(\n name=\"name_example\",\n description=\"description_example\",\n address=\"address_example\",\n city=\"city_example\",\n zipcode=\"zipcode_example\",\n country=\"country_example\",\n company=\"company_example\",\n customer_full_name=\"customer_full_name_example\",\n vat=\"vat_example\",\n billing_email=\"billing_email_example\",\n ) # WannabeOrganisation | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.create_organisation(wannabe_organisation)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->create_organisation: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wannabe_organisation** | [**WannabeOrganisation**](WannabeOrganisation.md)| |\n\n### Return type\n\n[**OrganisationView**](OrganisationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response 
details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **create_provider**\n> AddonProviderInfoFullView create_provider(id, wannabe_addon_provider)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_provider_info_full_view import AddonProviderInfoFullView\nfrom openapi_client.model.wannabe_addon_provider import WannabeAddonProvider\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n wannabe_addon_provider = WannabeAddonProvider(\n id=\"id_example\",\n name=\"name_example\",\n api=WannabeAddonProviderAPI(\n config_vars=[\n \"config_vars_example\",\n ],\n password=\"password_example\",\n sso_salt=\"sso_salt_example\",\n regions=[\n \"regions_example\",\n ],\n production=WannabeAddonProviderAPIUrl(\n base_url=\"base_url_example\",\n sso_url=\"sso_url_example\",\n ),\n test=WannabeAddonProviderAPIUrl(\n base_url=\"base_url_example\",\n sso_url=\"sso_url_example\",\n ),\n ),\n ) # WannabeAddonProvider | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.create_provider(id, wannabe_addon_provider)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling 
OrganisationApi->create_provider: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **wannabe_addon_provider** | [**WannabeAddonProvider**](WannabeAddonProvider.md)| |\n\n### Return type\n\n[**AddonProviderInfoFullView**](AddonProviderInfoFullView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_addon_tag_by_orga_and_addon_id**\n> [str] delete_addon_tag_by_orga_and_addon_id(id, addon_id, tag)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n tag = \"tag_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.delete_addon_tag_by_orga_and_addon_id(id, addon_id, tag)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_addon_tag_by_orga_and_addon_id: 
%s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n **tag** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_application_by_orga_and_app_id**\n> Message delete_application_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.delete_application_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_application_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | 
-------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_application_dependency_by_orga_and_app_id**\n> delete_application_dependency_by_orga_and_app_id(id, app_id, dependency_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n dependency_id = \"dependencyId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_application_dependency_by_orga_and_app_id(id, app_id, dependency_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_application_dependency_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **dependency_id** | **str**| |\n\n### Return 
type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_application_tag_by_orga_and_app_id**\n> [str] delete_application_tag_by_orga_and_app_id(id, app_id, tag)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n tag = \"tag_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.delete_application_tag_by_orga_and_app_id(id, app_id, tag)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_application_tag_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **tag** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request 
headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_consumer_by_orga**\n> delete_consumer_by_orga(id, key)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n key = \"key_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_consumer_by_orga(id, key)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_consumer_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **key** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_organisation**\n> Message delete_organisation(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.delete_organisation(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_organisation: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_payment_method_by_orga**\n> delete_payment_method_by_orga(id, m_id)\n\n\n\n### Example\n\n```python\nimport time\nimport 
openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n m_id = \"mId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_payment_method_by_orga(id, m_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_payment_method_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **m_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**204** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_provider**\n> delete_provider(id, provider_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = 
openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_provider(id, provider_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_provider: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_provider_feature**\n> delete_provider_feature(id, provider_id, feature_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = 
organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n feature_id = \"featureId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_provider_feature(id, provider_id, feature_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_provider_feature: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n **feature_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_provider_plan**\n> delete_provider_plan(id, provider_id, plan_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n plan_id = \"planId_example\" # 
str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_provider_plan(id, provider_id, plan_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_provider_plan: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n **plan_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_provider_plan_feature**\n> delete_provider_plan_feature(id, provider_id, plan_id, feature_name)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n plan_id = \"planId_example\" # str | \n feature_name = \"featureName_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n 
api_instance.delete_provider_plan_feature(id, provider_id, plan_id, feature_name)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_provider_plan_feature: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n **plan_id** | **str**| |\n **feature_name** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_purchase_order_by_orga**\n> delete_purchase_order_by_orga(id, bid)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n bid = \"bid_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_purchase_order_by_orga(id, bid)\n except openapi_client.ApiException as e:\n print(\"Exception when calling 
OrganisationApi->delete_purchase_order_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **bid** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_recurrent_payment_by_orga**\n> delete_recurrent_payment_by_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_recurrent_payment_by_orga(id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->delete_recurrent_payment_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo 
authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **deprovision_addon_by_orga_and_addon_id**\n> Message deprovision_addon_by_orga_and_addon_id(id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.deprovision_addon_by_orga_and_addon_id(id, addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->deprovision_addon_by_orga_and_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: 
application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_application_by_orga_and_app_id**\n> ApplicationView edit_application_by_orga_and_app_id(id, app_id, wannabe_application)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_application import WannabeApplication\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n wannabe_application = WannabeApplication(\n name=\"name_example\",\n description=\"description_example\",\n zone=\"zone_example\",\n deploy=\"deploy_example\",\n shutdownable=True,\n instance_type=\"instance_type_example\",\n instance_version=\"instance_version_example\",\n instance_variant=\"instance_variant_example\",\n instance_lifetime=\"REGULAR\",\n min_instances=1,\n max_instances=1,\n min_flavor=\"min_flavor_example\",\n max_flavor=\"max_flavor_example\",\n tags=[\n \"tags_example\",\n ],\n archived=True,\n sticky_sessions=True,\n homogeneous=True,\n favourite=True,\n cancel_on_push=True,\n separate_build=True,\n build_flavor=\"build_flavor_example\",\n 
oauth_service=\"oauth_service_example\",\n oauth_app_id=\"oauth_app_id_example\",\n oauth_app=WannabeOauthApp(\n owner=\"owner_example\",\n name=\"name_example\",\n ),\n appliance_id=\"appliance_id_example\",\n branch=\"branch_example\",\n force_https=\"ENABLED\",\n ) # WannabeApplication | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_application_by_orga_and_app_id(id, app_id, wannabe_application)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->edit_application_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **wannabe_application** | [**WannabeApplication**](WannabeApplication.md)| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_application_env_by_orga_and_app_id_and_env_name**\n> ApplicationView edit_application_env_by_orga_and_app_id_and_env_name(id, app_id, env_name, wannabe_value)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_value import WannabeValue\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of 
all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n env_name = \"envName_example\" # str | \n wannabe_value = WannabeValue(\n value=\"value_example\",\n ) # WannabeValue | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_application_env_by_orga_and_app_id_and_env_name(id, app_id, env_name, wannabe_value)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->edit_application_env_by_orga_and_app_id_and_env_name: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **env_name** | **str**| |\n **wannabe_value** | [**WannabeValue**](WannabeValue.md)| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_application_environment_by_orga_and_app_id**\n> ApplicationView edit_application_environment_by_orga_and_app_id(id, app_id, body)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom 
openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n body = \"body_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_application_environment_by_orga_and_app_id(id, app_id, body)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->edit_application_environment_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **body** | **str**| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_organisation**\n> OrganisationView edit_organisation(id, wannabe_organisation)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom 
openapi_client.model.organisation_view import OrganisationView\nfrom openapi_client.model.wannabe_organisation import WannabeOrganisation\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n wannabe_organisation = WannabeOrganisation(\n name=\"name_example\",\n description=\"description_example\",\n address=\"address_example\",\n city=\"city_example\",\n zipcode=\"zipcode_example\",\n country=\"country_example\",\n company=\"company_example\",\n customer_full_name=\"customer_full_name_example\",\n vat=\"vat_example\",\n billing_email=\"billing_email_example\",\n ) # WannabeOrganisation | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_organisation(id, wannabe_organisation)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->edit_organisation: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **wannabe_organisation** | [**WannabeOrganisation**](WannabeOrganisation.md)| |\n\n### Return type\n\n[**OrganisationView**](OrganisationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_organisation_member**\n> Message edit_organisation_member(id, user_id, wannabe_member)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_member import WannabeMember\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n user_id = \"userId_example\" # str | \n wannabe_member = WannabeMember(\n role=\"role_example\",\n job=\"job_example\",\n email=\"email_example\",\n ) # WannabeMember | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_organisation_member(id, user_id, wannabe_member)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->edit_organisation_member: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **user_id** | **str**| |\n **wannabe_member** | [**WannabeMember**](WannabeMember.md)| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response 
headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_provider_plan**\n> AddonPlanView edit_provider_plan(id, provider_id, plan_id, wannabe_addon_plan)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_plan_view import AddonPlanView\nfrom openapi_client.model.wannabe_addon_plan import WannabeAddonPlan\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n plan_id = \"planId_example\" # str | \n wannabe_addon_plan = WannabeAddonPlan(\n name=\"name_example\",\n slug=\"slug_example\",\n price=3.14,\n features=[\n AddonFeatureInstanceView(\n name=\"name_example\",\n type=\"BOOLEAN\",\n value=\"value_example\",\n computable_value=\"computable_value_example\",\n name_code=\"name_code_example\",\n ),\n ],\n ) # WannabeAddonPlan | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_provider_plan(id, provider_id, plan_id, wannabe_addon_plan)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->edit_provider_plan: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | 
------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n **plan_id** | **str**| |\n **wannabe_addon_plan** | [**WannabeAddonPlan**](WannabeAddonPlan.md)| |\n\n### Return type\n\n[**AddonPlanView**](AddonPlanView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_provider_plan_feature**\n> AddonPlanView edit_provider_plan_feature(id, provider_id, plan_id, feature_name, addon_feature_instance_view)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_plan_view import AddonPlanView\nfrom openapi_client.model.addon_feature_instance_view import AddonFeatureInstanceView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n plan_id = \"planId_example\" # str | \n feature_name = \"featureName_example\" # str | \n addon_feature_instance_view = AddonFeatureInstanceView(\n name=\"name_example\",\n type=\"BOOLEAN\",\n value=\"value_example\",\n computable_value=\"computable_value_example\",\n 
name_code=\"name_code_example\",\n ) # AddonFeatureInstanceView | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_provider_plan_feature(id, provider_id, plan_id, feature_name, addon_feature_instance_view)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->edit_provider_plan_feature: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n **plan_id** | **str**| |\n **feature_name** | **str**| |\n **addon_feature_instance_view** | [**AddonFeatureInstanceView**](AddonFeatureInstanceView.md)| |\n\n### Return type\n\n[**AddonPlanView**](AddonPlanView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_by_orga_and_addon_id**\n> AddonView get_addon_by_orga_and_addon_id(id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_view import AddonView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the 
API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addon_by_orga_and_addon_id(id, addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addon_by_orga_and_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n\n### Return type\n\n[**AddonView**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_env_by_orga_and_addon_id**\n> [AddonEnvironmentView] get_addon_env_by_orga_and_addon_id(id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_environment_view import AddonEnvironmentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | 
\n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addon_env_by_orga_and_addon_id(id, addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addon_env_by_orga_and_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n\n### Return type\n\n[**[AddonEnvironmentView]**](AddonEnvironmentView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_instance**\n> str get_addon_instance(id, addon_id, instance_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n instance_id = \"instanceId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response 
= api_instance.get_addon_instance(id, addon_id, instance_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addon_instance: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n **instance_id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_instances**\n> [SuperNovaInstanceView] get_addon_instances(id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.super_nova_instance_view import SuperNovaInstanceView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n deployment_id = \"deploymentId_example\" # str | (optional)\n with_deleted = \"withDeleted_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = 
api_instance.get_addon_instances(id, addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addon_instances: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_addon_instances(id, addon_id, deployment_id=deployment_id, with_deleted=with_deleted)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addon_instances: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n **deployment_id** | **str**| | [optional]\n **with_deleted** | **str**| | [optional]\n\n### Return type\n\n[**[SuperNovaInstanceView]**](SuperNovaInstanceView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_migration**\n> str get_addon_migration(id, addon_id, migration_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an 
instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n migration_id = \"migrationId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addon_migration(id, addon_id, migration_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addon_migration: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n **migration_id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_migrations**\n> str get_addon_migrations(id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n\n # example 
passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addon_migrations(id, addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addon_migrations: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_sso_data_for_orga**\n> AddonProviderSSOData get_addon_sso_data_for_orga(id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_provider_sso_data import AddonProviderSSOData\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addon_sso_data_for_orga(id, addon_id)\n 
pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addon_sso_data_for_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n\n### Return type\n\n[**AddonProviderSSOData**](AddonProviderSSOData.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_tags_by_orga_id_and_addon_id**\n> [str] get_addon_tags_by_orga_id_and_addon_id(id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addon_tags_by_orga_id_and_addon_id(id, addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addon_tags_by_orga_id_and_addon_id: %s\\n\" 
% e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addons_by_orga_id**\n> [AddonView] get_addons_by_orga_id(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_view import AddonView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addons_by_orga_id(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addons_by_orga_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**[AddonView]**](AddonView.md)\n\n### Authorization\n\nNo authorization 
required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addons_linked_to_application_by_orga_and_app_id**\n> [AddonView] get_addons_linked_to_application_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_view import AddonView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addons_linked_to_application_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_addons_linked_to_application_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**[AddonView]**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not 
defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_all_applications_by_orga**\n> [ApplicationView] get_all_applications_by_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n instance_id = \"instanceId_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_all_applications_by_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_all_applications_by_orga: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_all_applications_by_orga(id, instance_id=instance_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_all_applications_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | 
-------------\n **id** | **str**| |\n **instance_id** | **str**| | [optional]\n\n### Return type\n\n[**[ApplicationView]**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_amount_for_orga**\n> DropCountView get_amount_for_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.drop_count_view import DropCountView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_amount_for_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_amount_for_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**DropCountView**](DropCountView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not 
defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_branches_by_orga_and_app_id**\n> [str] get_application_branches_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_branches_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_branches_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | 
| - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_by_orga_and_app_id**\n> ApplicationView get_application_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to 
Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_dependencies_by_orga_and_app_id**\n> [ApplicationView] get_application_dependencies_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_dependencies_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_dependencies_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**[ApplicationView]**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) 
[[Back to README]](../README.md)\n\n# **get_application_dependencies_env_by_orga_and_app_id**\n> get_application_dependencies_env_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.get_application_dependencies_env_by_orga_and_app_id(id, app_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_dependencies_env_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_dependents_by_orga_and_app_id**\n> [ApplicationView] get_application_dependents_by_orga_and_app_id(id, 
app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_dependents_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_dependents_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**[ApplicationView]**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_deployment_for_orga**\n> get_application_deployment_for_orga(id, app_id, deployment_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom 
openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n deployment_id = \"deploymentId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.get_application_deployment_for_orga(id, app_id, deployment_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_deployment_for_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **deployment_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_deployments_for_orga**\n> [DeploymentView] get_application_deployments_for_orga(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.deployment_view import 
DeploymentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n limit = \"limit_example\" # str | (optional)\n offset = \"offset_example\" # str | (optional)\n action = \"action_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_deployments_for_orga(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_deployments_for_orga: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_application_deployments_for_orga(id, app_id, limit=limit, offset=offset, action=action)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_deployments_for_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **limit** | **str**| | [optional]\n **offset** | **str**| | [optional]\n **action** | **str**| | [optional]\n\n### Return type\n\n[**[DeploymentView]**](DeploymentView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | 
Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_env_by_orga_and_app_id**\n> [AddonEnvironmentView] get_application_env_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_environment_view import AddonEnvironmentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_env_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_env_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**[AddonEnvironmentView]**](AddonEnvironmentView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers 
|\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_instance_by_orga_and_app_and_instance_id**\n> str get_application_instance_by_orga_and_app_and_instance_id(id, app_id, instance_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n instance_id = \"instanceId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_instance_by_orga_and_app_and_instance_id(id, app_id, instance_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_instance_by_orga_and_app_and_instance_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **instance_id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers 
|\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_instances_by_orga_and_app_id**\n> [SuperNovaInstanceView] get_application_instances_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.super_nova_instance_view import SuperNovaInstanceView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n deployment_id = \"deploymentId_example\" # str | (optional)\n with_deleted = \"withDeleted_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_instances_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_instances_by_orga_and_app_id: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_application_instances_by_orga_and_app_id(id, app_id, deployment_id=deployment_id, with_deleted=with_deleted)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling 
OrganisationApi->get_application_instances_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **deployment_id** | **str**| | [optional]\n **with_deleted** | **str**| | [optional]\n\n### Return type\n\n[**[SuperNovaInstanceView]**](SuperNovaInstanceView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_tags_by_orga_and_app_id**\n> [str] get_application_tags_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_tags_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_application_tags_by_orga_and_app_id: 
%s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_applications_linked_to_addon_by_orga_and_addon_id**\n> [ApplicationView] get_applications_linked_to_addon_by_orga_and_addon_id(id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_applications_linked_to_addon_by_orga_and_addon_id(id, addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_applications_linked_to_addon_by_orga_and_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | 
Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n\n### Return type\n\n[**[ApplicationView]**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_consumer_by_orga**\n> OAuth1ConsumerView get_consumer_by_orga(id, key)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n key = \"key_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_consumer_by_orga(id, key)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_consumer_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **key** | **str**| |\n\n### Return 
type\n\n[**OAuth1ConsumerView**](OAuth1ConsumerView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_consumer_secret_by_orga**\n> SecretView get_consumer_secret_by_orga(id, key)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.secret_view import SecretView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n key = \"key_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_consumer_secret_by_orga(id, key)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_consumer_secret_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **key** | **str**| |\n\n### Return type\n\n[**SecretView**](SecretView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - 
**Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_consumers_by_orga**\n> [OAuth1ConsumerView] get_consumers_by_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_consumers_by_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_consumers_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**[OAuth1ConsumerView]**](OAuth1ConsumerView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_consumptions_for_orga**\n> str get_consumptions_for_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | (optional)\n _from = \"from_example\" # str | (optional)\n to = \"to_example\" # str | (optional)\n _for = \"for_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_consumptions_for_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_consumptions_for_orga: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_consumptions_for_orga(id, app_id=app_id, _from=_from, to=to, _for=_for)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_consumptions_for_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| | [optional]\n **_from** | **str**| | [optional]\n **to** | **str**| | [optional]\n **_for** | **str**| | [optional]\n\n### Return 
type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_default_method_by_orga**\n> PaymentMethodView get_default_method_by_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.payment_method_view import PaymentMethodView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_default_method_by_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_default_method_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**PaymentMethodView**](PaymentMethodView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | 
Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_deployments_for_all_apps**\n> get_deployments_for_all_apps(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n limit = 1 # int | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_instance.get_deployments_for_all_apps(id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_deployments_for_all_apps: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.get_deployments_for_all_apps(id, limit=limit)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_deployments_for_all_apps: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **limit** | **int**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response 
details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_env_of_addons_linked_to_application_by_orga_and_app_id**\n> [LinkedAddonEnvironmentView] get_env_of_addons_linked_to_application_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.linked_addon_environment_view import LinkedAddonEnvironmentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_env_of_addons_linked_to_application_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_env_of_addons_linked_to_application_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**[LinkedAddonEnvironmentView]**](LinkedAddonEnvironmentView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not 
defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_exposed_env_by_orga_and_app_id**\n> str get_exposed_env_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_exposed_env_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_exposed_env_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to 
API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_favourite_vhost_by_orga_and_app_id**\n> VhostView get_favourite_vhost_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.vhost_view import VhostView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_favourite_vhost_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_favourite_vhost_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**VhostView**](VhostView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) 
[[Back to README]](../README.md)\n\n# **get_instances_for_all_apps_for_orga**\n> str get_instances_for_all_apps_for_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_instances_for_all_apps_for_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_instances_for_all_apps_for_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_invoice_by_orga**\n> InvoiceRendering get_invoice_by_orga(id, bid)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.invoice_rendering import 
InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n bid = \"bid_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_invoice_by_orga(id, bid)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_invoice_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **bid** | **str**| |\n\n### Return type\n\n[**InvoiceRendering**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_invoices_by_orga**\n> [InvoiceRendering] get_invoices_by_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported 
configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_invoices_by_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_invoices_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**[InvoiceRendering]**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_monthly_invoice_by_orga**\n> str get_monthly_invoice_by_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = 
organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_monthly_invoice_by_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_monthly_invoice_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_namespaces**\n> [NamespaceView] get_namespaces(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.namespace_view import NamespaceView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_namespaces(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n 
print(\"Exception when calling OrganisationApi->get_namespaces: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**[NamespaceView]**](NamespaceView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_new_setup_intent_by_orga**\n> SetupIntentView get_new_setup_intent_by_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.setup_intent_view import SetupIntentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n type = \"type_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_new_setup_intent_by_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_new_setup_intent_by_orga: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional 
values\n try:\n api_response = api_instance.get_new_setup_intent_by_orga(id, type=type)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_new_setup_intent_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **type** | **str**| | [optional]\n\n### Return type\n\n[**SetupIntentView**](SetupIntentView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_organisation**\n> OrganisationView get_organisation(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.organisation_view import OrganisationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_organisation(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling 
OrganisationApi->get_organisation: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**OrganisationView**](OrganisationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_organisation_members**\n> [OrganisationMemberView] get_organisation_members(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.organisation_member_view import OrganisationMemberView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_organisation_members(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_organisation_members: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### 
Return type\n\n[**[OrganisationMemberView]**](OrganisationMemberView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_payment_info_for_orga**\n> PaymentInfoView get_payment_info_for_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.payment_info_view import PaymentInfoView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_payment_info_for_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_payment_info_for_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**PaymentInfoView**](PaymentInfoView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: 
application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_pdf_invoice_by_orga**\n> get_pdf_invoice_by_orga(id, bid)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n bid = \"bid_example\" # str | \n token = \"token_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_instance.get_pdf_invoice_by_orga(id, bid)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_pdf_invoice_by_orga: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.get_pdf_invoice_by_orga(id, bid, token=token)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_pdf_invoice_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **bid** | **str**| |\n **token** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization 
required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/pdf\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_price_with_tax_by_orga**\n> PriceWithTaxInfo get_price_with_tax_by_orga(id, price)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.price_with_tax_info import PriceWithTaxInfo\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n price = \"price_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_price_with_tax_by_orga(id, price)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_price_with_tax_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **price** | **str**| |\n\n### Return type\n\n[**PriceWithTaxInfo**](PriceWithTaxInfo.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| 
Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_provider_features**\n> [AddonFeatureView] get_provider_features(id, provider_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_feature_view import AddonFeatureView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_provider_features(id, provider_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_provider_features: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n\n### Return type\n\n[**[AddonFeatureView]**](AddonFeatureView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to 
top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_provider_info**\n> AddonProviderInfoView get_provider_info(id, provider_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_provider_info_view import AddonProviderInfoView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_provider_info(id, provider_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_provider_info: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n\n### Return type\n\n[**AddonProviderInfoView**](AddonProviderInfoView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_provider_plan**\n> AddonPlanView get_provider_plan(id, provider_id, plan_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_plan_view import AddonPlanView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n plan_id = \"planId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_provider_plan(id, provider_id, plan_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_provider_plan: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n **plan_id** | **str**| |\n\n### Return type\n\n[**AddonPlanView**](AddonPlanView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to 
README]](../README.md)\n\n# **get_provider_plans**\n> [AddonPlanView] get_provider_plans(id, provider_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_plan_view import AddonPlanView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_provider_plans(id, provider_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_provider_plans: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n\n### Return type\n\n[**[AddonPlanView]**](AddonPlanView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_provider_tags**\n> [str] get_provider_tags(id, provider_id)\n\n\n\n### Example\n\n```python\nimport time\nimport 
openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_provider_tags(id, provider_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_provider_tags: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_providers_info**\n> [AddonProviderInfoFullView] get_providers_info(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_provider_info_full_view import AddonProviderInfoFullView\nfrom pprint import pprint\n# Defining the host is optional and defaults to 
https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_providers_info(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_providers_info: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**[AddonProviderInfoFullView]**](AddonProviderInfoFullView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_recurrent_payment_by_orga**\n> RecurrentPaymentView get_recurrent_payment_by_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.recurrent_payment_view import RecurrentPaymentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = 
\"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_recurrent_payment_by_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_recurrent_payment_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**RecurrentPaymentView**](RecurrentPaymentView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_sso_data_for_orga**\n> AddonProviderSSOData get_sso_data_for_orga(id, provider_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_provider_sso_data import AddonProviderSSOData\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API 
class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_sso_data_for_orga(id, provider_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_sso_data_for_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n\n### Return type\n\n[**AddonProviderSSOData**](AddonProviderSSOData.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_stripe_token_by_orga**\n> BraintreeToken get_stripe_token_by_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.braintree_token import BraintreeToken\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required 
values which don't have defaults set\n try:\n api_response = api_instance.get_stripe_token_by_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_stripe_token_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**BraintreeToken**](BraintreeToken.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_tcp_redirs**\n> [TcpRedirView] get_tcp_redirs(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.tcp_redir_view import TcpRedirView\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_tcp_redirs(id, app_id)\n pprint(api_response)\n except 
openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_tcp_redirs: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**[TcpRedirView]**](TcpRedirView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n**402** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_unpaid_invoices_by_orga**\n> [InvoiceRendering] get_unpaid_invoices_by_orga(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_unpaid_invoices_by_orga(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_unpaid_invoices_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | 
Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**[InvoiceRendering]**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_unpaid_invoices_by_orga1**\n> [PaymentMethodView] get_unpaid_invoices_by_orga1(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.payment_method_view import PaymentMethodView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_unpaid_invoices_by_orga1(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_unpaid_invoices_by_orga1: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**[PaymentMethodView]**](PaymentMethodView.md)\n\n### Authorization\n\nNo 
authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_user_organisationss**\n> [OrganisationView] get_user_organisationss()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.organisation_view import OrganisationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n user = \"user_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_user_organisationss(user=user)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_user_organisationss: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **user** | **str**| | [optional]\n\n### Return type\n\n[**[OrganisationView]**](OrganisationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | 
Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_vhosts_by_orga_and_app_id**\n> [VhostView] get_vhosts_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.vhost_view import VhostView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_vhosts_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->get_vhosts_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**[VhostView]**](VhostView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **link_addon_to_application_by_orga_and_app_id**\n> link_addon_to_application_by_orga_and_app_id(id, app_id, body)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n body = \"body_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.link_addon_to_application_by_orga_and_app_id(id, app_id, body)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->link_addon_to_application_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **body** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) 
[[Back to README]](../README.md)\n\n# **mark_favourite_vhost_by_orga_and_app_id**\n> VhostView mark_favourite_vhost_by_orga_and_app_id(id, app_id, vhost_view)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.vhost_view import VhostView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n vhost_view = VhostView(\n fqdn=\"fqdn_example\",\n ) # VhostView | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.mark_favourite_vhost_by_orga_and_app_id(id, app_id, vhost_view)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->mark_favourite_vhost_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **vhost_view** | [**VhostView**](VhostView.md)| |\n\n### Return type\n\n[**VhostView**](VhostView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **preorder_addon_by_orga_id**\n> InvoiceRendering preorder_addon_by_orga_id(id, wannabe_addon_provision)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_addon_provision import WannabeAddonProvision\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n wannabe_addon_provision = WannabeAddonProvision(\n provider_id=\"provider_id_example\",\n plan=\"plan_example\",\n linked_app=\"linked_app_example\",\n name=\"name_example\",\n region=\"region_example\",\n options={\n \"key\": \"key_example\",\n },\n version=\"version_example\",\n payment_intent=SetupIntentView(\n owner_id=\"owner_id_example\",\n id=\"id_example\",\n client_secret=\"client_secret_example\",\n customer=\"customer_example\",\n ),\n payment_method_type=\"CREDITCARD\",\n sepa_source_id=\"sepa_source_id_example\",\n ) # WannabeAddonProvision | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.preorder_addon_by_orga_id(id, wannabe_addon_provision)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->preorder_addon_by_orga_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | 
**str**| |\n **wannabe_addon_provision** | [**WannabeAddonProvision**](WannabeAddonProvision.md)| |\n\n### Return type\n\n[**InvoiceRendering**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **preorder_migration**\n> str preorder_migration(id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n plan_id = \"planId_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.preorder_migration(id, addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->preorder_migration: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.preorder_migration(id, addon_id, plan_id=plan_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n 
print(\"Exception when calling OrganisationApi->preorder_migration: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n **plan_id** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **provision_addon_by_orga_id**\n> AddonView provision_addon_by_orga_id(id, wannabe_addon_provision)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_view import AddonView\nfrom openapi_client.model.stripe_confirmation_error_message import StripeConfirmationErrorMessage\nfrom openapi_client.model.wannabe_addon_provision import WannabeAddonProvision\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n wannabe_addon_provision = WannabeAddonProvision(\n provider_id=\"provider_id_example\",\n plan=\"plan_example\",\n linked_app=\"linked_app_example\",\n name=\"name_example\",\n region=\"region_example\",\n options={\n 
\"key\": \"key_example\",\n },\n version=\"version_example\",\n payment_intent=SetupIntentView(\n owner_id=\"owner_id_example\",\n id=\"id_example\",\n client_secret=\"client_secret_example\",\n customer=\"customer_example\",\n ),\n payment_method_type=\"CREDITCARD\",\n sepa_source_id=\"sepa_source_id_example\",\n ) # WannabeAddonProvision | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.provision_addon_by_orga_id(id, wannabe_addon_provision)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->provision_addon_by_orga_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **wannabe_addon_provision** | [**WannabeAddonProvision**](WannabeAddonProvision.md)| |\n\n### Return type\n\n[**AddonView**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n**402** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **redeploy_application_by_orga_and_app_id**\n> redeploy_application_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance 
of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n commit = \"commit_example\" # str | (optional)\n use_cache = \"useCache_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_instance.redeploy_application_by_orga_and_app_id(id, app_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->redeploy_application_by_orga_and_app_id: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.redeploy_application_by_orga_and_app_id(id, app_id, commit=commit, use_cache=use_cache)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->redeploy_application_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **commit** | **str**| | [optional]\n **use_cache** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **remove_application_env_by_orga_and_app_id_and_env_name**\n> ApplicationView remove_application_env_by_orga_and_app_id_and_env_name(id, app_id, env_name)\n\n\n\n### Example\n\n```python\nimport time\nimport 
openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n env_name = \"envName_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.remove_application_env_by_orga_and_app_id_and_env_name(id, app_id, env_name)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->remove_application_env_by_orga_and_app_id_and_env_name: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **env_name** | **str**| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **remove_organisation_member**\n> Message remove_organisation_member(id, user_id)\n\n\n\n### Example\n\n```python\nimport time\nimport 
openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n user_id = \"userId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.remove_organisation_member(id, user_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->remove_organisation_member: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **user_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **remove_tcp_redir**\n> remove_tcp_redir(id, app_id, source_port)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See 
configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n source_port = 1 # int | \n namespace = \"namespace_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_instance.remove_tcp_redir(id, app_id, source_port)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->remove_tcp_redir: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.remove_tcp_redir(id, app_id, source_port, namespace=namespace)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->remove_tcp_redir: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **source_port** | **int**| |\n **namespace** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **remove_vhosts_by_orga_and_app_id**\n> Message remove_vhosts_by_orga_and_app_id(id, app_id, domain)\n\n\n\n### 
Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n domain = \"domain_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.remove_vhosts_by_orga_and_app_id(id, app_id, domain)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->remove_vhosts_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **domain** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **replace_addon_tags**\n> [str] replace_addon_tags(id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom 
pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n body = \"body_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.replace_addon_tags(id, addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->replace_addon_tags: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.replace_addon_tags(id, addon_id, body=body)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->replace_addon_tags: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n **body** | **str**| | [optional]\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **replace_application_tags**\n> [str] 
replace_application_tags(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n body = \"body_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.replace_application_tags(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->replace_application_tags: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.replace_application_tags(id, app_id, body=body)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->replace_application_tags: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **body** | **str**| | [optional]\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **set_application_branch_by_orga_and_app_id**\n> set_application_branch_by_orga_and_app_id(id, app_id, wannabe_branch)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_branch import WannabeBranch\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n wannabe_branch = WannabeBranch(\n branch=\"branch_example\",\n ) # WannabeBranch | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.set_application_branch_by_orga_and_app_id(id, app_id, wannabe_branch)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->set_application_branch_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **wannabe_branch** | [**WannabeBranch**](WannabeBranch.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default 
response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **set_build_instance_flavor_by_orga_and_app_id**\n> set_build_instance_flavor_by_orga_and_app_id(id, app_id, wannabe_build_flavor)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_build_flavor import WannabeBuildFlavor\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n wannabe_build_flavor = WannabeBuildFlavor(\n flavor_name=\"flavor_name_example\",\n ) # WannabeBuildFlavor | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.set_build_instance_flavor_by_orga_and_app_id(id, app_id, wannabe_build_flavor)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->set_build_instance_flavor_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **wannabe_build_flavor** | [**WannabeBuildFlavor**](WannabeBuildFlavor.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response 
details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **set_default_method_by_orga**\n> set_default_method_by_orga(id, payment_data)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.payment_data import PaymentData\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n payment_data = PaymentData(\n type=\"NEW_CARD\",\n token=\"token_example\",\n device_data=\"device_data_example\",\n ) # PaymentData | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.set_default_method_by_orga(id, payment_data)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->set_default_method_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **payment_data** | [**PaymentData**](PaymentData.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response 
headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **set_max_credits_per_month_by_orga**\n> str set_max_credits_per_month_by_orga(id, wannabe_max_credits)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_max_credits import WannabeMaxCredits\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n wannabe_max_credits = WannabeMaxCredits(\n max=3.14,\n ) # WannabeMaxCredits | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.set_max_credits_per_month_by_orga(id, wannabe_max_credits)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->set_max_credits_per_month_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **wannabe_max_credits** | [**WannabeMaxCredits**](WannabeMaxCredits.md)| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers 
|\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **set_orga_avatar**\n> UrlView set_orga_avatar(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.url_view import UrlView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.set_orga_avatar(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->set_orga_avatar: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**UrlView**](UrlView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **undeploy_application_by_orga_and_app_id**\n> 
Message undeploy_application_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.undeploy_application_by_orga_and_app_id(id, app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->undeploy_application_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **unlink_addon_from_application_by_orga_and_app_andd_addon_id**\n> unlink_addon_from_application_by_orga_and_app_andd_addon_id(id, app_id, addon_id)\n\n\n\n### 
Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.unlink_addon_from_application_by_orga_and_app_andd_addon_id(id, app_id, addon_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->unlink_addon_from_application_by_orga_and_app_andd_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **addon_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **unmark_favourite_vhost_by_orga_and_app_id**\n> unmark_favourite_vhost_by_orga_and_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api 
import organisation_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.unmark_favourite_vhost_by_orga_and_app_id(id, app_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->unmark_favourite_vhost_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **update_addon_info**\n> AddonView update_addon_info(id, addon_id, wannabe_addon_provision)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.addon_view import AddonView\nfrom openapi_client.model.wannabe_addon_provision import WannabeAddonProvision\nfrom pprint import pprint\n# Defining the host is 
optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n addon_id = \"addonId_example\" # str | \n wannabe_addon_provision = WannabeAddonProvision(\n provider_id=\"provider_id_example\",\n plan=\"plan_example\",\n linked_app=\"linked_app_example\",\n name=\"name_example\",\n region=\"region_example\",\n options={\n \"key\": \"key_example\",\n },\n version=\"version_example\",\n payment_intent=SetupIntentView(\n owner_id=\"owner_id_example\",\n id=\"id_example\",\n client_secret=\"client_secret_example\",\n customer=\"customer_example\",\n ),\n payment_method_type=\"CREDITCARD\",\n sepa_source_id=\"sepa_source_id_example\",\n ) # WannabeAddonProvision | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.update_addon_info(id, addon_id, wannabe_addon_provision)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->update_addon_info: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **addon_id** | **str**| |\n **wannabe_addon_provision** | [**WannabeAddonProvision**](WannabeAddonProvision.md)| |\n\n### Return type\n\n[**AddonView**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** 
| | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **update_consumer_by_orga**\n> OAuth1ConsumerView update_consumer_by_orga(id, key, wannabe_o_auth1_consumer)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom openapi_client.model.wannabe_o_auth1_consumer import WannabeOAuth1Consumer\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n key = \"key_example\" # str | \n wannabe_o_auth1_consumer = WannabeOAuth1Consumer(\n name=\"name_example\",\n description=\"description_example\",\n url=\"url_example\",\n picture=\"picture_example\",\n base_url=\"base_url_example\",\n rights={\n \"key\": True,\n },\n ) # WannabeOAuth1Consumer | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.update_consumer_by_orga(id, key, wannabe_o_auth1_consumer)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->update_consumer_by_orga: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **key** | **str**| |\n **wannabe_o_auth1_consumer** | [**WannabeOAuth1Consumer**](WannabeOAuth1Consumer.md)| |\n\n### Return 
type\n\n[**OAuth1ConsumerView**](OAuth1ConsumerView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **update_exposed_env_by_orga_and_app_id**\n> Message update_exposed_env_by_orga_and_app_id(id, app_id, body)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n body = \"body_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.update_exposed_env_by_orga_and_app_id(id, app_id, body)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->update_exposed_env_by_orga_and_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n **body** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### 
Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **update_provider_infos**\n> AddonProviderInfoView update_provider_infos(id, provider_id, wannabe_addon_provider_infos)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import organisation_api\nfrom openapi_client.model.wannabe_addon_provider_infos import WannabeAddonProviderInfos\nfrom openapi_client.model.addon_provider_info_view import AddonProviderInfoView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = organisation_api.OrganisationApi(api_client)\n id = \"id_example\" # str | \n provider_id = \"providerId_example\" # str | \n wannabe_addon_provider_infos = WannabeAddonProviderInfos(\n name=\"name_example\",\n website=\"website_example\",\n support_email=\"support_email_example\",\n google_plus_name=\"google_plus_name_example\",\n twitter_name=\"twitter_name_example\",\n analytics_id=\"analytics_id_example\",\n short_desc=\"short_desc_example\",\n long_desc=\"long_desc_example\",\n logo_url=\"logo_url_example\",\n ) # WannabeAddonProviderInfos | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = 
api_instance.update_provider_infos(id, provider_id, wannabe_addon_provider_infos)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling OrganisationApi->update_provider_infos: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **provider_id** | **str**| |\n **wannabe_addon_provider_infos** | [**WannabeAddonProviderInfos**](WannabeAddonProviderInfos.md)| |\n\n### Return type\n\n[**AddonProviderInfoView**](AddonProviderInfoView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n" }, { "alpha_fraction": 0.5734144449234009, "alphanum_fraction": 0.5751520395278931, "avg_line_length": 51.227272033691406, "blob_id": "c7bab09b2ad374b773c3ef7c67e3a7fcce7640b7", "content_id": "ad9e0d532732aa950416e8998f1a79c5d2419375", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1151, "license_type": "no_license", "max_line_length": 168, "num_lines": 22, "path": "/docs/OrganisationSummary.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# OrganisationSummary\n\n\n## Properties\nName | Type | Description | Notes\n------------ | ------------- | ------------- | -------------\n**id** | **str** | | [optional] \n**name** | **str** | | [optional] \n**avatar** | **str** | | [optional] \n**applications** | [**[ApplicationSummary]**](ApplicationSummary.md) | | [optional] \n**addons** | 
[**[AddonSummary]**](AddonSummary.md) | | [optional] \n**consumers** | [**[OAuth1ConsumerSummary]**](OAuth1ConsumerSummary.md) | | [optional] \n**role** | **str** | | [optional] \n**providers** | [**[ProviderSummary]**](ProviderSummary.md) | | [optional] \n**vat_state** | **str** | | [optional] \n**can_pay** | **bool** | | [optional] \n**can_sepa** | **bool** | | [optional] \n**clever_enterprise** | **bool** | | [optional] \n**emergency_number** | **str** | | [optional] \n**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]\n\n[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)\n\n\n" }, { "alpha_fraction": 0.5509832501411438, "alphanum_fraction": 0.5553532242774963, "avg_line_length": 17.42953109741211, "blob_id": "527f5ce95d6d3ba275afbb992563c0e2805be877", "content_id": "2c33ad1bae1ef8a3193af8f3a327624a03f122ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2746, "license_type": "no_license", "max_line_length": 72, "num_lines": 149, "path": "/test/test_products_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport unittest\n\nimport openapi_client\nfrom openapi_client.api.products_api import ProductsApi # noqa: E501\n\n\nclass TestProductsApi(unittest.TestCase):\n \"\"\"ProductsApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = ProductsApi() # noqa: E501\n\n def tearDown(self):\n pass\n\n def test_bill_owner(self):\n \"\"\"Test case for bill_owner\n\n \"\"\"\n pass\n\n def 
test_edit_application_configuration(self):\n \"\"\"Test case for edit_application_configuration\n\n \"\"\"\n pass\n\n def test_end_addon_migration(self):\n \"\"\"Test case for end_addon_migration\n\n \"\"\"\n pass\n\n def test_get_addon_provider(self):\n \"\"\"Test case for get_addon_provider\n\n \"\"\"\n pass\n\n def test_get_addon_provider_infos(self):\n \"\"\"Test case for get_addon_provider_infos\n\n \"\"\"\n pass\n\n def test_get_addon_provider_versions(self):\n \"\"\"Test case for get_addon_provider_versions\n\n \"\"\"\n pass\n\n def test_get_addon_providers(self):\n \"\"\"Test case for get_addon_providers\n\n \"\"\"\n pass\n\n def test_get_application_info(self):\n \"\"\"Test case for get_application_info\n\n \"\"\"\n pass\n\n def test_get_available_instances(self):\n \"\"\"Test case for get_available_instances\n\n \"\"\"\n pass\n\n def test_get_available_packages(self):\n \"\"\"Test case for get_available_packages\n\n \"\"\"\n pass\n\n def test_get_countries(self):\n \"\"\"Test case for get_countries\n\n \"\"\"\n pass\n\n def test_get_country_codes(self):\n \"\"\"Test case for get_country_codes\n\n \"\"\"\n pass\n\n def test_get_excahnge_rates(self):\n \"\"\"Test case for get_excahnge_rates\n\n \"\"\"\n pass\n\n def test_get_flavors(self):\n \"\"\"Test case for get_flavors\n\n \"\"\"\n pass\n\n def test_get_instance(self):\n \"\"\"Test case for get_instance\n\n \"\"\"\n pass\n\n def test_get_mfa_kinds(self):\n \"\"\"Test case for get_mfa_kinds\n\n \"\"\"\n pass\n\n def test_get_zones(self):\n \"\"\"Test case for get_zones\n\n \"\"\"\n pass\n\n def test_list_apps(self):\n \"\"\"Test case for list_apps\n\n \"\"\"\n pass\n\n def test_logscollector(self):\n \"\"\"Test case for logscollector\n\n \"\"\"\n pass\n\n def test_provision_other_addon(self):\n \"\"\"Test case for provision_other_addon\n\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6033375859260559, "alphanum_fraction": 0.6187419891357422, 
"avg_line_length": 18, "blob_id": "3bcf1516878b791038e2e09bea787ee23416e165", "content_id": "d973a37875c5732777ac8a82ac0e1839ae6f7664", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 779, "license_type": "no_license", "max_line_length": 72, "num_lines": 41, "path": "/test/test_default_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport unittest\n\nimport openapi_client\nfrom openapi_client.api.default_api import DefaultApi # noqa: E501\n\n\nclass TestDefaultApi(unittest.TestCase):\n \"\"\"DefaultApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = DefaultApi() # noqa: E501\n\n def tearDown(self):\n pass\n\n def test_get_blog_feed(self):\n \"\"\"Test case for get_blog_feed\n\n \"\"\"\n pass\n\n def test_get_engineering_feed(self):\n \"\"\"Test case for get_engineering_feed\n\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6986691951751709, "alphanum_fraction": 0.7176806330680847, "avg_line_length": 25.299999237060547, "blob_id": "e7918ec5f8d4413bf308640a23e82ae13e26a0d4", "content_id": "5ed2a6de63bb791bac95b66a97999a3d05642738", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1052, "license_type": "no_license", "max_line_length": 80, "num_lines": 40, "path": "/test/test_o_auth1_access_token_view.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport 
sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom openapi_client.model.o_auth_rights_view import OAuthRightsView\nglobals()['OAuth1ConsumerView'] = OAuth1ConsumerView\nglobals()['OAuthRightsView'] = OAuthRightsView\nfrom openapi_client.model.o_auth1_access_token_view import OAuth1AccessTokenView\n\n\nclass TestOAuth1AccessTokenView(unittest.TestCase):\n \"\"\"OAuth1AccessTokenView unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testOAuth1AccessTokenView(self):\n \"\"\"Test OAuth1AccessTokenView\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = OAuth1AccessTokenView() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.713798999786377, "alphanum_fraction": 0.7214650511741638, "avg_line_length": 25.68181800842285, "blob_id": "9b807dbe7a907d5d652db31b9323da14983c416a", "content_id": "68b03cb50cf7fb99874282de153273f6f29db26b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1174, "license_type": "no_license", "max_line_length": 79, "num_lines": 44, "path": "/test/test_application_view.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.deployment_info_view import DeploymentInfoView\nfrom openapi_client.model.flavor_view import FlavorView\nfrom openapi_client.model.instance_view import InstanceView\nfrom openapi_client.model.vhost_view import VhostView\nglobals()['DeploymentInfoView'] = DeploymentInfoView\nglobals()['FlavorView'] = 
FlavorView\nglobals()['InstanceView'] = InstanceView\nglobals()['VhostView'] = VhostView\nfrom openapi_client.model.application_view import ApplicationView\n\n\nclass TestApplicationView(unittest.TestCase):\n \"\"\"ApplicationView unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testApplicationView(self):\n \"\"\"Test ApplicationView\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = ApplicationView() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5347721576690674, "alphanum_fraction": 0.5347721576690674, "avg_line_length": 45.27777862548828, "blob_id": "309a394db8f2f889d26aa395ca11b78076e40e28", "content_id": "2bc4914f234f49a5138eccecfe57913df94e1a0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1668, "license_type": "no_license", "max_line_length": 168, "num_lines": 36, "path": "/docs/WannabeApplication.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# WannabeApplication\n\n\n## Properties\nName | Type | Description | Notes\n------------ | ------------- | ------------- | -------------\n**name** | **str** | | [optional] \n**description** | **str** | | [optional] \n**zone** | **str** | | [optional] \n**deploy** | **str** | | [optional] \n**shutdownable** | **bool** | | [optional] \n**instance_type** | **str** | | [optional] \n**instance_version** | **str** | | [optional] \n**instance_variant** | **str** | | [optional] \n**instance_lifetime** | **str** | | [optional] \n**min_instances** | **int** | | [optional] \n**max_instances** | **int** | | [optional] \n**min_flavor** | **str** | | [optional] \n**max_flavor** | **str** | | [optional] \n**tags** | **[str]** | | [optional] \n**archived** | **bool** | | [optional] \n**sticky_sessions** | **bool** | | [optional] \n**homogeneous** | **bool** | | [optional] \n**favourite** | **bool** 
| | [optional] \n**cancel_on_push** | **bool** | | [optional] \n**separate_build** | **bool** | | [optional] \n**build_flavor** | **str** | | [optional] \n**oauth_service** | **str** | | [optional] \n**oauth_app_id** | **str** | | [optional] \n**oauth_app** | [**WannabeOauthApp**](WannabeOauthApp.md) | | [optional] \n**appliance_id** | **str** | | [optional] \n**branch** | **str** | | [optional] \n**force_https** | **str** | | [optional] \n**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]\n\n[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)\n\n\n" }, { "alpha_fraction": 0.5413580536842346, "alphanum_fraction": 0.5413580536842346, "avg_line_length": 46.588233947753906, "blob_id": "2006bdba3c4f7926f45632c5e0cdc7a86dd0a87a", "content_id": "1ca2d8babcd4222580c7984c907ea8cb7e424348", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1620, "license_type": "no_license", "max_line_length": 168, "num_lines": 34, "path": "/docs/ApplicationView.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# ApplicationView\n\n\n## Properties\nName | Type | Description | Notes\n------------ | ------------- | ------------- | -------------\n**id** | **str** | | [optional] \n**name** | **str** | | [optional] \n**description** | **str** | | [optional] \n**zone** | **str** | | [optional] \n**instance** | [**InstanceView**](InstanceView.md) | | [optional] \n**deployment** | [**DeploymentInfoView**](DeploymentInfoView.md) | | [optional] \n**vhosts** | [**[VhostView]**](VhostView.md) | | [optional] \n**creation_date** | **int** | | [optional] \n**last_deploy** | **int** | | [optional] \n**archived** | **bool** | | [optional] \n**sticky_sessions** | **bool** | | [optional] 
\n**homogeneous** | **bool** | | [optional] \n**favourite** | **bool** | | [optional] \n**cancel_on_push** | **bool** | | [optional] \n**webhook_url** | **str** | | [optional] \n**webhook_secret** | **str** | | [optional] \n**separate_build** | **bool** | | [optional] \n**build_flavor** | [**FlavorView**](FlavorView.md) | | [optional] \n**owner_id** | **str** | | [optional] \n**state** | **str** | | [optional] \n**commit_id** | **str** | | [optional] \n**appliance** | **str** | | [optional] \n**branch** | **str** | | [optional] \n**force_https** | **str** | | [optional] \n**deploy_url** | **str** | | [optional] \n**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]\n\n[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)\n\n\n" }, { "alpha_fraction": 0.6597086191177368, "alphanum_fraction": 0.6623644828796387, "avg_line_length": 28.66008949279785, "blob_id": "b6ee7964881d6072b0e71d505da438494205addd", "content_id": "da2a507bb3d1f2819f3c162557dcfb88d1d90f68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 198427, "license_type": "no_license", "max_line_length": 186, "num_lines": 6690, "path": "/docs/SelfApi.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# openapi_client.SelfApi\n\nAll URIs are relative to *https://api.clever-cloud.com/v2*\n\nMethod | HTTP request | Description\n------------- | ------------- | -------------\n[**add_email_address**](SelfApi.md#add_email_address) | **PUT** /self/emails/{email} | \n[**add_self_addon_tag_by_addon_id**](SelfApi.md#add_self_addon_tag_by_addon_id) | **PUT** /self/addons/{addonId}/tags/{tag} | \n[**add_self_application**](SelfApi.md#add_self_application) | **POST** /self/applications | 
\n[**add_self_application_dependency_by_app_id**](SelfApi.md#add_self_application_dependency_by_app_id) | **PUT** /self/applications/{appId}/dependencies/{dependencyId} | \n[**add_self_application_tag_by_app_id**](SelfApi.md#add_self_application_tag_by_app_id) | **PUT** /self/applications/{appId}/tags/{tag} | \n[**add_self_payment_method**](SelfApi.md#add_self_payment_method) | **POST** /self/payments/methods | \n[**add_self_vhost_by_app_id**](SelfApi.md#add_self_vhost_by_app_id) | **PUT** /self/applications/{appId}/vhosts/{domain} | \n[**add_ssh_key**](SelfApi.md#add_ssh_key) | **PUT** /self/keys/{key} | \n[**buy_self_drops**](SelfApi.md#buy_self_drops) | **POST** /self/payments/billings | \n[**cancel_deploy**](SelfApi.md#cancel_deploy) | **DELETE** /self/applications/{appId}/deployments/{deploymentId}/instances | \n[**change_self_addon_plan_by_addon_id**](SelfApi.md#change_self_addon_plan_by_addon_id) | **PUT** /self/addons/{addonId}/plan | \n[**change_user_password**](SelfApi.md#change_user_password) | **PUT** /self/change_password | \n[**choose_self_payment_provider**](SelfApi.md#choose_self_payment_provider) | **PUT** /self/payments/billings/{bid} | \n[**create_mfa**](SelfApi.md#create_mfa) | **POST** /self/mfa/{kind} | \n[**create_self_consumer**](SelfApi.md#create_self_consumer) | **POST** /self/consumers | \n[**delete_mfa**](SelfApi.md#delete_mfa) | **DELETE** /self/mfa/{kind} | \n[**delete_self_addon_tag_by_addon_id**](SelfApi.md#delete_self_addon_tag_by_addon_id) | **DELETE** /self/addons/{addonId}/tags/{tag} | \n[**delete_self_application_by_app_id**](SelfApi.md#delete_self_application_by_app_id) | **DELETE** /self/applications/{appId} | \n[**delete_self_application_dependency_by_app_id**](SelfApi.md#delete_self_application_dependency_by_app_id) | **DELETE** /self/applications/{appId}/dependencies/{dependencyId} | \n[**delete_self_application_tag_app_id**](SelfApi.md#delete_self_application_tag_app_id) | **DELETE** /self/applications/{appId}/tags/{tag} | 
\n[**delete_self_card**](SelfApi.md#delete_self_card) | **DELETE** /self/payments/methods/{mId} | \n[**delete_self_consumer**](SelfApi.md#delete_self_consumer) | **DELETE** /self/consumers/{key} | \n[**delete_self_purchase_order**](SelfApi.md#delete_self_purchase_order) | **DELETE** /self/payments/billings/{bid} | \n[**delete_self_recurrent_payment**](SelfApi.md#delete_self_recurrent_payment) | **DELETE** /self/payments/recurring | \n[**delete_user**](SelfApi.md#delete_user) | **DELETE** /self | \n[**deprovision_self_addon_by_id**](SelfApi.md#deprovision_self_addon_by_id) | **DELETE** /self/addons/{addonId} | \n[**edit_self_application_by_app_id**](SelfApi.md#edit_self_application_by_app_id) | **PUT** /self/applications/{appId} | \n[**edit_self_application_env_by_app_id**](SelfApi.md#edit_self_application_env_by_app_id) | **PUT** /self/applications/{appId}/env | \n[**edit_self_application_env_by_app_id_and_env_name**](SelfApi.md#edit_self_application_env_by_app_id_and_env_name) | **PUT** /self/applications/{appId}/env/{envName} | \n[**edit_user**](SelfApi.md#edit_user) | **PUT** /self | \n[**fav_mfa**](SelfApi.md#fav_mfa) | **PUT** /self/mfa/{kind} | \n[**get_addon_sso_data**](SelfApi.md#get_addon_sso_data) | **GET** /self/addons/{addonId}/sso | \n[**get_application_deployment**](SelfApi.md#get_application_deployment) | **GET** /self/applications/{appId}/deployments/{deploymentId} | \n[**get_application_deployments**](SelfApi.md#get_application_deployments) | **GET** /self/applications/{appId}/deployments | \n[**get_backup_codes**](SelfApi.md#get_backup_codes) | **GET** /self/mfa/{kind}/backupcodes | \n[**get_confirmation_email**](SelfApi.md#get_confirmation_email) | **GET** /self/confirmation_email | \n[**get_consumptions**](SelfApi.md#get_consumptions) | **GET** /self/consumptions | \n[**get_email_addresses**](SelfApi.md#get_email_addresses) | **GET** /self/emails | \n[**get_id**](SelfApi.md#get_id) | **GET** /self/id | 
\n[**get_self_addon_by_id**](SelfApi.md#get_self_addon_by_id) | **GET** /self/addons/{addonId} | \n[**get_self_addon_env_by_addon_id**](SelfApi.md#get_self_addon_env_by_addon_id) | **GET** /self/addons/{addonId}/env | \n[**get_self_addon_tags_by_addon_id**](SelfApi.md#get_self_addon_tags_by_addon_id) | **GET** /self/addons/{addonId}/tags | \n[**get_self_addons**](SelfApi.md#get_self_addons) | **GET** /self/addons | \n[**get_self_addons_linked_to_application_by_app_id**](SelfApi.md#get_self_addons_linked_to_application_by_app_id) | **GET** /self/applications/{appId}/addons | \n[**get_self_amount**](SelfApi.md#get_self_amount) | **GET** /self/credits | \n[**get_self_application_branches_by_app_id**](SelfApi.md#get_self_application_branches_by_app_id) | **GET** /self/applications/{appId}/branches | \n[**get_self_application_by_app_id**](SelfApi.md#get_self_application_by_app_id) | **GET** /self/applications/{appId} | \n[**get_self_application_dependencies_by_app_id**](SelfApi.md#get_self_application_dependencies_by_app_id) | **GET** /self/applications/{appId}/dependencies | \n[**get_self_application_dependencies_env_by_app_id**](SelfApi.md#get_self_application_dependencies_env_by_app_id) | **GET** /self/applications/{appId}/dependencies/env | \n[**get_self_application_dependents**](SelfApi.md#get_self_application_dependents) | **GET** /self/applications/{appId}/dependents | \n[**get_self_application_env_by_app_id**](SelfApi.md#get_self_application_env_by_app_id) | **GET** /self/applications/{appId}/env | \n[**get_self_application_instance_by_app_and_instance_id**](SelfApi.md#get_self_application_instance_by_app_and_instance_id) | **GET** /self/applications/{appId}/instances/{instanceId} | \n[**get_self_application_instances_by_app_id**](SelfApi.md#get_self_application_instances_by_app_id) | **GET** /self/applications/{appId}/instances | \n[**get_self_application_tags_by_app_id**](SelfApi.md#get_self_application_tags_by_app_id) | **GET** /self/applications/{appId}/tags 
| \n[**get_self_applications**](SelfApi.md#get_self_applications) | **GET** /self/applications | \n[**get_self_applications_linked_to_addon_by_addon_id**](SelfApi.md#get_self_applications_linked_to_addon_by_addon_id) | **GET** /self/addons/{addonId}/applications | \n[**get_self_cli_tokens**](SelfApi.md#get_self_cli_tokens) | **GET** /self/cli_tokens | \n[**get_self_consumer**](SelfApi.md#get_self_consumer) | **GET** /self/consumers/{key} | \n[**get_self_consumer_secret**](SelfApi.md#get_self_consumer_secret) | **GET** /self/consumers/{key}/secret | \n[**get_self_consumers**](SelfApi.md#get_self_consumers) | **GET** /self/consumers | \n[**get_self_default_method**](SelfApi.md#get_self_default_method) | **GET** /self/payments/methods/default | \n[**get_self_env_of_addons_linked_to_application_by_app_id**](SelfApi.md#get_self_env_of_addons_linked_to_application_by_app_id) | **GET** /self/applications/{appId}/addons/env | \n[**get_self_exposed_env_by_app_id**](SelfApi.md#get_self_exposed_env_by_app_id) | **GET** /self/applications/{appId}/exposed_env | \n[**get_self_favourite_vhost_by_app_id**](SelfApi.md#get_self_favourite_vhost_by_app_id) | **GET** /self/applications/{appId}/vhosts/favourite | \n[**get_self_instances_for_all_apps**](SelfApi.md#get_self_instances_for_all_apps) | **GET** /self/instances | \n[**get_self_invoice_by_id**](SelfApi.md#get_self_invoice_by_id) | **GET** /self/payments/billings/{bid} | \n[**get_self_invoices**](SelfApi.md#get_self_invoices) | **GET** /self/payments/billings | \n[**get_self_monthly_invoice**](SelfApi.md#get_self_monthly_invoice) | **GET** /self/payments/monthlyinvoice | \n[**get_self_payment_info**](SelfApi.md#get_self_payment_info) | **GET** /self/payment-info | \n[**get_self_payment_methods**](SelfApi.md#get_self_payment_methods) | **GET** /self/payments/methods | \n[**get_self_pdf_invoice_by_id**](SelfApi.md#get_self_pdf_invoice_by_id) | **GET** /self/payments/billings/{bid}.pdf | 
\n[**get_self_price_with_tax**](SelfApi.md#get_self_price_with_tax) | **GET** /self/payments/fullprice/{price} | \n[**get_self_recurrent_payment**](SelfApi.md#get_self_recurrent_payment) | **GET** /self/payments/recurring | \n[**get_self_stripe_token**](SelfApi.md#get_self_stripe_token) | **GET** /self/payments/tokens/stripe | \n[**get_self_tokens**](SelfApi.md#get_self_tokens) | **GET** /self/tokens | \n[**get_self_vhost_by_app_id**](SelfApi.md#get_self_vhost_by_app_id) | **GET** /self/applications/{appId}/vhosts | \n[**get_ssh_keys**](SelfApi.md#get_ssh_keys) | **GET** /self/keys | \n[**get_summary**](SelfApi.md#get_summary) | **GET** /summary | \n[**get_user**](SelfApi.md#get_user) | **GET** /self | \n[**link_self_addon_to_application_by_app_id**](SelfApi.md#link_self_addon_to_application_by_app_id) | **POST** /self/applications/{appId}/addons | \n[**mark_self_favourite_vhost_by_app_id**](SelfApi.md#mark_self_favourite_vhost_by_app_id) | **PUT** /self/applications/{appId}/vhosts/favourite | \n[**preorder_self_addon**](SelfApi.md#preorder_self_addon) | **POST** /self/addons/preorders | \n[**provision_self_addon**](SelfApi.md#provision_self_addon) | **POST** /self/addons | \n[**redeploy_self_application_by_app_id**](SelfApi.md#redeploy_self_application_by_app_id) | **POST** /self/applications/{appId}/instances | \n[**remove_email_address**](SelfApi.md#remove_email_address) | **DELETE** /self/emails/{email} | \n[**remove_self_application_env_by_app_id_and_env_name**](SelfApi.md#remove_self_application_env_by_app_id_and_env_name) | **DELETE** /self/applications/{appId}/env/{envName} | \n[**remove_self_vhost_by_app_id**](SelfApi.md#remove_self_vhost_by_app_id) | **DELETE** /self/applications/{appId}/vhosts/{domain} | \n[**remove_ssh_key**](SelfApi.md#remove_ssh_key) | **DELETE** /self/keys/{key} | \n[**rename_addon**](SelfApi.md#rename_addon) | **PUT** /self/addons/{addonId} | \n[**revoke_all_tokens**](SelfApi.md#revoke_all_tokens) | **DELETE** /self/tokens | 
\n[**revoke_token**](SelfApi.md#revoke_token) | **DELETE** /self/tokens/{token} | \n[**set_self_application_branch_by_app_id**](SelfApi.md#set_self_application_branch_by_app_id) | **PUT** /self/applications/{appId}/branch | \n[**set_self_build_instance_flavor_by_app_id**](SelfApi.md#set_self_build_instance_flavor_by_app_id) | **PUT** /self/applications/{appId}/buildflavor | \n[**set_self_default_method**](SelfApi.md#set_self_default_method) | **PUT** /self/payments/methods/default | \n[**set_self_max_credits_per_month**](SelfApi.md#set_self_max_credits_per_month) | **PUT** /self/payments/monthlyinvoice/maxcredit | \n[**set_user_avatar_from_file**](SelfApi.md#set_user_avatar_from_file) | **PUT** /self/avatar | \n[**undeploy_self_application_by_app_id**](SelfApi.md#undeploy_self_application_by_app_id) | **DELETE** /self/applications/{appId}/instances | \n[**unlink_selfddon_from_application_by_app_and_addon_id**](SelfApi.md#unlink_selfddon_from_application_by_app_and_addon_id) | **DELETE** /self/applications/{appId}/addons/{addonId} | \n[**unmark_self_favourite_vhost_by_app_id**](SelfApi.md#unmark_self_favourite_vhost_by_app_id) | **DELETE** /self/applications/{appId}/vhosts/favourite | \n[**update_self_consumer**](SelfApi.md#update_self_consumer) | **PUT** /self/consumers/{key} | \n[**update_self_exposed_env_by_app_id**](SelfApi.md#update_self_exposed_env_by_app_id) | **PUT** /self/applications/{appId}/exposed_env | \n[**validate_email**](SelfApi.md#validate_email) | **GET** /self/validate_email | \n[**validate_mfa**](SelfApi.md#validate_mfa) | **POST** /self/mfa/{kind}/confirmation | \n\n\n# **add_email_address**\n> Message add_email_address(email)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported 
configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n email = \"email_example\" # str | \n body = \"body_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_email_address(email)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->add_email_address: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.add_email_address(email, body=body)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->add_email_address: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **email** | **str**| |\n **body** | **str**| | [optional]\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_self_addon_tag_by_addon_id**\n> [str] add_self_addon_tag_by_addon_id(addon_id, tag)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to 
https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n addon_id = \"addonId_example\" # str | \n tag = \"tag_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_self_addon_tag_by_addon_id(addon_id, tag)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->add_self_addon_tag_by_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n **tag** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_self_application**\n> ApplicationView add_self_application(wannabe_application)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_application import WannabeApplication\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration 
parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n wannabe_application = WannabeApplication(\n name=\"name_example\",\n description=\"description_example\",\n zone=\"zone_example\",\n deploy=\"deploy_example\",\n shutdownable=True,\n instance_type=\"instance_type_example\",\n instance_version=\"instance_version_example\",\n instance_variant=\"instance_variant_example\",\n instance_lifetime=\"REGULAR\",\n min_instances=1,\n max_instances=1,\n min_flavor=\"min_flavor_example\",\n max_flavor=\"max_flavor_example\",\n tags=[\n \"tags_example\",\n ],\n archived=True,\n sticky_sessions=True,\n homogeneous=True,\n favourite=True,\n cancel_on_push=True,\n separate_build=True,\n build_flavor=\"build_flavor_example\",\n oauth_service=\"oauth_service_example\",\n oauth_app_id=\"oauth_app_id_example\",\n oauth_app=WannabeOauthApp(\n owner=\"owner_example\",\n name=\"name_example\",\n ),\n appliance_id=\"appliance_id_example\",\n branch=\"branch_example\",\n force_https=\"ENABLED\",\n ) # WannabeApplication | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_self_application(wannabe_application)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->add_self_application: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wannabe_application** | [**WannabeApplication**](WannabeApplication.md)| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP 
response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_self_application_dependency_by_app_id**\n> Message add_self_application_dependency_by_app_id(app_id, dependency_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n dependency_id = \"dependencyId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_self_application_dependency_by_app_id(app_id, dependency_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->add_self_application_dependency_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **dependency_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers 
|\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_self_application_tag_by_app_id**\n> [str] add_self_application_tag_by_app_id(app_id, tag)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n tag = \"tag_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_self_application_tag_by_app_id(app_id, tag)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->add_self_application_tag_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **tag** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to 
README]](../README.md)\n\n# **add_self_payment_method**\n> PaymentMethodView add_self_payment_method(payment_data)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.payment_data import PaymentData\nfrom openapi_client.model.payment_method_view import PaymentMethodView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n payment_data = PaymentData(\n type=\"NEW_CARD\",\n token=\"token_example\",\n device_data=\"device_data_example\",\n ) # PaymentData | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_self_payment_method(payment_data)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->add_self_payment_method: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **payment_data** | [**PaymentData**](PaymentData.md)| |\n\n### Return type\n\n[**PaymentMethodView**](PaymentMethodView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# 
**add_self_vhost_by_app_id**\n> Message add_self_vhost_by_app_id(app_id, domain)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n domain = \"domain_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_self_vhost_by_app_id(app_id, domain)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->add_self_vhost_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **domain** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **add_ssh_key**\n> Message add_ssh_key(key)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import 
Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n key = \"key_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.add_ssh_key(key)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->add_ssh_key: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **key** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **buy_self_drops**\n> InvoiceRendering buy_self_drops(wanna_buy_package)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wanna_buy_package import WannaBuyPackage\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = 
openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n wanna_buy_package = WannaBuyPackage(\n package_id=1,\n currency=\"currency_example\",\n coupon=\"coupon_example\",\n drop_quantity=3.14,\n ) # WannaBuyPackage | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.buy_self_drops(wanna_buy_package)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->buy_self_drops: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wanna_buy_package** | [**WannaBuyPackage**](WannaBuyPackage.md)| |\n\n### Return type\n\n[**InvoiceRendering**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **cancel_deploy**\n> Message cancel_deploy(app_id, deployment_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with 
an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n deployment_id = \"deploymentId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.cancel_deploy(app_id, deployment_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->cancel_deploy: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **deployment_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **change_self_addon_plan_by_addon_id**\n> AddonView change_self_addon_plan_by_addon_id(addon_id, wannabe_plan_change)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_plan_change import WannabePlanChange\nfrom openapi_client.model.addon_view import AddonView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # 
Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n addon_id = \"addonId_example\" # str | \n wannabe_plan_change = WannabePlanChange(\n plan_id=\"plan_id_example\",\n region=\"region_example\",\n version=\"version_example\",\n ) # WannabePlanChange | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.change_self_addon_plan_by_addon_id(addon_id, wannabe_plan_change)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->change_self_addon_plan_by_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n **wannabe_plan_change** | [**WannabePlanChange**](WannabePlanChange.md)| |\n\n### Return type\n\n[**AddonView**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **change_user_password**\n> Message change_user_password(wannabe_password)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_password import WannabePassword\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with 
an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n wannabe_password = WannabePassword(\n old_password=\"old_password_example\",\n new_password=\"new_password_example\",\n drop_tokens=True,\n ) # WannabePassword | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.change_user_password(wannabe_password)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->change_user_password: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wannabe_password** | [**WannabePassword**](WannabePassword.md)| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **choose_self_payment_provider**\n> NextInPaymentFlow choose_self_payment_provider(bid, payment_provider_selection)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.next_in_payment_flow import NextInPaymentFlow\nfrom openapi_client.model.payment_provider_selection import PaymentProviderSelection\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = 
\"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n bid = \"bid_example\" # str | \n payment_provider_selection = PaymentProviderSelection(\n provider=\"PAYPAL\",\n ) # PaymentProviderSelection | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.choose_self_payment_provider(bid, payment_provider_selection)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->choose_self_payment_provider: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **bid** | **str**| |\n **payment_provider_selection** | [**PaymentProviderSelection**](PaymentProviderSelection.md)| |\n\n### Return type\n\n[**NextInPaymentFlow**](NextInPaymentFlow.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **create_mfa**\n> create_mfa(kind)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith 
openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n kind = \"kind_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.create_mfa(kind)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->create_mfa: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **kind** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **create_self_consumer**\n> OAuth1ConsumerView create_self_consumer(wannabe_o_auth1_consumer)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom openapi_client.model.wannabe_o_auth1_consumer import WannabeOAuth1Consumer\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n wannabe_o_auth1_consumer = WannabeOAuth1Consumer(\n name=\"name_example\",\n 
description=\"description_example\",\n url=\"url_example\",\n picture=\"picture_example\",\n base_url=\"base_url_example\",\n rights={\n \"key\": True,\n },\n ) # WannabeOAuth1Consumer | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.create_self_consumer(wannabe_o_auth1_consumer)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->create_self_consumer: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wannabe_o_auth1_consumer** | [**WannabeOAuth1Consumer**](WannabeOAuth1Consumer.md)| |\n\n### Return type\n\n[**OAuth1ConsumerView**](OAuth1ConsumerView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_mfa**\n> delete_mfa(kind)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n kind = \"kind_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n 
api_instance.delete_mfa(kind)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->delete_mfa: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **kind** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_self_addon_tag_by_addon_id**\n> [str] delete_self_addon_tag_by_addon_id(addon_id, tag)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n addon_id = \"addonId_example\" # str | \n tag = \"tag_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.delete_self_addon_tag_by_addon_id(addon_id, tag)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->delete_self_addon_tag_by_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | 
------------- | -------------\n **addon_id** | **str**| |\n **tag** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_self_application_by_app_id**\n> Message delete_self_application_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.delete_self_application_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->delete_self_application_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: 
application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_self_application_dependency_by_app_id**\n> delete_self_application_dependency_by_app_id(app_id, dependency_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n dependency_id = \"dependencyId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_self_application_dependency_by_app_id(app_id, dependency_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->delete_self_application_dependency_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **dependency_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - 
|\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_self_application_tag_app_id**\n> [str] delete_self_application_tag_app_id(app_id, tag)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n tag = \"tag_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.delete_self_application_tag_app_id(app_id, tag)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->delete_self_application_tag_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **tag** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_self_card**\n> 
delete_self_card(m_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n m_id = \"mId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_self_card(m_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->delete_self_card: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **m_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_self_consumer**\n> delete_self_consumer(key)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = 
\"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n key = \"key_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.delete_self_consumer(key)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->delete_self_consumer: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **key** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_self_purchase_order**\n> delete_self_purchase_order(bid)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n bid = \"bid_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n 
api_instance.delete_self_purchase_order(bid)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->delete_self_purchase_order: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **bid** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_self_recurrent_payment**\n> delete_self_recurrent_payment()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_instance.delete_self_recurrent_payment()\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->delete_self_recurrent_payment: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not 
defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_user**\n> Message delete_user()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.delete_user()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->delete_user: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **deprovision_self_addon_by_id**\n> Message 
deprovision_self_addon_by_id(addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.deprovision_self_addon_by_id(addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->deprovision_self_addon_by_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_self_application_by_app_id**\n> ApplicationView edit_self_application_by_app_id(app_id, wannabe_application)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_application import 
WannabeApplication\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n wannabe_application = WannabeApplication(\n name=\"name_example\",\n description=\"description_example\",\n zone=\"zone_example\",\n deploy=\"deploy_example\",\n shutdownable=True,\n instance_type=\"instance_type_example\",\n instance_version=\"instance_version_example\",\n instance_variant=\"instance_variant_example\",\n instance_lifetime=\"REGULAR\",\n min_instances=1,\n max_instances=1,\n min_flavor=\"min_flavor_example\",\n max_flavor=\"max_flavor_example\",\n tags=[\n \"tags_example\",\n ],\n archived=True,\n sticky_sessions=True,\n homogeneous=True,\n favourite=True,\n cancel_on_push=True,\n separate_build=True,\n build_flavor=\"build_flavor_example\",\n oauth_service=\"oauth_service_example\",\n oauth_app_id=\"oauth_app_id_example\",\n oauth_app=WannabeOauthApp(\n owner=\"owner_example\",\n name=\"name_example\",\n ),\n appliance_id=\"appliance_id_example\",\n branch=\"branch_example\",\n force_https=\"ENABLED\",\n ) # WannabeApplication | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_self_application_by_app_id(app_id, wannabe_application)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->edit_self_application_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | 
-------------\n **app_id** | **str**| |\n **wannabe_application** | [**WannabeApplication**](WannabeApplication.md)| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_self_application_env_by_app_id**\n> ApplicationView edit_self_application_env_by_app_id(app_id, body)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n body = \"body_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_self_application_env_by_app_id(app_id, body)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->edit_self_application_env_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **body** | **str**| 
|\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_self_application_env_by_app_id_and_env_name**\n> ApplicationView edit_self_application_env_by_app_id_and_env_name(app_id, env_name, wannabe_value)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_value import WannabeValue\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n env_name = \"envName_example\" # str | \n wannabe_value = WannabeValue(\n value=\"value_example\",\n ) # WannabeValue | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_self_application_env_by_app_id_and_env_name(app_id, env_name, wannabe_value)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->edit_self_application_env_by_app_id_and_env_name: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | 
Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **env_name** | **str**| |\n **wannabe_value** | [**WannabeValue**](WannabeValue.md)| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **edit_user**\n> UserView edit_user(wannabe_user)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.user_view import UserView\nfrom openapi_client.model.wannabe_user import WannabeUser\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n wannabe_user = WannabeUser(\n email=\"email_example\",\n name=\"name_example\",\n password=\"password_example\",\n phone=\"phone_example\",\n address=\"address_example\",\n city=\"city_example\",\n zipcode=\"zipcode_example\",\n country=\"country_example\",\n lang=\"lang_example\",\n terms=True,\n subscription_source=\"subscription_source_example\",\n ) # WannabeUser | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.edit_user(wannabe_user)\n 
pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->edit_user: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wannabe_user** | [**WannabeUser**](WannabeUser.md)| |\n\n### Return type\n\n[**UserView**](UserView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **fav_mfa**\n> fav_mfa(kind, wannabe_mfa_fav)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_mfa_fav import WannabeMFAFav\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n kind = \"kind_example\" # str | \n wannabe_mfa_fav = WannabeMFAFav(\n favourite=True,\n ) # WannabeMFAFav | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.fav_mfa(kind, wannabe_mfa_fav)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->fav_mfa: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | 
-------------\n **kind** | **str**| |\n **wannabe_mfa_fav** | [**WannabeMFAFav**](WannabeMFAFav.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_addon_sso_data**\n> AddonSSOData get_addon_sso_data(addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.addon_sso_data import AddonSSOData\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_addon_sso_data(addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_addon_sso_data: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n\n### Return type\n\n[**AddonSSOData**](AddonSSOData.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - 
**Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_deployment**\n> get_application_deployment(app_id, deployment_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n deployment_id = \"deploymentId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.get_application_deployment(app_id, deployment_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_application_deployment: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **deployment_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back 
to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_application_deployments**\n> [DeploymentView] get_application_deployments(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.deployment_view import DeploymentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n limit = \"limit_example\" # str | (optional)\n offset = \"offset_example\" # str | (optional)\n action = \"action_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_application_deployments(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_application_deployments: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_application_deployments(app_id, limit=limit, offset=offset, action=action)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_application_deployments: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **limit** | **str**| | [optional]\n **offset** | **str**| | [optional]\n **action** | **str**| | 
[optional]\n\n### Return type\n\n[**[DeploymentView]**](DeploymentView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_backup_codes**\n> [MFARecoveryCode] get_backup_codes(kind)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.mfa_recovery_code import MFARecoveryCode\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n kind = \"kind_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_backup_codes(kind)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_backup_codes: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **kind** | **str**| |\n\n### Return type\n\n[**[MFARecoveryCode]**](MFARecoveryCode.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | 
Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_confirmation_email**\n> Message get_confirmation_email()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_confirmation_email()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_confirmation_email: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_consumptions**\n> str get_consumptions()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api 
import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | (optional)\n _from = \"from_example\" # str | (optional)\n to = \"to_example\" # str | (optional)\n _for = \"for_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_consumptions(app_id=app_id, _from=_from, to=to, _for=_for)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_consumptions: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| | [optional]\n **_from** | **str**| | [optional]\n **to** | **str**| | [optional]\n **_for** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_email_addresses**\n> [str] get_email_addresses()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is 
optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_email_addresses()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_email_addresses: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_id**\n> str get_id()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = 
api_instance.get_id()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_id: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_addon_by_id**\n> AddonView get_self_addon_by_id(addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.addon_view import AddonView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_addon_by_id(addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_addon_by_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n\n### Return type\n\n[**AddonView**](AddonView.md)\n\n### Authorization\n\nNo 
authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_addon_env_by_addon_id**\n> [AddonEnvironmentView] get_self_addon_env_by_addon_id(addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.addon_environment_view import AddonEnvironmentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_addon_env_by_addon_id(addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_addon_env_by_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n\n### Return type\n\n[**[AddonEnvironmentView]**](AddonEnvironmentView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | 
Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_addon_tags_by_addon_id**\n> [str] get_self_addon_tags_by_addon_id(addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_addon_tags_by_addon_id(addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_addon_tags_by_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_addons**\n> 
[AddonView] get_self_addons()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.addon_view import AddonView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_addons()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_addons: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[AddonView]**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_addons_linked_to_application_by_app_id**\n> [AddonView] get_self_addons_linked_to_application_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.addon_view import AddonView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration 
parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_addons_linked_to_application_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_addons_linked_to_application_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n[**[AddonView]**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_amount**\n> DropCountView get_self_amount()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.drop_count_view import DropCountView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance 
of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_amount()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_amount: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**DropCountView**](DropCountView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_application_branches_by_app_id**\n> [str] get_self_application_branches_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_application_branches_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_application_branches_by_app_id: %s\\n\" % e)\n```\n\n\n### 
Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_application_by_app_id**\n> ApplicationView get_self_application_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_application_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_application_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization 
required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_application_dependencies_by_app_id**\n> [ApplicationView] get_self_application_dependencies_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_application_dependencies_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_application_dependencies_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n[**[ApplicationView]**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | 
Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_application_dependencies_env_by_app_id**\n> get_self_application_dependencies_env_by_app_id(id, app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n id = \"id_example\" # str | \n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.get_self_application_dependencies_env_by_app_id(id, app_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_application_dependencies_env_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n **app_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_application_dependents**\n> [ApplicationView] get_self_application_dependents(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_application_dependents(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_application_dependents: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n[**[ApplicationView]**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_application_env_by_app_id**\n> [AddonEnvironmentView] get_self_application_env_by_app_id(app_id)\n\n\n\n### 
Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.addon_environment_view import AddonEnvironmentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_application_env_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_application_env_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n[**[AddonEnvironmentView]**](AddonEnvironmentView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_application_instance_by_app_and_instance_id**\n> SuperNovaInstanceView get_self_application_instance_by_app_and_instance_id(app_id, instance_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom 
openapi_client.model.super_nova_instance_view import SuperNovaInstanceView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n instance_id = \"instanceId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_application_instance_by_app_and_instance_id(app_id, instance_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_application_instance_by_app_and_instance_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **instance_id** | **str**| |\n\n### Return type\n\n[**SuperNovaInstanceView**](SuperNovaInstanceView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_application_instances_by_app_id**\n> [SuperNovaInstanceView] get_self_application_instances_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom 
openapi_client.model.super_nova_instance_view import SuperNovaInstanceView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n deployment_id = \"deploymentId_example\" # str | (optional)\n with_deleted = \"withDeleted_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_application_instances_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_application_instances_by_app_id: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_self_application_instances_by_app_id(app_id, deployment_id=deployment_id, with_deleted=with_deleted)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_application_instances_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **deployment_id** | **str**| | [optional]\n **with_deleted** | **str**| | [optional]\n\n### Return type\n\n[**[SuperNovaInstanceView]**](SuperNovaInstanceView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers 
|\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_application_tags_by_app_id**\n> [str] get_self_application_tags_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_application_tags_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_application_tags_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_applications**\n> [ApplicationView] 
get_self_applications()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_applications()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_applications: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[ApplicationView]**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_applications_linked_to_addon_by_addon_id**\n> [ApplicationView] get_self_applications_linked_to_addon_by_addon_id(addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See 
configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_applications_linked_to_addon_by_addon_id(addon_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_applications_linked_to_addon_by_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n\n### Return type\n\n[**[ApplicationView]**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_cli_tokens**\n> [CliTokenView] get_self_cli_tokens()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.cli_token_view import CliTokenView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an 
instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n cli_token = \"cli_token_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_self_cli_tokens(cli_token=cli_token)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_cli_tokens: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **cli_token** | **str**| | [optional]\n\n### Return type\n\n[**[CliTokenView]**](CliTokenView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_consumer**\n> OAuth1ConsumerView get_self_consumer(key)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n key = \"key_example\" # str | 
\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_consumer(key)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_consumer: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **key** | **str**| |\n\n### Return type\n\n[**OAuth1ConsumerView**](OAuth1ConsumerView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_consumer_secret**\n> SecretView get_self_consumer_secret(key)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.secret_view import SecretView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n key = \"key_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_consumer_secret(key)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_consumer_secret: 
%s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **key** | **str**| |\n\n### Return type\n\n[**SecretView**](SecretView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_consumers**\n> get_self_consumers()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_instance.get_self_consumers()\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_consumers: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to 
top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_default_method**\n> PaymentMethodView get_self_default_method()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.payment_method_view import PaymentMethodView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_default_method()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_default_method: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**PaymentMethodView**](PaymentMethodView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_env_of_addons_linked_to_application_by_app_id**\n> [LinkedAddonEnvironmentView] get_self_env_of_addons_linked_to_application_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport 
time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.linked_addon_environment_view import LinkedAddonEnvironmentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_env_of_addons_linked_to_application_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_env_of_addons_linked_to_application_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n[**[LinkedAddonEnvironmentView]**](LinkedAddonEnvironmentView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_exposed_env_by_app_id**\n> str get_self_exposed_env_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and 
defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_exposed_env_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_exposed_env_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_favourite_vhost_by_app_id**\n> VhostView get_self_favourite_vhost_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.vhost_view import VhostView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of 
the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_favourite_vhost_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_favourite_vhost_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n[**VhostView**](VhostView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_instances_for_all_apps**\n> SuperNovaInstanceMap get_self_instances_for_all_apps()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.super_nova_instance_map import SuperNovaInstanceMap\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional 
parameters\n try:\n api_response = api_instance.get_self_instances_for_all_apps()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_instances_for_all_apps: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**SuperNovaInstanceMap**](SuperNovaInstanceMap.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_invoice_by_id**\n> InvoiceRendering get_self_invoice_by_id(bid)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n bid = \"bid_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_invoice_by_id(bid)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_invoice_by_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | 
------------- | -------------\n **bid** | **str**| |\n\n### Return type\n\n[**InvoiceRendering**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_invoices**\n> [InvoiceRendering] get_self_invoices()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_invoices()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_invoices: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[InvoiceRendering]**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers 
|\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_monthly_invoice**\n> str get_self_monthly_invoice()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_monthly_invoice()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_monthly_invoice: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_payment_info**\n> PaymentInfoView get_self_payment_info()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.payment_info_view import 
PaymentInfoView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_payment_info()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_payment_info: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**PaymentInfoView**](PaymentInfoView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_payment_methods**\n> [PaymentMethodView] get_self_payment_methods()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.payment_method_view import PaymentMethodView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith 
openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_payment_methods()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_payment_methods: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[PaymentMethodView]**](PaymentMethodView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_pdf_invoice_by_id**\n> get_self_pdf_invoice_by_id(bid)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n bid = \"bid_example\" # str | \n token = \"token_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_instance.get_self_pdf_invoice_by_id(bid)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_pdf_invoice_by_id: 
%s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.get_self_pdf_invoice_by_id(bid, token=token)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_pdf_invoice_by_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **bid** | **str**| |\n **token** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/pdf\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_price_with_tax**\n> PriceWithTaxInfo get_self_price_with_tax(price)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.price_with_tax_info import PriceWithTaxInfo\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n price = \"price_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_price_with_tax(price)\n pprint(api_response)\n except 
openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_price_with_tax: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **price** | **str**| |\n\n### Return type\n\n[**PriceWithTaxInfo**](PriceWithTaxInfo.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_recurrent_payment**\n> RecurrentPaymentView get_self_recurrent_payment()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.recurrent_payment_view import RecurrentPaymentView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_recurrent_payment()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_recurrent_payment: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**RecurrentPaymentView**](RecurrentPaymentView.md)\n\n### 
Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_stripe_token**\n> BraintreeToken get_self_stripe_token()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.braintree_token import BraintreeToken\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_stripe_token()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_stripe_token: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**BraintreeToken**](BraintreeToken.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_tokens**\n> [OAuth1AccessTokenView] get_self_tokens()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.o_auth1_access_token_view import OAuth1AccessTokenView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_self_tokens()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_tokens: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[OAuth1AccessTokenView]**](OAuth1AccessTokenView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_self_vhost_by_app_id**\n> [VhostView] get_self_vhost_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.vhost_view import VhostView\nfrom pprint import pprint\n# Defining the host is 
optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_self_vhost_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_self_vhost_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n[**[VhostView]**](VhostView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_ssh_keys**\n> [SshKeyView] get_ssh_keys()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.ssh_key_view import SshKeyView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith 
openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_ssh_keys()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_ssh_keys: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[SshKeyView]**](SshKeyView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_summary**\n> Summary get_summary()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.summary import Summary\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n full = \"full_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_summary(full=full)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_summary: %s\\n\" % 
e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **full** | **str**| | [optional]\n\n### Return type\n\n[**Summary**](Summary.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_user**\n> UserView get_user()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.user_view import UserView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_user()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->get_user: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**UserView**](UserView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers 
|\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **link_self_addon_to_application_by_app_id**\n> link_self_addon_to_application_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.link_self_addon_to_application_by_app_id(app_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->link_self_addon_to_application_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# 
**mark_self_favourite_vhost_by_app_id**\n> VhostView mark_self_favourite_vhost_by_app_id(app_id, vhost_view)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.vhost_view import VhostView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n vhost_view = VhostView(\n fqdn=\"fqdn_example\",\n ) # VhostView | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.mark_self_favourite_vhost_by_app_id(app_id, vhost_view)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->mark_self_favourite_vhost_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **vhost_view** | [**VhostView**](VhostView.md)| |\n\n### Return type\n\n[**VhostView**](VhostView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **preorder_self_addon**\n> InvoiceRendering 
preorder_self_addon(wannabe_addon_provision)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_addon_provision import WannabeAddonProvision\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n wannabe_addon_provision = WannabeAddonProvision(\n provider_id=\"provider_id_example\",\n plan=\"plan_example\",\n linked_app=\"linked_app_example\",\n name=\"name_example\",\n region=\"region_example\",\n options={\n \"key\": \"key_example\",\n },\n version=\"version_example\",\n payment_intent=SetupIntentView(\n owner_id=\"owner_id_example\",\n id=\"id_example\",\n client_secret=\"client_secret_example\",\n customer=\"customer_example\",\n ),\n payment_method_type=\"CREDITCARD\",\n sepa_source_id=\"sepa_source_id_example\",\n ) # WannabeAddonProvision | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.preorder_self_addon(wannabe_addon_provision)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->preorder_self_addon: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wannabe_addon_provision** | [**WannabeAddonProvision**](WannabeAddonProvision.md)| |\n\n### Return type\n\n[**InvoiceRendering**](InvoiceRendering.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n 
- **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **provision_self_addon**\n> AddonView provision_self_addon(wannabe_addon_provision)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.addon_view import AddonView\nfrom openapi_client.model.wannabe_addon_provision import WannabeAddonProvision\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n wannabe_addon_provision = WannabeAddonProvision(\n provider_id=\"provider_id_example\",\n plan=\"plan_example\",\n linked_app=\"linked_app_example\",\n name=\"name_example\",\n region=\"region_example\",\n options={\n \"key\": \"key_example\",\n },\n version=\"version_example\",\n payment_intent=SetupIntentView(\n owner_id=\"owner_id_example\",\n id=\"id_example\",\n client_secret=\"client_secret_example\",\n customer=\"customer_example\",\n ),\n payment_method_type=\"CREDITCARD\",\n sepa_source_id=\"sepa_source_id_example\",\n ) # WannabeAddonProvision | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.provision_self_addon(wannabe_addon_provision)\n pprint(api_response)\n except openapi_client.ApiException as e:\n 
print(\"Exception when calling SelfApi->provision_self_addon: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wannabe_addon_provision** | [**WannabeAddonProvision**](WannabeAddonProvision.md)| |\n\n### Return type\n\n[**AddonView**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **redeploy_self_application_by_app_id**\n> Message redeploy_self_application_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n commit = \"commit_example\" # str | (optional)\n use_cache = \"useCache_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.redeploy_self_application_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->redeploy_self_application_by_app_id: 
%s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.redeploy_self_application_by_app_id(app_id, commit=commit, use_cache=use_cache)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->redeploy_self_application_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **commit** | **str**| | [optional]\n **use_cache** | **str**| | [optional]\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **remove_email_address**\n> Message remove_email_address(email)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n email = \"email_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = 
api_instance.remove_email_address(email)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->remove_email_address: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **email** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **remove_self_application_env_by_app_id_and_env_name**\n> ApplicationView remove_self_application_env_by_app_id_and_env_name(app_id, env_name)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n env_name = \"envName_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.remove_self_application_env_by_app_id_and_env_name(app_id, env_name)\n pprint(api_response)\n except openapi_client.ApiException as e:\n 
print(\"Exception when calling SelfApi->remove_self_application_env_by_app_id_and_env_name: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **env_name** | **str**| |\n\n### Return type\n\n[**ApplicationView**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **remove_self_vhost_by_app_id**\n> Message remove_self_vhost_by_app_id(app_id, domain)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n domain = \"domain_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.remove_self_vhost_by_app_id(app_id, domain)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->remove_self_vhost_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | 
Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **domain** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **remove_ssh_key**\n> Message remove_ssh_key(key)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n key = \"key_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.remove_ssh_key(key)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->remove_ssh_key: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **key** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP 
response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **rename_addon**\n> AddonView rename_addon(addon_id, wannabe_addon_provision)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.addon_view import AddonView\nfrom openapi_client.model.wannabe_addon_provision import WannabeAddonProvision\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n addon_id = \"addonId_example\" # str | \n wannabe_addon_provision = WannabeAddonProvision(\n provider_id=\"provider_id_example\",\n plan=\"plan_example\",\n linked_app=\"linked_app_example\",\n name=\"name_example\",\n region=\"region_example\",\n options={\n \"key\": \"key_example\",\n },\n version=\"version_example\",\n payment_intent=SetupIntentView(\n owner_id=\"owner_id_example\",\n id=\"id_example\",\n client_secret=\"client_secret_example\",\n customer=\"customer_example\",\n ),\n payment_method_type=\"CREDITCARD\",\n sepa_source_id=\"sepa_source_id_example\",\n ) # WannabeAddonProvision | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.rename_addon(addon_id, wannabe_addon_provision)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling 
SelfApi->rename_addon: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **addon_id** | **str**| |\n **wannabe_addon_provision** | [**WannabeAddonProvision**](WannabeAddonProvision.md)| |\n\n### Return type\n\n[**AddonView**](AddonView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **revoke_all_tokens**\n> Message revoke_all_tokens()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.revoke_all_tokens()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->revoke_all_tokens: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not 
defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **revoke_token**\n> Message revoke_token(token)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n token = \"token_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.revoke_token(token)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->revoke_token: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **token** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **set_self_application_branch_by_app_id**\n> set_self_application_branch_by_app_id(app_id, wannabe_branch)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_branch import WannabeBranch\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n wannabe_branch = WannabeBranch(\n branch=\"branch_example\",\n ) # WannabeBranch | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.set_self_application_branch_by_app_id(app_id, wannabe_branch)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->set_self_application_branch_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **wannabe_branch** | [**WannabeBranch**](WannabeBranch.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to 
README]](../README.md)\n\n# **set_self_build_instance_flavor_by_app_id**\n> set_self_build_instance_flavor_by_app_id(app_id, wannabe_build_flavor)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_build_flavor import WannabeBuildFlavor\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n wannabe_build_flavor = WannabeBuildFlavor(\n flavor_name=\"flavor_name_example\",\n ) # WannabeBuildFlavor | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.set_self_build_instance_flavor_by_app_id(app_id, wannabe_build_flavor)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->set_self_build_instance_flavor_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **wannabe_build_flavor** | [**WannabeBuildFlavor**](WannabeBuildFlavor.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **set_self_default_method**\n> set_self_default_method(payment_data)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.payment_data import PaymentData\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n payment_data = PaymentData(\n type=\"NEW_CARD\",\n token=\"token_example\",\n device_data=\"device_data_example\",\n ) # PaymentData | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.set_self_default_method(payment_data)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->set_self_default_method: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **payment_data** | [**PaymentData**](PaymentData.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **set_self_max_credits_per_month**\n> WannabeMaxCredits 
set_self_max_credits_per_month(wannabe_max_credits)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_max_credits import WannabeMaxCredits\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n wannabe_max_credits = WannabeMaxCredits(\n max=3.14,\n ) # WannabeMaxCredits | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.set_self_max_credits_per_month(wannabe_max_credits)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->set_self_max_credits_per_month: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wannabe_max_credits** | [**WannabeMaxCredits**](WannabeMaxCredits.md)| |\n\n### Return type\n\n[**WannabeMaxCredits**](WannabeMaxCredits.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **set_user_avatar_from_file**\n> UrlView set_user_avatar_from_file(body)\n\n\n\n### Example\n\n```python\nimport time\nimport 
openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.url_view import UrlView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n body = open('/path/to/file', 'rb') # file_type | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.set_user_avatar_from_file(body)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->set_user_avatar_from_file: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **body** | **file_type**| |\n\n### Return type\n\n[**UrlView**](UrlView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: image/bmp, image/gif, image/jpeg, image/png, image/tiff\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **undeploy_self_application_by_app_id**\n> Message undeploy_self_application_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to 
https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.undeploy_self_application_by_app_id(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->undeploy_self_application_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **unlink_selfddon_from_application_by_app_and_addon_id**\n> unlink_selfddon_from_application_by_app_and_addon_id(app_id, addon_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the 
API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n addon_id = \"addonId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.unlink_selfddon_from_application_by_app_and_addon_id(app_id, addon_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->unlink_selfddon_from_application_by_app_and_addon_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **addon_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **unmark_self_favourite_vhost_by_app_id**\n> unmark_self_favourite_vhost_by_app_id(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n\n # example 
passing only required values which don't have defaults set\n try:\n api_instance.unmark_self_favourite_vhost_by_app_id(app_id)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->unmark_self_favourite_vhost_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **update_self_consumer**\n> OAuth1ConsumerView update_self_consumer(key, wannabe_o_auth1_consumer)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom openapi_client.model.wannabe_o_auth1_consumer import WannabeOAuth1Consumer\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n key = \"key_example\" # str | \n wannabe_o_auth1_consumer = WannabeOAuth1Consumer(\n name=\"name_example\",\n description=\"description_example\",\n url=\"url_example\",\n 
picture=\"picture_example\",\n base_url=\"base_url_example\",\n rights={\n \"key\": True,\n },\n ) # WannabeOAuth1Consumer | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.update_self_consumer(key, wannabe_o_auth1_consumer)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->update_self_consumer: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **key** | **str**| |\n **wannabe_o_auth1_consumer** | [**WannabeOAuth1Consumer**](WannabeOAuth1Consumer.md)| |\n\n### Return type\n\n[**OAuth1ConsumerView**](OAuth1ConsumerView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **update_self_exposed_env_by_app_id**\n> Message update_self_exposed_env_by_app_id(app_id, body)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n app_id = \"appId_example\" # str | \n 
body = \"body_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.update_self_exposed_env_by_app_id(app_id, body)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->update_self_exposed_env_by_app_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **body** | **str**| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **validate_email**\n> validate_email()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n validation_key = \"validationKey_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.validate_email(validation_key=validation_key)\n except openapi_client.ApiException as e:\n print(\"Exception when calling 
SelfApi->validate_email: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **validation_key** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **validate_mfa**\n> validate_mfa(kind, wannabe_mfa_creds)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import self_api\nfrom openapi_client.model.wannabe_mfa_creds import WannabeMFACreds\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = self_api.SelfApi(api_client)\n kind = \"kind_example\" # str | \n wannabe_mfa_creds = WannabeMFACreds(\n revoke_tokens=True,\n code=\"code_example\",\n ) # WannabeMFACreds | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.validate_mfa(kind, wannabe_mfa_creds)\n except openapi_client.ApiException as e:\n print(\"Exception when calling SelfApi->validate_mfa: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **kind** | 
**str**| |\n **wannabe_mfa_creds** | [**WannabeMFACreds**](WannabeMFACreds.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n" }, { "alpha_fraction": 0.5733082890510559, "alphanum_fraction": 0.5808270573616028, "avg_line_length": 18.22891616821289, "blob_id": "5540421d52301a1bd144bc329f8dbed9df23c30e", "content_id": "0fa00e7221b621971e48186971952e46ce3bb76b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1596, "license_type": "no_license", "max_line_length": 72, "num_lines": 83, "path": "/test/test_auth_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport unittest\n\nimport openapi_client\nfrom openapi_client.api.auth_api import AuthApi # noqa: E501\n\n\nclass TestAuthApi(unittest.TestCase):\n \"\"\"AuthApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = AuthApi() # noqa: E501\n\n def tearDown(self):\n pass\n\n def test_authorize_form(self):\n \"\"\"Test case for authorize_form\n\n \"\"\"\n pass\n\n def test_authorize_token(self):\n \"\"\"Test case for authorize_token\n\n \"\"\"\n pass\n\n def test_get_available_rights(self):\n \"\"\"Test case for get_available_rights\n\n \"\"\"\n pass\n\n def test_get_login_data(self):\n 
\"\"\"Test case for get_login_data\n\n \"\"\"\n pass\n\n def test_post_access_token_request(self):\n \"\"\"Test case for post_access_token_request\n\n \"\"\"\n pass\n\n def test_post_access_token_request_query(self):\n \"\"\"Test case for post_access_token_request_query\n\n \"\"\"\n pass\n\n def test_post_authorize(self):\n \"\"\"Test case for post_authorize\n\n \"\"\"\n pass\n\n def test_post_req_token_request(self):\n \"\"\"Test case for post_req_token_request\n\n \"\"\"\n pass\n\n def test_post_req_token_request_query_string(self):\n \"\"\"Test case for post_req_token_request_query_string\n\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6860189437866211, "alphanum_fraction": 0.6966824531555176, "avg_line_length": 22.44444465637207, "blob_id": "13641e37dc61e643280bdd3cb8b3872c1099c2a4", "content_id": "5d8ca5fae4db4ce3d557fec1872bd1291b86999b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "no_license", "max_line_length": 89, "num_lines": 36, "path": "/test/test_organisation_member_user_view.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.organisation_member_user_view import OrganisationMemberUserView\n\n\nclass TestOrganisationMemberUserView(unittest.TestCase):\n \"\"\"OrganisationMemberUserView unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testOrganisationMemberUserView(self):\n \"\"\"Test OrganisationMemberUserView\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = OrganisationMemberUserView() # noqa: 
E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4288138747215271, "alphanum_fraction": 0.4298468828201294, "avg_line_length": 37.80458068847656, "blob_id": "5d0b5db559acbe05fbb6ddbd603a2b8b141e67b3", "content_id": "a99c4a1030284d26fe1dd1dff1685848c4d3fee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54210, "license_type": "no_license", "max_line_length": 113, "num_lines": 1397, "path": "/openapi_client/api/auth_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom openapi_client.api_client import ApiClient, Endpoint as _Endpoint\nfrom openapi_client.model_utils import ( # noqa: F401\n check_allowed_values,\n check_validations,\n date,\n datetime,\n file_type,\n none_type,\n validate_and_convert_types\n)\nfrom openapi_client.model.message import Message\nfrom openapi_client.model.wannabe_authorization import WannabeAuthorization\n\n\nclass AuthApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def __authorize_form(\n self,\n **kwargs\n ):\n \"\"\"authorize_form # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.authorize_form(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n ccid (str): [optional]\n cctk (str): [optional]\n oauth_token (str): [optional]\n ccid2 (str): [optional]\n cli_token (str): [optional]\n from_oauth (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.authorize_form = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': 
'/oauth/authorize',\n 'operation_id': 'authorize_form',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'ccid',\n 'cctk',\n 'oauth_token',\n 'ccid2',\n 'cli_token',\n 'from_oauth',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'ccid':\n (str,),\n 'cctk':\n (str,),\n 'oauth_token':\n (str,),\n 'ccid2':\n (str,),\n 'cli_token':\n (str,),\n 'from_oauth':\n (str,),\n },\n 'attribute_map': {\n 'ccid': 'ccid',\n 'cctk': 'cctk',\n 'oauth_token': 'oauth_token',\n 'ccid2': 'ccid',\n 'cli_token': 'cli_token',\n 'from_oauth': 'from_oauth',\n },\n 'location_map': {\n 'ccid': 'cookie',\n 'cctk': 'cookie',\n 'oauth_token': 'query',\n 'ccid2': 'query',\n 'cli_token': 'query',\n 'from_oauth': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__authorize_form\n )\n\n def __authorize_token(\n self,\n **kwargs\n ):\n \"\"\"authorize_token # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.authorize_token(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n ccid (str): [optional]\n cctk (str): [optional]\n almighty (str): [optional]\n access_organisations (str): [optional]\n manage_organisations (str): [optional]\n manage_organisations_services (str): [optional]\n manage_organisations_applications (str): [optional]\n manage_organisations_members (str): [optional]\n access_organisations_bills (str): [optional]\n access_organisations_credit_count (str): [optional]\n access_organisations_consumption_statistics (str): [optional]\n access_personal_information (str): [optional]\n manage_personal_information (str): [optional]\n manage_ssh_keys (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.authorize_token = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/oauth/authorize',\n 'operation_id': 'authorize_token',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'ccid',\n 'cctk',\n 'almighty',\n 'access_organisations',\n 'manage_organisations',\n 'manage_organisations_services',\n 'manage_organisations_applications',\n 'manage_organisations_members',\n 'access_organisations_bills',\n 'access_organisations_credit_count',\n 'access_organisations_consumption_statistics',\n 'access_personal_information',\n 'manage_personal_information',\n 'manage_ssh_keys',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'ccid':\n (str,),\n 
'cctk':\n (str,),\n 'almighty':\n (str,),\n 'access_organisations':\n (str,),\n 'manage_organisations':\n (str,),\n 'manage_organisations_services':\n (str,),\n 'manage_organisations_applications':\n (str,),\n 'manage_organisations_members':\n (str,),\n 'access_organisations_bills':\n (str,),\n 'access_organisations_credit_count':\n (str,),\n 'access_organisations_consumption_statistics':\n (str,),\n 'access_personal_information':\n (str,),\n 'manage_personal_information':\n (str,),\n 'manage_ssh_keys':\n (str,),\n },\n 'attribute_map': {\n 'ccid': 'ccid',\n 'cctk': 'cctk',\n 'almighty': 'almighty',\n 'access_organisations': 'access_organisations',\n 'manage_organisations': 'manage_organisations',\n 'manage_organisations_services': 'manage_organisations_services',\n 'manage_organisations_applications': 'manage_organisations_applications',\n 'manage_organisations_members': 'manage_organisations_members',\n 'access_organisations_bills': 'access_organisations_bills',\n 'access_organisations_credit_count': 'access_organisations_credit_count',\n 'access_organisations_consumption_statistics': 'access_organisations_consumption_statistics',\n 'access_personal_information': 'access_personal_information',\n 'manage_personal_information': 'manage_personal_information',\n 'manage_ssh_keys': 'manage_ssh_keys',\n },\n 'location_map': {\n 'ccid': 'cookie',\n 'cctk': 'cookie',\n 'almighty': 'form',\n 'access_organisations': 'form',\n 'manage_organisations': 'form',\n 'manage_organisations_services': 'form',\n 'manage_organisations_applications': 'form',\n 'manage_organisations_members': 'form',\n 'access_organisations_bills': 'form',\n 'access_organisations_credit_count': 'form',\n 'access_organisations_consumption_statistics': 'form',\n 'access_personal_information': 'form',\n 'manage_personal_information': 'form',\n 'manage_ssh_keys': 'form',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html',\n 'application/json'\n ],\n 'content_type': [\n 
'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__authorize_token\n )\n\n def __get_available_rights(\n self,\n **kwargs\n ):\n \"\"\"get_available_rights # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_available_rights(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n 
self.get_available_rights = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/oauth/rights',\n 'operation_id': 'get_available_rights',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_available_rights\n )\n\n def __get_login_data(\n self,\n **kwargs\n ):\n \"\"\"get_login_data # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_login_data(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n oauth_key (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_login_data = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/oauth/login_data',\n 'operation_id': 'get_login_data',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'oauth_key',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'oauth_key':\n (str,),\n },\n 'attribute_map': {\n 'oauth_key': 'oauth_key',\n },\n 'location_map': {\n 'oauth_key': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_login_data\n )\n\n def __post_access_token_request(\n self,\n **kwargs\n ):\n \"\"\"post_access_token_request # 
noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.post_access_token_request(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n oauth_consumer_key (str): [optional]\n oauth_token (str): [optional]\n oauth_signature_method (str): [optional]\n oauth_signature (str): [optional]\n oauth_timestamp (str): [optional]\n oauth_nonce (str): [optional]\n oauth_version (str): [optional]\n oauth_verifier (str): [optional]\n oauth_callback (str): [optional]\n oauth_token_secret (str): [optional]\n oauth_callback_confirmed (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.post_access_token_request = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/oauth/access_token',\n 'operation_id': 'post_access_token_request',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'oauth_consumer_key',\n 'oauth_token',\n 'oauth_signature_method',\n 'oauth_signature',\n 'oauth_timestamp',\n 'oauth_nonce',\n 'oauth_version',\n 'oauth_verifier',\n 'oauth_callback',\n 'oauth_token_secret',\n 'oauth_callback_confirmed',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'oauth_consumer_key':\n (str,),\n 'oauth_token':\n (str,),\n 'oauth_signature_method':\n (str,),\n 'oauth_signature':\n (str,),\n 'oauth_timestamp':\n 
(str,),\n 'oauth_nonce':\n (str,),\n 'oauth_version':\n (str,),\n 'oauth_verifier':\n (str,),\n 'oauth_callback':\n (str,),\n 'oauth_token_secret':\n (str,),\n 'oauth_callback_confirmed':\n (str,),\n },\n 'attribute_map': {\n 'oauth_consumer_key': 'oauth_consumer_key',\n 'oauth_token': 'oauth_token',\n 'oauth_signature_method': 'oauth_signature_method',\n 'oauth_signature': 'oauth_signature',\n 'oauth_timestamp': 'oauth_timestamp',\n 'oauth_nonce': 'oauth_nonce',\n 'oauth_version': 'oauth_version',\n 'oauth_verifier': 'oauth_verifier',\n 'oauth_callback': 'oauth_callback',\n 'oauth_token_secret': 'oauth_token_secret',\n 'oauth_callback_confirmed': 'oauth_callback_confirmed',\n },\n 'location_map': {\n 'oauth_consumer_key': 'form',\n 'oauth_token': 'form',\n 'oauth_signature_method': 'form',\n 'oauth_signature': 'form',\n 'oauth_timestamp': 'form',\n 'oauth_nonce': 'form',\n 'oauth_version': 'form',\n 'oauth_verifier': 'form',\n 'oauth_callback': 'form',\n 'oauth_token_secret': 'form',\n 'oauth_callback_confirmed': 'form',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/x-www-form-urlencoded'\n ],\n 'content_type': [\n 'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__post_access_token_request\n )\n\n def __post_access_token_request_query(\n self,\n **kwargs\n ):\n \"\"\"post_access_token_request_query # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.post_access_token_request_query(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n oauth_consumer_key (str): [optional]\n oauth_token (str): [optional]\n oauth_signature_method (str): [optional]\n oauth_signature (str): [optional]\n oauth_timestamp (str): [optional]\n oauth_nonce (str): [optional]\n oauth_version (str): [optional]\n oauth_verifier (str): [optional]\n oauth_callback (str): [optional]\n oauth_token_secret (str): [optional]\n oauth_callback_confirmed (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = 
kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.post_access_token_request_query = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/oauth/access_token_query',\n 'operation_id': 'post_access_token_request_query',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'oauth_consumer_key',\n 'oauth_token',\n 'oauth_signature_method',\n 'oauth_signature',\n 'oauth_timestamp',\n 'oauth_nonce',\n 'oauth_version',\n 'oauth_verifier',\n 'oauth_callback',\n 'oauth_token_secret',\n 'oauth_callback_confirmed',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'oauth_consumer_key':\n (str,),\n 'oauth_token':\n (str,),\n 'oauth_signature_method':\n (str,),\n 'oauth_signature':\n (str,),\n 'oauth_timestamp':\n (str,),\n 'oauth_nonce':\n (str,),\n 'oauth_version':\n (str,),\n 'oauth_verifier':\n (str,),\n 'oauth_callback':\n (str,),\n 'oauth_token_secret':\n (str,),\n 'oauth_callback_confirmed':\n (str,),\n },\n 'attribute_map': {\n 'oauth_consumer_key': 'oauth_consumer_key',\n 'oauth_token': 'oauth_token',\n 'oauth_signature_method': 'oauth_signature_method',\n 'oauth_signature': 'oauth_signature',\n 'oauth_timestamp': 'oauth_timestamp',\n 'oauth_nonce': 'oauth_nonce',\n 'oauth_version': 'oauth_version',\n 'oauth_verifier': 'oauth_verifier',\n 'oauth_callback': 'oauth_callback',\n 'oauth_token_secret': 'oauth_token_secret',\n 'oauth_callback_confirmed': 'oauth_callback_confirmed',\n },\n 'location_map': {\n 'oauth_consumer_key': 'query',\n 'oauth_token': 'query',\n 'oauth_signature_method': 'query',\n 'oauth_signature': 'query',\n 'oauth_timestamp': 'query',\n 'oauth_nonce': 'query',\n 'oauth_version': 'query',\n 'oauth_verifier': 'query',\n 'oauth_callback': 'query',\n 'oauth_token_secret': 
'query',\n 'oauth_callback_confirmed': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/x-www-form-urlencoded'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__post_access_token_request_query\n )\n\n def __post_authorize(\n self,\n wannabe_authorization,\n **kwargs\n ):\n \"\"\"post_authorize # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.post_authorize(wannabe_authorization, async_req=True)\n >>> result = thread.get()\n\n Args:\n wannabe_authorization (WannabeAuthorization):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['wannabe_authorization'] = \\\n wannabe_authorization\n return self.call_with_http_info(**kwargs)\n\n self.post_authorize = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/authorize',\n 'operation_id': 'post_authorize',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'wannabe_authorization',\n ],\n 'required': [\n 'wannabe_authorization',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'wannabe_authorization':\n (WannabeAuthorization,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'wannabe_authorization': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n 
api_client=api_client,\n callable=__post_authorize\n )\n\n def __post_req_token_request(\n self,\n **kwargs\n ):\n \"\"\"post_req_token_request # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.post_req_token_request(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n clever_flavor (str): [optional]\n oauth_consumer_key (str): [optional]\n oauth_token (str): [optional]\n oauth_signature_method (str): [optional]\n oauth_signature (str): [optional]\n oauth_timestamp (str): [optional]\n oauth_nonce (str): [optional]\n oauth_version (str): [optional]\n oauth_verifier (str): [optional]\n oauth_callback (str): [optional]\n oauth_token_secret (str): [optional]\n oauth_callback_confirmed (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.post_req_token_request = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/oauth/request_token',\n 'operation_id': 'post_req_token_request',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'clever_flavor',\n 'oauth_consumer_key',\n 'oauth_token',\n 'oauth_signature_method',\n 'oauth_signature',\n 'oauth_timestamp',\n 'oauth_nonce',\n 'oauth_version',\n 'oauth_verifier',\n 'oauth_callback',\n 'oauth_token_secret',\n 'oauth_callback_confirmed',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'clever_flavor':\n (str,),\n 'oauth_consumer_key':\n (str,),\n 'oauth_token':\n (str,),\n 'oauth_signature_method':\n (str,),\n 
'oauth_signature':\n (str,),\n 'oauth_timestamp':\n (str,),\n 'oauth_nonce':\n (str,),\n 'oauth_version':\n (str,),\n 'oauth_verifier':\n (str,),\n 'oauth_callback':\n (str,),\n 'oauth_token_secret':\n (str,),\n 'oauth_callback_confirmed':\n (str,),\n },\n 'attribute_map': {\n 'clever_flavor': 'clever_flavor',\n 'oauth_consumer_key': 'oauth_consumer_key',\n 'oauth_token': 'oauth_token',\n 'oauth_signature_method': 'oauth_signature_method',\n 'oauth_signature': 'oauth_signature',\n 'oauth_timestamp': 'oauth_timestamp',\n 'oauth_nonce': 'oauth_nonce',\n 'oauth_version': 'oauth_version',\n 'oauth_verifier': 'oauth_verifier',\n 'oauth_callback': 'oauth_callback',\n 'oauth_token_secret': 'oauth_token_secret',\n 'oauth_callback_confirmed': 'oauth_callback_confirmed',\n },\n 'location_map': {\n 'clever_flavor': 'query',\n 'oauth_consumer_key': 'form',\n 'oauth_token': 'form',\n 'oauth_signature_method': 'form',\n 'oauth_signature': 'form',\n 'oauth_timestamp': 'form',\n 'oauth_nonce': 'form',\n 'oauth_version': 'form',\n 'oauth_verifier': 'form',\n 'oauth_callback': 'form',\n 'oauth_token_secret': 'form',\n 'oauth_callback_confirmed': 'form',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/x-www-form-urlencoded'\n ],\n 'content_type': [\n 'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__post_req_token_request\n )\n\n def __post_req_token_request_query_string(\n self,\n **kwargs\n ):\n \"\"\"post_req_token_request_query_string # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.post_req_token_request_query_string(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n clever_flavor (str): [optional]\n oauth_consumer_key (str): [optional]\n oauth_token (str): [optional]\n oauth_signature_method (str): [optional]\n oauth_signature (str): [optional]\n oauth_timestamp (str): [optional]\n oauth_nonce (str): [optional]\n oauth_version (str): [optional]\n oauth_verifier (str): [optional]\n oauth_callback (str): [optional]\n oauth_token_secret (str): [optional]\n oauth_callback_confirmed (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.post_req_token_request_query_string = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/oauth/request_token_query',\n 'operation_id': 'post_req_token_request_query_string',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'clever_flavor',\n 'oauth_consumer_key',\n 'oauth_token',\n 'oauth_signature_method',\n 'oauth_signature',\n 'oauth_timestamp',\n 'oauth_nonce',\n 'oauth_version',\n 'oauth_verifier',\n 'oauth_callback',\n 'oauth_token_secret',\n 'oauth_callback_confirmed',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'clever_flavor':\n (str,),\n 'oauth_consumer_key':\n (str,),\n 'oauth_token':\n (str,),\n 
'oauth_signature_method':\n (str,),\n 'oauth_signature':\n (str,),\n 'oauth_timestamp':\n (str,),\n 'oauth_nonce':\n (str,),\n 'oauth_version':\n (str,),\n 'oauth_verifier':\n (str,),\n 'oauth_callback':\n (str,),\n 'oauth_token_secret':\n (str,),\n 'oauth_callback_confirmed':\n (str,),\n },\n 'attribute_map': {\n 'clever_flavor': 'clever_flavor',\n 'oauth_consumer_key': 'oauth_consumer_key',\n 'oauth_token': 'oauth_token',\n 'oauth_signature_method': 'oauth_signature_method',\n 'oauth_signature': 'oauth_signature',\n 'oauth_timestamp': 'oauth_timestamp',\n 'oauth_nonce': 'oauth_nonce',\n 'oauth_version': 'oauth_version',\n 'oauth_verifier': 'oauth_verifier',\n 'oauth_callback': 'oauth_callback',\n 'oauth_token_secret': 'oauth_token_secret',\n 'oauth_callback_confirmed': 'oauth_callback_confirmed',\n },\n 'location_map': {\n 'clever_flavor': 'query',\n 'oauth_consumer_key': 'query',\n 'oauth_token': 'query',\n 'oauth_signature_method': 'query',\n 'oauth_signature': 'query',\n 'oauth_timestamp': 'query',\n 'oauth_nonce': 'query',\n 'oauth_version': 'query',\n 'oauth_verifier': 'query',\n 'oauth_callback': 'query',\n 'oauth_token_secret': 'query',\n 'oauth_callback_confirmed': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/x-www-form-urlencoded'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__post_req_token_request_query_string\n )\n" }, { "alpha_fraction": 0.8664746880531311, "alphanum_fraction": 0.8676528334617615, "avg_line_length": 65.42608642578125, "blob_id": "367877993b38e9f483d126e72036ef8b03d0d486", "content_id": "6bb4dd6f13f8f6eeba899d195b815d617262ab67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7639, "license_type": "no_license", "max_line_length": 97, "num_lines": 115, "path": "/openapi_client/models/__init__.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# flake8: noqa\n\n# 
import all models into this package\n# if you have many models here with many references from one model to another this may\n# raise a RecursionError\n# to avoid this, import only the models that you directly need like:\n# from from openapi_client.model.pet import Pet\n# or import this package, but before doing it, use:\n# import sys\n# sys.setrecursionlimit(n)\n\nfrom openapi_client.model.addon_application_info import AddonApplicationInfo\nfrom openapi_client.model.addon_application_summary import AddonApplicationSummary\nfrom openapi_client.model.addon_environment_view import AddonEnvironmentView\nfrom openapi_client.model.addon_feature_instance_view import AddonFeatureInstanceView\nfrom openapi_client.model.addon_feature_view import AddonFeatureView\nfrom openapi_client.model.addon_plan_view import AddonPlanView\nfrom openapi_client.model.addon_provider_info_full_view import AddonProviderInfoFullView\nfrom openapi_client.model.addon_provider_info_view import AddonProviderInfoView\nfrom openapi_client.model.addon_provider_sso_data import AddonProviderSSOData\nfrom openapi_client.model.addon_sso_data import AddonSSOData\nfrom openapi_client.model.addon_summary import AddonSummary\nfrom openapi_client.model.addon_view import AddonView\nfrom openapi_client.model.application_summary import ApplicationSummary\nfrom openapi_client.model.application_view import ApplicationView\nfrom openapi_client.model.author import Author\nfrom openapi_client.model.available_instance_view import AvailableInstanceView\nfrom openapi_client.model.braintree_token import BraintreeToken\nfrom openapi_client.model.cli_token_view import CliTokenView\nfrom openapi_client.model.coupon_view import CouponView\nfrom openapi_client.model.deployment_info_view import DeploymentInfoView\nfrom openapi_client.model.deployment_view import DeploymentView\nfrom openapi_client.model.drop_count_view import DropCountView\nfrom openapi_client.model.drop_price_view import DropPriceView\nfrom 
openapi_client.model.end_of_invoice_error import EndOfInvoiceError\nfrom openapi_client.model.end_of_invoice_response import EndOfInvoiceResponse\nfrom openapi_client.model.flavor_view import FlavorView\nfrom openapi_client.model.github_commit import GithubCommit\nfrom openapi_client.model.github_webhook_payload import GithubWebhookPayload\nfrom openapi_client.model.github_webhook_pusher import GithubWebhookPusher\nfrom openapi_client.model.github_webhook_repository import GithubWebhookRepository\nfrom openapi_client.model.github_webhook_sender import GithubWebhookSender\nfrom openapi_client.model.instance_variant_view import InstanceVariantView\nfrom openapi_client.model.instance_view import InstanceView\nfrom openapi_client.model.invoice_line_rendering import InvoiceLineRendering\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom openapi_client.model.linked_addon_environment_view import LinkedAddonEnvironmentView\nfrom openapi_client.model.mfa_recovery_code import MFARecoveryCode\nfrom openapi_client.model.message import Message\nfrom openapi_client.model.namespace_view import NamespaceView\nfrom openapi_client.model.next_in_payment_flow import NextInPaymentFlow\nfrom openapi_client.model.o_auth1_access_token_view import OAuth1AccessTokenView\nfrom openapi_client.model.o_auth1_consumer_summary import OAuth1ConsumerSummary\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom openapi_client.model.o_auth_application_view import OAuthApplicationView\nfrom openapi_client.model.o_auth_rights_view import OAuthRightsView\nfrom openapi_client.model.o_auth_transaction_view import OAuthTransactionView\nfrom openapi_client.model.organisation_member_user_view import OrganisationMemberUserView\nfrom openapi_client.model.organisation_member_view import OrganisationMemberView\nfrom openapi_client.model.organisation_summary import OrganisationSummary\nfrom openapi_client.model.organisation_view import OrganisationView\nfrom 
openapi_client.model.owner_view import OwnerView\nfrom openapi_client.model.package_view import PackageView\nfrom openapi_client.model.payment_data import PaymentData\nfrom openapi_client.model.payment_info_view import PaymentInfoView\nfrom openapi_client.model.payment_method_view import PaymentMethodView\nfrom openapi_client.model.payment_provider_selection import PaymentProviderSelection\nfrom openapi_client.model.payment_provider_view import PaymentProviderView\nfrom openapi_client.model.price_with_tax_info import PriceWithTaxInfo\nfrom openapi_client.model.provider_summary import ProviderSummary\nfrom openapi_client.model.recurrent_payment_view import RecurrentPaymentView\nfrom openapi_client.model.secret_view import SecretView\nfrom openapi_client.model.setup_intent_view import SetupIntentView\nfrom openapi_client.model.ssh_key_view import SshKeyView\nfrom openapi_client.model.stripe_confirmation_error_message import StripeConfirmationErrorMessage\nfrom openapi_client.model.summary import Summary\nfrom openapi_client.model.super_nova_flavor import SuperNovaFlavor\nfrom openapi_client.model.super_nova_instance_map import SuperNovaInstanceMap\nfrom openapi_client.model.super_nova_instance_view import SuperNovaInstanceView\nfrom openapi_client.model.tcp_redir_view import TcpRedirView\nfrom openapi_client.model.url_view import UrlView\nfrom openapi_client.model.user_summary import UserSummary\nfrom openapi_client.model.user_view import UserView\nfrom openapi_client.model.value_with_unit import ValueWithUnit\nfrom openapi_client.model.vat_result import VatResult\nfrom openapi_client.model.vhost_view import VhostView\nfrom openapi_client.model.wanna_buy_package import WannaBuyPackage\nfrom openapi_client.model.wannabe_addon_billing import WannabeAddonBilling\nfrom openapi_client.model.wannabe_addon_config import WannabeAddonConfig\nfrom openapi_client.model.wannabe_addon_feature import WannabeAddonFeature\nfrom openapi_client.model.wannabe_addon_plan import 
WannabeAddonPlan\nfrom openapi_client.model.wannabe_addon_provider import WannabeAddonProvider\nfrom openapi_client.model.wannabe_addon_provider_api import WannabeAddonProviderAPI\nfrom openapi_client.model.wannabe_addon_provider_api_url import WannabeAddonProviderAPIUrl\nfrom openapi_client.model.wannabe_addon_provider_infos import WannabeAddonProviderInfos\nfrom openapi_client.model.wannabe_addon_provision import WannabeAddonProvision\nfrom openapi_client.model.wannabe_application import WannabeApplication\nfrom openapi_client.model.wannabe_authorization import WannabeAuthorization\nfrom openapi_client.model.wannabe_avatar_source import WannabeAvatarSource\nfrom openapi_client.model.wannabe_branch import WannabeBranch\nfrom openapi_client.model.wannabe_build_flavor import WannabeBuildFlavor\nfrom openapi_client.model.wannabe_inter_addon_provision import WannabeInterAddonProvision\nfrom openapi_client.model.wannabe_mfa_creds import WannabeMFACreds\nfrom openapi_client.model.wannabe_mfa_fav import WannabeMFAFav\nfrom openapi_client.model.wannabe_max_credits import WannabeMaxCredits\nfrom openapi_client.model.wannabe_member import WannabeMember\nfrom openapi_client.model.wannabe_namespace import WannabeNamespace\nfrom openapi_client.model.wannabe_o_auth1_consumer import WannabeOAuth1Consumer\nfrom openapi_client.model.wannabe_oauth_app import WannabeOauthApp\nfrom openapi_client.model.wannabe_organisation import WannabeOrganisation\nfrom openapi_client.model.wannabe_password import WannabePassword\nfrom openapi_client.model.wannabe_plan_change import WannabePlanChange\nfrom openapi_client.model.wannabe_user import WannabeUser\nfrom openapi_client.model.wannabe_value import WannabeValue\nfrom openapi_client.model.zone_view import ZoneView\n" }, { "alpha_fraction": 0.7408257126808167, "alphanum_fraction": 0.747706413269043, "avg_line_length": 28.727272033691406, "blob_id": "799f01c2c2ea89069f2d54a0a478c406c4d3318a", "content_id": 
"17104279217d8696c73c21f75bd5774e3e43cc18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1308, "license_type": "no_license", "max_line_length": 82, "num_lines": 44, "path": "/test/test_github_webhook_payload.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.github_commit import GithubCommit\nfrom openapi_client.model.github_webhook_pusher import GithubWebhookPusher\nfrom openapi_client.model.github_webhook_repository import GithubWebhookRepository\nfrom openapi_client.model.github_webhook_sender import GithubWebhookSender\nglobals()['GithubCommit'] = GithubCommit\nglobals()['GithubWebhookPusher'] = GithubWebhookPusher\nglobals()['GithubWebhookRepository'] = GithubWebhookRepository\nglobals()['GithubWebhookSender'] = GithubWebhookSender\nfrom openapi_client.model.github_webhook_payload import GithubWebhookPayload\n\n\nclass TestGithubWebhookPayload(unittest.TestCase):\n \"\"\"GithubWebhookPayload unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testGithubWebhookPayload(self):\n \"\"\"Test GithubWebhookPayload\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = GithubWebhookPayload() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4155939221382141, "alphanum_fraction": 0.4165467619895935, "avg_line_length": 35.01567459106445, "blob_id": "da2aaf9f3f49ce40f9993cc8a269112b82d239f6", "content_id": "66c4833c369b215c209b261d4019d5aa3d2db9c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 569876, "license_type": "no_license", "max_line_length": 140, "num_lines": 15823, "path": "/openapi_client/api/organisation_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom openapi_client.api_client import ApiClient, Endpoint as _Endpoint\nfrom openapi_client.model_utils import ( # noqa: F401\n check_allowed_values,\n check_validations,\n date,\n datetime,\n file_type,\n none_type,\n validate_and_convert_types\n)\nfrom openapi_client.model.addon_environment_view import AddonEnvironmentView\nfrom openapi_client.model.addon_feature_instance_view import AddonFeatureInstanceView\nfrom openapi_client.model.addon_feature_view import AddonFeatureView\nfrom openapi_client.model.addon_plan_view import AddonPlanView\nfrom openapi_client.model.addon_provider_info_full_view import AddonProviderInfoFullView\nfrom openapi_client.model.addon_provider_info_view import AddonProviderInfoView\nfrom openapi_client.model.addon_provider_sso_data import AddonProviderSSOData\nfrom openapi_client.model.addon_view import AddonView\nfrom openapi_client.model.application_view import ApplicationView\nfrom openapi_client.model.braintree_token import BraintreeToken\nfrom openapi_client.model.deployment_view import DeploymentView\nfrom openapi_client.model.drop_count_view import DropCountView\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom openapi_client.model.linked_addon_environment_view import LinkedAddonEnvironmentView\nfrom openapi_client.model.message import Message\nfrom openapi_client.model.namespace_view import NamespaceView\nfrom openapi_client.model.next_in_payment_flow import NextInPaymentFlow\nfrom 
openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom openapi_client.model.organisation_member_view import OrganisationMemberView\nfrom openapi_client.model.organisation_view import OrganisationView\nfrom openapi_client.model.payment_data import PaymentData\nfrom openapi_client.model.payment_info_view import PaymentInfoView\nfrom openapi_client.model.payment_method_view import PaymentMethodView\nfrom openapi_client.model.payment_provider_selection import PaymentProviderSelection\nfrom openapi_client.model.price_with_tax_info import PriceWithTaxInfo\nfrom openapi_client.model.recurrent_payment_view import RecurrentPaymentView\nfrom openapi_client.model.secret_view import SecretView\nfrom openapi_client.model.setup_intent_view import SetupIntentView\nfrom openapi_client.model.stripe_confirmation_error_message import StripeConfirmationErrorMessage\nfrom openapi_client.model.super_nova_instance_view import SuperNovaInstanceView\nfrom openapi_client.model.tcp_redir_view import TcpRedirView\nfrom openapi_client.model.url_view import UrlView\nfrom openapi_client.model.vhost_view import VhostView\nfrom openapi_client.model.wanna_buy_package import WannaBuyPackage\nfrom openapi_client.model.wannabe_addon_feature import WannabeAddonFeature\nfrom openapi_client.model.wannabe_addon_plan import WannabeAddonPlan\nfrom openapi_client.model.wannabe_addon_provider import WannabeAddonProvider\nfrom openapi_client.model.wannabe_addon_provider_infos import WannabeAddonProviderInfos\nfrom openapi_client.model.wannabe_addon_provision import WannabeAddonProvision\nfrom openapi_client.model.wannabe_application import WannabeApplication\nfrom openapi_client.model.wannabe_branch import WannabeBranch\nfrom openapi_client.model.wannabe_build_flavor import WannabeBuildFlavor\nfrom openapi_client.model.wannabe_max_credits import WannabeMaxCredits\nfrom openapi_client.model.wannabe_member import WannabeMember\nfrom openapi_client.model.wannabe_namespace import 
WannabeNamespace\nfrom openapi_client.model.wannabe_o_auth1_consumer import WannabeOAuth1Consumer\nfrom openapi_client.model.wannabe_organisation import WannabeOrganisation\nfrom openapi_client.model.wannabe_plan_change import WannabePlanChange\nfrom openapi_client.model.wannabe_value import WannabeValue\n\n\nclass OrganisationApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def __abort_addon_migration(\n self,\n id,\n addon_id,\n migration_id,\n **kwargs\n ):\n \"\"\"abort_addon_migration # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.abort_addon_migration(id, addon_id, migration_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n migration_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n kwargs['migration_id'] = \\\n migration_id\n return self.call_with_http_info(**kwargs)\n\n self.abort_addon_migration = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/migrations/{migrationId}',\n 'operation_id': 'abort_addon_migration',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n 'migration_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n 'migration_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n 'migration_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n 'migration_id': 'migrationId',\n },\n 
'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n 'migration_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__abort_addon_migration\n )\n\n def __add_addon_tag_by_orga_and_addon_id(\n self,\n id,\n addon_id,\n tag,\n **kwargs\n ):\n \"\"\"add_addon_tag_by_orga_and_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_addon_tag_by_orga_and_addon_id(id, addon_id, tag, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n tag (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n kwargs['tag'] = \\\n tag\n return self.call_with_http_info(**kwargs)\n\n self.add_addon_tag_by_orga_and_addon_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/tags/{tag}',\n 'operation_id': 'add_addon_tag_by_orga_and_addon_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n 'tag',\n ],\n 'required': [\n 'id',\n 'addon_id',\n 'tag',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n 'tag':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n 'tag': 'tag',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 
'path',\n 'tag': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__add_addon_tag_by_orga_and_addon_id\n )\n\n def __add_application_by_orga(\n self,\n id,\n wannabe_application,\n **kwargs\n ):\n \"\"\"add_application_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_application_by_orga(id, wannabe_application, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n wannabe_application (WannabeApplication):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['wannabe_application'] = \\\n wannabe_application\n return self.call_with_http_info(**kwargs)\n\n self.add_application_by_orga = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications',\n 'operation_id': 'add_application_by_orga',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'wannabe_application',\n ],\n 'required': [\n 'id',\n 'wannabe_application',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'wannabe_application':\n (WannabeApplication,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n 'wannabe_application': 'body',\n },\n 'collection_format_map': 
{\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__add_application_by_orga\n )\n\n def __add_application_dependency_by_orga_and_app_id(\n self,\n id,\n app_id,\n dependency_id,\n **kwargs\n ):\n \"\"\"add_application_dependency_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_application_dependency_by_orga_and_app_id(id, app_id, dependency_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n dependency_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['dependency_id'] = \\\n dependency_id\n return self.call_with_http_info(**kwargs)\n\n self.add_application_dependency_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/dependencies/{dependencyId}',\n 'operation_id': 'add_application_dependency_by_orga_and_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'dependency_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'dependency_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'dependency_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 
'app_id': 'appId',\n 'dependency_id': 'dependencyId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'dependency_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__add_application_dependency_by_orga_and_app_id\n )\n\n def __add_application_tag_by_orga_and_app_id(\n self,\n id,\n app_id,\n tag,\n **kwargs\n ):\n \"\"\"add_application_tag_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_application_tag_by_orga_and_app_id(id, app_id, tag, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n tag (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['tag'] = \\\n tag\n return self.call_with_http_info(**kwargs)\n\n self.add_application_tag_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/tags/{tag}',\n 'operation_id': 'add_application_tag_by_orga_and_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'tag',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'tag',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'tag':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n 'tag': 'tag',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 
'path',\n 'tag': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__add_application_tag_by_orga_and_app_id\n )\n\n def __add_beta_tester(\n self,\n id,\n provider_id,\n **kwargs\n ):\n \"\"\"add_beta_tester # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_beta_tester(id, provider_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n return self.call_with_http_info(**kwargs)\n\n self.add_beta_tester = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/testers',\n 'operation_id': 'add_beta_tester',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n ],\n 'required': [\n 'id',\n 'provider_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__add_beta_tester\n )\n\n def __add_organisation_member(\n self,\n id,\n wannabe_member,\n **kwargs\n ):\n \"\"\"add_organisation_member # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_organisation_member(id, wannabe_member, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n wannabe_member (WannabeMember):\n\n Keyword Args:\n invitation_key (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n 
'_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['wannabe_member'] = \\\n wannabe_member\n return self.call_with_http_info(**kwargs)\n\n self.add_organisation_member = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/members',\n 'operation_id': 'add_organisation_member',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'wannabe_member',\n 'invitation_key',\n ],\n 'required': [\n 'id',\n 'wannabe_member',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'wannabe_member':\n (WannabeMember,),\n 'invitation_key':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'invitation_key': 'invitationKey',\n },\n 'location_map': {\n 'id': 'path',\n 'wannabe_member': 'body',\n 'invitation_key': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__add_organisation_member\n )\n\n def __add_payment_method_by_orga(\n self,\n id,\n payment_data,\n **kwargs\n ):\n \"\"\"add_payment_method_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_payment_method_by_orga(id, payment_data, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n payment_data (PaymentData):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. 
If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n PaymentMethodView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['payment_data'] = \\\n payment_data\n return self.call_with_http_info(**kwargs)\n\n self.add_payment_method_by_orga = _Endpoint(\n settings={\n 'response_type': (PaymentMethodView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/methods',\n 'operation_id': 'add_payment_method_by_orga',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'payment_data',\n ],\n 'required': [\n 'id',\n 'payment_data',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'payment_data':\n (PaymentData,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n 'payment_data': 'body',\n },\n 
'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__add_payment_method_by_orga\n )\n\n def __add_provider_feature(\n self,\n id,\n provider_id,\n wannabe_addon_feature,\n **kwargs\n ):\n \"\"\"add_provider_feature # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_provider_feature(id, provider_id, wannabe_addon_feature, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n wannabe_addon_feature (WannabeAddonFeature):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonFeatureView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n kwargs['wannabe_addon_feature'] = \\\n wannabe_addon_feature\n return self.call_with_http_info(**kwargs)\n\n self.add_provider_feature = _Endpoint(\n settings={\n 'response_type': (AddonFeatureView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/features',\n 'operation_id': 'add_provider_feature',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n 'wannabe_addon_feature',\n ],\n 'required': [\n 'id',\n 'provider_id',\n 'wannabe_addon_feature',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n 'wannabe_addon_feature':\n (WannabeAddonFeature,),\n },\n 
'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n 'wannabe_addon_feature': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__add_provider_feature\n )\n\n def __add_provider_plan(\n self,\n id,\n provider_id,\n wannabe_addon_plan,\n **kwargs\n ):\n \"\"\"add_provider_plan # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_provider_plan(id, provider_id, wannabe_addon_plan, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n wannabe_addon_plan (WannabeAddonPlan):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonPlanView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n kwargs['wannabe_addon_plan'] = \\\n wannabe_addon_plan\n return self.call_with_http_info(**kwargs)\n\n self.add_provider_plan = _Endpoint(\n settings={\n 'response_type': (AddonPlanView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/plans',\n 'operation_id': 'add_provider_plan',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n 'wannabe_addon_plan',\n ],\n 'required': [\n 'id',\n 'provider_id',\n 'wannabe_addon_plan',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n 'wannabe_addon_plan':\n (WannabeAddonPlan,),\n },\n 'attribute_map': {\n 'id': 'id',\n 
'provider_id': 'providerId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n 'wannabe_addon_plan': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__add_provider_plan\n )\n\n def __add_tcp_redir(\n self,\n id,\n app_id,\n wannabe_namespace,\n **kwargs\n ):\n \"\"\"add_tcp_redir # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_tcp_redir(id, app_id, wannabe_namespace, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n wannabe_namespace (WannabeNamespace):\n\n Keyword Args:\n payment (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n TcpRedirView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['wannabe_namespace'] = \\\n wannabe_namespace\n return self.call_with_http_info(**kwargs)\n\n self.add_tcp_redir = _Endpoint(\n settings={\n 'response_type': (TcpRedirView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/tcpRedirs',\n 'operation_id': 'add_tcp_redir',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'wannabe_namespace',\n 'payment',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'wannabe_namespace',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'wannabe_namespace':\n (WannabeNamespace,),\n 'payment':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 
'appId',\n 'payment': 'payment',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'wannabe_namespace': 'body',\n 'payment': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__add_tcp_redir\n )\n\n def __add_vhosts_by_orga_and_app_id(\n self,\n id,\n app_id,\n domain,\n **kwargs\n ):\n \"\"\"add_vhosts_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_vhosts_by_orga_and_app_id(id, app_id, domain, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n domain (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['domain'] = \\\n domain\n return self.call_with_http_info(**kwargs)\n\n self.add_vhosts_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/vhosts/{domain}',\n 'operation_id': 'add_vhosts_by_orga_and_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'domain',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'domain',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'domain':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n 'domain': 'domain',\n },\n 'location_map': {\n 'id': 'path',\n 
'app_id': 'path',\n 'domain': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__add_vhosts_by_orga_and_app_id\n )\n\n def __buy_drops_by_orga(\n self,\n id,\n wanna_buy_package,\n **kwargs\n ):\n \"\"\"buy_drops_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.buy_drops_by_orga(id, wanna_buy_package, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n wanna_buy_package (WannaBuyPackage):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n InvoiceRendering\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['wanna_buy_package'] = \\\n wanna_buy_package\n return self.call_with_http_info(**kwargs)\n\n self.buy_drops_by_orga = _Endpoint(\n settings={\n 'response_type': (InvoiceRendering,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/billings',\n 'operation_id': 'buy_drops_by_orga',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'wanna_buy_package',\n ],\n 'required': [\n 'id',\n 'wanna_buy_package',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'wanna_buy_package':\n (WannaBuyPackage,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n 'wanna_buy_package': 'body',\n },\n 'collection_format_map': {\n }\n },\n 
headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__buy_drops_by_orga\n )\n\n def __cancel_application_deployment_for_orga(\n self,\n id,\n app_id,\n deployment_id,\n **kwargs\n ):\n \"\"\"cancel_application_deployment_for_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.cancel_application_deployment_for_orga(id, app_id, deployment_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n deployment_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['deployment_id'] = \\\n deployment_id\n return self.call_with_http_info(**kwargs)\n\n self.cancel_application_deployment_for_orga = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/deployments/{deploymentId}/instances',\n 'operation_id': 'cancel_application_deployment_for_orga',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'deployment_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'deployment_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'deployment_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 
'appId',\n 'deployment_id': 'deploymentId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'deployment_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__cancel_application_deployment_for_orga\n )\n\n def __change_plan_by_orga_and_addon_id(\n self,\n id,\n addon_id,\n wannabe_plan_change,\n **kwargs\n ):\n \"\"\"change_plan_by_orga_and_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.change_plan_by_orga_and_addon_id(id, addon_id, wannabe_plan_change, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n wannabe_plan_change (WannabePlanChange):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n kwargs['wannabe_plan_change'] = \\\n wannabe_plan_change\n return self.call_with_http_info(**kwargs)\n\n self.change_plan_by_orga_and_addon_id = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/migrations',\n 'operation_id': 'change_plan_by_orga_and_addon_id',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n 'wannabe_plan_change',\n ],\n 'required': [\n 'id',\n 'addon_id',\n 'wannabe_plan_change',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n 'wannabe_plan_change':\n (WannabePlanChange,),\n },\n 'attribute_map': {\n 'id': 'id',\n 
'addon_id': 'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n 'wannabe_plan_change': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__change_plan_by_orga_and_addon_id\n )\n\n def __choose_payment_provider_by_orga(\n self,\n id,\n bid,\n payment_provider_selection,\n **kwargs\n ):\n \"\"\"choose_payment_provider_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.choose_payment_provider_by_orga(id, bid, payment_provider_selection, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n bid (str):\n payment_provider_selection (PaymentProviderSelection):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n NextInPaymentFlow\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['bid'] = \\\n bid\n kwargs['payment_provider_selection'] = \\\n payment_provider_selection\n return self.call_with_http_info(**kwargs)\n\n self.choose_payment_provider_by_orga = _Endpoint(\n settings={\n 'response_type': (NextInPaymentFlow,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/billings/{bid}',\n 'operation_id': 'choose_payment_provider_by_orga',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'bid',\n 'payment_provider_selection',\n ],\n 'required': [\n 'id',\n 'bid',\n 'payment_provider_selection',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'bid':\n (str,),\n 'payment_provider_selection':\n (PaymentProviderSelection,),\n },\n 
'attribute_map': {\n 'id': 'id',\n 'bid': 'bid',\n },\n 'location_map': {\n 'id': 'path',\n 'bid': 'path',\n 'payment_provider_selection': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__choose_payment_provider_by_orga\n )\n\n def __create_consumer_by_orga(\n self,\n id,\n wannabe_o_auth1_consumer,\n **kwargs\n ):\n \"\"\"create_consumer_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.create_consumer_by_orga(id, wannabe_o_auth1_consumer, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n wannabe_o_auth1_consumer (WannabeOAuth1Consumer):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n OAuth1ConsumerView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['wannabe_o_auth1_consumer'] = \\\n wannabe_o_auth1_consumer\n return self.call_with_http_info(**kwargs)\n\n self.create_consumer_by_orga = _Endpoint(\n settings={\n 'response_type': (OAuth1ConsumerView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/consumers',\n 'operation_id': 'create_consumer_by_orga',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'wannabe_o_auth1_consumer',\n ],\n 'required': [\n 'id',\n 'wannabe_o_auth1_consumer',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'wannabe_o_auth1_consumer':\n (WannabeOAuth1Consumer,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n 'wannabe_o_auth1_consumer': 
'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__create_consumer_by_orga\n )\n\n def __create_organisation(\n self,\n wannabe_organisation,\n **kwargs\n ):\n \"\"\"create_organisation # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.create_organisation(wannabe_organisation, async_req=True)\n >>> result = thread.get()\n\n Args:\n wannabe_organisation (WannabeOrganisation):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n OrganisationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['wannabe_organisation'] = \\\n wannabe_organisation\n return self.call_with_http_info(**kwargs)\n\n self.create_organisation = _Endpoint(\n settings={\n 'response_type': (OrganisationView,),\n 'auth': [],\n 'endpoint_path': '/organisations',\n 'operation_id': 'create_organisation',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'wannabe_organisation',\n ],\n 'required': [\n 'wannabe_organisation',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'wannabe_organisation':\n (WannabeOrganisation,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'wannabe_organisation': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 
'application/json'\n ]\n },\n api_client=api_client,\n callable=__create_organisation\n )\n\n def __create_provider(\n self,\n id,\n wannabe_addon_provider,\n **kwargs\n ):\n \"\"\"create_provider # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.create_provider(id, wannabe_addon_provider, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n wannabe_addon_provider (WannabeAddonProvider):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonProviderInfoFullView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n 
)\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['wannabe_addon_provider'] = \\\n wannabe_addon_provider\n return self.call_with_http_info(**kwargs)\n\n self.create_provider = _Endpoint(\n settings={\n 'response_type': (AddonProviderInfoFullView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders',\n 'operation_id': 'create_provider',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'wannabe_addon_provider',\n ],\n 'required': [\n 'id',\n 'wannabe_addon_provider',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'wannabe_addon_provider':\n (WannabeAddonProvider,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n 'wannabe_addon_provider': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__create_provider\n )\n\n def __delete_addon_tag_by_orga_and_addon_id(\n self,\n id,\n addon_id,\n tag,\n **kwargs\n ):\n \"\"\"delete_addon_tag_by_orga_and_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_addon_tag_by_orga_and_addon_id(id, addon_id, tag, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n tag (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n kwargs['tag'] = \\\n tag\n return self.call_with_http_info(**kwargs)\n\n self.delete_addon_tag_by_orga_and_addon_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/tags/{tag}',\n 'operation_id': 'delete_addon_tag_by_orga_and_addon_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n 'tag',\n ],\n 'required': [\n 'id',\n 'addon_id',\n 'tag',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n 'tag':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n 'tag': 'tag',\n },\n 'location_map': {\n 'id': 'path',\n 
'addon_id': 'path',\n 'tag': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_addon_tag_by_orga_and_addon_id\n )\n\n def __delete_application_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"delete_application_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_application_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.delete_application_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}',\n 'operation_id': 'delete_application_by_orga_and_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_application_by_orga_and_app_id\n )\n\n def __delete_application_dependency_by_orga_and_app_id(\n self,\n id,\n app_id,\n dependency_id,\n **kwargs\n ):\n \"\"\"delete_application_dependency_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_application_dependency_by_orga_and_app_id(id, app_id, dependency_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n dependency_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['dependency_id'] = \\\n dependency_id\n return self.call_with_http_info(**kwargs)\n\n self.delete_application_dependency_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/dependencies/{dependencyId}',\n 'operation_id': 'delete_application_dependency_by_orga_and_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'dependency_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'dependency_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'dependency_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 
'app_id': 'appId',\n 'dependency_id': 'dependencyId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'dependency_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_application_dependency_by_orga_and_app_id\n )\n\n def __delete_application_tag_by_orga_and_app_id(\n self,\n id,\n app_id,\n tag,\n **kwargs\n ):\n \"\"\"delete_application_tag_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_application_tag_by_orga_and_app_id(id, app_id, tag, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n tag (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['tag'] = \\\n tag\n return self.call_with_http_info(**kwargs)\n\n self.delete_application_tag_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/tags/{tag}',\n 'operation_id': 'delete_application_tag_by_orga_and_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'tag',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'tag',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'tag':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n 'tag': 'tag',\n },\n 'location_map': {\n 'id': 'path',\n 
'app_id': 'path',\n 'tag': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_application_tag_by_orga_and_app_id\n )\n\n def __delete_consumer_by_orga(\n self,\n id,\n key,\n **kwargs\n ):\n \"\"\"delete_consumer_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_consumer_by_orga(id, key, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n key (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['key'] = \\\n key\n return self.call_with_http_info(**kwargs)\n\n self.delete_consumer_by_orga = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/consumers/{key}',\n 'operation_id': 'delete_consumer_by_orga',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'key',\n ],\n 'required': [\n 'id',\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'key':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'key': 'key',\n },\n 'location_map': {\n 'id': 'path',\n 'key': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n 
api_client=api_client,\n callable=__delete_consumer_by_orga\n )\n\n def __delete_organisation(\n self,\n id,\n **kwargs\n ):\n \"\"\"delete_organisation # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_organisation(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n 
self.delete_organisation = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}',\n 'operation_id': 'delete_organisation',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_organisation\n )\n\n def __delete_payment_method_by_orga(\n self,\n id,\n m_id,\n **kwargs\n ):\n \"\"\"delete_payment_method_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_payment_method_by_orga(id, m_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n m_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['m_id'] = \\\n m_id\n return self.call_with_http_info(**kwargs)\n\n self.delete_payment_method_by_orga = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/methods/{mId}',\n 'operation_id': 'delete_payment_method_by_orga',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'm_id',\n ],\n 'required': [\n 'id',\n 'm_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'm_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'm_id': 'mId',\n },\n 'location_map': {\n 'id': 'path',\n 'm_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n 
},\n api_client=api_client,\n callable=__delete_payment_method_by_orga\n )\n\n def __delete_provider(\n self,\n id,\n provider_id,\n **kwargs\n ):\n \"\"\"delete_provider # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_provider(id, provider_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n 
kwargs['provider_id'] = \\\n provider_id\n return self.call_with_http_info(**kwargs)\n\n self.delete_provider = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}',\n 'operation_id': 'delete_provider',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n ],\n 'required': [\n 'id',\n 'provider_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_provider\n )\n\n def __delete_provider_feature(\n self,\n id,\n provider_id,\n feature_id,\n **kwargs\n ):\n \"\"\"delete_provider_feature # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_provider_feature(id, provider_id, feature_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n feature_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n kwargs['feature_id'] = \\\n feature_id\n return self.call_with_http_info(**kwargs)\n\n self.delete_provider_feature = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/features/{featureId}',\n 'operation_id': 'delete_provider_feature',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n 'feature_id',\n ],\n 'required': [\n 'id',\n 'provider_id',\n 'feature_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n 'feature_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n 'feature_id': 
'featureId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n 'feature_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_provider_feature\n )\n\n def __delete_provider_plan(\n self,\n id,\n provider_id,\n plan_id,\n **kwargs\n ):\n \"\"\"delete_provider_plan # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_provider_plan(id, provider_id, plan_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n plan_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n kwargs['plan_id'] = \\\n plan_id\n return self.call_with_http_info(**kwargs)\n\n self.delete_provider_plan = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/plans/{planId}',\n 'operation_id': 'delete_provider_plan',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n 'plan_id',\n ],\n 'required': [\n 'id',\n 'provider_id',\n 'plan_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n 'plan_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n 'plan_id': 'planId',\n },\n 'location_map': 
{\n 'id': 'path',\n 'provider_id': 'path',\n 'plan_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_provider_plan\n )\n\n def __delete_provider_plan_feature(\n self,\n id,\n provider_id,\n plan_id,\n feature_name,\n **kwargs\n ):\n \"\"\"delete_provider_plan_feature # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_provider_plan_feature(id, provider_id, plan_id, feature_name, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n plan_id (str):\n feature_name (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n kwargs['plan_id'] = \\\n plan_id\n kwargs['feature_name'] = \\\n feature_name\n return self.call_with_http_info(**kwargs)\n\n self.delete_provider_plan_feature = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/plans/{planId}/features/{featureName}',\n 'operation_id': 'delete_provider_plan_feature',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n 'plan_id',\n 'feature_name',\n ],\n 'required': [\n 'id',\n 'provider_id',\n 'plan_id',\n 'feature_name',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n 'plan_id':\n 
(str,),\n 'feature_name':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n 'plan_id': 'planId',\n 'feature_name': 'featureName',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n 'plan_id': 'path',\n 'feature_name': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_provider_plan_feature\n )\n\n def __delete_purchase_order_by_orga(\n self,\n id,\n bid,\n **kwargs\n ):\n \"\"\"delete_purchase_order_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_purchase_order_by_orga(id, bid, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n bid (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['bid'] = \\\n bid\n return self.call_with_http_info(**kwargs)\n\n self.delete_purchase_order_by_orga = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/billings/{bid}',\n 'operation_id': 'delete_purchase_order_by_orga',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'bid',\n ],\n 'required': [\n 'id',\n 'bid',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'bid':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'bid': 'bid',\n },\n 'location_map': {\n 'id': 'path',\n 'bid': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n 
api_client=api_client,\n callable=__delete_purchase_order_by_orga\n )\n\n def __delete_recurrent_payment_by_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"delete_recurrent_payment_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_recurrent_payment_by_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return 
self.call_with_http_info(**kwargs)\n\n self.delete_recurrent_payment_by_orga = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/recurring',\n 'operation_id': 'delete_recurrent_payment_by_orga',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_recurrent_payment_by_orga\n )\n\n def __deprovision_addon_by_orga_and_addon_id(\n self,\n id,\n addon_id,\n **kwargs\n ):\n \"\"\"deprovision_addon_by_orga_and_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.deprovision_addon_by_orga_and_addon_id(id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.deprovision_addon_by_orga_and_addon_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}',\n 'operation_id': 'deprovision_addon_by_orga_and_addon_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 
'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__deprovision_addon_by_orga_and_addon_id\n )\n\n def __edit_application_by_orga_and_app_id(\n self,\n id,\n app_id,\n wannabe_application,\n **kwargs\n ):\n \"\"\"edit_application_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_application_by_orga_and_app_id(id, app_id, wannabe_application, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n wannabe_application (WannabeApplication):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['wannabe_application'] = \\\n wannabe_application\n return self.call_with_http_info(**kwargs)\n\n self.edit_application_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}',\n 'operation_id': 'edit_application_by_orga_and_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'wannabe_application',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'wannabe_application',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'wannabe_application':\n (WannabeApplication,),\n },\n 'attribute_map': {\n 'id': 
'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'wannabe_application': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_application_by_orga_and_app_id\n )\n\n def __edit_application_env_by_orga_and_app_id_and_env_name(\n self,\n id,\n app_id,\n env_name,\n wannabe_value,\n **kwargs\n ):\n \"\"\"edit_application_env_by_orga_and_app_id_and_env_name # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_application_env_by_orga_and_app_id_and_env_name(id, app_id, env_name, wannabe_value, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n env_name (str):\n wannabe_value (WannabeValue):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['env_name'] = \\\n env_name\n kwargs['wannabe_value'] = \\\n wannabe_value\n return self.call_with_http_info(**kwargs)\n\n self.edit_application_env_by_orga_and_app_id_and_env_name = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/env/{envName}',\n 'operation_id': 'edit_application_env_by_orga_and_app_id_and_env_name',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'env_name',\n 'wannabe_value',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'env_name',\n 'wannabe_value',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n 
(str,),\n 'env_name':\n (str,),\n 'wannabe_value':\n (WannabeValue,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n 'env_name': 'envName',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'env_name': 'path',\n 'wannabe_value': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_application_env_by_orga_and_app_id_and_env_name\n )\n\n def __edit_application_environment_by_orga_and_app_id(\n self,\n id,\n app_id,\n body,\n **kwargs\n ):\n \"\"\"edit_application_environment_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_application_environment_by_orga_and_app_id(id, app_id, body, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n body (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['body'] = \\\n body\n return self.call_with_http_info(**kwargs)\n\n self.edit_application_environment_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/env',\n 'operation_id': 'edit_application_environment_by_orga_and_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'body',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'body',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'body':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 
'path',\n 'app_id': 'path',\n 'body': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_application_environment_by_orga_and_app_id\n )\n\n def __edit_organisation(\n self,\n id,\n wannabe_organisation,\n **kwargs\n ):\n \"\"\"edit_organisation # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_organisation(id, wannabe_organisation, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n wannabe_organisation (WannabeOrganisation):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n OrganisationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['wannabe_organisation'] = \\\n wannabe_organisation\n return self.call_with_http_info(**kwargs)\n\n self.edit_organisation = _Endpoint(\n settings={\n 'response_type': (OrganisationView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}',\n 'operation_id': 'edit_organisation',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'wannabe_organisation',\n ],\n 'required': [\n 'id',\n 'wannabe_organisation',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'wannabe_organisation':\n (WannabeOrganisation,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n 'wannabe_organisation': 'body',\n },\n 'collection_format_map': {\n }\n },\n 
headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_organisation\n )\n\n def __edit_organisation_member(\n self,\n id,\n user_id,\n wannabe_member,\n **kwargs\n ):\n \"\"\"edit_organisation_member # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_organisation_member(id, user_id, wannabe_member, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n user_id (str):\n wannabe_member (WannabeMember):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['user_id'] = \\\n user_id\n kwargs['wannabe_member'] = \\\n wannabe_member\n return self.call_with_http_info(**kwargs)\n\n self.edit_organisation_member = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/members/{userId}',\n 'operation_id': 'edit_organisation_member',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'user_id',\n 'wannabe_member',\n ],\n 'required': [\n 'id',\n 'user_id',\n 'wannabe_member',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'user_id':\n (str,),\n 'wannabe_member':\n (WannabeMember,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'user_id': 'userId',\n },\n 'location_map': {\n 'id': 
'path',\n 'user_id': 'path',\n 'wannabe_member': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_organisation_member\n )\n\n def __edit_provider_plan(\n self,\n id,\n provider_id,\n plan_id,\n wannabe_addon_plan,\n **kwargs\n ):\n \"\"\"edit_provider_plan # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_provider_plan(id, provider_id, plan_id, wannabe_addon_plan, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n plan_id (str):\n wannabe_addon_plan (WannabeAddonPlan):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonPlanView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n kwargs['plan_id'] = \\\n plan_id\n kwargs['wannabe_addon_plan'] = \\\n wannabe_addon_plan\n return self.call_with_http_info(**kwargs)\n\n self.edit_provider_plan = _Endpoint(\n settings={\n 'response_type': (AddonPlanView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/plans/{planId}',\n 'operation_id': 'edit_provider_plan',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n 'plan_id',\n 'wannabe_addon_plan',\n ],\n 'required': [\n 'id',\n 'provider_id',\n 'plan_id',\n 'wannabe_addon_plan',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n 'plan_id':\n 
(str,),\n 'wannabe_addon_plan':\n (WannabeAddonPlan,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n 'plan_id': 'planId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n 'plan_id': 'path',\n 'wannabe_addon_plan': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_provider_plan\n )\n\n def __edit_provider_plan_feature(\n self,\n id,\n provider_id,\n plan_id,\n feature_name,\n addon_feature_instance_view,\n **kwargs\n ):\n \"\"\"edit_provider_plan_feature # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_provider_plan_feature(id, provider_id, plan_id, feature_name, addon_feature_instance_view, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n plan_id (str):\n feature_name (str):\n addon_feature_instance_view (AddonFeatureInstanceView):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonPlanView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n kwargs['plan_id'] = \\\n plan_id\n kwargs['feature_name'] = \\\n feature_name\n kwargs['addon_feature_instance_view'] = \\\n addon_feature_instance_view\n return self.call_with_http_info(**kwargs)\n\n self.edit_provider_plan_feature = _Endpoint(\n settings={\n 'response_type': (AddonPlanView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/plans/{planId}/features/{featureName}',\n 'operation_id': 'edit_provider_plan_feature',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n 'plan_id',\n 'feature_name',\n 'addon_feature_instance_view',\n ],\n 'required': [\n 'id',\n 'provider_id',\n 'plan_id',\n 'feature_name',\n 'addon_feature_instance_view',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n 
]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n 'plan_id':\n (str,),\n 'feature_name':\n (str,),\n 'addon_feature_instance_view':\n (AddonFeatureInstanceView,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n 'plan_id': 'planId',\n 'feature_name': 'featureName',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n 'plan_id': 'path',\n 'feature_name': 'path',\n 'addon_feature_instance_view': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_provider_plan_feature\n )\n\n def __get_addon_by_orga_and_addon_id(\n self,\n id,\n addon_id,\n **kwargs\n ):\n \"\"\"get_addon_by_orga_and_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_by_orga_and_addon_id(id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_by_orga_and_addon_id = _Endpoint(\n settings={\n 'response_type': (AddonView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}',\n 'operation_id': 'get_addon_by_orga_and_addon_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_by_orga_and_addon_id\n )\n\n def __get_addon_env_by_orga_and_addon_id(\n self,\n id,\n addon_id,\n **kwargs\n ):\n \"\"\"get_addon_env_by_orga_and_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_env_by_orga_and_addon_id(id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonEnvironmentView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n 
'_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_env_by_orga_and_addon_id = _Endpoint(\n settings={\n 'response_type': ([AddonEnvironmentView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/env',\n 'operation_id': 'get_addon_env_by_orga_and_addon_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_env_by_orga_and_addon_id\n )\n\n def __get_addon_instance(\n self,\n id,\n addon_id,\n instance_id,\n **kwargs\n ):\n \"\"\"get_addon_instance # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_instance(id, addon_id, instance_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n instance_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n kwargs['instance_id'] = \\\n instance_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_instance = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/instances/{instanceId}',\n 'operation_id': 'get_addon_instance',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n 'instance_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n 'instance_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n 'instance_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n 'instance_id': 'instanceId',\n },\n 'location_map': 
{\n 'id': 'path',\n 'addon_id': 'path',\n 'instance_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_instance\n )\n\n def __get_addon_instances(\n self,\n id,\n addon_id,\n **kwargs\n ):\n \"\"\"get_addon_instances # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_instances(id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n\n Keyword Args:\n deployment_id (str): [optional]\n with_deleted (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [SuperNovaInstanceView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_instances = _Endpoint(\n settings={\n 'response_type': ([SuperNovaInstanceView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/instances',\n 'operation_id': 'get_addon_instances',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n 'deployment_id',\n 'with_deleted',\n ],\n 'required': [\n 'id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n 'deployment_id':\n (str,),\n 'with_deleted':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n 'deployment_id': 'deploymentId',\n 
'with_deleted': 'withDeleted',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n 'deployment_id': 'query',\n 'with_deleted': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_instances\n )\n\n def __get_addon_migration(\n self,\n id,\n addon_id,\n migration_id,\n **kwargs\n ):\n \"\"\"get_addon_migration # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_migration(id, addon_id, migration_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n migration_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n kwargs['migration_id'] = \\\n migration_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_migration = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/migrations/{migrationId}',\n 'operation_id': 'get_addon_migration',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n 'migration_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n 'migration_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n 'migration_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n 'migration_id': 'migrationId',\n },\n 
'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n 'migration_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_migration\n )\n\n def __get_addon_migrations(\n self,\n id,\n addon_id,\n **kwargs\n ):\n \"\"\"get_addon_migrations # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_migrations(id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_migrations = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/migrations',\n 'operation_id': 'get_addon_migrations',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 
'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_migrations\n )\n\n def __get_addon_sso_data_for_orga(\n self,\n id,\n addon_id,\n **kwargs\n ):\n \"\"\"get_addon_sso_data_for_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_sso_data_for_orga(id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonProviderSSOData\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = 
kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_sso_data_for_orga = _Endpoint(\n settings={\n 'response_type': (AddonProviderSSOData,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/sso',\n 'operation_id': 'get_addon_sso_data_for_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_sso_data_for_orga\n )\n\n def __get_addon_tags_by_orga_id_and_addon_id(\n self,\n id,\n addon_id,\n **kwargs\n ):\n \"\"\"get_addon_tags_by_orga_id_and_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_tags_by_orga_id_and_addon_id(id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_tags_by_orga_id_and_addon_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/tags',\n 'operation_id': 'get_addon_tags_by_orga_id_and_addon_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': 
[\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_tags_by_orga_id_and_addon_id\n )\n\n def __get_addons_by_orga_id(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_addons_by_orga_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addons_by_orga_id(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = 
kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_addons_by_orga_id = _Endpoint(\n settings={\n 'response_type': ([AddonView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons',\n 'operation_id': 'get_addons_by_orga_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addons_by_orga_id\n )\n\n def __get_addons_linked_to_application_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_addons_linked_to_application_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addons_linked_to_application_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addons_linked_to_application_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([AddonView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/addons',\n 'operation_id': 'get_addons_linked_to_application_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n 
},\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addons_linked_to_application_by_orga_and_app_id\n )\n\n def __get_all_applications_by_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_all_applications_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_all_applications_by_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n instance_id (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [ApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n 
kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_all_applications_by_orga = _Endpoint(\n settings={\n 'response_type': ([ApplicationView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications',\n 'operation_id': 'get_all_applications_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'instance_id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'instance_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'instance_id': 'instanceId',\n },\n 'location_map': {\n 'id': 'path',\n 'instance_id': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_all_applications_by_orga\n )\n\n def __get_amount_for_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_amount_for_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_amount_for_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n DropCountView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_amount_for_orga = _Endpoint(\n settings={\n 'response_type': (DropCountView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/credits',\n 'operation_id': 'get_amount_for_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_amount_for_orga\n )\n\n def 
__get_application_branches_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_application_branches_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_branches_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = 
\\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_branches_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/branches',\n 'operation_id': 'get_application_branches_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_branches_by_orga_and_app_id\n )\n\n def __get_application_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_application_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}',\n 'operation_id': 'get_application_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_by_orga_and_app_id\n )\n\n def __get_application_dependencies_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_application_dependencies_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_dependencies_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [ApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n 
kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_dependencies_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([ApplicationView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/dependencies',\n 'operation_id': 'get_application_dependencies_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_dependencies_by_orga_and_app_id\n )\n\n def __get_application_dependencies_env_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_application_dependencies_env_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_dependencies_env_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. 
If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_dependencies_env_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/dependencies/env',\n 'operation_id': 'get_application_dependencies_env_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 
'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_dependencies_env_by_orga_and_app_id\n )\n\n def __get_application_dependents_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_application_dependents_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_dependents_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [ApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_dependents_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([ApplicationView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/dependents',\n 'operation_id': 'get_application_dependents_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n 
},\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_dependents_by_orga_and_app_id\n )\n\n def __get_application_deployment_for_orga(\n self,\n id,\n app_id,\n deployment_id,\n **kwargs\n ):\n \"\"\"get_application_deployment_for_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_deployment_for_orga(id, app_id, deployment_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n deployment_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['deployment_id'] = \\\n deployment_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_deployment_for_orga = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/deployments/{deploymentId}',\n 'operation_id': 'get_application_deployment_for_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'deployment_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'deployment_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'deployment_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n 'deployment_id': 
'deploymentId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'deployment_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_deployment_for_orga\n )\n\n def __get_application_deployments_for_orga(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_application_deployments_for_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_deployments_for_orga(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n limit (str): [optional]\n offset (str): [optional]\n action (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [DeploymentView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_deployments_for_orga = _Endpoint(\n settings={\n 'response_type': ([DeploymentView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/deployments',\n 'operation_id': 'get_application_deployments_for_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'limit',\n 'offset',\n 'action',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'limit':\n (str,),\n 'offset':\n (str,),\n 'action':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n 'limit': 
'limit',\n 'offset': 'offset',\n 'action': 'action',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'limit': 'query',\n 'offset': 'query',\n 'action': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_deployments_for_orga\n )\n\n def __get_application_env_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_application_env_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_env_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonEnvironmentView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_env_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([AddonEnvironmentView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/env',\n 'operation_id': 'get_application_env_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n 
headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_env_by_orga_and_app_id\n )\n\n def __get_application_instance_by_orga_and_app_and_instance_id(\n self,\n id,\n app_id,\n instance_id,\n **kwargs\n ):\n \"\"\"get_application_instance_by_orga_and_app_and_instance_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_instance_by_orga_and_app_and_instance_id(id, app_id, instance_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n instance_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['instance_id'] = \\\n instance_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_instance_by_orga_and_app_and_instance_id = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/instances/{instanceId}',\n 'operation_id': 'get_application_instance_by_orga_and_app_and_instance_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'instance_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'instance_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'instance_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 
'appId',\n 'instance_id': 'instanceId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'instance_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_instance_by_orga_and_app_and_instance_id\n )\n\n def __get_application_instances_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_application_instances_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_instances_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n deployment_id (str): [optional]\n with_deleted (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [SuperNovaInstanceView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_instances_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([SuperNovaInstanceView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/instances',\n 'operation_id': 'get_application_instances_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'deployment_id',\n 'with_deleted',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'deployment_id':\n (str,),\n 'with_deleted':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 
'appId',\n 'deployment_id': 'deploymentId',\n 'with_deleted': 'withDeleted',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'deployment_id': 'query',\n 'with_deleted': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_instances_by_orga_and_app_id\n )\n\n def __get_application_tags_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_application_tags_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_tags_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_tags_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/tags',\n 'operation_id': 'get_application_tags_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_tags_by_orga_and_app_id\n )\n\n def __get_applications_linked_to_addon_by_orga_and_addon_id(\n self,\n id,\n addon_id,\n **kwargs\n ):\n \"\"\"get_applications_linked_to_addon_by_orga_and_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_applications_linked_to_addon_by_orga_and_addon_id(id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [ApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_applications_linked_to_addon_by_orga_and_addon_id = _Endpoint(\n settings={\n 'response_type': ([ApplicationView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/applications',\n 'operation_id': 'get_applications_linked_to_addon_by_orga_and_addon_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n },\n 
'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_applications_linked_to_addon_by_orga_and_addon_id\n )\n\n def __get_consumer_by_orga(\n self,\n id,\n key,\n **kwargs\n ):\n \"\"\"get_consumer_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_consumer_by_orga(id, key, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n key (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n OAuth1ConsumerView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n 
kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['key'] = \\\n key\n return self.call_with_http_info(**kwargs)\n\n self.get_consumer_by_orga = _Endpoint(\n settings={\n 'response_type': (OAuth1ConsumerView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/consumers/{key}',\n 'operation_id': 'get_consumer_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'key',\n ],\n 'required': [\n 'id',\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'key':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'key': 'key',\n },\n 'location_map': {\n 'id': 'path',\n 'key': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_consumer_by_orga\n )\n\n def __get_consumer_secret_by_orga(\n self,\n id,\n key,\n **kwargs\n ):\n \"\"\"get_consumer_secret_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_consumer_secret_by_orga(id, key, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n key (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n SecretView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['key'] = \\\n key\n return self.call_with_http_info(**kwargs)\n\n self.get_consumer_secret_by_orga = _Endpoint(\n settings={\n 'response_type': (SecretView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/consumers/{key}/secret',\n 'operation_id': 'get_consumer_secret_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'key',\n ],\n 'required': [\n 'id',\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'key':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'key': 'key',\n },\n 'location_map': {\n 'id': 'path',\n 'key': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': 
[],\n },\n api_client=api_client,\n callable=__get_consumer_secret_by_orga\n )\n\n def __get_consumers_by_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_consumers_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_consumers_by_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [OAuth1ConsumerView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return 
self.call_with_http_info(**kwargs)\n\n self.get_consumers_by_orga = _Endpoint(\n settings={\n 'response_type': ([OAuth1ConsumerView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/consumers',\n 'operation_id': 'get_consumers_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_consumers_by_orga\n )\n\n def __get_consumptions_for_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_consumptions_for_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_consumptions_for_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n app_id (str): [optional]\n _from (str): [optional]\n to (str): [optional]\n _for (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_consumptions_for_orga = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/consumptions',\n 'operation_id': 'get_consumptions_for_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n '_from',\n 'to',\n '_for',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n '_from':\n (str,),\n 'to':\n (str,),\n '_for':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n '_from': 'from',\n 'to': 'to',\n '_for': 'for',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'query',\n '_from': 'query',\n 'to': 
'query',\n '_for': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_consumptions_for_orga\n )\n\n def __get_default_method_by_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_default_method_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_default_method_by_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n PaymentMethodView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n 
kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_default_method_by_orga = _Endpoint(\n settings={\n 'response_type': (PaymentMethodView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/methods/default',\n 'operation_id': 'get_default_method_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_default_method_by_orga\n )\n\n def __get_deployments_for_all_apps(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_deployments_for_all_apps # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_deployments_for_all_apps(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n limit (int): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_deployments_for_all_apps = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/deployments',\n 'operation_id': 'get_deployments_for_all_apps',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'limit',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'limit':\n (int,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'limit': 'limit',\n },\n 'location_map': {\n 'id': 'path',\n 'limit': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__get_deployments_for_all_apps\n )\n\n def __get_env_of_addons_linked_to_application_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_env_of_addons_linked_to_application_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_env_of_addons_linked_to_application_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [LinkedAddonEnvironmentView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n 
'_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_env_of_addons_linked_to_application_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([LinkedAddonEnvironmentView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/addons/env',\n 'operation_id': 'get_env_of_addons_linked_to_application_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_env_of_addons_linked_to_application_by_orga_and_app_id\n )\n\n def __get_exposed_env_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_exposed_env_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_exposed_env_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_exposed_env_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/exposed_env',\n 'operation_id': 'get_exposed_env_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_exposed_env_by_orga_and_app_id\n )\n\n def __get_favourite_vhost_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_favourite_vhost_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_favourite_vhost_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n VhostView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n 
'_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_favourite_vhost_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (VhostView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/vhosts/favourite',\n 'operation_id': 'get_favourite_vhost_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_favourite_vhost_by_orga_and_app_id\n )\n\n def __get_instances_for_all_apps_for_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_instances_for_all_apps_for_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_instances_for_all_apps_for_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_instances_for_all_apps_for_orga = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/instances',\n 'operation_id': 'get_instances_for_all_apps_for_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_instances_for_all_apps_for_orga\n )\n\n def 
__get_invoice_by_orga(\n self,\n id,\n bid,\n **kwargs\n ):\n \"\"\"get_invoice_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_invoice_by_orga(id, bid, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n bid (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n InvoiceRendering\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['bid'] = \\\n bid\n return self.call_with_http_info(**kwargs)\n\n 
self.get_invoice_by_orga = _Endpoint(\n settings={\n 'response_type': (InvoiceRendering,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/billings/{bid}',\n 'operation_id': 'get_invoice_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'bid',\n ],\n 'required': [\n 'id',\n 'bid',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'bid':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'bid': 'bid',\n },\n 'location_map': {\n 'id': 'path',\n 'bid': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_invoice_by_orga\n )\n\n def __get_invoices_by_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_invoices_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_invoices_by_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [InvoiceRendering]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_invoices_by_orga = _Endpoint(\n settings={\n 'response_type': ([InvoiceRendering],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/billings',\n 'operation_id': 'get_invoices_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_invoices_by_orga\n )\n\n def 
__get_monthly_invoice_by_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_monthly_invoice_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_monthly_invoice_by_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_monthly_invoice_by_orga = _Endpoint(\n 
settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/monthlyinvoice',\n 'operation_id': 'get_monthly_invoice_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_monthly_invoice_by_orga\n )\n\n def __get_namespaces(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_namespaces # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_namespaces(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [NamespaceView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_namespaces = _Endpoint(\n settings={\n 'response_type': ([NamespaceView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/namespaces',\n 'operation_id': 'get_namespaces',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_namespaces\n )\n\n def __get_new_setup_intent_by_orga(\n self,\n id,\n 
**kwargs\n ):\n \"\"\"get_new_setup_intent_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_new_setup_intent_by_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n type (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n SetupIntentView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_new_setup_intent_by_orga = _Endpoint(\n settings={\n 
'response_type': (SetupIntentView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/methods/newintent',\n 'operation_id': 'get_new_setup_intent_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'type',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'type':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'type': 'type',\n },\n 'location_map': {\n 'id': 'path',\n 'type': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_new_setup_intent_by_orga\n )\n\n def __get_organisation(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_organisation # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_organisation(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n OrganisationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_organisation = _Endpoint(\n settings={\n 'response_type': (OrganisationView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}',\n 'operation_id': 'get_organisation',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_organisation\n )\n\n def __get_organisation_members(\n self,\n id,\n **kwargs\n 
):\n \"\"\"get_organisation_members # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_organisation_members(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [OrganisationMemberView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_organisation_members = _Endpoint(\n settings={\n 'response_type': 
([OrganisationMemberView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/members',\n 'operation_id': 'get_organisation_members',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_organisation_members\n )\n\n def __get_payment_info_for_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_payment_info_for_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_payment_info_for_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n PaymentInfoView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_payment_info_for_orga = _Endpoint(\n settings={\n 'response_type': (PaymentInfoView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payment-info',\n 'operation_id': 'get_payment_info_for_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_payment_info_for_orga\n )\n\n def 
__get_pdf_invoice_by_orga(\n self,\n id,\n bid,\n **kwargs\n ):\n \"\"\"get_pdf_invoice_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_pdf_invoice_by_orga(id, bid, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n bid (str):\n\n Keyword Args:\n token (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['bid'] = \\\n bid\n return 
self.call_with_http_info(**kwargs)\n\n self.get_pdf_invoice_by_orga = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/billings/{bid}.pdf',\n 'operation_id': 'get_pdf_invoice_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'bid',\n 'token',\n ],\n 'required': [\n 'id',\n 'bid',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'bid':\n (str,),\n 'token':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'bid': 'bid',\n 'token': 'token',\n },\n 'location_map': {\n 'id': 'path',\n 'bid': 'path',\n 'token': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/pdf'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_pdf_invoice_by_orga\n )\n\n def __get_price_with_tax_by_orga(\n self,\n id,\n price,\n **kwargs\n ):\n \"\"\"get_price_with_tax_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_price_with_tax_by_orga(id, price, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n price (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n PriceWithTaxInfo\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['price'] = \\\n price\n return self.call_with_http_info(**kwargs)\n\n self.get_price_with_tax_by_orga = _Endpoint(\n settings={\n 'response_type': (PriceWithTaxInfo,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/fullprice/{price}',\n 'operation_id': 'get_price_with_tax_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'price',\n ],\n 'required': [\n 'id',\n 'price',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'price':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'price': 'price',\n },\n 'location_map': {\n 'id': 'path',\n 'price': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_price_with_tax_by_orga\n )\n\n def __get_provider_features(\n self,\n id,\n provider_id,\n **kwargs\n ):\n \"\"\"get_provider_features # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_provider_features(id, provider_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonFeatureView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n 
kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n return self.call_with_http_info(**kwargs)\n\n self.get_provider_features = _Endpoint(\n settings={\n 'response_type': ([AddonFeatureView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/features',\n 'operation_id': 'get_provider_features',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n ],\n 'required': [\n 'id',\n 'provider_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_provider_features\n )\n\n def __get_provider_info(\n self,\n id,\n provider_id,\n **kwargs\n ):\n \"\"\"get_provider_info # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_provider_info(id, provider_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonProviderInfoView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n return self.call_with_http_info(**kwargs)\n\n self.get_provider_info = _Endpoint(\n settings={\n 'response_type': (AddonProviderInfoView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}',\n 'operation_id': 'get_provider_info',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n ],\n 'required': [\n 'id',\n 'provider_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n 
headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_provider_info\n )\n\n def __get_provider_plan(\n self,\n id,\n provider_id,\n plan_id,\n **kwargs\n ):\n \"\"\"get_provider_plan # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_provider_plan(id, provider_id, plan_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n plan_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonPlanView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = 
kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n kwargs['plan_id'] = \\\n plan_id\n return self.call_with_http_info(**kwargs)\n\n self.get_provider_plan = _Endpoint(\n settings={\n 'response_type': (AddonPlanView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/plans/{planId}',\n 'operation_id': 'get_provider_plan',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n 'plan_id',\n ],\n 'required': [\n 'id',\n 'provider_id',\n 'plan_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n 'plan_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n 'plan_id': 'planId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n 'plan_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_provider_plan\n )\n\n def __get_provider_plans(\n self,\n id,\n provider_id,\n **kwargs\n ):\n \"\"\"get_provider_plans # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_provider_plans(id, provider_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. 
If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonPlanView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n return self.call_with_http_info(**kwargs)\n\n self.get_provider_plans = _Endpoint(\n settings={\n 'response_type': ([AddonPlanView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/plans',\n 'operation_id': 'get_provider_plans',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n ],\n 'required': [\n 'id',\n 'provider_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 
'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_provider_plans\n )\n\n def __get_provider_tags(\n self,\n id,\n provider_id,\n **kwargs\n ):\n \"\"\"get_provider_tags # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_provider_tags(id, provider_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = 
kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n return self.call_with_http_info(**kwargs)\n\n self.get_provider_tags = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/tags',\n 'operation_id': 'get_provider_tags',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n ],\n 'required': [\n 'id',\n 'provider_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_provider_tags\n )\n\n def __get_providers_info(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_providers_info # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_providers_info(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonProviderInfoFullView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_providers_info = _Endpoint(\n settings={\n 'response_type': ([AddonProviderInfoFullView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders',\n 'operation_id': 'get_providers_info',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_providers_info\n )\n\n def 
__get_recurrent_payment_by_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_recurrent_payment_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_recurrent_payment_by_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n RecurrentPaymentView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_recurrent_payment_by_orga 
= _Endpoint(\n settings={\n 'response_type': (RecurrentPaymentView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/recurring',\n 'operation_id': 'get_recurrent_payment_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_recurrent_payment_by_orga\n )\n\n def __get_sso_data_for_orga(\n self,\n id,\n provider_id,\n **kwargs\n ):\n \"\"\"get_sso_data_for_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_sso_data_for_orga(id, provider_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonProviderSSOData\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n return self.call_with_http_info(**kwargs)\n\n self.get_sso_data_for_orga = _Endpoint(\n settings={\n 'response_type': (AddonProviderSSOData,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}/sso',\n 'operation_id': 'get_sso_data_for_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n ],\n 'required': [\n 'id',\n 'provider_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n },\n 'collection_format_map': {\n }\n 
},\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_sso_data_for_orga\n )\n\n def __get_stripe_token_by_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_stripe_token_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_stripe_token_by_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n BraintreeToken\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n 
kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_stripe_token_by_orga = _Endpoint(\n settings={\n 'response_type': (BraintreeToken,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/tokens/stripe',\n 'operation_id': 'get_stripe_token_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_stripe_token_by_orga\n )\n\n def __get_tcp_redirs(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_tcp_redirs # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_tcp_redirs(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [TcpRedirView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_tcp_redirs = _Endpoint(\n settings={\n 'response_type': ([TcpRedirView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/tcpRedirs',\n 'operation_id': 'get_tcp_redirs',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 
'content_type': [],\n },\n api_client=api_client,\n callable=__get_tcp_redirs\n )\n\n def __get_unpaid_invoices_by_orga(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_unpaid_invoices_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_unpaid_invoices_by_orga(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [InvoiceRendering]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n 
id\n return self.call_with_http_info(**kwargs)\n\n self.get_unpaid_invoices_by_orga = _Endpoint(\n settings={\n 'response_type': ([InvoiceRendering],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/billings/unpaid',\n 'operation_id': 'get_unpaid_invoices_by_orga',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_unpaid_invoices_by_orga\n )\n\n def __get_unpaid_invoices_by_orga1(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_unpaid_invoices_by_orga1 # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_unpaid_invoices_by_orga1(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [PaymentMethodView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_unpaid_invoices_by_orga1 = _Endpoint(\n settings={\n 'response_type': ([PaymentMethodView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/methods',\n 'operation_id': 'get_unpaid_invoices_by_orga1',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_unpaid_invoices_by_orga1\n )\n\n def 
__get_user_organisationss(\n self,\n **kwargs\n ):\n \"\"\"get_user_organisationss # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_user_organisationss(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n user (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [OrganisationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_user_organisationss = _Endpoint(\n settings={\n 'response_type': 
([OrganisationView],),\n 'auth': [],\n 'endpoint_path': '/organisations',\n 'operation_id': 'get_user_organisationss',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'user',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'user':\n (str,),\n },\n 'attribute_map': {\n 'user': 'user',\n },\n 'location_map': {\n 'user': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_user_organisationss\n )\n\n def __get_vhosts_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_vhosts_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_vhosts_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [VhostView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_vhosts_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': ([VhostView],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/vhosts',\n 'operation_id': 'get_vhosts_by_orga_and_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_vhosts_by_orga_and_app_id\n )\n\n def __link_addon_to_application_by_orga_and_app_id(\n self,\n id,\n app_id,\n body,\n **kwargs\n ):\n \"\"\"link_addon_to_application_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.link_addon_to_application_by_orga_and_app_id(id, app_id, body, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n body (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n 
kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['body'] = \\\n body\n return self.call_with_http_info(**kwargs)\n\n self.link_addon_to_application_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/addons',\n 'operation_id': 'link_addon_to_application_by_orga_and_app_id',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'body',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'body',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'body':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'body': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__link_addon_to_application_by_orga_and_app_id\n )\n\n def __mark_favourite_vhost_by_orga_and_app_id(\n self,\n id,\n app_id,\n vhost_view,\n **kwargs\n ):\n \"\"\"mark_favourite_vhost_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.mark_favourite_vhost_by_orga_and_app_id(id, app_id, vhost_view, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n vhost_view (VhostView):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. 
Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n VhostView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['vhost_view'] = \\\n vhost_view\n return self.call_with_http_info(**kwargs)\n\n self.mark_favourite_vhost_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (VhostView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/vhosts/favourite',\n 'operation_id': 'mark_favourite_vhost_by_orga_and_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'vhost_view',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'vhost_view',\n ],\n 
'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'vhost_view':\n (VhostView,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'vhost_view': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__mark_favourite_vhost_by_orga_and_app_id\n )\n\n def __preorder_addon_by_orga_id(\n self,\n id,\n wannabe_addon_provision,\n **kwargs\n ):\n \"\"\"preorder_addon_by_orga_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.preorder_addon_by_orga_id(id, wannabe_addon_provision, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n wannabe_addon_provision (WannabeAddonProvision):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n InvoiceRendering\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['wannabe_addon_provision'] = \\\n wannabe_addon_provision\n return self.call_with_http_info(**kwargs)\n\n self.preorder_addon_by_orga_id = _Endpoint(\n settings={\n 'response_type': (InvoiceRendering,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/preorders',\n 'operation_id': 'preorder_addon_by_orga_id',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'wannabe_addon_provision',\n ],\n 'required': [\n 'id',\n 'wannabe_addon_provision',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'wannabe_addon_provision':\n (WannabeAddonProvision,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n 'wannabe_addon_provision': 
'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__preorder_addon_by_orga_id\n )\n\n def __preorder_migration(\n self,\n id,\n addon_id,\n **kwargs\n ):\n \"\"\"preorder_migration # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.preorder_migration(id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n\n Keyword Args:\n plan_id (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.preorder_migration = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/migrations/preorders',\n 'operation_id': 'preorder_migration',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n 'plan_id',\n ],\n 'required': [\n 'id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n 'plan_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n 'plan_id': 'planId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n 'plan_id': 'query',\n },\n 
'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__preorder_migration\n )\n\n def __provision_addon_by_orga_id(\n self,\n id,\n wannabe_addon_provision,\n **kwargs\n ):\n \"\"\"provision_addon_by_orga_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.provision_addon_by_orga_id(id, wannabe_addon_provision, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n wannabe_addon_provision (WannabeAddonProvision):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['wannabe_addon_provision'] = \\\n wannabe_addon_provision\n return self.call_with_http_info(**kwargs)\n\n self.provision_addon_by_orga_id = _Endpoint(\n settings={\n 'response_type': (AddonView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons',\n 'operation_id': 'provision_addon_by_orga_id',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'wannabe_addon_provision',\n ],\n 'required': [\n 'id',\n 'wannabe_addon_provision',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'wannabe_addon_provision':\n (WannabeAddonProvision,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n 'wannabe_addon_provision': 'body',\n },\n 
'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__provision_addon_by_orga_id\n )\n\n def __redeploy_application_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"redeploy_application_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.redeploy_application_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n commit (str): [optional]\n use_cache (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.redeploy_application_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/instances',\n 'operation_id': 'redeploy_application_by_orga_and_app_id',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'commit',\n 'use_cache',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'commit':\n (str,),\n 'use_cache':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n 'commit': 'commit',\n 'use_cache': 'useCache',\n },\n 
'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'commit': 'query',\n 'use_cache': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__redeploy_application_by_orga_and_app_id\n )\n\n def __remove_application_env_by_orga_and_app_id_and_env_name(\n self,\n id,\n app_id,\n env_name,\n **kwargs\n ):\n \"\"\"remove_application_env_by_orga_and_app_id_and_env_name # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.remove_application_env_by_orga_and_app_id_and_env_name(id, app_id, env_name, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n env_name (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['env_name'] = \\\n env_name\n return self.call_with_http_info(**kwargs)\n\n self.remove_application_env_by_orga_and_app_id_and_env_name = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/env/{envName}',\n 'operation_id': 'remove_application_env_by_orga_and_app_id_and_env_name',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'env_name',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'env_name',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'env_name':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 
'appId',\n 'env_name': 'envName',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'env_name': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__remove_application_env_by_orga_and_app_id_and_env_name\n )\n\n def __remove_organisation_member(\n self,\n id,\n user_id,\n **kwargs\n ):\n \"\"\"remove_organisation_member # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.remove_organisation_member(id, user_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n user_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['user_id'] = \\\n user_id\n return self.call_with_http_info(**kwargs)\n\n self.remove_organisation_member = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/members/{userId}',\n 'operation_id': 'remove_organisation_member',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'user_id',\n ],\n 'required': [\n 'id',\n 'user_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'user_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'user_id': 'userId',\n },\n 'location_map': {\n 'id': 'path',\n 'user_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 
'content_type': [],\n },\n api_client=api_client,\n callable=__remove_organisation_member\n )\n\n def __remove_tcp_redir(\n self,\n id,\n app_id,\n source_port,\n **kwargs\n ):\n \"\"\"remove_tcp_redir # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.remove_tcp_redir(id, app_id, source_port, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n source_port (int):\n\n Keyword Args:\n namespace (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n 
)\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['source_port'] = \\\n source_port\n return self.call_with_http_info(**kwargs)\n\n self.remove_tcp_redir = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/tcpRedirs/{sourcePort}',\n 'operation_id': 'remove_tcp_redir',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'source_port',\n 'namespace',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'source_port',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'source_port':\n (int,),\n 'namespace':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n 'source_port': 'sourcePort',\n 'namespace': 'namespace',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'source_port': 'path',\n 'namespace': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__remove_tcp_redir\n )\n\n def __remove_vhosts_by_orga_and_app_id(\n self,\n id,\n app_id,\n domain,\n **kwargs\n ):\n \"\"\"remove_vhosts_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.remove_vhosts_by_orga_and_app_id(id, app_id, domain, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n domain (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. 
Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['domain'] = \\\n domain\n return self.call_with_http_info(**kwargs)\n\n self.remove_vhosts_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/vhosts/{domain}',\n 'operation_id': 'remove_vhosts_by_orga_and_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'domain',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'domain',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 
'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'domain':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n 'domain': 'domain',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'domain': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__remove_vhosts_by_orga_and_app_id\n )\n\n def __replace_addon_tags(\n self,\n id,\n addon_id,\n **kwargs\n ):\n \"\"\"replace_addon_tags # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.replace_addon_tags(id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n\n Keyword Args:\n body (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.replace_addon_tags = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}/tags',\n 'operation_id': 'replace_addon_tags',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n 'body',\n ],\n 'required': [\n 'id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n 'body':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n 'body': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 
'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__replace_addon_tags\n )\n\n def __replace_application_tags(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"replace_application_tags # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.replace_application_tags(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n body (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n 
'_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.replace_application_tags = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/tags',\n 'operation_id': 'replace_application_tags',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'body',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'body':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'body': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__replace_application_tags\n )\n\n def __set_application_branch_by_orga_and_app_id(\n self,\n id,\n app_id,\n wannabe_branch,\n **kwargs\n ):\n \"\"\"set_application_branch_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.set_application_branch_by_orga_and_app_id(id, app_id, wannabe_branch, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n wannabe_branch (WannabeBranch):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. 
If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['wannabe_branch'] = \\\n wannabe_branch\n return self.call_with_http_info(**kwargs)\n\n self.set_application_branch_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/branch',\n 'operation_id': 'set_application_branch_by_orga_and_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'wannabe_branch',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'wannabe_branch',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'wannabe_branch':\n (WannabeBranch,),\n },\n 
'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'wannabe_branch': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__set_application_branch_by_orga_and_app_id\n )\n\n def __set_build_instance_flavor_by_orga_and_app_id(\n self,\n id,\n app_id,\n wannabe_build_flavor,\n **kwargs\n ):\n \"\"\"set_build_instance_flavor_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.set_build_instance_flavor_by_orga_and_app_id(id, app_id, wannabe_build_flavor, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n wannabe_build_flavor (WannabeBuildFlavor):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['wannabe_build_flavor'] = \\\n wannabe_build_flavor\n return self.call_with_http_info(**kwargs)\n\n self.set_build_instance_flavor_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/buildflavor',\n 'operation_id': 'set_build_instance_flavor_by_orga_and_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'wannabe_build_flavor',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'wannabe_build_flavor',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'wannabe_build_flavor':\n (WannabeBuildFlavor,),\n },\n 'attribute_map': {\n 
'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'wannabe_build_flavor': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__set_build_instance_flavor_by_orga_and_app_id\n )\n\n def __set_default_method_by_orga(\n self,\n id,\n payment_data,\n **kwargs\n ):\n \"\"\"set_default_method_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.set_default_method_by_orga(id, payment_data, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n payment_data (PaymentData):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['payment_data'] = \\\n payment_data\n return self.call_with_http_info(**kwargs)\n\n self.set_default_method_by_orga = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/methods/default',\n 'operation_id': 'set_default_method_by_orga',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'payment_data',\n ],\n 'required': [\n 'id',\n 'payment_data',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'payment_data':\n (PaymentData,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n 'payment_data': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__set_default_method_by_orga\n )\n\n def __set_max_credits_per_month_by_orga(\n self,\n id,\n wannabe_max_credits,\n **kwargs\n ):\n \"\"\"set_max_credits_per_month_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.set_max_credits_per_month_by_orga(id, wannabe_max_credits, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n wannabe_max_credits (WannabeMaxCredits):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['wannabe_max_credits'] = \\\n wannabe_max_credits\n return self.call_with_http_info(**kwargs)\n\n self.set_max_credits_per_month_by_orga = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/payments/monthlyinvoice/maxcredit',\n 'operation_id': 'set_max_credits_per_month_by_orga',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'wannabe_max_credits',\n ],\n 'required': [\n 'id',\n 'wannabe_max_credits',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'wannabe_max_credits':\n (WannabeMaxCredits,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n 'wannabe_max_credits': 'body',\n },\n 
'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__set_max_credits_per_month_by_orga\n )\n\n def __set_orga_avatar(\n self,\n id,\n **kwargs\n ):\n \"\"\"set_orga_avatar # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.set_orga_avatar(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n UrlView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n 
'_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.set_orga_avatar = _Endpoint(\n settings={\n 'response_type': (UrlView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/avatar',\n 'operation_id': 'set_orga_avatar',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__set_orga_avatar\n )\n\n def __undeploy_application_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"undeploy_application_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.undeploy_application_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.undeploy_application_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/instances',\n 'operation_id': 'undeploy_application_by_orga_and_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 
'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__undeploy_application_by_orga_and_app_id\n )\n\n def __unlink_addon_from_application_by_orga_and_app_andd_addon_id(\n self,\n id,\n app_id,\n addon_id,\n **kwargs\n ):\n \"\"\"unlink_addon_from_application_by_orga_and_app_andd_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.unlink_addon_from_application_by_orga_and_app_andd_addon_id(id, app_id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.unlink_addon_from_application_by_orga_and_app_andd_addon_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/addons/{addonId}',\n 'operation_id': 'unlink_addon_from_application_by_orga_and_app_andd_addon_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'addon_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n 
'addon_id': 'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__unlink_addon_from_application_by_orga_and_app_andd_addon_id\n )\n\n def __unmark_favourite_vhost_by_orga_and_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"unmark_favourite_vhost_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.unmark_favourite_vhost_by_orga_and_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.unmark_favourite_vhost_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/vhosts/favourite',\n 'operation_id': 'unmark_favourite_vhost_by_orga_and_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 
'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__unmark_favourite_vhost_by_orga_and_app_id\n )\n\n def __update_addon_info(\n self,\n id,\n addon_id,\n wannabe_addon_provision,\n **kwargs\n ):\n \"\"\"update_addon_info # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_addon_info(id, addon_id, wannabe_addon_provision, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n addon_id (str):\n wannabe_addon_provision (WannabeAddonProvision):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['addon_id'] = \\\n addon_id\n kwargs['wannabe_addon_provision'] = \\\n wannabe_addon_provision\n return self.call_with_http_info(**kwargs)\n\n self.update_addon_info = _Endpoint(\n settings={\n 'response_type': (AddonView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addons/{addonId}',\n 'operation_id': 'update_addon_info',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'addon_id',\n 'wannabe_addon_provision',\n ],\n 'required': [\n 'id',\n 'addon_id',\n 'wannabe_addon_provision',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'addon_id':\n (str,),\n 'wannabe_addon_provision':\n (WannabeAddonProvision,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'addon_id': 
'addonId',\n },\n 'location_map': {\n 'id': 'path',\n 'addon_id': 'path',\n 'wannabe_addon_provision': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__update_addon_info\n )\n\n def __update_consumer_by_orga(\n self,\n id,\n key,\n wannabe_o_auth1_consumer,\n **kwargs\n ):\n \"\"\"update_consumer_by_orga # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_consumer_by_orga(id, key, wannabe_o_auth1_consumer, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n key (str):\n wannabe_o_auth1_consumer (WannabeOAuth1Consumer):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n OAuth1ConsumerView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['key'] = \\\n key\n kwargs['wannabe_o_auth1_consumer'] = \\\n wannabe_o_auth1_consumer\n return self.call_with_http_info(**kwargs)\n\n self.update_consumer_by_orga = _Endpoint(\n settings={\n 'response_type': (OAuth1ConsumerView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/consumers/{key}',\n 'operation_id': 'update_consumer_by_orga',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'key',\n 'wannabe_o_auth1_consumer',\n ],\n 'required': [\n 'id',\n 'key',\n 'wannabe_o_auth1_consumer',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'key':\n (str,),\n 'wannabe_o_auth1_consumer':\n (WannabeOAuth1Consumer,),\n },\n 'attribute_map': {\n 'id': 'id',\n 
'key': 'key',\n },\n 'location_map': {\n 'id': 'path',\n 'key': 'path',\n 'wannabe_o_auth1_consumer': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__update_consumer_by_orga\n )\n\n def __update_exposed_env_by_orga_and_app_id(\n self,\n id,\n app_id,\n body,\n **kwargs\n ):\n \"\"\"update_exposed_env_by_orga_and_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_exposed_env_by_orga_and_app_id(id, app_id, body, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n body (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n kwargs['body'] = \\\n body\n return self.call_with_http_info(**kwargs)\n\n self.update_exposed_env_by_orga_and_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/applications/{appId}/exposed_env',\n 'operation_id': 'update_exposed_env_by_orga_and_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n 'body',\n ],\n 'required': [\n 'id',\n 'app_id',\n 'body',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n 'body':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n 
'body': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__update_exposed_env_by_orga_and_app_id\n )\n\n def __update_provider_infos(\n self,\n id,\n provider_id,\n wannabe_addon_provider_infos,\n **kwargs\n ):\n \"\"\"update_provider_infos # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_provider_infos(id, provider_id, wannabe_addon_provider_infos, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n provider_id (str):\n wannabe_addon_provider_infos (WannabeAddonProviderInfos):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonProviderInfoView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['provider_id'] = \\\n provider_id\n kwargs['wannabe_addon_provider_infos'] = \\\n wannabe_addon_provider_infos\n return self.call_with_http_info(**kwargs)\n\n self.update_provider_infos = _Endpoint(\n settings={\n 'response_type': (AddonProviderInfoView,),\n 'auth': [],\n 'endpoint_path': '/organisations/{id}/addonproviders/{providerId}',\n 'operation_id': 'update_provider_infos',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'provider_id',\n 'wannabe_addon_provider_infos',\n ],\n 'required': [\n 'id',\n 'provider_id',\n 'wannabe_addon_provider_infos',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'provider_id':\n (str,),\n 'wannabe_addon_provider_infos':\n 
(WannabeAddonProviderInfos,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'provider_id': 'providerId',\n },\n 'location_map': {\n 'id': 'path',\n 'provider_id': 'path',\n 'wannabe_addon_provider_infos': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__update_provider_infos\n )\n" }, { "alpha_fraction": 0.680942177772522, "alphanum_fraction": 0.6905781626701355, "avg_line_length": 22.350000381469727, "blob_id": "2e694ebfd368fe81f04398c0a01ed55c6c4d1347", "content_id": "949f43422901e0211d857683ceee87bf54444a54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 934, "license_type": "no_license", "max_line_length": 79, "num_lines": 40, "path": "/test/test_summary.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.organisation_summary import OrganisationSummary\nfrom openapi_client.model.user_summary import UserSummary\nglobals()['OrganisationSummary'] = OrganisationSummary\nglobals()['UserSummary'] = UserSummary\nfrom openapi_client.model.summary import Summary\n\n\nclass TestSummary(unittest.TestCase):\n \"\"\"Summary unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testSummary(self):\n \"\"\"Test Summary\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = Summary() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6748143434524536, "alphanum_fraction": 0.676980197429657, "avg_line_length": 
25.702478408813477, "blob_id": "d7d56a7e421adaec739c9c8efaca1c6985317310", "content_id": "9811be5e1fafc7fde8f433817519032c4523ff26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3232, "license_type": "no_license", "max_line_length": 180, "num_lines": 121, "path": "/docs/DefaultApi.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# openapi_client.DefaultApi\n\nAll URIs are relative to *https://api.clever-cloud.com/v2*\n\nMethod | HTTP request | Description\n------------- | ------------- | -------------\n[**get_blog_feed**](DefaultApi.md#get_blog_feed) | **GET** /newsfeeds/blog | \n[**get_engineering_feed**](DefaultApi.md#get_engineering_feed) | **GET** /newsfeeds/engineering | \n\n\n# **get_blog_feed**\n> get_blog_feed()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import default_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = default_api.DefaultApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_instance.get_blog_feed()\n except openapi_client.ApiException as e:\n print(\"Exception when calling DefaultApi->get_blog_feed: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: */*\n\n\n### HTTP response details\n| Status code | Description | Response headers 
|\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_engineering_feed**\n> get_engineering_feed()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import default_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = default_api.DefaultApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_instance.get_engineering_feed()\n except openapi_client.ApiException as e:\n print(\"Exception when calling DefaultApi->get_engineering_feed: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: */*\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n" }, { "alpha_fraction": 0.5735206007957458, "alphanum_fraction": 0.5743574500083923, "avg_line_length": 20.98423194885254, "blob_id": "e8f3d97f1672c915d81948c87ca22a42f112e22c", "content_id": "3eec3d47be72600f5318eb8584823519ef9f2721", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16730, "license_type": "no_license", "max_line_length": 84, "num_lines": 761, "path": "/test/test_organisation_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport unittest\n\nimport openapi_client\nfrom openapi_client.api.organisation_api import OrganisationApi # noqa: E501\n\n\nclass TestOrganisationApi(unittest.TestCase):\n \"\"\"OrganisationApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = OrganisationApi() # noqa: E501\n\n def tearDown(self):\n pass\n\n def test_abort_addon_migration(self):\n \"\"\"Test case for abort_addon_migration\n\n \"\"\"\n pass\n\n def test_add_addon_tag_by_orga_and_addon_id(self):\n \"\"\"Test case for add_addon_tag_by_orga_and_addon_id\n\n \"\"\"\n pass\n\n def test_add_application_by_orga(self):\n \"\"\"Test case for add_application_by_orga\n\n \"\"\"\n pass\n\n def test_add_application_dependency_by_orga_and_app_id(self):\n \"\"\"Test case for add_application_dependency_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_add_application_tag_by_orga_and_app_id(self):\n \"\"\"Test case for add_application_tag_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_add_beta_tester(self):\n \"\"\"Test case for add_beta_tester\n\n \"\"\"\n pass\n\n def test_add_organisation_member(self):\n \"\"\"Test case for add_organisation_member\n\n \"\"\"\n pass\n\n def test_add_payment_method_by_orga(self):\n \"\"\"Test case for add_payment_method_by_orga\n\n \"\"\"\n pass\n\n def test_add_provider_feature(self):\n \"\"\"Test case for add_provider_feature\n\n \"\"\"\n pass\n\n def test_add_provider_plan(self):\n \"\"\"Test case for add_provider_plan\n\n \"\"\"\n 
pass\n\n def test_add_tcp_redir(self):\n \"\"\"Test case for add_tcp_redir\n\n \"\"\"\n pass\n\n def test_add_vhosts_by_orga_and_app_id(self):\n \"\"\"Test case for add_vhosts_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_buy_drops_by_orga(self):\n \"\"\"Test case for buy_drops_by_orga\n\n \"\"\"\n pass\n\n def test_cancel_application_deployment_for_orga(self):\n \"\"\"Test case for cancel_application_deployment_for_orga\n\n \"\"\"\n pass\n\n def test_change_plan_by_orga_and_addon_id(self):\n \"\"\"Test case for change_plan_by_orga_and_addon_id\n\n \"\"\"\n pass\n\n def test_choose_payment_provider_by_orga(self):\n \"\"\"Test case for choose_payment_provider_by_orga\n\n \"\"\"\n pass\n\n def test_create_consumer_by_orga(self):\n \"\"\"Test case for create_consumer_by_orga\n\n \"\"\"\n pass\n\n def test_create_organisation(self):\n \"\"\"Test case for create_organisation\n\n \"\"\"\n pass\n\n def test_create_provider(self):\n \"\"\"Test case for create_provider\n\n \"\"\"\n pass\n\n def test_delete_addon_tag_by_orga_and_addon_id(self):\n \"\"\"Test case for delete_addon_tag_by_orga_and_addon_id\n\n \"\"\"\n pass\n\n def test_delete_application_by_orga_and_app_id(self):\n \"\"\"Test case for delete_application_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_delete_application_dependency_by_orga_and_app_id(self):\n \"\"\"Test case for delete_application_dependency_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_delete_application_tag_by_orga_and_app_id(self):\n \"\"\"Test case for delete_application_tag_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_delete_consumer_by_orga(self):\n \"\"\"Test case for delete_consumer_by_orga\n\n \"\"\"\n pass\n\n def test_delete_organisation(self):\n \"\"\"Test case for delete_organisation\n\n \"\"\"\n pass\n\n def test_delete_payment_method_by_orga(self):\n \"\"\"Test case for delete_payment_method_by_orga\n\n \"\"\"\n pass\n\n def test_delete_provider(self):\n \"\"\"Test case for delete_provider\n\n \"\"\"\n pass\n\n def 
test_delete_provider_feature(self):\n \"\"\"Test case for delete_provider_feature\n\n \"\"\"\n pass\n\n def test_delete_provider_plan(self):\n \"\"\"Test case for delete_provider_plan\n\n \"\"\"\n pass\n\n def test_delete_provider_plan_feature(self):\n \"\"\"Test case for delete_provider_plan_feature\n\n \"\"\"\n pass\n\n def test_delete_purchase_order_by_orga(self):\n \"\"\"Test case for delete_purchase_order_by_orga\n\n \"\"\"\n pass\n\n def test_delete_recurrent_payment_by_orga(self):\n \"\"\"Test case for delete_recurrent_payment_by_orga\n\n \"\"\"\n pass\n\n def test_deprovision_addon_by_orga_and_addon_id(self):\n \"\"\"Test case for deprovision_addon_by_orga_and_addon_id\n\n \"\"\"\n pass\n\n def test_edit_application_by_orga_and_app_id(self):\n \"\"\"Test case for edit_application_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_edit_application_env_by_orga_and_app_id_and_env_name(self):\n \"\"\"Test case for edit_application_env_by_orga_and_app_id_and_env_name\n\n \"\"\"\n pass\n\n def test_edit_application_environment_by_orga_and_app_id(self):\n \"\"\"Test case for edit_application_environment_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_edit_organisation(self):\n \"\"\"Test case for edit_organisation\n\n \"\"\"\n pass\n\n def test_edit_organisation_member(self):\n \"\"\"Test case for edit_organisation_member\n\n \"\"\"\n pass\n\n def test_edit_provider_plan(self):\n \"\"\"Test case for edit_provider_plan\n\n \"\"\"\n pass\n\n def test_edit_provider_plan_feature(self):\n \"\"\"Test case for edit_provider_plan_feature\n\n \"\"\"\n pass\n\n def test_get_addon_by_orga_and_addon_id(self):\n \"\"\"Test case for get_addon_by_orga_and_addon_id\n\n \"\"\"\n pass\n\n def test_get_addon_env_by_orga_and_addon_id(self):\n \"\"\"Test case for get_addon_env_by_orga_and_addon_id\n\n \"\"\"\n pass\n\n def test_get_addon_instance(self):\n \"\"\"Test case for get_addon_instance\n\n \"\"\"\n pass\n\n def test_get_addon_instances(self):\n \"\"\"Test case for 
get_addon_instances\n\n \"\"\"\n pass\n\n def test_get_addon_migration(self):\n \"\"\"Test case for get_addon_migration\n\n \"\"\"\n pass\n\n def test_get_addon_migrations(self):\n \"\"\"Test case for get_addon_migrations\n\n \"\"\"\n pass\n\n def test_get_addon_sso_data_for_orga(self):\n \"\"\"Test case for get_addon_sso_data_for_orga\n\n \"\"\"\n pass\n\n def test_get_addon_tags_by_orga_id_and_addon_id(self):\n \"\"\"Test case for get_addon_tags_by_orga_id_and_addon_id\n\n \"\"\"\n pass\n\n def test_get_addons_by_orga_id(self):\n \"\"\"Test case for get_addons_by_orga_id\n\n \"\"\"\n pass\n\n def test_get_addons_linked_to_application_by_orga_and_app_id(self):\n \"\"\"Test case for get_addons_linked_to_application_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_all_applications_by_orga(self):\n \"\"\"Test case for get_all_applications_by_orga\n\n \"\"\"\n pass\n\n def test_get_amount_for_orga(self):\n \"\"\"Test case for get_amount_for_orga\n\n \"\"\"\n pass\n\n def test_get_application_branches_by_orga_and_app_id(self):\n \"\"\"Test case for get_application_branches_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_application_by_orga_and_app_id(self):\n \"\"\"Test case for get_application_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_application_dependencies_by_orga_and_app_id(self):\n \"\"\"Test case for get_application_dependencies_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_application_dependencies_env_by_orga_and_app_id(self):\n \"\"\"Test case for get_application_dependencies_env_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_application_dependents_by_orga_and_app_id(self):\n \"\"\"Test case for get_application_dependents_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_application_deployment_for_orga(self):\n \"\"\"Test case for get_application_deployment_for_orga\n\n \"\"\"\n pass\n\n def test_get_application_deployments_for_orga(self):\n \"\"\"Test case for get_application_deployments_for_orga\n\n \"\"\"\n pass\n\n 
def test_get_application_env_by_orga_and_app_id(self):\n \"\"\"Test case for get_application_env_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_application_instance_by_orga_and_app_and_instance_id(self):\n \"\"\"Test case for get_application_instance_by_orga_and_app_and_instance_id\n\n \"\"\"\n pass\n\n def test_get_application_instances_by_orga_and_app_id(self):\n \"\"\"Test case for get_application_instances_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_application_tags_by_orga_and_app_id(self):\n \"\"\"Test case for get_application_tags_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_applications_linked_to_addon_by_orga_and_addon_id(self):\n \"\"\"Test case for get_applications_linked_to_addon_by_orga_and_addon_id\n\n \"\"\"\n pass\n\n def test_get_consumer_by_orga(self):\n \"\"\"Test case for get_consumer_by_orga\n\n \"\"\"\n pass\n\n def test_get_consumer_secret_by_orga(self):\n \"\"\"Test case for get_consumer_secret_by_orga\n\n \"\"\"\n pass\n\n def test_get_consumers_by_orga(self):\n \"\"\"Test case for get_consumers_by_orga\n\n \"\"\"\n pass\n\n def test_get_consumptions_for_orga(self):\n \"\"\"Test case for get_consumptions_for_orga\n\n \"\"\"\n pass\n\n def test_get_default_method_by_orga(self):\n \"\"\"Test case for get_default_method_by_orga\n\n \"\"\"\n pass\n\n def test_get_deployments_for_all_apps(self):\n \"\"\"Test case for get_deployments_for_all_apps\n\n \"\"\"\n pass\n\n def test_get_env_of_addons_linked_to_application_by_orga_and_app_id(self):\n \"\"\"Test case for get_env_of_addons_linked_to_application_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_exposed_env_by_orga_and_app_id(self):\n \"\"\"Test case for get_exposed_env_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_favourite_vhost_by_orga_and_app_id(self):\n \"\"\"Test case for get_favourite_vhost_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_get_instances_for_all_apps_for_orga(self):\n \"\"\"Test case for get_instances_for_all_apps_for_orga\n\n 
\"\"\"\n pass\n\n def test_get_invoice_by_orga(self):\n \"\"\"Test case for get_invoice_by_orga\n\n \"\"\"\n pass\n\n def test_get_invoices_by_orga(self):\n \"\"\"Test case for get_invoices_by_orga\n\n \"\"\"\n pass\n\n def test_get_monthly_invoice_by_orga(self):\n \"\"\"Test case for get_monthly_invoice_by_orga\n\n \"\"\"\n pass\n\n def test_get_namespaces(self):\n \"\"\"Test case for get_namespaces\n\n \"\"\"\n pass\n\n def test_get_new_setup_intent_by_orga(self):\n \"\"\"Test case for get_new_setup_intent_by_orga\n\n \"\"\"\n pass\n\n def test_get_organisation(self):\n \"\"\"Test case for get_organisation\n\n \"\"\"\n pass\n\n def test_get_organisation_members(self):\n \"\"\"Test case for get_organisation_members\n\n \"\"\"\n pass\n\n def test_get_payment_info_for_orga(self):\n \"\"\"Test case for get_payment_info_for_orga\n\n \"\"\"\n pass\n\n def test_get_pdf_invoice_by_orga(self):\n \"\"\"Test case for get_pdf_invoice_by_orga\n\n \"\"\"\n pass\n\n def test_get_price_with_tax_by_orga(self):\n \"\"\"Test case for get_price_with_tax_by_orga\n\n \"\"\"\n pass\n\n def test_get_provider_features(self):\n \"\"\"Test case for get_provider_features\n\n \"\"\"\n pass\n\n def test_get_provider_info(self):\n \"\"\"Test case for get_provider_info\n\n \"\"\"\n pass\n\n def test_get_provider_plan(self):\n \"\"\"Test case for get_provider_plan\n\n \"\"\"\n pass\n\n def test_get_provider_plans(self):\n \"\"\"Test case for get_provider_plans\n\n \"\"\"\n pass\n\n def test_get_provider_tags(self):\n \"\"\"Test case for get_provider_tags\n\n \"\"\"\n pass\n\n def test_get_providers_info(self):\n \"\"\"Test case for get_providers_info\n\n \"\"\"\n pass\n\n def test_get_recurrent_payment_by_orga(self):\n \"\"\"Test case for get_recurrent_payment_by_orga\n\n \"\"\"\n pass\n\n def test_get_sso_data_for_orga(self):\n \"\"\"Test case for get_sso_data_for_orga\n\n \"\"\"\n pass\n\n def test_get_stripe_token_by_orga(self):\n \"\"\"Test case for get_stripe_token_by_orga\n\n \"\"\"\n 
pass\n\n def test_get_tcp_redirs(self):\n \"\"\"Test case for get_tcp_redirs\n\n \"\"\"\n pass\n\n def test_get_unpaid_invoices_by_orga(self):\n \"\"\"Test case for get_unpaid_invoices_by_orga\n\n \"\"\"\n pass\n\n def test_get_unpaid_invoices_by_orga1(self):\n \"\"\"Test case for get_unpaid_invoices_by_orga1\n\n \"\"\"\n pass\n\n def test_get_user_organisationss(self):\n \"\"\"Test case for get_user_organisationss\n\n \"\"\"\n pass\n\n def test_get_vhosts_by_orga_and_app_id(self):\n \"\"\"Test case for get_vhosts_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_link_addon_to_application_by_orga_and_app_id(self):\n \"\"\"Test case for link_addon_to_application_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_mark_favourite_vhost_by_orga_and_app_id(self):\n \"\"\"Test case for mark_favourite_vhost_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_preorder_addon_by_orga_id(self):\n \"\"\"Test case for preorder_addon_by_orga_id\n\n \"\"\"\n pass\n\n def test_preorder_migration(self):\n \"\"\"Test case for preorder_migration\n\n \"\"\"\n pass\n\n def test_provision_addon_by_orga_id(self):\n \"\"\"Test case for provision_addon_by_orga_id\n\n \"\"\"\n pass\n\n def test_redeploy_application_by_orga_and_app_id(self):\n \"\"\"Test case for redeploy_application_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_remove_application_env_by_orga_and_app_id_and_env_name(self):\n \"\"\"Test case for remove_application_env_by_orga_and_app_id_and_env_name\n\n \"\"\"\n pass\n\n def test_remove_organisation_member(self):\n \"\"\"Test case for remove_organisation_member\n\n \"\"\"\n pass\n\n def test_remove_tcp_redir(self):\n \"\"\"Test case for remove_tcp_redir\n\n \"\"\"\n pass\n\n def test_remove_vhosts_by_orga_and_app_id(self):\n \"\"\"Test case for remove_vhosts_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_replace_addon_tags(self):\n \"\"\"Test case for replace_addon_tags\n\n \"\"\"\n pass\n\n def test_replace_application_tags(self):\n \"\"\"Test case for 
replace_application_tags\n\n \"\"\"\n pass\n\n def test_set_application_branch_by_orga_and_app_id(self):\n \"\"\"Test case for set_application_branch_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_set_build_instance_flavor_by_orga_and_app_id(self):\n \"\"\"Test case for set_build_instance_flavor_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_set_default_method_by_orga(self):\n \"\"\"Test case for set_default_method_by_orga\n\n \"\"\"\n pass\n\n def test_set_max_credits_per_month_by_orga(self):\n \"\"\"Test case for set_max_credits_per_month_by_orga\n\n \"\"\"\n pass\n\n def test_set_orga_avatar(self):\n \"\"\"Test case for set_orga_avatar\n\n \"\"\"\n pass\n\n def test_undeploy_application_by_orga_and_app_id(self):\n \"\"\"Test case for undeploy_application_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_unlink_addon_from_application_by_orga_and_app_andd_addon_id(self):\n \"\"\"Test case for unlink_addon_from_application_by_orga_and_app_andd_addon_id\n\n \"\"\"\n pass\n\n def test_unmark_favourite_vhost_by_orga_and_app_id(self):\n \"\"\"Test case for unmark_favourite_vhost_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_update_addon_info(self):\n \"\"\"Test case for update_addon_info\n\n \"\"\"\n pass\n\n def test_update_consumer_by_orga(self):\n \"\"\"Test case for update_consumer_by_orga\n\n \"\"\"\n pass\n\n def test_update_exposed_env_by_orga_and_app_id(self):\n \"\"\"Test case for update_exposed_env_by_orga_and_app_id\n\n \"\"\"\n pass\n\n def test_update_provider_infos(self):\n \"\"\"Test case for update_provider_infos\n\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5703905820846558, "alphanum_fraction": 0.5703905820846558, "avg_line_length": 51.33333206176758, "blob_id": "ea9bbc851410e78c9aeb199f08fcc61d35723d20", "content_id": "ec2f82ecc45abc52e11369a40d0bc5031c60a97f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1101, "license_type": 
"no_license", "max_line_length": 168, "num_lines": 21, "path": "/docs/InstanceView.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# InstanceView\n\n\n## Properties\nName | Type | Description | Notes\n------------ | ------------- | ------------- | -------------\n**type** | **str** | | [optional] \n**version** | **str** | | [optional] \n**variant** | [**InstanceVariantView**](InstanceVariantView.md) | | [optional] \n**min_instances** | **int** | | [optional] \n**max_instances** | **int** | | [optional] \n**max_allowed_instances** | **int** | | [optional] \n**min_flavor** | [**FlavorView**](FlavorView.md) | | [optional] \n**max_flavor** | [**FlavorView**](FlavorView.md) | | [optional] \n**flavors** | [**[FlavorView]**](FlavorView.md) | | [optional] \n**default_env** | **{str: (str,)}** | | [optional] \n**lifetime** | **str** | | [optional] \n**instance_and_version** | **str** | | [optional] \n**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]\n\n[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)\n\n\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.7015384435653687, "avg_line_length": 23.375, "blob_id": "a3286884e2685b4b6c4143c9187c8e8ff02972b1", "content_id": "0d88aa722a5b15851912e032801465a7441ccd37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 975, "license_type": "no_license", "max_line_length": 79, "num_lines": 40, "path": "/test/test_recurrent_payment_view.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n 
Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.owner_view import OwnerView\nfrom openapi_client.model.user_view import UserView\nglobals()['OwnerView'] = OwnerView\nglobals()['UserView'] = UserView\nfrom openapi_client.model.recurrent_payment_view import RecurrentPaymentView\n\n\nclass TestRecurrentPaymentView(unittest.TestCase):\n \"\"\"RecurrentPaymentView unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testRecurrentPaymentView(self):\n \"\"\"Test RecurrentPaymentView\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = RecurrentPaymentView() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5319512486457825, "alphanum_fraction": 0.5368292927742004, "avg_line_length": 17.061674118041992, "blob_id": "ed0836968acdb441d2bec56de1c7b36276a9f77a", "content_id": "15d7322b20b7fcdd6c6557467514221f2721dc4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4100, "license_type": "no_license", "max_line_length": 72, "num_lines": 227, "path": "/test/test_user_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport unittest\n\nimport openapi_client\nfrom openapi_client.api.user_api import UserApi # noqa: E501\n\n\nclass TestUserApi(unittest.TestCase):\n \"\"\"UserApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = UserApi() # noqa: E501\n\n def tearDown(self):\n pass\n\n def test_ask_for_password_reset_via_form(self):\n \"\"\"Test case for ask_for_password_reset_via_form\n\n \"\"\"\n pass\n\n def 
test_authorize_paypal_transaction(self):\n \"\"\"Test case for authorize_paypal_transaction\n\n \"\"\"\n pass\n\n def test_cancel_paypal_transaction(self):\n \"\"\"Test case for cancel_paypal_transaction\n\n \"\"\"\n pass\n\n def test_confirm_password_reset_request(self):\n \"\"\"Test case for confirm_password_reset_request\n\n \"\"\"\n pass\n\n def test_create_user_from_form(self):\n \"\"\"Test case for create_user_from_form\n\n \"\"\"\n pass\n\n def test_delete_github_link(self):\n \"\"\"Test case for delete_github_link\n\n \"\"\"\n pass\n\n def test_finsih_github_signup(self):\n \"\"\"Test case for finsih_github_signup\n\n \"\"\"\n pass\n\n def test_get_applications(self):\n \"\"\"Test case for get_applications\n\n \"\"\"\n pass\n\n def test_get_env(self):\n \"\"\"Test case for get_env\n\n \"\"\"\n pass\n\n def test_get_git_info(self):\n \"\"\"Test case for get_git_info\n\n \"\"\"\n pass\n\n def test_get_github(self):\n \"\"\"Test case for get_github\n\n \"\"\"\n pass\n\n def test_get_github_applications(self):\n \"\"\"Test case for get_github_applications\n\n \"\"\"\n pass\n\n def test_get_github_callback(self):\n \"\"\"Test case for get_github_callback\n\n \"\"\"\n pass\n\n def test_get_github_emails(self):\n \"\"\"Test case for get_github_emails\n\n \"\"\"\n pass\n\n def test_get_github_keys(self):\n \"\"\"Test case for get_github_keys\n\n \"\"\"\n pass\n\n def test_get_github_link(self):\n \"\"\"Test case for get_github_link\n\n \"\"\"\n pass\n\n def test_get_github_login(self):\n \"\"\"Test case for get_github_login\n\n \"\"\"\n pass\n\n def test_get_github_username(self):\n \"\"\"Test case for get_github_username\n\n \"\"\"\n pass\n\n def test_get_login_form(self):\n \"\"\"Test case for get_login_form\n\n \"\"\"\n pass\n\n def test_get_login_form1(self):\n \"\"\"Test case for get_login_form1\n\n \"\"\"\n pass\n\n def test_get_password_forgotten_form(self):\n \"\"\"Test case for get_password_forgotten_form\n\n \"\"\"\n pass\n\n def 
test_get_signup_form(self):\n \"\"\"Test case for get_signup_form\n\n \"\"\"\n pass\n\n def test_get_signup_form1(self):\n \"\"\"Test case for get_signup_form1\n\n \"\"\"\n pass\n\n def test_get_user_by_id(self):\n \"\"\"Test case for get_user_by_id\n\n \"\"\"\n pass\n\n def test_github_signup(self):\n \"\"\"Test case for github_signup\n\n \"\"\"\n pass\n\n def test_login(self):\n \"\"\"Test case for login\n\n \"\"\"\n pass\n\n def test_login1(self):\n \"\"\"Test case for login1\n\n \"\"\"\n pass\n\n def test_mfa_login(self):\n \"\"\"Test case for mfa_login\n\n \"\"\"\n pass\n\n def test_mfa_login1(self):\n \"\"\"Test case for mfa_login1\n\n \"\"\"\n pass\n\n def test_post_github_redeploy(self):\n \"\"\"Test case for post_github_redeploy\n\n \"\"\"\n pass\n\n def test_reset_password_forgotten(self):\n \"\"\"Test case for reset_password_forgotten\n\n \"\"\"\n pass\n\n def test_update_env(self):\n \"\"\"Test case for update_env\n\n \"\"\"\n pass\n\n def test_update_invoice(self):\n \"\"\"Test case for update_invoice\n\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.7020584940910339, "avg_line_length": 23.289474487304688, "blob_id": "c7817d3094f7db4e27642ccec0be6e9f9b3d7b3b", "content_id": "565ef165d2443aecd44a9ac78949654c48c64217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 923, "license_type": "no_license", "max_line_length": 79, "num_lines": 38, "path": "/test/test_super_nova_instance_view.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.super_nova_flavor import 
SuperNovaFlavor\nglobals()['SuperNovaFlavor'] = SuperNovaFlavor\nfrom openapi_client.model.super_nova_instance_view import SuperNovaInstanceView\n\n\nclass TestSuperNovaInstanceView(unittest.TestCase):\n \"\"\"SuperNovaInstanceView unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testSuperNovaInstanceView(self):\n \"\"\"Test SuperNovaInstanceView\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = SuperNovaInstanceView() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6766334176063538, "alphanum_fraction": 0.6943521499633789, "avg_line_length": 22.763158798217773, "blob_id": "eaa615470e53c4998ee9e1df57ddaecc766996ac", "content_id": "21d143c217eb9a02a031a06ea7596a42d5d70b32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 903, "license_type": "no_license", "max_line_length": 79, "num_lines": 38, "path": "/test/test_o_auth1_consumer_view.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.o_auth_rights_view import OAuthRightsView\nglobals()['OAuthRightsView'] = OAuthRightsView\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\n\n\nclass TestOAuth1ConsumerView(unittest.TestCase):\n \"\"\"OAuth1ConsumerView unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testOAuth1ConsumerView(self):\n \"\"\"Test OAuth1ConsumerView\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = OAuth1ConsumerView() # noqa: E501\n pass\n\n\nif __name__ == 
'__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6866666674613953, "alphanum_fraction": 0.6966666579246521, "avg_line_length": 22.6842098236084, "blob_id": "e87cbf8315781efada0f89a913388bcef5d1b23c", "content_id": "32cbc90732cd678e10152a2af64c684b10495afc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 900, "license_type": "no_license", "max_line_length": 79, "num_lines": 38, "path": "/test/test_wannabe_application.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.wannabe_oauth_app import WannabeOauthApp\nglobals()['WannabeOauthApp'] = WannabeOauthApp\nfrom openapi_client.model.wannabe_application import WannabeApplication\n\n\nclass TestWannabeApplication(unittest.TestCase):\n \"\"\"WannabeApplication unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testWannabeApplication(self):\n \"\"\"Test WannabeApplication\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = WannabeApplication() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5456810593605042, "alphanum_fraction": 0.5456810593605042, "avg_line_length": 47.08000183105469, "blob_id": "26d0d6184dd6f508e4be38e4936968be0ef6aa34", "content_id": "528666a59b97a895d86f4591a16a4847847bec64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1204, "license_type": "no_license", "max_line_length": 168, "num_lines": 25, "path": "/docs/AddonProviderInfoFullView.md", "repo_name": "krezreb/openapi-client-clevercloud", 
"src_encoding": "UTF-8", "text": "# AddonProviderInfoFullView\n\n\n## Properties\nName | Type | Description | Notes\n------------ | ------------- | ------------- | -------------\n**id** | **str** | | [optional] \n**name** | **str** | | [optional] \n**website** | **str** | | [optional] \n**support_email** | **str** | | [optional] \n**google_plus_name** | **str** | | [optional] \n**twitter_name** | **str** | | [optional] \n**analytics_id** | **str** | | [optional] \n**short_desc** | **str** | | [optional] \n**long_desc** | **str** | | [optional] \n**logo_url** | **str** | | [optional] \n**status** | **str** | | [optional] \n**open_in_new_tab** | **bool** | | [optional] \n**can_upgrade** | **bool** | | [optional] \n**regions** | **[str]** | | [optional] \n**plans** | [**[AddonPlanView]**](AddonPlanView.md) | | [optional] \n**features** | [**[AddonFeatureView]**](AddonFeatureView.md) | | [optional] \n**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]\n\n[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)\n\n\n" }, { "alpha_fraction": 0.42907971143722534, "alphanum_fraction": 0.4301290214061737, "avg_line_length": 35.37067413330078, "blob_id": "fc063f0a78c8d8fe0725bd70b7aafe8a591af40f", "content_id": "13d200374bcc3b32d02e18a5141a8e7a8ba230f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441249, "license_type": "no_license", "max_line_length": 126, "num_lines": 12132, "path": "/openapi_client/api/self_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: 
https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom openapi_client.api_client import ApiClient, Endpoint as _Endpoint\nfrom openapi_client.model_utils import ( # noqa: F401\n check_allowed_values,\n check_validations,\n date,\n datetime,\n file_type,\n none_type,\n validate_and_convert_types\n)\nfrom openapi_client.model.addon_environment_view import AddonEnvironmentView\nfrom openapi_client.model.addon_sso_data import AddonSSOData\nfrom openapi_client.model.addon_view import AddonView\nfrom openapi_client.model.application_view import ApplicationView\nfrom openapi_client.model.braintree_token import BraintreeToken\nfrom openapi_client.model.cli_token_view import CliTokenView\nfrom openapi_client.model.deployment_view import DeploymentView\nfrom openapi_client.model.drop_count_view import DropCountView\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom openapi_client.model.linked_addon_environment_view import LinkedAddonEnvironmentView\nfrom openapi_client.model.mfa_recovery_code import MFARecoveryCode\nfrom openapi_client.model.message import Message\nfrom openapi_client.model.next_in_payment_flow import NextInPaymentFlow\nfrom openapi_client.model.o_auth1_access_token_view import OAuth1AccessTokenView\nfrom openapi_client.model.o_auth1_consumer_view import OAuth1ConsumerView\nfrom openapi_client.model.payment_data import PaymentData\nfrom openapi_client.model.payment_info_view import PaymentInfoView\nfrom openapi_client.model.payment_method_view import PaymentMethodView\nfrom openapi_client.model.payment_provider_selection import PaymentProviderSelection\nfrom openapi_client.model.price_with_tax_info import PriceWithTaxInfo\nfrom openapi_client.model.recurrent_payment_view import RecurrentPaymentView\nfrom openapi_client.model.secret_view import SecretView\nfrom openapi_client.model.ssh_key_view import SshKeyView\nfrom openapi_client.model.summary import Summary\nfrom 
openapi_client.model.super_nova_instance_map import SuperNovaInstanceMap\nfrom openapi_client.model.super_nova_instance_view import SuperNovaInstanceView\nfrom openapi_client.model.url_view import UrlView\nfrom openapi_client.model.user_view import UserView\nfrom openapi_client.model.vhost_view import VhostView\nfrom openapi_client.model.wanna_buy_package import WannaBuyPackage\nfrom openapi_client.model.wannabe_addon_provision import WannabeAddonProvision\nfrom openapi_client.model.wannabe_application import WannabeApplication\nfrom openapi_client.model.wannabe_branch import WannabeBranch\nfrom openapi_client.model.wannabe_build_flavor import WannabeBuildFlavor\nfrom openapi_client.model.wannabe_mfa_creds import WannabeMFACreds\nfrom openapi_client.model.wannabe_mfa_fav import WannabeMFAFav\nfrom openapi_client.model.wannabe_max_credits import WannabeMaxCredits\nfrom openapi_client.model.wannabe_o_auth1_consumer import WannabeOAuth1Consumer\nfrom openapi_client.model.wannabe_password import WannabePassword\nfrom openapi_client.model.wannabe_plan_change import WannabePlanChange\nfrom openapi_client.model.wannabe_user import WannabeUser\nfrom openapi_client.model.wannabe_value import WannabeValue\n\n\nclass SelfApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def __add_email_address(\n self,\n email,\n **kwargs\n ):\n \"\"\"add_email_address # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_email_address(email, async_req=True)\n >>> result = thread.get()\n\n Args:\n email (str):\n\n Keyword Args:\n body (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. 
Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['email'] = \\\n email\n return self.call_with_http_info(**kwargs)\n\n self.add_email_address = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/emails/{email}',\n 'operation_id': 'add_email_address',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'email',\n 'body',\n ],\n 'required': [\n 'email',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'email':\n (str,),\n 'body':\n (str,),\n },\n 
'attribute_map': {\n 'email': 'email',\n },\n 'location_map': {\n 'email': 'path',\n 'body': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__add_email_address\n )\n\n def __add_self_addon_tag_by_addon_id(\n self,\n addon_id,\n tag,\n **kwargs\n ):\n \"\"\"add_self_addon_tag_by_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_self_addon_tag_by_addon_id(addon_id, tag, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n tag (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n kwargs['tag'] = \\\n tag\n return self.call_with_http_info(**kwargs)\n\n self.add_self_addon_tag_by_addon_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/self/addons/{addonId}/tags/{tag}',\n 'operation_id': 'add_self_addon_tag_by_addon_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n 'tag',\n ],\n 'required': [\n 'addon_id',\n 'tag',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n 'tag':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n 'tag': 'tag',\n },\n 'location_map': {\n 'addon_id': 'path',\n 'tag': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__add_self_addon_tag_by_addon_id\n )\n\n def __add_self_application(\n self,\n wannabe_application,\n **kwargs\n ):\n \"\"\"add_self_application # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_self_application(wannabe_application, async_req=True)\n >>> result = thread.get()\n\n Args:\n wannabe_application (WannabeApplication):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', 
True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['wannabe_application'] = \\\n wannabe_application\n return self.call_with_http_info(**kwargs)\n\n self.add_self_application = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/self/applications',\n 'operation_id': 'add_self_application',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'wannabe_application',\n ],\n 'required': [\n 'wannabe_application',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'wannabe_application':\n (WannabeApplication,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'wannabe_application': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__add_self_application\n )\n\n def __add_self_application_dependency_by_app_id(\n self,\n app_id,\n dependency_id,\n **kwargs\n ):\n \"\"\"add_self_application_dependency_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_self_application_dependency_by_app_id(app_id, dependency_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n dependency_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['dependency_id'] = \\\n dependency_id\n return self.call_with_http_info(**kwargs)\n\n self.add_self_application_dependency_by_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/dependencies/{dependencyId}',\n 'operation_id': 'add_self_application_dependency_by_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'dependency_id',\n ],\n 'required': [\n 'app_id',\n 'dependency_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'dependency_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'dependency_id': 'dependencyId',\n },\n 'location_map': {\n 'app_id': 'path',\n 
'dependency_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__add_self_application_dependency_by_app_id\n )\n\n def __add_self_application_tag_by_app_id(\n self,\n app_id,\n tag,\n **kwargs\n ):\n \"\"\"add_self_application_tag_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_self_application_tag_by_app_id(app_id, tag, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n tag (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['tag'] = \\\n tag\n return self.call_with_http_info(**kwargs)\n\n self.add_self_application_tag_by_app_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/tags/{tag}',\n 'operation_id': 'add_self_application_tag_by_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'tag',\n ],\n 'required': [\n 'app_id',\n 'tag',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'tag':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'tag': 'tag',\n },\n 'location_map': {\n 'app_id': 'path',\n 'tag': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__add_self_application_tag_by_app_id\n )\n\n def __add_self_payment_method(\n self,\n payment_data,\n **kwargs\n ):\n \"\"\"add_self_payment_method # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_self_payment_method(payment_data, async_req=True)\n >>> result = thread.get()\n\n Args:\n payment_data (PaymentData):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n PaymentMethodView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n 
kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['payment_data'] = \\\n payment_data\n return self.call_with_http_info(**kwargs)\n\n self.add_self_payment_method = _Endpoint(\n settings={\n 'response_type': (PaymentMethodView,),\n 'auth': [],\n 'endpoint_path': '/self/payments/methods',\n 'operation_id': 'add_self_payment_method',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'payment_data',\n ],\n 'required': [\n 'payment_data',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'payment_data':\n (PaymentData,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'payment_data': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__add_self_payment_method\n )\n\n def __add_self_vhost_by_app_id(\n self,\n app_id,\n domain,\n **kwargs\n ):\n \"\"\"add_self_vhost_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_self_vhost_by_app_id(app_id, domain, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n domain (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['domain'] = \\\n domain\n return self.call_with_http_info(**kwargs)\n\n self.add_self_vhost_by_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/vhosts/{domain}',\n 'operation_id': 'add_self_vhost_by_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'domain',\n ],\n 'required': [\n 'app_id',\n 'domain',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'domain':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'domain': 'domain',\n },\n 'location_map': {\n 'app_id': 'path',\n 'domain': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__add_self_vhost_by_app_id\n )\n\n def __add_ssh_key(\n self,\n key,\n **kwargs\n ):\n \"\"\"add_ssh_key # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.add_ssh_key(key, async_req=True)\n >>> result = thread.get()\n\n Args:\n key (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['key'] = \\\n key\n return 
self.call_with_http_info(**kwargs)\n\n self.add_ssh_key = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/keys/{key}',\n 'operation_id': 'add_ssh_key',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'key',\n ],\n 'required': [\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'key':\n (str,),\n },\n 'attribute_map': {\n 'key': 'key',\n },\n 'location_map': {\n 'key': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__add_ssh_key\n )\n\n def __buy_self_drops(\n self,\n wanna_buy_package,\n **kwargs\n ):\n \"\"\"buy_self_drops # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.buy_self_drops(wanna_buy_package, async_req=True)\n >>> result = thread.get()\n\n Args:\n wanna_buy_package (WannaBuyPackage):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n InvoiceRendering\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['wanna_buy_package'] = \\\n wanna_buy_package\n return self.call_with_http_info(**kwargs)\n\n self.buy_self_drops = _Endpoint(\n settings={\n 'response_type': (InvoiceRendering,),\n 'auth': [],\n 'endpoint_path': '/self/payments/billings',\n 'operation_id': 'buy_self_drops',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'wanna_buy_package',\n ],\n 'required': [\n 'wanna_buy_package',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'wanna_buy_package':\n (WannaBuyPackage,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'wanna_buy_package': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n 
api_client=api_client,\n callable=__buy_self_drops\n )\n\n def __cancel_deploy(\n self,\n app_id,\n deployment_id,\n **kwargs\n ):\n \"\"\"cancel_deploy # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.cancel_deploy(app_id, deployment_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n deployment_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n 
kwargs['deployment_id'] = \\\n deployment_id\n return self.call_with_http_info(**kwargs)\n\n self.cancel_deploy = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/deployments/{deploymentId}/instances',\n 'operation_id': 'cancel_deploy',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'deployment_id',\n ],\n 'required': [\n 'app_id',\n 'deployment_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'deployment_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'deployment_id': 'deploymentId',\n },\n 'location_map': {\n 'app_id': 'path',\n 'deployment_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__cancel_deploy\n )\n\n def __change_self_addon_plan_by_addon_id(\n self,\n addon_id,\n wannabe_plan_change,\n **kwargs\n ):\n \"\"\"change_self_addon_plan_by_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.change_self_addon_plan_by_addon_id(addon_id, wannabe_plan_change, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n wannabe_plan_change (WannabePlanChange):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n kwargs['wannabe_plan_change'] = \\\n wannabe_plan_change\n return self.call_with_http_info(**kwargs)\n\n self.change_self_addon_plan_by_addon_id = _Endpoint(\n settings={\n 'response_type': (AddonView,),\n 'auth': [],\n 'endpoint_path': '/self/addons/{addonId}/plan',\n 'operation_id': 'change_self_addon_plan_by_addon_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n 'wannabe_plan_change',\n ],\n 'required': [\n 'addon_id',\n 'wannabe_plan_change',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n 'wannabe_plan_change':\n (WannabePlanChange,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n 
'wannabe_plan_change': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__change_self_addon_plan_by_addon_id\n )\n\n def __change_user_password(\n self,\n wannabe_password,\n **kwargs\n ):\n \"\"\"change_user_password # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.change_user_password(wannabe_password, async_req=True)\n >>> result = thread.get()\n\n Args:\n wannabe_password (WannabePassword):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['wannabe_password'] = \\\n wannabe_password\n return self.call_with_http_info(**kwargs)\n\n self.change_user_password = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/change_password',\n 'operation_id': 'change_user_password',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'wannabe_password',\n ],\n 'required': [\n 'wannabe_password',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'wannabe_password':\n (WannabePassword,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'wannabe_password': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n 
callable=__change_user_password\n )\n\n def __choose_self_payment_provider(\n self,\n bid,\n payment_provider_selection,\n **kwargs\n ):\n \"\"\"choose_self_payment_provider # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.choose_self_payment_provider(bid, payment_provider_selection, async_req=True)\n >>> result = thread.get()\n\n Args:\n bid (str):\n payment_provider_selection (PaymentProviderSelection):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n NextInPaymentFlow\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n 
kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['bid'] = \\\n bid\n kwargs['payment_provider_selection'] = \\\n payment_provider_selection\n return self.call_with_http_info(**kwargs)\n\n self.choose_self_payment_provider = _Endpoint(\n settings={\n 'response_type': (NextInPaymentFlow,),\n 'auth': [],\n 'endpoint_path': '/self/payments/billings/{bid}',\n 'operation_id': 'choose_self_payment_provider',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'bid',\n 'payment_provider_selection',\n ],\n 'required': [\n 'bid',\n 'payment_provider_selection',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'bid':\n (str,),\n 'payment_provider_selection':\n (PaymentProviderSelection,),\n },\n 'attribute_map': {\n 'bid': 'bid',\n },\n 'location_map': {\n 'bid': 'path',\n 'payment_provider_selection': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__choose_self_payment_provider\n )\n\n def __create_mfa(\n self,\n kind,\n **kwargs\n ):\n \"\"\"create_mfa # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.create_mfa(kind, async_req=True)\n >>> result = thread.get()\n\n Args:\n kind (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['kind'] = \\\n kind\n return self.call_with_http_info(**kwargs)\n\n self.create_mfa = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/mfa/{kind}',\n 'operation_id': 'create_mfa',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'kind',\n ],\n 'required': [\n 'kind',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'kind':\n (str,),\n },\n 'attribute_map': {\n 'kind': 'kind',\n },\n 'location_map': {\n 'kind': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__create_mfa\n )\n\n def __create_self_consumer(\n self,\n wannabe_o_auth1_consumer,\n **kwargs\n ):\n 
\"\"\"create_self_consumer # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.create_self_consumer(wannabe_o_auth1_consumer, async_req=True)\n >>> result = thread.get()\n\n Args:\n wannabe_o_auth1_consumer (WannabeOAuth1Consumer):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n OAuth1ConsumerView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['wannabe_o_auth1_consumer'] = \\\n wannabe_o_auth1_consumer\n return self.call_with_http_info(**kwargs)\n\n 
        # NOTE(review): this wiring appears to be produced by OpenAPI Generator
        # (Endpoint-table python client template) -- prefer regenerating from
        # the API spec over hand-editing. Each pair below is: a private closure
        # doing kwargs bookkeeping, plus a public `_Endpoint` bound on `self`.
        # POST /self/consumers -> OAuth1ConsumerView (wannabe consumer in body).
        self.create_self_consumer = _Endpoint(
            settings={
                'response_type': (OAuth1ConsumerView,),
                'auth': [],
                'endpoint_path': '/self/consumers',
                'operation_id': 'create_self_consumer',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'wannabe_o_auth1_consumer',
                ],
                'required': [
                    'wannabe_o_auth1_consumer',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'wannabe_o_auth1_consumer':
                        (WannabeOAuth1Consumer,),
                },
                'attribute_map': {
                },
                'location_map': {
                    'wannabe_o_auth1_consumer': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client,
            callable=__create_self_consumer
        )

        def __delete_mfa(
            self,
            kind,
            **kwargs
        ):
            """delete_mfa  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.delete_mfa(kind, async_req=True)
            >>> result = thread.get()

            Args:
                kind (str):

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                None
                If the method is called asynchronously, returns the request
                thread.
            """
            # Fill in the client-option defaults the _Endpoint machinery
            # expects, then forward everything to call_with_http_info.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['kind'] = \
                kind
            return self.call_with_http_info(**kwargs)

        # DELETE /self/mfa/{kind} -> None.
        self.delete_mfa = _Endpoint(
            settings={
                'response_type': None,
                'auth': [],
                'endpoint_path': '/self/mfa/{kind}',
                'operation_id': 'delete_mfa',
                'http_method': 'DELETE',
                'servers': None,
            },
            params_map={
                'all': [
                    'kind',
                ],
                'required': [
                    'kind',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'kind':
                        (str,),
                },
                'attribute_map': {
                    'kind': 'kind',
                },
                'location_map': {
                    'kind': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
            callable=__delete_mfa
        )

        def __delete_self_addon_tag_by_addon_id(
            self,
            addon_id,
            tag,
            **kwargs
        ):
            """delete_self_addon_tag_by_addon_id  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.delete_self_addon_tag_by_addon_id(addon_id, tag, async_req=True)
            >>> result = thread.get()

            Args:
                addon_id (str):
                tag (str):

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                [str]
                If the method is called asynchronously, returns the request
                thread.
            """
            # Same default-stamping pattern as the other endpoint closures.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['addon_id'] = \
                addon_id
            kwargs['tag'] = \
                tag
            return self.call_with_http_info(**kwargs)

self.delete_self_addon_tag_by_addon_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/self/addons/{addonId}/tags/{tag}',\n 'operation_id': 'delete_self_addon_tag_by_addon_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n 'tag',\n ],\n 'required': [\n 'addon_id',\n 'tag',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n 'tag':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n 'tag': 'tag',\n },\n 'location_map': {\n 'addon_id': 'path',\n 'tag': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_self_addon_tag_by_addon_id\n )\n\n def __delete_self_application_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"delete_self_application_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_self_application_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.delete_self_application_by_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}',\n 'operation_id': 'delete_self_application_by_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__delete_self_application_by_app_id\n )\n\n def __delete_self_application_dependency_by_app_id(\n self,\n app_id,\n dependency_id,\n **kwargs\n ):\n \"\"\"delete_self_application_dependency_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_self_application_dependency_by_app_id(app_id, dependency_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n dependency_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n 
kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['dependency_id'] = \\\n dependency_id\n return self.call_with_http_info(**kwargs)\n\n self.delete_self_application_dependency_by_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/dependencies/{dependencyId}',\n 'operation_id': 'delete_self_application_dependency_by_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'dependency_id',\n ],\n 'required': [\n 'app_id',\n 'dependency_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'dependency_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'dependency_id': 'dependencyId',\n },\n 'location_map': {\n 'app_id': 'path',\n 'dependency_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_self_application_dependency_by_app_id\n )\n\n def __delete_self_application_tag_app_id(\n self,\n app_id,\n tag,\n **kwargs\n ):\n \"\"\"delete_self_application_tag_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_self_application_tag_app_id(app_id, tag, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n tag (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['tag'] = \\\n tag\n return self.call_with_http_info(**kwargs)\n\n self.delete_self_application_tag_app_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/tags/{tag}',\n 'operation_id': 'delete_self_application_tag_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'tag',\n ],\n 'required': [\n 'app_id',\n 'tag',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'tag':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'tag': 'tag',\n },\n 'location_map': {\n 'app_id': 'path',\n 'tag': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_self_application_tag_app_id\n )\n\n def __delete_self_card(\n self,\n m_id,\n **kwargs\n ):\n \"\"\"delete_self_card # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_self_card(m_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n m_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['m_id'] = 
\\\n m_id\n return self.call_with_http_info(**kwargs)\n\n self.delete_self_card = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/payments/methods/{mId}',\n 'operation_id': 'delete_self_card',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'm_id',\n ],\n 'required': [\n 'm_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'm_id':\n (str,),\n },\n 'attribute_map': {\n 'm_id': 'mId',\n },\n 'location_map': {\n 'm_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_self_card\n )\n\n def __delete_self_consumer(\n self,\n key,\n **kwargs\n ):\n \"\"\"delete_self_consumer # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_self_consumer(key, async_req=True)\n >>> result = thread.get()\n\n Args:\n key (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['key'] = \\\n key\n return self.call_with_http_info(**kwargs)\n\n self.delete_self_consumer = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/consumers/{key}',\n 'operation_id': 'delete_self_consumer',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'key',\n ],\n 'required': [\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'key':\n (str,),\n },\n 'attribute_map': {\n 'key': 'key',\n },\n 'location_map': {\n 'key': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_self_consumer\n )\n\n def __delete_self_purchase_order(\n self,\n bid,\n **kwargs\n 
):\n \"\"\"delete_self_purchase_order # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_self_purchase_order(bid, async_req=True)\n >>> result = thread.get()\n\n Args:\n bid (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['bid'] = \\\n bid\n return self.call_with_http_info(**kwargs)\n\n self.delete_self_purchase_order = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 
'endpoint_path': '/self/payments/billings/{bid}',\n 'operation_id': 'delete_self_purchase_order',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'bid',\n ],\n 'required': [\n 'bid',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'bid':\n (str,),\n },\n 'attribute_map': {\n 'bid': 'bid',\n },\n 'location_map': {\n 'bid': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_self_purchase_order\n )\n\n def __delete_self_recurrent_payment(\n self,\n **kwargs\n ):\n \"\"\"delete_self_recurrent_payment # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_self_recurrent_payment(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.delete_self_recurrent_payment = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/payments/recurring',\n 'operation_id': 'delete_self_recurrent_payment',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_self_recurrent_payment\n )\n\n def __delete_user(\n self,\n **kwargs\n ):\n \"\"\"delete_user # noqa: E501\n\n This method makes a synchronous HTTP request by 
default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_user(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.delete_user = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self',\n 'operation_id': 'delete_user',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': 
[\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_user\n )\n\n def __deprovision_self_addon_by_id(\n self,\n addon_id,\n **kwargs\n ):\n \"\"\"deprovision_self_addon_by_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.deprovision_self_addon_by_id(addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.deprovision_self_addon_by_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/addons/{addonId}',\n 'operation_id': 'deprovision_self_addon_by_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n ],\n 'required': [\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__deprovision_self_addon_by_id\n )\n\n def __edit_self_application_by_app_id(\n self,\n app_id,\n wannabe_application,\n **kwargs\n ):\n \"\"\"edit_self_application_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_self_application_by_app_id(app_id, wannabe_application, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n wannabe_application (WannabeApplication):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n 
kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['wannabe_application'] = \\\n wannabe_application\n return self.call_with_http_info(**kwargs)\n\n self.edit_self_application_by_app_id = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}',\n 'operation_id': 'edit_self_application_by_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'wannabe_application',\n ],\n 'required': [\n 'app_id',\n 'wannabe_application',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'wannabe_application':\n (WannabeApplication,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n 'wannabe_application': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_self_application_by_app_id\n )\n\n def __edit_self_application_env_by_app_id(\n self,\n app_id,\n body,\n **kwargs\n ):\n \"\"\"edit_self_application_env_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_self_application_env_by_app_id(app_id, body, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n body (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['body'] = \\\n body\n return self.call_with_http_info(**kwargs)\n\n self.edit_self_application_env_by_app_id = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/env',\n 'operation_id': 'edit_self_application_env_by_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'body',\n ],\n 'required': [\n 'app_id',\n 'body',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'body':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n 'body': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_self_application_env_by_app_id\n )\n\n def __edit_self_application_env_by_app_id_and_env_name(\n self,\n app_id,\n env_name,\n wannabe_value,\n **kwargs\n ):\n \"\"\"edit_self_application_env_by_app_id_and_env_name # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_self_application_env_by_app_id_and_env_name(app_id, env_name, wannabe_value, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n env_name (str):\n wannabe_value (WannabeValue):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['env_name'] = \\\n env_name\n kwargs['wannabe_value'] = \\\n wannabe_value\n return self.call_with_http_info(**kwargs)\n\n self.edit_self_application_env_by_app_id_and_env_name = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/env/{envName}',\n 'operation_id': 'edit_self_application_env_by_app_id_and_env_name',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'env_name',\n 'wannabe_value',\n ],\n 'required': [\n 'app_id',\n 'env_name',\n 'wannabe_value',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'env_name':\n (str,),\n 'wannabe_value':\n (WannabeValue,),\n },\n 
'attribute_map': {\n 'app_id': 'appId',\n 'env_name': 'envName',\n },\n 'location_map': {\n 'app_id': 'path',\n 'env_name': 'path',\n 'wannabe_value': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_self_application_env_by_app_id_and_env_name\n )\n\n def __edit_user(\n self,\n wannabe_user,\n **kwargs\n ):\n \"\"\"edit_user # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.edit_user(wannabe_user, async_req=True)\n >>> result = thread.get()\n\n Args:\n wannabe_user (WannabeUser):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n UserView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['wannabe_user'] = \\\n wannabe_user\n return self.call_with_http_info(**kwargs)\n\n self.edit_user = _Endpoint(\n settings={\n 'response_type': (UserView,),\n 'auth': [],\n 'endpoint_path': '/self',\n 'operation_id': 'edit_user',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'wannabe_user',\n ],\n 'required': [\n 'wannabe_user',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'wannabe_user':\n (WannabeUser,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'wannabe_user': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__edit_user\n )\n\n def __fav_mfa(\n self,\n kind,\n 
wannabe_mfa_fav,\n **kwargs\n ):\n \"\"\"fav_mfa # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.fav_mfa(kind, wannabe_mfa_fav, async_req=True)\n >>> result = thread.get()\n\n Args:\n kind (str):\n wannabe_mfa_fav (WannabeMFAFav):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['kind'] = \\\n kind\n kwargs['wannabe_mfa_fav'] = \\\n wannabe_mfa_fav\n return self.call_with_http_info(**kwargs)\n\n self.fav_mfa 
= _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/mfa/{kind}',\n 'operation_id': 'fav_mfa',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'kind',\n 'wannabe_mfa_fav',\n ],\n 'required': [\n 'kind',\n 'wannabe_mfa_fav',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'kind':\n (str,),\n 'wannabe_mfa_fav':\n (WannabeMFAFav,),\n },\n 'attribute_map': {\n 'kind': 'kind',\n },\n 'location_map': {\n 'kind': 'path',\n 'wannabe_mfa_fav': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__fav_mfa\n )\n\n def __get_addon_sso_data(\n self,\n addon_id,\n **kwargs\n ):\n \"\"\"get_addon_sso_data # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_addon_sso_data(addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonSSOData\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_addon_sso_data = _Endpoint(\n settings={\n 'response_type': (AddonSSOData,),\n 'auth': [],\n 'endpoint_path': '/self/addons/{addonId}/sso',\n 'operation_id': 'get_addon_sso_data',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n ],\n 'required': [\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_addon_sso_data\n )\n\n def 
__get_application_deployment(\n self,\n app_id,\n deployment_id,\n **kwargs\n ):\n \"\"\"get_application_deployment # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_deployment(app_id, deployment_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n deployment_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['deployment_id'] = \\\n 
deployment_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_deployment = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/deployments/{deploymentId}',\n 'operation_id': 'get_application_deployment',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'deployment_id',\n ],\n 'required': [\n 'app_id',\n 'deployment_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'deployment_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'deployment_id': 'deploymentId',\n },\n 'location_map': {\n 'app_id': 'path',\n 'deployment_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_deployment\n )\n\n def __get_application_deployments(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_application_deployments # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_application_deployments(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n limit (str): [optional]\n offset (str): [optional]\n action (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [DeploymentView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_application_deployments = _Endpoint(\n settings={\n 'response_type': ([DeploymentView],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/deployments',\n 'operation_id': 'get_application_deployments',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'limit',\n 'offset',\n 'action',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'limit':\n (str,),\n 'offset':\n (str,),\n 'action':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'limit': 'limit',\n 'offset': 'offset',\n 'action': 'action',\n },\n 'location_map': {\n 'app_id': 'path',\n 'limit': 
'query',\n 'offset': 'query',\n 'action': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_application_deployments\n )\n\n def __get_backup_codes(\n self,\n kind,\n **kwargs\n ):\n \"\"\"get_backup_codes # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_backup_codes(kind, async_req=True)\n >>> result = thread.get()\n\n Args:\n kind (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [MFARecoveryCode]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n 
kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['kind'] = \\\n kind\n return self.call_with_http_info(**kwargs)\n\n self.get_backup_codes = _Endpoint(\n settings={\n 'response_type': ([MFARecoveryCode],),\n 'auth': [],\n 'endpoint_path': '/self/mfa/{kind}/backupcodes',\n 'operation_id': 'get_backup_codes',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'kind',\n ],\n 'required': [\n 'kind',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'kind':\n (str,),\n },\n 'attribute_map': {\n 'kind': 'kind',\n },\n 'location_map': {\n 'kind': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_backup_codes\n )\n\n def __get_confirmation_email(\n self,\n **kwargs\n ):\n \"\"\"get_confirmation_email # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_confirmation_email(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_confirmation_email = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/confirmation_email',\n 'operation_id': 'get_confirmation_email',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_confirmation_email\n )\n\n def __get_consumptions(\n self,\n **kwargs\n ):\n \"\"\"get_consumptions # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_consumptions(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n app_id (str): [optional]\n _from (str): [optional]\n to (str): [optional]\n _for (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_consumptions = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/self/consumptions',\n 'operation_id': 'get_consumptions',\n 'http_method': 
'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n '_from',\n 'to',\n '_for',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n '_from':\n (str,),\n 'to':\n (str,),\n '_for':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n '_from': 'from',\n 'to': 'to',\n '_for': 'for',\n },\n 'location_map': {\n 'app_id': 'query',\n '_from': 'query',\n 'to': 'query',\n '_for': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_consumptions\n )\n\n def __get_email_addresses(\n self,\n **kwargs\n ):\n \"\"\"get_email_addresses # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_email_addresses(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_email_addresses = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/self/emails',\n 'operation_id': 'get_email_addresses',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_email_addresses\n )\n\n def __get_id(\n self,\n **kwargs\n ):\n \"\"\"get_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_id(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_id = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/self/id',\n 'operation_id': 'get_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n 
},\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_id\n )\n\n def __get_self_addon_by_id(\n self,\n addon_id,\n **kwargs\n ):\n \"\"\"get_self_addon_by_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_addon_by_id(addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_addon_by_id = _Endpoint(\n settings={\n 'response_type': (AddonView,),\n 'auth': [],\n 'endpoint_path': '/self/addons/{addonId}',\n 'operation_id': 'get_self_addon_by_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n ],\n 'required': [\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_addon_by_id\n )\n\n def 
__get_self_addon_env_by_addon_id(\n self,\n addon_id,\n **kwargs\n ):\n \"\"\"get_self_addon_env_by_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_addon_env_by_addon_id(addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonEnvironmentView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n 
self.get_self_addon_env_by_addon_id = _Endpoint(\n settings={\n 'response_type': ([AddonEnvironmentView],),\n 'auth': [],\n 'endpoint_path': '/self/addons/{addonId}/env',\n 'operation_id': 'get_self_addon_env_by_addon_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n ],\n 'required': [\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_addon_env_by_addon_id\n )\n\n def __get_self_addon_tags_by_addon_id(\n self,\n addon_id,\n **kwargs\n ):\n \"\"\"get_self_addon_tags_by_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_addon_tags_by_addon_id(addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_addon_tags_by_addon_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/self/addons/{addonId}/tags',\n 'operation_id': 'get_self_addon_tags_by_addon_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n ],\n 'required': [\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__get_self_addon_tags_by_addon_id\n )\n\n def __get_self_addons(\n self,\n **kwargs\n ):\n \"\"\"get_self_addons # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_addons(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_addons = _Endpoint(\n settings={\n 'response_type': ([AddonView],),\n 
'auth': [],\n 'endpoint_path': '/self/addons',\n 'operation_id': 'get_self_addons',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_addons\n )\n\n def __get_self_addons_linked_to_application_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_addons_linked_to_application_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_addons_linked_to_application_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_addons_linked_to_application_by_app_id = _Endpoint(\n settings={\n 'response_type': ([AddonView],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/addons',\n 'operation_id': 'get_self_addons_linked_to_application_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__get_self_addons_linked_to_application_by_app_id\n )\n\n def __get_self_amount(\n self,\n **kwargs\n ):\n \"\"\"get_self_amount # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_amount(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n DropCountView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_amount = _Endpoint(\n settings={\n 'response_type': 
(DropCountView,),\n 'auth': [],\n 'endpoint_path': '/self/credits',\n 'operation_id': 'get_self_amount',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_amount\n )\n\n def __get_self_application_branches_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_application_branches_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_application_branches_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_application_branches_by_app_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/branches',\n 'operation_id': 'get_self_application_branches_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__get_self_application_branches_by_app_id\n )\n\n def __get_self_application_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_application_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_application_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n 
return self.call_with_http_info(**kwargs)\n\n self.get_self_application_by_app_id = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}',\n 'operation_id': 'get_self_application_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_application_by_app_id\n )\n\n def __get_self_application_dependencies_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_application_dependencies_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_application_dependencies_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [ApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_application_dependencies_by_app_id = _Endpoint(\n settings={\n 'response_type': ([ApplicationView],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/dependencies',\n 'operation_id': 'get_self_application_dependencies_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n 
api_client=api_client,\n callable=__get_self_application_dependencies_by_app_id\n )\n\n def __get_self_application_dependencies_env_by_app_id(\n self,\n id,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_application_dependencies_env_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_application_dependencies_env_by_app_id(id, app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', 
True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_application_dependencies_env_by_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/dependencies/env',\n 'operation_id': 'get_self_application_dependencies_env_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n 'app_id',\n ],\n 'required': [\n 'id',\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n 'app_id': 'appId',\n },\n 'location_map': {\n 'id': 'path',\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_application_dependencies_env_by_app_id\n )\n\n def __get_self_application_dependents(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_application_dependents # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_application_dependents(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [ApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_application_dependents = _Endpoint(\n settings={\n 'response_type': ([ApplicationView],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/dependents',\n 'operation_id': 'get_self_application_dependents',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__get_self_application_dependents\n )\n\n def __get_self_application_env_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_application_env_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_application_env_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [AddonEnvironmentView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = 
\\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_application_env_by_app_id = _Endpoint(\n settings={\n 'response_type': ([AddonEnvironmentView],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/env',\n 'operation_id': 'get_self_application_env_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_application_env_by_app_id\n )\n\n def __get_self_application_instance_by_app_and_instance_id(\n self,\n app_id,\n instance_id,\n **kwargs\n ):\n \"\"\"get_self_application_instance_by_app_and_instance_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_application_instance_by_app_and_instance_id(app_id, instance_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n instance_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n SuperNovaInstanceView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['instance_id'] = \\\n instance_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_application_instance_by_app_and_instance_id = _Endpoint(\n settings={\n 'response_type': (SuperNovaInstanceView,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/instances/{instanceId}',\n 'operation_id': 'get_self_application_instance_by_app_and_instance_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'instance_id',\n ],\n 'required': [\n 'app_id',\n 'instance_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'instance_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'instance_id': 'instanceId',\n },\n 
'location_map': {\n 'app_id': 'path',\n 'instance_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_application_instance_by_app_and_instance_id\n )\n\n def __get_self_application_instances_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_application_instances_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_application_instances_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n deployment_id (str): [optional]\n with_deleted (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [SuperNovaInstanceView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_application_instances_by_app_id = _Endpoint(\n settings={\n 'response_type': ([SuperNovaInstanceView],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/instances',\n 'operation_id': 'get_self_application_instances_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'deployment_id',\n 'with_deleted',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'deployment_id':\n (str,),\n 'with_deleted':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'deployment_id': 'deploymentId',\n 'with_deleted': 'withDeleted',\n },\n 'location_map': 
{\n 'app_id': 'path',\n 'deployment_id': 'query',\n 'with_deleted': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_application_instances_by_app_id\n )\n\n def __get_self_application_tags_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_application_tags_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_application_tags_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_application_tags_by_app_id = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/tags',\n 'operation_id': 'get_self_application_tags_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__get_self_application_tags_by_app_id\n )\n\n def __get_self_applications(\n self,\n **kwargs\n ):\n \"\"\"get_self_applications # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_applications(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [ApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_applications = _Endpoint(\n settings={\n 
'response_type': ([ApplicationView],),\n 'auth': [],\n 'endpoint_path': '/self/applications',\n 'operation_id': 'get_self_applications',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_applications\n )\n\n def __get_self_applications_linked_to_addon_by_addon_id(\n self,\n addon_id,\n **kwargs\n ):\n \"\"\"get_self_applications_linked_to_addon_by_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_applications_linked_to_addon_by_addon_id(addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [ApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_applications_linked_to_addon_by_addon_id = _Endpoint(\n settings={\n 'response_type': ([ApplicationView],),\n 'auth': [],\n 'endpoint_path': '/self/addons/{addonId}/applications',\n 'operation_id': 'get_self_applications_linked_to_addon_by_addon_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n ],\n 'required': [\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': 
[],\n },\n api_client=api_client,\n callable=__get_self_applications_linked_to_addon_by_addon_id\n )\n\n def __get_self_cli_tokens(\n self,\n **kwargs\n ):\n \"\"\"get_self_cli_tokens # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_cli_tokens(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n cli_token (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [CliTokenView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return 
self.call_with_http_info(**kwargs)\n\n self.get_self_cli_tokens = _Endpoint(\n settings={\n 'response_type': ([CliTokenView],),\n 'auth': [],\n 'endpoint_path': '/self/cli_tokens',\n 'operation_id': 'get_self_cli_tokens',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'cli_token',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'cli_token':\n (str,),\n },\n 'attribute_map': {\n 'cli_token': 'cli_token',\n },\n 'location_map': {\n 'cli_token': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_cli_tokens\n )\n\n def __get_self_consumer(\n self,\n key,\n **kwargs\n ):\n \"\"\"get_self_consumer # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_consumer(key, async_req=True)\n >>> result = thread.get()\n\n Args:\n key (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n OAuth1ConsumerView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['key'] = \\\n key\n return self.call_with_http_info(**kwargs)\n\n self.get_self_consumer = _Endpoint(\n settings={\n 'response_type': (OAuth1ConsumerView,),\n 'auth': [],\n 'endpoint_path': '/self/consumers/{key}',\n 'operation_id': 'get_self_consumer',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'key',\n ],\n 'required': [\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'key':\n (str,),\n },\n 'attribute_map': {\n 'key': 'key',\n },\n 'location_map': {\n 'key': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_consumer\n )\n\n def __get_self_consumer_secret(\n self,\n 
key,\n **kwargs\n ):\n \"\"\"get_self_consumer_secret # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_consumer_secret(key, async_req=True)\n >>> result = thread.get()\n\n Args:\n key (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n SecretView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['key'] = \\\n key\n return self.call_with_http_info(**kwargs)\n\n self.get_self_consumer_secret = _Endpoint(\n settings={\n 'response_type': (SecretView,),\n 
'auth': [],\n 'endpoint_path': '/self/consumers/{key}/secret',\n 'operation_id': 'get_self_consumer_secret',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'key',\n ],\n 'required': [\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'key':\n (str,),\n },\n 'attribute_map': {\n 'key': 'key',\n },\n 'location_map': {\n 'key': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_consumer_secret\n )\n\n def __get_self_consumers(\n self,\n **kwargs\n ):\n \"\"\"get_self_consumers # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_consumers(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_consumers = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/consumers',\n 'operation_id': 'get_self_consumers',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_consumers\n )\n\n def __get_self_default_method(\n self,\n **kwargs\n ):\n \"\"\"get_self_default_method # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_default_method(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n PaymentMethodView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_default_method = _Endpoint(\n settings={\n 'response_type': (PaymentMethodView,),\n 'auth': [],\n 'endpoint_path': '/self/payments/methods/default',\n 'operation_id': 'get_self_default_method',\n 'http_method': 'GET',\n 'servers': None,\n },\n 
params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_default_method\n )\n\n def __get_self_env_of_addons_linked_to_application_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_env_of_addons_linked_to_application_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_env_of_addons_linked_to_application_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [LinkedAddonEnvironmentView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_env_of_addons_linked_to_application_by_app_id = _Endpoint(\n settings={\n 'response_type': ([LinkedAddonEnvironmentView],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/addons/env',\n 'operation_id': 'get_self_env_of_addons_linked_to_application_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 
'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_env_of_addons_linked_to_application_by_app_id\n )\n\n def __get_self_exposed_env_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_exposed_env_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_exposed_env_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = 
kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_exposed_env_by_app_id = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/exposed_env',\n 'operation_id': 'get_self_exposed_env_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_exposed_env_by_app_id\n )\n\n def __get_self_favourite_vhost_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_favourite_vhost_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_favourite_vhost_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n VhostView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_favourite_vhost_by_app_id = _Endpoint(\n settings={\n 'response_type': (VhostView,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/vhosts/favourite',\n 'operation_id': 'get_self_favourite_vhost_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__get_self_favourite_vhost_by_app_id\n )\n\n def __get_self_instances_for_all_apps(\n self,\n **kwargs\n ):\n \"\"\"get_self_instances_for_all_apps # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_instances_for_all_apps(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n SuperNovaInstanceMap\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n 
self.get_self_instances_for_all_apps = _Endpoint(\n settings={\n 'response_type': (SuperNovaInstanceMap,),\n 'auth': [],\n 'endpoint_path': '/self/instances',\n 'operation_id': 'get_self_instances_for_all_apps',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_instances_for_all_apps\n )\n\n def __get_self_invoice_by_id(\n self,\n bid,\n **kwargs\n ):\n \"\"\"get_self_invoice_by_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_invoice_by_id(bid, async_req=True)\n >>> result = thread.get()\n\n Args:\n bid (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n InvoiceRendering\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['bid'] = \\\n bid\n return self.call_with_http_info(**kwargs)\n\n self.get_self_invoice_by_id = _Endpoint(\n settings={\n 'response_type': (InvoiceRendering,),\n 'auth': [],\n 'endpoint_path': '/self/payments/billings/{bid}',\n 'operation_id': 'get_self_invoice_by_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'bid',\n ],\n 'required': [\n 'bid',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'bid':\n (str,),\n },\n 'attribute_map': {\n 'bid': 'bid',\n },\n 'location_map': {\n 'bid': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_invoice_by_id\n )\n\n def __get_self_invoices(\n 
self,\n **kwargs\n ):\n \"\"\"get_self_invoices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_invoices(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [InvoiceRendering]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_invoices = _Endpoint(\n settings={\n 'response_type': ([InvoiceRendering],),\n 'auth': [],\n 'endpoint_path': 
'/self/payments/billings',\n 'operation_id': 'get_self_invoices',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_invoices\n )\n\n def __get_self_monthly_invoice(\n self,\n **kwargs\n ):\n \"\"\"get_self_monthly_invoice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_monthly_invoice(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_monthly_invoice = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/self/payments/monthlyinvoice',\n 'operation_id': 'get_self_monthly_invoice',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_monthly_invoice\n )\n\n def __get_self_payment_info(\n self,\n **kwargs\n ):\n \"\"\"get_self_payment_info # noqa: E501\n\n This method makes a synchronous HTTP request 
by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_payment_info(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n PaymentInfoView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_payment_info = _Endpoint(\n settings={\n 'response_type': (PaymentInfoView,),\n 'auth': [],\n 'endpoint_path': '/self/payment-info',\n 'operation_id': 'get_self_payment_info',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 
'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_payment_info\n )\n\n def __get_self_payment_methods(\n self,\n **kwargs\n ):\n \"\"\"get_self_payment_methods # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_payment_methods(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [PaymentMethodView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_payment_methods = _Endpoint(\n settings={\n 'response_type': ([PaymentMethodView],),\n 'auth': [],\n 'endpoint_path': '/self/payments/methods',\n 'operation_id': 'get_self_payment_methods',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_payment_methods\n )\n\n def __get_self_pdf_invoice_by_id(\n self,\n bid,\n **kwargs\n ):\n \"\"\"get_self_pdf_invoice_by_id # noqa: E501\n\n This 
method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_pdf_invoice_by_id(bid, async_req=True)\n >>> result = thread.get()\n\n Args:\n bid (str):\n\n Keyword Args:\n token (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['bid'] = \\\n bid\n return self.call_with_http_info(**kwargs)\n\n self.get_self_pdf_invoice_by_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': 
'/self/payments/billings/{bid}.pdf',\n 'operation_id': 'get_self_pdf_invoice_by_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'bid',\n 'token',\n ],\n 'required': [\n 'bid',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'bid':\n (str,),\n 'token':\n (str,),\n },\n 'attribute_map': {\n 'bid': 'bid',\n 'token': 'token',\n },\n 'location_map': {\n 'bid': 'path',\n 'token': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/pdf'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_pdf_invoice_by_id\n )\n\n def __get_self_price_with_tax(\n self,\n price,\n **kwargs\n ):\n \"\"\"get_self_price_with_tax # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_price_with_tax(price, async_req=True)\n >>> result = thread.get()\n\n Args:\n price (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n PriceWithTaxInfo\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['price'] = \\\n price\n return self.call_with_http_info(**kwargs)\n\n self.get_self_price_with_tax = _Endpoint(\n settings={\n 'response_type': (PriceWithTaxInfo,),\n 'auth': [],\n 'endpoint_path': '/self/payments/fullprice/{price}',\n 'operation_id': 'get_self_price_with_tax',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'price',\n ],\n 'required': [\n 'price',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'price':\n (str,),\n },\n 'attribute_map': {\n 'price': 'price',\n },\n 'location_map': {\n 'price': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_price_with_tax\n )\n\n def 
__get_self_recurrent_payment(\n self,\n **kwargs\n ):\n \"\"\"get_self_recurrent_payment # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_recurrent_payment(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n RecurrentPaymentView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_recurrent_payment = _Endpoint(\n settings={\n 'response_type': 
(RecurrentPaymentView,),\n 'auth': [],\n 'endpoint_path': '/self/payments/recurring',\n 'operation_id': 'get_self_recurrent_payment',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_recurrent_payment\n )\n\n def __get_self_stripe_token(\n self,\n **kwargs\n ):\n \"\"\"get_self_stripe_token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_stripe_token(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n BraintreeToken\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_stripe_token = _Endpoint(\n settings={\n 'response_type': (BraintreeToken,),\n 'auth': [],\n 'endpoint_path': '/self/payments/tokens/stripe',\n 'operation_id': 'get_self_stripe_token',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_stripe_token\n )\n\n def __get_self_tokens(\n self,\n **kwargs\n ):\n \"\"\"get_self_tokens # noqa: E501\n\n This method makes a synchronous HTTP request 
by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_tokens(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [OAuth1AccessTokenView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_self_tokens = _Endpoint(\n settings={\n 'response_type': ([OAuth1AccessTokenView],),\n 'auth': [],\n 'endpoint_path': '/self/tokens',\n 'operation_id': 'get_self_tokens',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 
],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_tokens\n )\n\n def __get_self_vhost_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_self_vhost_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_self_vhost_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [VhostView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_self_vhost_by_app_id = _Endpoint(\n settings={\n 'response_type': ([VhostView],),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/vhosts',\n 'operation_id': 'get_self_vhost_by_app_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_self_vhost_by_app_id\n )\n\n def 
__get_ssh_keys(\n self,\n **kwargs\n ):\n \"\"\"get_ssh_keys # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_ssh_keys(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [SshKeyView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_ssh_keys = _Endpoint(\n settings={\n 'response_type': ([SshKeyView],),\n 'auth': [],\n 'endpoint_path': '/self/keys',\n 'operation_id': 
'get_ssh_keys',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_ssh_keys\n )\n\n def __get_summary(\n self,\n **kwargs\n ):\n \"\"\"get_summary # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_summary(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n full (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Summary\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_summary = _Endpoint(\n settings={\n 'response_type': (Summary,),\n 'auth': [],\n 'endpoint_path': '/summary',\n 'operation_id': 'get_summary',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'full',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'full':\n (str,),\n },\n 'attribute_map': {\n 'full': 'full',\n },\n 'location_map': {\n 'full': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_summary\n )\n\n def __get_user(\n self,\n **kwargs\n ):\n \"\"\"get_user # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_user(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n UserView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_user = _Endpoint(\n settings={\n 'response_type': (UserView,),\n 'auth': [],\n 'endpoint_path': '/self',\n 'operation_id': 'get_user',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 
'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_user\n )\n\n def __link_self_addon_to_application_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"link_self_addon_to_application_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.link_self_addon_to_application_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.link_self_addon_to_application_by_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/addons',\n 'operation_id': 'link_self_addon_to_application_by_app_id',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__link_self_addon_to_application_by_app_id\n )\n\n def __mark_self_favourite_vhost_by_app_id(\n self,\n app_id,\n vhost_view,\n **kwargs\n ):\n \"\"\"mark_self_favourite_vhost_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.mark_self_favourite_vhost_by_app_id(app_id, vhost_view, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n vhost_view (VhostView):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n VhostView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] 
= kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['vhost_view'] = \\\n vhost_view\n return self.call_with_http_info(**kwargs)\n\n self.mark_self_favourite_vhost_by_app_id = _Endpoint(\n settings={\n 'response_type': (VhostView,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/vhosts/favourite',\n 'operation_id': 'mark_self_favourite_vhost_by_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'vhost_view',\n ],\n 'required': [\n 'app_id',\n 'vhost_view',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'vhost_view':\n (VhostView,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n 'vhost_view': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__mark_self_favourite_vhost_by_app_id\n )\n\n def __preorder_self_addon(\n self,\n wannabe_addon_provision,\n **kwargs\n ):\n \"\"\"preorder_self_addon # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.preorder_self_addon(wannabe_addon_provision, async_req=True)\n >>> result = thread.get()\n\n Args:\n wannabe_addon_provision (WannabeAddonProvision):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n InvoiceRendering\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['wannabe_addon_provision'] = \\\n wannabe_addon_provision\n return self.call_with_http_info(**kwargs)\n\n self.preorder_self_addon = _Endpoint(\n settings={\n 'response_type': (InvoiceRendering,),\n 'auth': [],\n 'endpoint_path': '/self/addons/preorders',\n 'operation_id': 'preorder_self_addon',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'wannabe_addon_provision',\n ],\n 'required': [\n 'wannabe_addon_provision',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'wannabe_addon_provision':\n (WannabeAddonProvision,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'wannabe_addon_provision': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 
'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__preorder_self_addon\n )\n\n def __provision_self_addon(\n self,\n wannabe_addon_provision,\n **kwargs\n ):\n \"\"\"provision_self_addon # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.provision_self_addon(wannabe_addon_provision, async_req=True)\n >>> result = thread.get()\n\n Args:\n wannabe_addon_provision (WannabeAddonProvision):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n 
)\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['wannabe_addon_provision'] = \\\n wannabe_addon_provision\n return self.call_with_http_info(**kwargs)\n\n self.provision_self_addon = _Endpoint(\n settings={\n 'response_type': (AddonView,),\n 'auth': [],\n 'endpoint_path': '/self/addons',\n 'operation_id': 'provision_self_addon',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'wannabe_addon_provision',\n ],\n 'required': [\n 'wannabe_addon_provision',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'wannabe_addon_provision':\n (WannabeAddonProvision,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'wannabe_addon_provision': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__provision_self_addon\n )\n\n def __redeploy_self_application_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"redeploy_self_application_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.redeploy_self_application_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n commit (str): [optional]\n use_cache (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.redeploy_self_application_by_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/instances',\n 'operation_id': 'redeploy_self_application_by_app_id',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'commit',\n 'use_cache',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'commit':\n (str,),\n 'use_cache':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'commit': 'commit',\n 'use_cache': 'useCache',\n },\n 'location_map': {\n 'app_id': 'path',\n 'commit': 'query',\n 'use_cache': 'query',\n },\n 
'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__redeploy_self_application_by_app_id\n )\n\n def __remove_email_address(\n self,\n email,\n **kwargs\n ):\n \"\"\"remove_email_address # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.remove_email_address(email, async_req=True)\n >>> result = thread.get()\n\n Args:\n email (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n 
'_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['email'] = \\\n email\n return self.call_with_http_info(**kwargs)\n\n self.remove_email_address = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/emails/{email}',\n 'operation_id': 'remove_email_address',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'email',\n ],\n 'required': [\n 'email',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'email':\n (str,),\n },\n 'attribute_map': {\n 'email': 'email',\n },\n 'location_map': {\n 'email': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__remove_email_address\n )\n\n def __remove_self_application_env_by_app_id_and_env_name(\n self,\n app_id,\n env_name,\n **kwargs\n ):\n \"\"\"remove_self_application_env_by_app_id_and_env_name # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.remove_self_application_env_by_app_id_and_env_name(app_id, env_name, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n env_name (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n ApplicationView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['env_name'] = \\\n env_name\n return self.call_with_http_info(**kwargs)\n\n self.remove_self_application_env_by_app_id_and_env_name = _Endpoint(\n settings={\n 'response_type': (ApplicationView,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/env/{envName}',\n 'operation_id': 'remove_self_application_env_by_app_id_and_env_name',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'env_name',\n ],\n 'required': [\n 'app_id',\n 'env_name',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'env_name':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'env_name': 'envName',\n },\n 'location_map': {\n 'app_id': 'path',\n 'env_name': 
'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__remove_self_application_env_by_app_id_and_env_name\n )\n\n def __remove_self_vhost_by_app_id(\n self,\n app_id,\n domain,\n **kwargs\n ):\n \"\"\"remove_self_vhost_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.remove_self_vhost_by_app_id(app_id, domain, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n domain (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['domain'] = \\\n domain\n return self.call_with_http_info(**kwargs)\n\n self.remove_self_vhost_by_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/vhosts/{domain}',\n 'operation_id': 'remove_self_vhost_by_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'domain',\n ],\n 'required': [\n 'app_id',\n 'domain',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'domain':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'domain': 'domain',\n },\n 'location_map': {\n 'app_id': 'path',\n 'domain': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 
'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__remove_self_vhost_by_app_id\n )\n\n def __remove_ssh_key(\n self,\n key,\n **kwargs\n ):\n \"\"\"remove_ssh_key # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.remove_ssh_key(key, async_req=True)\n >>> result = thread.get()\n\n Args:\n key (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['key'] = 
\\\n key\n return self.call_with_http_info(**kwargs)\n\n self.remove_ssh_key = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/keys/{key}',\n 'operation_id': 'remove_ssh_key',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'key',\n ],\n 'required': [\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'key':\n (str,),\n },\n 'attribute_map': {\n 'key': 'key',\n },\n 'location_map': {\n 'key': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__remove_ssh_key\n )\n\n def __rename_addon(\n self,\n addon_id,\n wannabe_addon_provision,\n **kwargs\n ):\n \"\"\"rename_addon # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.rename_addon(addon_id, wannabe_addon_provision, async_req=True)\n >>> result = thread.get()\n\n Args:\n addon_id (str):\n wannabe_addon_provision (WannabeAddonProvision):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n AddonView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['addon_id'] = \\\n addon_id\n kwargs['wannabe_addon_provision'] = \\\n wannabe_addon_provision\n return self.call_with_http_info(**kwargs)\n\n self.rename_addon = _Endpoint(\n settings={\n 'response_type': (AddonView,),\n 'auth': [],\n 'endpoint_path': '/self/addons/{addonId}',\n 'operation_id': 'rename_addon',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'addon_id',\n 'wannabe_addon_provision',\n ],\n 'required': [\n 'addon_id',\n 'wannabe_addon_provision',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'addon_id':\n (str,),\n 'wannabe_addon_provision':\n (WannabeAddonProvision,),\n },\n 'attribute_map': {\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'addon_id': 'path',\n 'wannabe_addon_provision': 'body',\n 
},\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__rename_addon\n )\n\n def __revoke_all_tokens(\n self,\n **kwargs\n ):\n \"\"\"revoke_all_tokens # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.revoke_all_tokens(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = 
kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.revoke_all_tokens = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/tokens',\n 'operation_id': 'revoke_all_tokens',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__revoke_all_tokens\n )\n\n def __revoke_token(\n self,\n token,\n **kwargs\n ):\n \"\"\"revoke_token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.revoke_token(token, async_req=True)\n >>> result = thread.get()\n\n Args:\n token (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['token'] = \\\n token\n return self.call_with_http_info(**kwargs)\n\n self.revoke_token = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/tokens/{token}',\n 'operation_id': 'revoke_token',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'token',\n ],\n 'required': [\n 'token',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'token':\n (str,),\n },\n 'attribute_map': {\n 'token': 'token',\n },\n 'location_map': {\n 'token': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__revoke_token\n )\n\n def __set_self_application_branch_by_app_id(\n self,\n app_id,\n 
wannabe_branch,\n **kwargs\n ):\n \"\"\"set_self_application_branch_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.set_self_application_branch_by_app_id(app_id, wannabe_branch, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n wannabe_branch (WannabeBranch):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['wannabe_branch'] = \\\n 
wannabe_branch\n return self.call_with_http_info(**kwargs)\n\n self.set_self_application_branch_by_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/branch',\n 'operation_id': 'set_self_application_branch_by_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'wannabe_branch',\n ],\n 'required': [\n 'app_id',\n 'wannabe_branch',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'wannabe_branch':\n (WannabeBranch,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n 'wannabe_branch': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__set_self_application_branch_by_app_id\n )\n\n def __set_self_build_instance_flavor_by_app_id(\n self,\n app_id,\n wannabe_build_flavor,\n **kwargs\n ):\n \"\"\"set_self_build_instance_flavor_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.set_self_build_instance_flavor_by_app_id(app_id, wannabe_build_flavor, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n wannabe_build_flavor (WannabeBuildFlavor):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['wannabe_build_flavor'] = \\\n wannabe_build_flavor\n return self.call_with_http_info(**kwargs)\n\n self.set_self_build_instance_flavor_by_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/buildflavor',\n 'operation_id': 'set_self_build_instance_flavor_by_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'wannabe_build_flavor',\n ],\n 'required': [\n 'app_id',\n 'wannabe_build_flavor',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'wannabe_build_flavor':\n (WannabeBuildFlavor,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n 
'wannabe_build_flavor': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__set_self_build_instance_flavor_by_app_id\n )\n\n def __set_self_default_method(\n self,\n payment_data,\n **kwargs\n ):\n \"\"\"set_self_default_method # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.set_self_default_method(payment_data, async_req=True)\n >>> result = thread.get()\n\n Args:\n payment_data (PaymentData):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['payment_data'] = \\\n payment_data\n return self.call_with_http_info(**kwargs)\n\n self.set_self_default_method = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/payments/methods/default',\n 'operation_id': 'set_self_default_method',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'payment_data',\n ],\n 'required': [\n 'payment_data',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'payment_data':\n (PaymentData,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'payment_data': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n 
callable=__set_self_default_method\n )\n\n def __set_self_max_credits_per_month(\n self,\n wannabe_max_credits,\n **kwargs\n ):\n \"\"\"set_self_max_credits_per_month # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.set_self_max_credits_per_month(wannabe_max_credits, async_req=True)\n >>> result = thread.get()\n\n Args:\n wannabe_max_credits (WannabeMaxCredits):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n WannabeMaxCredits\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = 
kwargs.get('_host_index')\n kwargs['wannabe_max_credits'] = \\\n wannabe_max_credits\n return self.call_with_http_info(**kwargs)\n\n self.set_self_max_credits_per_month = _Endpoint(\n settings={\n 'response_type': (WannabeMaxCredits,),\n 'auth': [],\n 'endpoint_path': '/self/payments/monthlyinvoice/maxcredit',\n 'operation_id': 'set_self_max_credits_per_month',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'wannabe_max_credits',\n ],\n 'required': [\n 'wannabe_max_credits',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'wannabe_max_credits':\n (WannabeMaxCredits,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'wannabe_max_credits': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__set_self_max_credits_per_month\n )\n\n def __set_user_avatar_from_file(\n self,\n body,\n **kwargs\n ):\n \"\"\"set_user_avatar_from_file # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.set_user_avatar_from_file(body, async_req=True)\n >>> result = thread.get()\n\n Args:\n body (file_type):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n UrlView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['body'] = \\\n body\n return self.call_with_http_info(**kwargs)\n\n self.set_user_avatar_from_file = _Endpoint(\n settings={\n 'response_type': (UrlView,),\n 'auth': [],\n 'endpoint_path': '/self/avatar',\n 'operation_id': 'set_user_avatar_from_file',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'body',\n ],\n 'required': [\n 'body',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'body':\n (file_type,),\n },\n 'attribute_map': {\n },\n 'location_map': {\n 'body': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'image/bmp',\n 'image/gif',\n 'image/jpeg',\n 'image/png',\n 'image/tiff'\n ]\n },\n api_client=api_client,\n 
callable=__set_user_avatar_from_file\n )\n\n def __undeploy_self_application_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"undeploy_self_application_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.undeploy_self_application_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return 
self.call_with_http_info(**kwargs)\n\n self.undeploy_self_application_by_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/instances',\n 'operation_id': 'undeploy_self_application_by_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__undeploy_self_application_by_app_id\n )\n\n def __unlink_selfddon_from_application_by_app_and_addon_id(\n self,\n app_id,\n addon_id,\n **kwargs\n ):\n \"\"\"unlink_selfddon_from_application_by_app_and_addon_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.unlink_selfddon_from_application_by_app_and_addon_id(app_id, addon_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n addon_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['addon_id'] = \\\n addon_id\n return self.call_with_http_info(**kwargs)\n\n self.unlink_selfddon_from_application_by_app_and_addon_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/addons/{addonId}',\n 'operation_id': 'unlink_selfddon_from_application_by_app_and_addon_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'addon_id',\n ],\n 'required': [\n 'app_id',\n 'addon_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'addon_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'addon_id': 'addonId',\n },\n 'location_map': {\n 'app_id': 'path',\n 'addon_id': 'path',\n },\n 
'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__unlink_selfddon_from_application_by_app_and_addon_id\n )\n\n def __unmark_self_favourite_vhost_by_app_id(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"unmark_self_favourite_vhost_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.unmark_self_favourite_vhost_by_app_id(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.unmark_self_favourite_vhost_by_app_id = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/vhosts/favourite',\n 'operation_id': 'unmark_self_favourite_vhost_by_app_id',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__unmark_self_favourite_vhost_by_app_id\n )\n\n def __update_self_consumer(\n self,\n key,\n wannabe_o_auth1_consumer,\n **kwargs\n ):\n \"\"\"update_self_consumer # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_self_consumer(key, wannabe_o_auth1_consumer, async_req=True)\n >>> result = thread.get()\n\n Args:\n key (str):\n wannabe_o_auth1_consumer (WannabeOAuth1Consumer):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n OAuth1ConsumerView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n 
kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['key'] = \\\n key\n kwargs['wannabe_o_auth1_consumer'] = \\\n wannabe_o_auth1_consumer\n return self.call_with_http_info(**kwargs)\n\n self.update_self_consumer = _Endpoint(\n settings={\n 'response_type': (OAuth1ConsumerView,),\n 'auth': [],\n 'endpoint_path': '/self/consumers/{key}',\n 'operation_id': 'update_self_consumer',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'key',\n 'wannabe_o_auth1_consumer',\n ],\n 'required': [\n 'key',\n 'wannabe_o_auth1_consumer',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'key':\n (str,),\n 'wannabe_o_auth1_consumer':\n (WannabeOAuth1Consumer,),\n },\n 'attribute_map': {\n 'key': 'key',\n },\n 'location_map': {\n 'key': 'path',\n 'wannabe_o_auth1_consumer': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__update_self_consumer\n )\n\n def __update_self_exposed_env_by_app_id(\n self,\n app_id,\n body,\n **kwargs\n ):\n \"\"\"update_self_exposed_env_by_app_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_self_exposed_env_by_app_id(app_id, body, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n body (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['body'] = \\\n body\n return self.call_with_http_info(**kwargs)\n\n self.update_self_exposed_env_by_app_id = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/self/applications/{appId}/exposed_env',\n 'operation_id': 'update_self_exposed_env_by_app_id',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'body',\n ],\n 'required': [\n 'app_id',\n 'body',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'body':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n },\n 'location_map': {\n 'app_id': 'path',\n 'body': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n 
],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__update_self_exposed_env_by_app_id\n )\n\n def __validate_email(\n self,\n **kwargs\n ):\n \"\"\"validate_email # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.validate_email(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n validation_key (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return 
self.call_with_http_info(**kwargs)\n\n self.validate_email = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/validate_email',\n 'operation_id': 'validate_email',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'validation_key',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'validation_key':\n (str,),\n },\n 'attribute_map': {\n 'validation_key': 'validationKey',\n },\n 'location_map': {\n 'validation_key': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__validate_email\n )\n\n def __validate_mfa(\n self,\n kind,\n wannabe_mfa_creds,\n **kwargs\n ):\n \"\"\"validate_mfa # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.validate_mfa(kind, wannabe_mfa_creds, async_req=True)\n >>> result = thread.get()\n\n Args:\n kind (str):\n wannabe_mfa_creds (WannabeMFACreds):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['kind'] = \\\n kind\n kwargs['wannabe_mfa_creds'] = \\\n wannabe_mfa_creds\n return self.call_with_http_info(**kwargs)\n\n self.validate_mfa = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/self/mfa/{kind}/confirmation',\n 'operation_id': 'validate_mfa',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'kind',\n 'wannabe_mfa_creds',\n ],\n 'required': [\n 'kind',\n 'wannabe_mfa_creds',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'kind':\n (str,),\n 'wannabe_mfa_creds':\n (WannabeMFACreds,),\n },\n 'attribute_map': {\n 'kind': 'kind',\n },\n 'location_map': {\n 'kind': 'path',\n 'wannabe_mfa_creds': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__validate_mfa\n )\n" }, { "alpha_fraction": 0.5457317233085632, "alphanum_fraction": 0.5457317233085632, "avg_line_length": 45.761905670166016, "blob_id": "335886fdaac50d1058c6d9dccc8926b76f828c41", "content_id": "ea40e4c2bf68198625035b6f3a41e724ef141c77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 984, "license_type": "no_license", "max_line_length": 168, "num_lines": 21, "path": "/docs/SuperNovaInstanceView.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# SuperNovaInstanceView\n\n\n## Properties\nName | Type | Description | Notes\n------------ | ------------- | ------------- | -------------\n**id** | **str** | | [optional] \n**app_id** | **str** | | [optional] \n**ip** | **str** | | [optional] \n**app_port** | **int** | | [optional] \n**state** | **str** | | [optional] \n**flavor** | [**SuperNovaFlavor**](SuperNovaFlavor.md) | | [optional] \n**commit** | **str** | | [optional] \n**deploy_number** | **int** | | [optional] \n**deploy_id** | **str** | | [optional] \n**instance_number** | **int** | | [optional] \n**display_name** | **str** | | [optional] \n**creation_date** | **int** | | [optional] \n**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]\n\n[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)\n\n\n" }, { "alpha_fraction": 0.6626794338226318, "alphanum_fraction": 0.6734449863433838, "avg_line_length": 21, "blob_id": "394eddf41d6ef07765b867eac2ac269d177af893", "content_id": "41957ce2df8ca59919bc3fbb62b9705571ca91b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 836, "license_type": "no_license", "max_line_length": 79, "num_lines": 38, "path": "/test/test_flavor_view.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.value_with_unit import ValueWithUnit\nglobals()['ValueWithUnit'] = ValueWithUnit\nfrom openapi_client.model.flavor_view import FlavorView\n\n\nclass TestFlavorView(unittest.TestCase):\n \"\"\"FlavorView unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testFlavorView(self):\n \"\"\"Test FlavorView\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = FlavorView() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5159010887145996, "alphanum_fraction": 0.5159010887145996, "avg_line_length": 42.46154022216797, "blob_id": "166078a1e1ce41cf2eb11d1213caa72e4fc9d3c1", "content_id": "bb0cde6649924876a7a6b4c076bba825dab42b85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1132, "license_type": "no_license", "max_line_length": 168, "num_lines": 26, "path": "/docs/UserView.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# UserView\n\n\n## Properties\nName | Type | Description | Notes\n------------ | ------------- | ------------- | -------------\n**id** | **str** | | [optional] \n**email** | **str** | | [optional] \n**name** | **str** | | [optional] \n**phone** | **str** | | [optional] \n**address** | **str** | | [optional] \n**city** | **str** | | [optional] \n**zipcode** | **str** | | [optional] \n**country** | **str** | | [optional] 
\n**avatar** | **str** | | [optional] \n**creation_date** | **int** | | [optional] \n**lang** | **str** | | [optional] \n**email_validated** | **bool** | | [optional] \n**oauth_apps** | **[str]** | | [optional] \n**admin** | **bool** | | [optional] \n**can_pay** | **bool** | | [optional] \n**preferred_mfa** | **str** | | [optional] \n**has_password** | **bool** | | [optional] \n**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]\n\n[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)\n\n\n" }, { "alpha_fraction": 0.6477333903312683, "alphanum_fraction": 0.6503808498382568, "avg_line_length": 29.709999084472656, "blob_id": "232d6b230232e74f71160a1527f6604968f37e67", "content_id": "d1021f20eec4b631545bcc88a9cdc4ae3d8f0b1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 70634, "license_type": "no_license", "max_line_length": 279, "num_lines": 2300, "path": "/docs/UserApi.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# openapi_client.UserApi\n\nAll URIs are relative to *https://api.clever-cloud.com/v2*\n\nMethod | HTTP request | Description\n------------- | ------------- | -------------\n[**ask_for_password_reset_via_form**](UserApi.md#ask_for_password_reset_via_form) | **POST** /password_forgotten | \n[**authorize_paypal_transaction**](UserApi.md#authorize_paypal_transaction) | **PUT** /invoice/external/paypal/{bid} | \n[**cancel_paypal_transaction**](UserApi.md#cancel_paypal_transaction) | **DELETE** /invoice/external/paypal/{bid} | \n[**confirm_password_reset_request**](UserApi.md#confirm_password_reset_request) | **GET** /password_forgotten/{key} | \n[**create_user_from_form**](UserApi.md#create_user_from_form) | **POST** /users | 
\n[**delete_github_link**](UserApi.md#delete_github_link) | **DELETE** /github/link | \n[**finsih_github_signup**](UserApi.md#finsih_github_signup) | **POST** /github/signup | \n[**get_applications**](UserApi.md#get_applications) | **GET** /users/{id}/applications | \n[**get_env**](UserApi.md#get_env) | **GET** /application/{appId}/environment | \n[**get_git_info**](UserApi.md#get_git_info) | **GET** /users/{userId}/git-info | \n[**get_github**](UserApi.md#get_github) | **GET** /github | \n[**get_github_applications**](UserApi.md#get_github_applications) | **GET** /github/applications | \n[**get_github_callback**](UserApi.md#get_github_callback) | **GET** /github/callback | \n[**get_github_emails**](UserApi.md#get_github_emails) | **GET** /github/emails | \n[**get_github_keys**](UserApi.md#get_github_keys) | **GET** /github/keys | \n[**get_github_link**](UserApi.md#get_github_link) | **GET** /github/link | \n[**get_github_login**](UserApi.md#get_github_login) | **GET** /github/login | \n[**get_github_username**](UserApi.md#get_github_username) | **GET** /github/username | \n[**get_login_form**](UserApi.md#get_login_form) | **GET** /session/login | \n[**get_login_form1**](UserApi.md#get_login_form1) | **GET** /sessions/login | \n[**get_password_forgotten_form**](UserApi.md#get_password_forgotten_form) | **GET** /password_forgotten | \n[**get_signup_form**](UserApi.md#get_signup_form) | **GET** /session/signup | \n[**get_signup_form1**](UserApi.md#get_signup_form1) | **GET** /sessions/signup | \n[**get_user_by_id**](UserApi.md#get_user_by_id) | **GET** /users/{id} | \n[**github_signup**](UserApi.md#github_signup) | **GET** /github/signup | \n[**login**](UserApi.md#login) | **POST** /session/login | \n[**login1**](UserApi.md#login1) | **POST** /sessions/login | \n[**mfa_login**](UserApi.md#mfa_login) | **POST** /session/mfa_login | \n[**mfa_login1**](UserApi.md#mfa_login1) | **POST** /sessions/mfa_login | \n[**post_github_redeploy**](UserApi.md#post_github_redeploy) | 
**POST** /github/redeploy | \n[**reset_password_forgotten**](UserApi.md#reset_password_forgotten) | **POST** /password_forgotten/{key} | \n[**update_env**](UserApi.md#update_env) | **PUT** /application/{appId}/environment | \n[**update_invoice**](UserApi.md#update_invoice) | **POST** /invoice/external/{bid} | \n\n\n# **ask_for_password_reset_via_form**\n> str ask_for_password_reset_via_form()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n login = \"login_example\" # str | (optional)\n drop_tokens = \"drop_tokens_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.ask_for_password_reset_via_form(login=login, drop_tokens=drop_tokens, clever_flavor=clever_flavor)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->ask_for_password_reset_via_form: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **login** | **str**| | [optional]\n **drop_tokens** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - **Accept**: text/html\n\n\n### HTTP response 
details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **authorize_paypal_transaction**\n> authorize_paypal_transaction(bid, payment_data)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.payment_data import PaymentData\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n bid = \"bid_example\" # str | \n payment_data = PaymentData(\n type=\"NEW_CARD\",\n token=\"token_example\",\n device_data=\"device_data_example\",\n ) # PaymentData | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.authorize_paypal_transaction(bid, payment_data)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->authorize_paypal_transaction: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **bid** | **str**| |\n **payment_data** | [**PaymentData**](PaymentData.md)| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers 
|\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **cancel_paypal_transaction**\n> cancel_paypal_transaction(bid)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n bid = \"bid_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.cancel_paypal_transaction(bid)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->cancel_paypal_transaction: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **bid** | **str**| |\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **confirm_password_reset_request**\n> str confirm_password_reset_request(key)\n\n\n\n### 
Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n key = \"key_example\" # str | \n clever_flavor = \"clever_flavor_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.confirm_password_reset_request(key)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->confirm_password_reset_request: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.confirm_password_reset_request(key, clever_flavor=clever_flavor)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->confirm_password_reset_request: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **key** | **str**| |\n **clever_flavor** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to 
README]](../README.md)\n\n# **create_user_from_form**\n> create_user_from_form()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n invitation_key = \"invitation_key_example\" # str | (optional)\n addon_beta_invitation_key = \"addon_beta_invitation_key_example\" # str | (optional)\n email = \"email_example\" # str | (optional)\n _pass = \"_pass_example\" # str | (optional)\n url_next = \"url_next_example\" # str | (optional)\n terms = \"terms_example\" # str | (optional)\n subscription_source = \"subscription_source_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.create_user_from_form(invitation_key=invitation_key, addon_beta_invitation_key=addon_beta_invitation_key, email=email, _pass=_pass, url_next=url_next, terms=terms, subscription_source=subscription_source, clever_flavor=clever_flavor, oauth_token=oauth_token)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->create_user_from_form: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **invitation_key** | **str**| | [optional]\n **addon_beta_invitation_key** | **str**| | [optional]\n **email** | **str**| | [optional]\n **_pass** | **str**| | 
[optional]\n **url_next** | **str**| | [optional]\n **terms** | **str**| | [optional]\n **subscription_source** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **delete_github_link**\n> Message delete_github_link()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.delete_github_link()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->delete_github_link: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: 
application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **finsih_github_signup**\n> str finsih_github_signup()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n transaction_id = \"transaction_id_example\" # str | (optional)\n name = \"name_example\" # str | (optional)\n other_id = \"other_id_example\" # str | (optional)\n other_email = \"other_email_example\" # str | (optional)\n password = \"password_example\" # str | (optional)\n auto_link = \"auto_link_example\" # str | (optional)\n terms = \"terms_example\" # str | (optional)\n invitation_key = \"invitation_key_example\" # str | (optional)\n mfa_kind = \"mfa_kind_example\" # str | (optional)\n mfa_attempt = \"mfa_attempt_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.finsih_github_signup(transaction_id=transaction_id, name=name, other_id=other_id, other_email=other_email, password=password, auto_link=auto_link, terms=terms, invitation_key=invitation_key, mfa_kind=mfa_kind, mfa_attempt=mfa_attempt)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception 
when calling UserApi->finsih_github_signup: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **transaction_id** | **str**| | [optional]\n **name** | **str**| | [optional]\n **other_id** | **str**| | [optional]\n **other_email** | **str**| | [optional]\n **password** | **str**| | [optional]\n **auto_link** | **str**| | [optional]\n **terms** | **str**| | [optional]\n **invitation_key** | **str**| | [optional]\n **mfa_kind** | **str**| | [optional]\n **mfa_attempt** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_applications**\n> [ApplicationView] get_applications(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.application_view import ApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_applications(id)\n 
pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_applications: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**[ApplicationView]**](ApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_env**\n> str get_env(app_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n app_id = \"appId_example\" # str | \n token = \"token_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_env(app_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_env: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_env(app_id, token=token)\n pprint(api_response)\n except 
openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_env: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **token** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n**0** | getEnv | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_git_info**\n> str get_git_info(user_id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n user_id = \"userId_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_git_info(user_id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_git_info: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **user_id** | **str**| |\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP 
request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_github**\n> OAuthTransactionView get_github()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.o_auth_transaction_view import OAuthTransactionView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_github()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_github: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**OAuthTransactionView**](OAuthTransactionView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to 
README]](../README.md)\n\n# **get_github_applications**\n> [OAuthApplicationView] get_github_applications()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.o_auth_application_view import OAuthApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_github_applications()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_github_applications: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[OAuthApplicationView]**](OAuthApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_github_callback**\n> get_github_callback()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported 
configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n cc_o_auth_data = \"CcOAuthData_example\" # str | (optional)\n code = \"code_example\" # str | (optional)\n state = \"state_example\" # str | (optional)\n error = \"error_example\" # str | (optional)\n error_description = \"error_description_example\" # str | (optional)\n error_uri = \"error_uri_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.get_github_callback(cc_o_auth_data=cc_o_auth_data, code=code, state=state, error=error, error_description=error_description, error_uri=error_uri)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_github_callback: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **cc_o_auth_data** | **str**| | [optional]\n **code** | **str**| | [optional]\n **state** | **str**| | [optional]\n **error** | **str**| | [optional]\n **error_description** | **str**| | [optional]\n **error_uri** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_github_emails**\n> [str] get_github_emails()\n\n\n\n### 
Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_github_emails()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_github_emails: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n**[str]**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_github_keys**\n> [SshKeyView] get_github_keys()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.ssh_key_view import SshKeyView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith 
openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_github_keys()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_github_keys: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n[**[SshKeyView]**](SshKeyView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_github_link**\n> str get_github_link()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n transaction_id = \"transactionId_example\" # str | (optional)\n redirect_url = \"redirectUrl_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_github_link(transaction_id=transaction_id, redirect_url=redirect_url)\n pprint(api_response)\n except 
openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_github_link: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **transaction_id** | **str**| | [optional]\n **redirect_url** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_github_login**\n> get_github_login()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n redirect_url = \"redirectUrl_example\" # str | (optional)\n from_authorize = \"fromAuthorize_example\" # str | (optional)\n cli_token = \"cli_token_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n invitation_key = \"invitationKey_example\" # str | (optional)\n subscription_source = \"subscriptionSource_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n 
try:\n api_instance.get_github_login(redirect_url=redirect_url, from_authorize=from_authorize, cli_token=cli_token, clever_flavor=clever_flavor, oauth_token=oauth_token, invitation_key=invitation_key, subscription_source=subscription_source)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_github_login: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **redirect_url** | **str**| | [optional]\n **from_authorize** | **str**| | [optional]\n **cli_token** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n **invitation_key** | **str**| | [optional]\n **subscription_source** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_github_username**\n> str get_github_username()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n\n # example, this 
endpoint has no required or optional parameters\n try:\n api_response = api_instance.get_github_username()\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_github_username: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_login_form**\n> str get_login_form()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n secondary_email_key = \"secondaryEmailKey_example\" # str | (optional)\n deletion_key = \"deletionKey_example\" # str | (optional)\n from_authorize = \"fromAuthorize_example\" # str | (optional)\n cli_token = \"cli_token_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_login_form(secondary_email_key=secondary_email_key, deletion_key=deletion_key, 
from_authorize=from_authorize, cli_token=cli_token, clever_flavor=clever_flavor)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_login_form: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **secondary_email_key** | **str**| | [optional]\n **deletion_key** | **str**| | [optional]\n **from_authorize** | **str**| | [optional]\n **cli_token** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_login_form1**\n> str get_login_form1()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n secondary_email_key = \"secondaryEmailKey_example\" # str | (optional)\n deletion_key = \"deletionKey_example\" # str | (optional)\n from_authorize = \"fromAuthorize_example\" # str | (optional)\n cli_token = \"cli_token_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # 
str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_login_form1(secondary_email_key=secondary_email_key, deletion_key=deletion_key, from_authorize=from_authorize, cli_token=cli_token, clever_flavor=clever_flavor)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_login_form1: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **secondary_email_key** | **str**| | [optional]\n **deletion_key** | **str**| | [optional]\n **from_authorize** | **str**| | [optional]\n **cli_token** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_password_forgotten_form**\n> str get_password_forgotten_form()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n clever_flavor = 
\"clever_flavor_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_password_forgotten_form(clever_flavor=clever_flavor)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_password_forgotten_form: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **clever_flavor** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_signup_form**\n> str get_signup_form()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n invitation_key = \"invitationKey_example\" # str | (optional)\n url_next = \"url_next_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = 
api_instance.get_signup_form(invitation_key=invitation_key, url_next=url_next, clever_flavor=clever_flavor)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_signup_form: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **invitation_key** | **str**| | [optional]\n **url_next** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_signup_form1**\n> str get_signup_form1()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n invitation_key = \"invitationKey_example\" # str | (optional)\n url_next = \"url_next_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.get_signup_form1(invitation_key=invitation_key, 
url_next=url_next, clever_flavor=clever_flavor)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_signup_form1: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **invitation_key** | **str**| | [optional]\n **url_next** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_user_by_id**\n> UserView get_user_by_id(id)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.user_view import UserView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n id = \"id_example\" # str | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.get_user_by_id(id)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->get_user_by_id: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | 
Notes\n------------- | ------------- | ------------- | -------------\n **id** | **str**| |\n\n### Return type\n\n[**UserView**](UserView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **github_signup**\n> github_signup()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n redirect_url = \"redirectUrl_example\" # str | (optional)\n from_authorize = \"fromAuthorize_example\" # str | (optional)\n cli_token = \"cli_token_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n invitation_key = \"invitationKey_example\" # str | (optional)\n subscription_source = \"subscriptionSource_example\" # str | (optional)\n terms = \"terms_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.github_signup(redirect_url=redirect_url, from_authorize=from_authorize, cli_token=cli_token, clever_flavor=clever_flavor, oauth_token=oauth_token, 
invitation_key=invitation_key, subscription_source=subscription_source, terms=terms)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->github_signup: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **redirect_url** | **str**| | [optional]\n **from_authorize** | **str**| | [optional]\n **cli_token** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n **invitation_key** | **str**| | [optional]\n **subscription_source** | **str**| | [optional]\n **terms** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **login**\n> str login()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n email = \"email_example\" # str | (optional)\n _pass = \"_pass_example\" # str | (optional)\n from_authorize = \"from_authorize_example\" # str | (optional)\n cli_token = 
\"cli_token_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.login(email=email, _pass=_pass, from_authorize=from_authorize, cli_token=cli_token, clever_flavor=clever_flavor, oauth_token=oauth_token)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->login: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **email** | **str**| | [optional]\n **_pass** | **str**| | [optional]\n **from_authorize** | **str**| | [optional]\n **cli_token** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - **Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **login1**\n> str login1()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an 
instance of the API class\n api_instance = user_api.UserApi(api_client)\n email = \"email_example\" # str | (optional)\n _pass = \"_pass_example\" # str | (optional)\n from_authorize = \"from_authorize_example\" # str | (optional)\n cli_token = \"cli_token_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.login1(email=email, _pass=_pass, from_authorize=from_authorize, cli_token=cli_token, clever_flavor=clever_flavor, oauth_token=oauth_token)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->login1: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **email** | **str**| | [optional]\n **_pass** | **str**| | [optional]\n **from_authorize** | **str**| | [optional]\n **cli_token** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - **Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **mfa_login**\n> [OAuthApplicationView] mfa_login()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.o_auth_application_view import OAuthApplicationView\nfrom pprint import pprint\n# Defining the host is 
optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n mfa_kind = \"mfa_kind_example\" # str | (optional)\n mfa_attempt = \"mfa_attempt_example\" # str | (optional)\n email = \"email_example\" # str | (optional)\n auth_id = \"auth_id_example\" # str | (optional)\n from_authorize = \"from_authorize_example\" # str | (optional)\n cli_token = \"cli_token_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.mfa_login(mfa_kind=mfa_kind, mfa_attempt=mfa_attempt, email=email, auth_id=auth_id, from_authorize=from_authorize, cli_token=cli_token, clever_flavor=clever_flavor, oauth_token=oauth_token)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->mfa_login: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **mfa_kind** | **str**| | [optional]\n **mfa_attempt** | **str**| | [optional]\n **email** | **str**| | [optional]\n **auth_id** | **str**| | [optional]\n **from_authorize** | **str**| | [optional]\n **cli_token** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n\n### Return type\n\n[**[OAuthApplicationView]**](OAuthApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - 
**Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **mfa_login1**\n> [OAuthApplicationView] mfa_login1()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.o_auth_application_view import OAuthApplicationView\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n mfa_kind = \"mfa_kind_example\" # str | (optional)\n mfa_attempt = \"mfa_attempt_example\" # str | (optional)\n email = \"email_example\" # str | (optional)\n auth_id = \"auth_id_example\" # str | (optional)\n from_authorize = \"from_authorize_example\" # str | (optional)\n cli_token = \"cli_token_example\" # str | (optional)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.mfa_login1(mfa_kind=mfa_kind, mfa_attempt=mfa_attempt, email=email, auth_id=auth_id, from_authorize=from_authorize, cli_token=cli_token, clever_flavor=clever_flavor, oauth_token=oauth_token)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->mfa_login1: %s\\n\" % e)\n```\n\n\n### 
Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **mfa_kind** | **str**| | [optional]\n **mfa_attempt** | **str**| | [optional]\n **email** | **str**| | [optional]\n **auth_id** | **str**| | [optional]\n **from_authorize** | **str**| | [optional]\n **cli_token** | **str**| | [optional]\n **clever_flavor** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n\n### Return type\n\n[**[OAuthApplicationView]**](OAuthApplicationView.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - **Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **post_github_redeploy**\n> Message post_github_redeploy(github_webhook_payload)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.message import Message\nfrom openapi_client.model.github_webhook_payload import GithubWebhookPayload\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n github_webhook_payload = GithubWebhookPayload(\n ref=\"ref_example\",\n after=\"after_example\",\n repository=GithubWebhookRepository(\n id=\"id_example\",\n ),\n sender=GithubWebhookSender(\n 
id=\"id_example\",\n ),\n pusher=GithubWebhookPusher(\n email=\"email_example\",\n ),\n head_commit=GithubCommit(\n sha=\"sha_example\",\n message=\"message_example\",\n ),\n ) # GithubWebhookPayload | \n user_agent = \"User-Agent_example\" # str | (optional)\n x_github_event = \"X-Github-Event_example\" # str | (optional)\n x_hub_signature = \"X-Hub-Signature_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.post_github_redeploy(github_webhook_payload)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->post_github_redeploy: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.post_github_redeploy(github_webhook_payload, user_agent=user_agent, x_github_event=x_github_event, x_hub_signature=x_hub_signature)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->post_github_redeploy: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **github_webhook_payload** | [**GithubWebhookPayload**](GithubWebhookPayload.md)| |\n **user_agent** | **str**| | [optional]\n **x_github_event** | **str**| | [optional]\n **x_hub_signature** | **str**| | [optional]\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# 
**reset_password_forgotten**\n> str reset_password_forgotten(key)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n key = \"key_example\" # str | \n _pass = \"_pass_example\" # str | (optional)\n pass2 = \"pass2_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.reset_password_forgotten(key)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->reset_password_forgotten: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.reset_password_forgotten(key, _pass=_pass, pass2=pass2)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->reset_password_forgotten: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **key** | **str**| |\n **_pass** | **str**| | [optional]\n **pass2** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - **Accept**: text/html\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **update_env**\n> Message update_env(app_id, body)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n app_id = \"appId_example\" # str | \n body = \"body_example\" # str | \n token = \"token_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.update_env(app_id, body)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->update_env: %s\\n\" % e)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.update_env(app_id, body, token=token)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->update_env: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **app_id** | **str**| |\n **body** | **str**| |\n **token** | **str**| | [optional]\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description 
| Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **update_invoice**\n> update_invoice(bid, end_of_invoice_response)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import user_api\nfrom openapi_client.model.end_of_invoice_response import EndOfInvoiceResponse\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = user_api.UserApi(api_client)\n bid = \"bid_example\" # str | \n end_of_invoice_response = EndOfInvoiceResponse(\n id=\"id_example\",\n url=\"url_example\",\n pos_data=\"pos_data_example\",\n status=\"new\",\n btc_price=\"btc_price_example\",\n price=3.14,\n currency=\"currency_example\",\n invoice_time=1,\n current_time=1,\n expiration_time=1,\n error=EndOfInvoiceError(\n type=\"type_example\",\n message=\"message_example\",\n messages={\n \"key\": \"key_example\",\n },\n ),\n ) # EndOfInvoiceResponse | \n\n # example passing only required values which don't have defaults set\n try:\n api_instance.update_invoice(bid, end_of_invoice_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling UserApi->update_invoice: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **bid** | **str**| |\n **end_of_invoice_response** | [**EndOfInvoiceResponse**](EndOfInvoiceResponse.md)| |\n\n### Return 
type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n" }, { "alpha_fraction": 0.560287356376648, "alphanum_fraction": 0.5611761212348938, "avg_line_length": 19.868623733520508, "blob_id": "2b5b0ef77c2c5053ff7c04c0ca8795a2a0f72734", "content_id": "bdc6a160dd5fd2e512e8a4097eeddf8eb28aee23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13502, "license_type": "no_license", "max_line_length": 79, "num_lines": 647, "path": "/test/test_self_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport unittest\n\nimport openapi_client\nfrom openapi_client.api.self_api import SelfApi # noqa: E501\n\n\nclass TestSelfApi(unittest.TestCase):\n \"\"\"SelfApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = SelfApi() # noqa: E501\n\n def tearDown(self):\n pass\n\n def test_add_email_address(self):\n \"\"\"Test case for add_email_address\n\n \"\"\"\n pass\n\n def test_add_self_addon_tag_by_addon_id(self):\n \"\"\"Test case for add_self_addon_tag_by_addon_id\n\n \"\"\"\n pass\n\n def test_add_self_application(self):\n \"\"\"Test case for add_self_application\n\n \"\"\"\n pass\n\n def test_add_self_application_dependency_by_app_id(self):\n \"\"\"Test case for 
add_self_application_dependency_by_app_id\n\n \"\"\"\n pass\n\n def test_add_self_application_tag_by_app_id(self):\n \"\"\"Test case for add_self_application_tag_by_app_id\n\n \"\"\"\n pass\n\n def test_add_self_payment_method(self):\n \"\"\"Test case for add_self_payment_method\n\n \"\"\"\n pass\n\n def test_add_self_vhost_by_app_id(self):\n \"\"\"Test case for add_self_vhost_by_app_id\n\n \"\"\"\n pass\n\n def test_add_ssh_key(self):\n \"\"\"Test case for add_ssh_key\n\n \"\"\"\n pass\n\n def test_buy_self_drops(self):\n \"\"\"Test case for buy_self_drops\n\n \"\"\"\n pass\n\n def test_cancel_deploy(self):\n \"\"\"Test case for cancel_deploy\n\n \"\"\"\n pass\n\n def test_change_self_addon_plan_by_addon_id(self):\n \"\"\"Test case for change_self_addon_plan_by_addon_id\n\n \"\"\"\n pass\n\n def test_change_user_password(self):\n \"\"\"Test case for change_user_password\n\n \"\"\"\n pass\n\n def test_choose_self_payment_provider(self):\n \"\"\"Test case for choose_self_payment_provider\n\n \"\"\"\n pass\n\n def test_create_mfa(self):\n \"\"\"Test case for create_mfa\n\n \"\"\"\n pass\n\n def test_create_self_consumer(self):\n \"\"\"Test case for create_self_consumer\n\n \"\"\"\n pass\n\n def test_delete_mfa(self):\n \"\"\"Test case for delete_mfa\n\n \"\"\"\n pass\n\n def test_delete_self_addon_tag_by_addon_id(self):\n \"\"\"Test case for delete_self_addon_tag_by_addon_id\n\n \"\"\"\n pass\n\n def test_delete_self_application_by_app_id(self):\n \"\"\"Test case for delete_self_application_by_app_id\n\n \"\"\"\n pass\n\n def test_delete_self_application_dependency_by_app_id(self):\n \"\"\"Test case for delete_self_application_dependency_by_app_id\n\n \"\"\"\n pass\n\n def test_delete_self_application_tag_app_id(self):\n \"\"\"Test case for delete_self_application_tag_app_id\n\n \"\"\"\n pass\n\n def test_delete_self_card(self):\n \"\"\"Test case for delete_self_card\n\n \"\"\"\n pass\n\n def test_delete_self_consumer(self):\n \"\"\"Test case for 
delete_self_consumer\n\n \"\"\"\n pass\n\n def test_delete_self_purchase_order(self):\n \"\"\"Test case for delete_self_purchase_order\n\n \"\"\"\n pass\n\n def test_delete_self_recurrent_payment(self):\n \"\"\"Test case for delete_self_recurrent_payment\n\n \"\"\"\n pass\n\n def test_delete_user(self):\n \"\"\"Test case for delete_user\n\n \"\"\"\n pass\n\n def test_deprovision_self_addon_by_id(self):\n \"\"\"Test case for deprovision_self_addon_by_id\n\n \"\"\"\n pass\n\n def test_edit_self_application_by_app_id(self):\n \"\"\"Test case for edit_self_application_by_app_id\n\n \"\"\"\n pass\n\n def test_edit_self_application_env_by_app_id(self):\n \"\"\"Test case for edit_self_application_env_by_app_id\n\n \"\"\"\n pass\n\n def test_edit_self_application_env_by_app_id_and_env_name(self):\n \"\"\"Test case for edit_self_application_env_by_app_id_and_env_name\n\n \"\"\"\n pass\n\n def test_edit_user(self):\n \"\"\"Test case for edit_user\n\n \"\"\"\n pass\n\n def test_fav_mfa(self):\n \"\"\"Test case for fav_mfa\n\n \"\"\"\n pass\n\n def test_get_addon_sso_data(self):\n \"\"\"Test case for get_addon_sso_data\n\n \"\"\"\n pass\n\n def test_get_application_deployment(self):\n \"\"\"Test case for get_application_deployment\n\n \"\"\"\n pass\n\n def test_get_application_deployments(self):\n \"\"\"Test case for get_application_deployments\n\n \"\"\"\n pass\n\n def test_get_backup_codes(self):\n \"\"\"Test case for get_backup_codes\n\n \"\"\"\n pass\n\n def test_get_confirmation_email(self):\n \"\"\"Test case for get_confirmation_email\n\n \"\"\"\n pass\n\n def test_get_consumptions(self):\n \"\"\"Test case for get_consumptions\n\n \"\"\"\n pass\n\n def test_get_email_addresses(self):\n \"\"\"Test case for get_email_addresses\n\n \"\"\"\n pass\n\n def test_get_id(self):\n \"\"\"Test case for get_id\n\n \"\"\"\n pass\n\n def test_get_self_addon_by_id(self):\n \"\"\"Test case for get_self_addon_by_id\n\n \"\"\"\n pass\n\n def test_get_self_addon_env_by_addon_id(self):\n 
\"\"\"Test case for get_self_addon_env_by_addon_id\n\n \"\"\"\n pass\n\n def test_get_self_addon_tags_by_addon_id(self):\n \"\"\"Test case for get_self_addon_tags_by_addon_id\n\n \"\"\"\n pass\n\n def test_get_self_addons(self):\n \"\"\"Test case for get_self_addons\n\n \"\"\"\n pass\n\n def test_get_self_addons_linked_to_application_by_app_id(self):\n \"\"\"Test case for get_self_addons_linked_to_application_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_amount(self):\n \"\"\"Test case for get_self_amount\n\n \"\"\"\n pass\n\n def test_get_self_application_branches_by_app_id(self):\n \"\"\"Test case for get_self_application_branches_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_application_by_app_id(self):\n \"\"\"Test case for get_self_application_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_application_dependencies_by_app_id(self):\n \"\"\"Test case for get_self_application_dependencies_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_application_dependencies_env_by_app_id(self):\n \"\"\"Test case for get_self_application_dependencies_env_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_application_dependents(self):\n \"\"\"Test case for get_self_application_dependents\n\n \"\"\"\n pass\n\n def test_get_self_application_env_by_app_id(self):\n \"\"\"Test case for get_self_application_env_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_application_instance_by_app_and_instance_id(self):\n \"\"\"Test case for get_self_application_instance_by_app_and_instance_id\n\n \"\"\"\n pass\n\n def test_get_self_application_instances_by_app_id(self):\n \"\"\"Test case for get_self_application_instances_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_application_tags_by_app_id(self):\n \"\"\"Test case for get_self_application_tags_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_applications(self):\n \"\"\"Test case for get_self_applications\n\n \"\"\"\n pass\n\n def test_get_self_applications_linked_to_addon_by_addon_id(self):\n \"\"\"Test case for 
get_self_applications_linked_to_addon_by_addon_id\n\n \"\"\"\n pass\n\n def test_get_self_cli_tokens(self):\n \"\"\"Test case for get_self_cli_tokens\n\n \"\"\"\n pass\n\n def test_get_self_consumer(self):\n \"\"\"Test case for get_self_consumer\n\n \"\"\"\n pass\n\n def test_get_self_consumer_secret(self):\n \"\"\"Test case for get_self_consumer_secret\n\n \"\"\"\n pass\n\n def test_get_self_consumers(self):\n \"\"\"Test case for get_self_consumers\n\n \"\"\"\n pass\n\n def test_get_self_default_method(self):\n \"\"\"Test case for get_self_default_method\n\n \"\"\"\n pass\n\n def test_get_self_env_of_addons_linked_to_application_by_app_id(self):\n \"\"\"Test case for get_self_env_of_addons_linked_to_application_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_exposed_env_by_app_id(self):\n \"\"\"Test case for get_self_exposed_env_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_favourite_vhost_by_app_id(self):\n \"\"\"Test case for get_self_favourite_vhost_by_app_id\n\n \"\"\"\n pass\n\n def test_get_self_instances_for_all_apps(self):\n \"\"\"Test case for get_self_instances_for_all_apps\n\n \"\"\"\n pass\n\n def test_get_self_invoice_by_id(self):\n \"\"\"Test case for get_self_invoice_by_id\n\n \"\"\"\n pass\n\n def test_get_self_invoices(self):\n \"\"\"Test case for get_self_invoices\n\n \"\"\"\n pass\n\n def test_get_self_monthly_invoice(self):\n \"\"\"Test case for get_self_monthly_invoice\n\n \"\"\"\n pass\n\n def test_get_self_payment_info(self):\n \"\"\"Test case for get_self_payment_info\n\n \"\"\"\n pass\n\n def test_get_self_payment_methods(self):\n \"\"\"Test case for get_self_payment_methods\n\n \"\"\"\n pass\n\n def test_get_self_pdf_invoice_by_id(self):\n \"\"\"Test case for get_self_pdf_invoice_by_id\n\n \"\"\"\n pass\n\n def test_get_self_price_with_tax(self):\n \"\"\"Test case for get_self_price_with_tax\n\n \"\"\"\n pass\n\n def test_get_self_recurrent_payment(self):\n \"\"\"Test case for get_self_recurrent_payment\n\n \"\"\"\n pass\n\n def 
test_get_self_stripe_token(self):\n \"\"\"Test case for get_self_stripe_token\n\n \"\"\"\n pass\n\n def test_get_self_tokens(self):\n \"\"\"Test case for get_self_tokens\n\n \"\"\"\n pass\n\n def test_get_self_vhost_by_app_id(self):\n \"\"\"Test case for get_self_vhost_by_app_id\n\n \"\"\"\n pass\n\n def test_get_ssh_keys(self):\n \"\"\"Test case for get_ssh_keys\n\n \"\"\"\n pass\n\n def test_get_summary(self):\n \"\"\"Test case for get_summary\n\n \"\"\"\n pass\n\n def test_get_user(self):\n \"\"\"Test case for get_user\n\n \"\"\"\n pass\n\n def test_link_self_addon_to_application_by_app_id(self):\n \"\"\"Test case for link_self_addon_to_application_by_app_id\n\n \"\"\"\n pass\n\n def test_mark_self_favourite_vhost_by_app_id(self):\n \"\"\"Test case for mark_self_favourite_vhost_by_app_id\n\n \"\"\"\n pass\n\n def test_preorder_self_addon(self):\n \"\"\"Test case for preorder_self_addon\n\n \"\"\"\n pass\n\n def test_provision_self_addon(self):\n \"\"\"Test case for provision_self_addon\n\n \"\"\"\n pass\n\n def test_redeploy_self_application_by_app_id(self):\n \"\"\"Test case for redeploy_self_application_by_app_id\n\n \"\"\"\n pass\n\n def test_remove_email_address(self):\n \"\"\"Test case for remove_email_address\n\n \"\"\"\n pass\n\n def test_remove_self_application_env_by_app_id_and_env_name(self):\n \"\"\"Test case for remove_self_application_env_by_app_id_and_env_name\n\n \"\"\"\n pass\n\n def test_remove_self_vhost_by_app_id(self):\n \"\"\"Test case for remove_self_vhost_by_app_id\n\n \"\"\"\n pass\n\n def test_remove_ssh_key(self):\n \"\"\"Test case for remove_ssh_key\n\n \"\"\"\n pass\n\n def test_rename_addon(self):\n \"\"\"Test case for rename_addon\n\n \"\"\"\n pass\n\n def test_revoke_all_tokens(self):\n \"\"\"Test case for revoke_all_tokens\n\n \"\"\"\n pass\n\n def test_revoke_token(self):\n \"\"\"Test case for revoke_token\n\n \"\"\"\n pass\n\n def test_set_self_application_branch_by_app_id(self):\n \"\"\"Test case for 
set_self_application_branch_by_app_id\n\n \"\"\"\n pass\n\n def test_set_self_build_instance_flavor_by_app_id(self):\n \"\"\"Test case for set_self_build_instance_flavor_by_app_id\n\n \"\"\"\n pass\n\n def test_set_self_default_method(self):\n \"\"\"Test case for set_self_default_method\n\n \"\"\"\n pass\n\n def test_set_self_max_credits_per_month(self):\n \"\"\"Test case for set_self_max_credits_per_month\n\n \"\"\"\n pass\n\n def test_set_user_avatar_from_file(self):\n \"\"\"Test case for set_user_avatar_from_file\n\n \"\"\"\n pass\n\n def test_undeploy_self_application_by_app_id(self):\n \"\"\"Test case for undeploy_self_application_by_app_id\n\n \"\"\"\n pass\n\n def test_unlink_selfddon_from_application_by_app_and_addon_id(self):\n \"\"\"Test case for unlink_selfddon_from_application_by_app_and_addon_id\n\n \"\"\"\n pass\n\n def test_unmark_self_favourite_vhost_by_app_id(self):\n \"\"\"Test case for unmark_self_favourite_vhost_by_app_id\n\n \"\"\"\n pass\n\n def test_update_self_consumer(self):\n \"\"\"Test case for update_self_consumer\n\n \"\"\"\n pass\n\n def test_update_self_exposed_env_by_app_id(self):\n \"\"\"Test case for update_self_exposed_env_by_app_id\n\n \"\"\"\n pass\n\n def test_validate_email(self):\n \"\"\"Test case for validate_email\n\n \"\"\"\n pass\n\n def test_validate_mfa(self):\n \"\"\"Test case for validate_mfa\n\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.7084189057350159, "alphanum_fraction": 0.7176591157913208, "avg_line_length": 24.63157844543457, "blob_id": "412a6330f392395992a47a726308a48090606efa", "content_id": "d7fd6fcf42b5227811325ca5b818c66fcffc09af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 974, "license_type": "no_license", "max_line_length": 89, "num_lines": 38, "path": "/test/test_organisation_member_view.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n 
Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.organisation_member_user_view import OrganisationMemberUserView\nglobals()['OrganisationMemberUserView'] = OrganisationMemberUserView\nfrom openapi_client.model.organisation_member_view import OrganisationMemberView\n\n\nclass TestOrganisationMemberView(unittest.TestCase):\n \"\"\"OrganisationMemberView unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testOrganisationMemberView(self):\n \"\"\"Test OrganisationMemberView\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = OrganisationMemberView() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.7317073345184326, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 27.886363983154297, "blob_id": "b388b0a6f98f44998e8389494783f2b7db7f9839", "content_id": "3d0a354d07f6a51fd68b04a5cea4c7bc1bc26126", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1271, "license_type": "no_license", "max_line_length": 79, "num_lines": 44, "path": "/test/test_organisation_summary.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport openapi_client\nfrom openapi_client.model.addon_summary import AddonSummary\nfrom openapi_client.model.application_summary import ApplicationSummary\nfrom openapi_client.model.o_auth1_consumer_summary import 
OAuth1ConsumerSummary\nfrom openapi_client.model.provider_summary import ProviderSummary\nglobals()['AddonSummary'] = AddonSummary\nglobals()['ApplicationSummary'] = ApplicationSummary\nglobals()['OAuth1ConsumerSummary'] = OAuth1ConsumerSummary\nglobals()['ProviderSummary'] = ProviderSummary\nfrom openapi_client.model.organisation_summary import OrganisationSummary\n\n\nclass TestOrganisationSummary(unittest.TestCase):\n \"\"\"OrganisationSummary unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testOrganisationSummary(self):\n \"\"\"Test OrganisationSummary\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = OrganisationSummary() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4149181842803955, "alphanum_fraction": 0.41605687141418457, "avg_line_length": 35.41918182373047, "blob_id": "d2e11d4103c272a9596e060633e0331091df4bb5", "content_id": "0d074a7879b98ef3437cd0f745300b202b1ea8a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154563, "license_type": "no_license", "max_line_length": 92, "num_lines": 4244, "path": "/openapi_client/api/user_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom openapi_client.api_client import ApiClient, Endpoint as _Endpoint\nfrom openapi_client.model_utils import ( # noqa: F401\n check_allowed_values,\n check_validations,\n date,\n datetime,\n file_type,\n none_type,\n validate_and_convert_types\n)\nfrom openapi_client.model.application_view import ApplicationView\nfrom openapi_client.model.end_of_invoice_response import 
EndOfInvoiceResponse\nfrom openapi_client.model.github_webhook_payload import GithubWebhookPayload\nfrom openapi_client.model.message import Message\nfrom openapi_client.model.o_auth_application_view import OAuthApplicationView\nfrom openapi_client.model.o_auth_transaction_view import OAuthTransactionView\nfrom openapi_client.model.payment_data import PaymentData\nfrom openapi_client.model.ssh_key_view import SshKeyView\nfrom openapi_client.model.user_view import UserView\n\n\nclass UserApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def __ask_for_password_reset_via_form(\n self,\n **kwargs\n ):\n \"\"\"ask_for_password_reset_via_form # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.ask_for_password_reset_via_form(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n login (str): [optional]\n drop_tokens (str): [optional]\n clever_flavor (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.ask_for_password_reset_via_form = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/password_forgotten',\n 'operation_id': 'ask_for_password_reset_via_form',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'login',\n 'drop_tokens',\n 'clever_flavor',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'login':\n (str,),\n 'drop_tokens':\n (str,),\n 'clever_flavor':\n (str,),\n },\n 'attribute_map': {\n 'login': 'login',\n 'drop_tokens': 'drop_tokens',\n 'clever_flavor': 'clever_flavor',\n },\n 'location_map': {\n 'login': 'form',\n 'drop_tokens': 'form',\n 'clever_flavor': 'form',\n },\n 'collection_format_map': {\n }\n },\n 
headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [\n 'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__ask_for_password_reset_via_form\n )\n\n def __authorize_paypal_transaction(\n self,\n bid,\n payment_data,\n **kwargs\n ):\n \"\"\"authorize_paypal_transaction # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.authorize_paypal_transaction(bid, payment_data, async_req=True)\n >>> result = thread.get()\n\n Args:\n bid (str):\n payment_data (PaymentData):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['bid'] = \\\n bid\n kwargs['payment_data'] = \\\n payment_data\n return self.call_with_http_info(**kwargs)\n\n self.authorize_paypal_transaction = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/invoice/external/paypal/{bid}',\n 'operation_id': 'authorize_paypal_transaction',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'bid',\n 'payment_data',\n ],\n 'required': [\n 'bid',\n 'payment_data',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'bid':\n (str,),\n 'payment_data':\n (PaymentData,),\n },\n 'attribute_map': {\n 'bid': 'bid',\n },\n 'location_map': {\n 'bid': 'path',\n 'payment_data': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__authorize_paypal_transaction\n )\n\n def __cancel_paypal_transaction(\n self,\n bid,\n **kwargs\n ):\n \"\"\"cancel_paypal_transaction # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.cancel_paypal_transaction(bid, async_req=True)\n >>> result = thread.get()\n\n Args:\n bid (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n 
kwargs['bid'] = \\\n bid\n return self.call_with_http_info(**kwargs)\n\n self.cancel_paypal_transaction = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/invoice/external/paypal/{bid}',\n 'operation_id': 'cancel_paypal_transaction',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'bid',\n ],\n 'required': [\n 'bid',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'bid':\n (str,),\n },\n 'attribute_map': {\n 'bid': 'bid',\n },\n 'location_map': {\n 'bid': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__cancel_paypal_transaction\n )\n\n def __confirm_password_reset_request(\n self,\n key,\n **kwargs\n ):\n \"\"\"confirm_password_reset_request # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.confirm_password_reset_request(key, async_req=True)\n >>> result = thread.get()\n\n Args:\n key (str):\n\n Keyword Args:\n clever_flavor (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['key'] = \\\n key\n return self.call_with_http_info(**kwargs)\n\n self.confirm_password_reset_request = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/password_forgotten/{key}',\n 'operation_id': 'confirm_password_reset_request',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'key',\n 'clever_flavor',\n ],\n 'required': [\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'key':\n (str,),\n 'clever_flavor':\n (str,),\n },\n 'attribute_map': {\n 'key': 'key',\n 'clever_flavor': 'clever_flavor',\n },\n 'location_map': {\n 'key': 'path',\n 'clever_flavor': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [],\n },\n 
api_client=api_client,\n callable=__confirm_password_reset_request\n )\n\n def __create_user_from_form(\n self,\n **kwargs\n ):\n \"\"\"create_user_from_form # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.create_user_from_form(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n invitation_key (str): [optional]\n addon_beta_invitation_key (str): [optional]\n email (str): [optional]\n _pass (str): [optional]\n url_next (str): [optional]\n terms (str): [optional]\n subscription_source (str): [optional]\n clever_flavor (str): [optional]\n oauth_token (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.create_user_from_form = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/users',\n 'operation_id': 'create_user_from_form',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'invitation_key',\n 'addon_beta_invitation_key',\n 'email',\n '_pass',\n 'url_next',\n 'terms',\n 'subscription_source',\n 'clever_flavor',\n 'oauth_token',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'invitation_key':\n (str,),\n 'addon_beta_invitation_key':\n (str,),\n 'email':\n (str,),\n '_pass':\n (str,),\n 'url_next':\n (str,),\n 'terms':\n (str,),\n 'subscription_source':\n (str,),\n 'clever_flavor':\n (str,),\n 'oauth_token':\n (str,),\n },\n 
'attribute_map': {\n 'invitation_key': 'invitationKey',\n 'addon_beta_invitation_key': 'addonBetaInvitationKey',\n 'email': 'email',\n '_pass': 'pass',\n 'url_next': 'url_next',\n 'terms': 'terms',\n 'subscription_source': 'subscription_source',\n 'clever_flavor': 'clever_flavor',\n 'oauth_token': 'oauth_token',\n },\n 'location_map': {\n 'invitation_key': 'form',\n 'addon_beta_invitation_key': 'form',\n 'email': 'form',\n '_pass': 'form',\n 'url_next': 'form',\n 'terms': 'form',\n 'subscription_source': 'form',\n 'clever_flavor': 'form',\n 'oauth_token': 'form',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__create_user_from_form\n )\n\n def __delete_github_link(\n self,\n **kwargs\n ):\n \"\"\"delete_github_link # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.delete_github_link(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.delete_github_link = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/github/link',\n 'operation_id': 'delete_github_link',\n 'http_method': 'DELETE',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__delete_github_link\n )\n\n def __finsih_github_signup(\n self,\n **kwargs\n ):\n \"\"\"finsih_github_signup # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.finsih_github_signup(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n transaction_id (str): [optional]\n name (str): [optional]\n other_id (str): [optional]\n other_email (str): [optional]\n password (str): [optional]\n auto_link (str): [optional]\n terms (str): [optional]\n invitation_key (str): [optional]\n mfa_kind (str): [optional]\n mfa_attempt (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return 
self.call_with_http_info(**kwargs)\n\n self.finsih_github_signup = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/github/signup',\n 'operation_id': 'finsih_github_signup',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'transaction_id',\n 'name',\n 'other_id',\n 'other_email',\n 'password',\n 'auto_link',\n 'terms',\n 'invitation_key',\n 'mfa_kind',\n 'mfa_attempt',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'transaction_id':\n (str,),\n 'name':\n (str,),\n 'other_id':\n (str,),\n 'other_email':\n (str,),\n 'password':\n (str,),\n 'auto_link':\n (str,),\n 'terms':\n (str,),\n 'invitation_key':\n (str,),\n 'mfa_kind':\n (str,),\n 'mfa_attempt':\n (str,),\n },\n 'attribute_map': {\n 'transaction_id': 'transactionId',\n 'name': 'name',\n 'other_id': 'otherId',\n 'other_email': 'otherEmail',\n 'password': 'password',\n 'auto_link': 'autoLink',\n 'terms': 'terms',\n 'invitation_key': 'invitationKey',\n 'mfa_kind': 'mfa_kind',\n 'mfa_attempt': 'mfa_attempt',\n },\n 'location_map': {\n 'transaction_id': 'form',\n 'name': 'form',\n 'other_id': 'form',\n 'other_email': 'form',\n 'password': 'form',\n 'auto_link': 'form',\n 'terms': 'form',\n 'invitation_key': 'form',\n 'mfa_kind': 'form',\n 'mfa_attempt': 'form',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__finsih_github_signup\n )\n\n def __get_applications(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_applications # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_applications(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [ApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n id\n return self.call_with_http_info(**kwargs)\n\n self.get_applications = _Endpoint(\n settings={\n 'response_type': ([ApplicationView],),\n 'auth': [],\n 'endpoint_path': '/users/{id}/applications',\n 'operation_id': 'get_applications',\n 'http_method': 'GET',\n 'servers': 
None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_applications\n )\n\n def __get_env(\n self,\n app_id,\n **kwargs\n ):\n \"\"\"get_env # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_env(app_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n\n Keyword Args:\n token (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n return self.call_with_http_info(**kwargs)\n\n self.get_env = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/application/{appId}/environment',\n 'operation_id': 'get_env',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'token',\n ],\n 'required': [\n 'app_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'token':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'token': 'token',\n },\n 'location_map': {\n 'app_id': 'path',\n 'token': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_env\n )\n\n 
def __get_git_info(\n self,\n user_id,\n **kwargs\n ):\n \"\"\"get_git_info # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_git_info(user_id, async_req=True)\n >>> result = thread.get()\n\n Args:\n user_id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['user_id'] = \\\n user_id\n return self.call_with_http_info(**kwargs)\n\n self.get_git_info = _Endpoint(\n settings={\n 'response_type': (str,),\n 
'auth': [],\n 'endpoint_path': '/users/{userId}/git-info',\n 'operation_id': 'get_git_info',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'user_id',\n ],\n 'required': [\n 'user_id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'user_id':\n (str,),\n },\n 'attribute_map': {\n 'user_id': 'userId',\n },\n 'location_map': {\n 'user_id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_git_info\n )\n\n def __get_github(\n self,\n **kwargs\n ):\n \"\"\"get_github # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_github(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n OAuthTransactionView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_github = _Endpoint(\n settings={\n 'response_type': (OAuthTransactionView,),\n 'auth': [],\n 'endpoint_path': '/github',\n 'operation_id': 'get_github',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_github\n )\n\n def __get_github_applications(\n self,\n **kwargs\n ):\n \"\"\"get_github_applications # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_github_applications(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [OAuthApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_github_applications = _Endpoint(\n settings={\n 'response_type': ([OAuthApplicationView],),\n 'auth': [],\n 'endpoint_path': '/github/applications',\n 'operation_id': 'get_github_applications',\n 'http_method': 'GET',\n 'servers': None,\n },\n 
params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_github_applications\n )\n\n def __get_github_callback(\n self,\n **kwargs\n ):\n \"\"\"get_github_callback # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_github_callback(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n cc_o_auth_data (str): [optional]\n code (str): [optional]\n state (str): [optional]\n error (str): [optional]\n error_description (str): [optional]\n error_uri (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_github_callback = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/github/callback',\n 'operation_id': 'get_github_callback',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'cc_o_auth_data',\n 'code',\n 'state',\n 'error',\n 'error_description',\n 'error_uri',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'cc_o_auth_data':\n (str,),\n 'code':\n (str,),\n 'state':\n (str,),\n 'error':\n (str,),\n 'error_description':\n (str,),\n 'error_uri':\n (str,),\n },\n 'attribute_map': {\n 'cc_o_auth_data': 'CcOAuthData',\n 'code': 'code',\n 'state': 'state',\n 'error': 'error',\n 'error_description': 'error_description',\n 'error_uri': 
'error_uri',\n },\n 'location_map': {\n 'cc_o_auth_data': 'cookie',\n 'code': 'query',\n 'state': 'query',\n 'error': 'query',\n 'error_description': 'query',\n 'error_uri': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_github_callback\n )\n\n def __get_github_emails(\n self,\n **kwargs\n ):\n \"\"\"get_github_emails # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_github_emails(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [str]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_github_emails = _Endpoint(\n settings={\n 'response_type': ([str],),\n 'auth': [],\n 'endpoint_path': '/github/emails',\n 'operation_id': 'get_github_emails',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_github_emails\n )\n\n def __get_github_keys(\n self,\n **kwargs\n ):\n \"\"\"get_github_keys # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_github_keys(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [SshKeyView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_github_keys = _Endpoint(\n settings={\n 'response_type': ([SshKeyView],),\n 'auth': [],\n 'endpoint_path': '/github/keys',\n 'operation_id': 'get_github_keys',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': 
[\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_github_keys\n )\n\n def __get_github_link(\n self,\n **kwargs\n ):\n \"\"\"get_github_link # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_github_link(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n transaction_id (str): [optional]\n redirect_url (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_github_link = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/github/link',\n 'operation_id': 'get_github_link',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'transaction_id',\n 'redirect_url',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'transaction_id':\n (str,),\n 'redirect_url':\n (str,),\n },\n 'attribute_map': {\n 'transaction_id': 'transactionId',\n 'redirect_url': 'redirectUrl',\n },\n 'location_map': {\n 'transaction_id': 'query',\n 'redirect_url': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n 
callable=__get_github_link\n )\n\n def __get_github_login(\n self,\n **kwargs\n ):\n \"\"\"get_github_login # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_github_login(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n redirect_url (str): [optional]\n from_authorize (str): [optional]\n cli_token (str): [optional]\n clever_flavor (str): [optional]\n oauth_token (str): [optional]\n invitation_key (str): [optional]\n subscription_source (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = 
kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_github_login = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/github/login',\n 'operation_id': 'get_github_login',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'redirect_url',\n 'from_authorize',\n 'cli_token',\n 'clever_flavor',\n 'oauth_token',\n 'invitation_key',\n 'subscription_source',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'redirect_url':\n (str,),\n 'from_authorize':\n (str,),\n 'cli_token':\n (str,),\n 'clever_flavor':\n (str,),\n 'oauth_token':\n (str,),\n 'invitation_key':\n (str,),\n 'subscription_source':\n (str,),\n },\n 'attribute_map': {\n 'redirect_url': 'redirectUrl',\n 'from_authorize': 'fromAuthorize',\n 'cli_token': 'cli_token',\n 'clever_flavor': 'clever_flavor',\n 'oauth_token': 'oauth_token',\n 'invitation_key': 'invitationKey',\n 'subscription_source': 'subscriptionSource',\n },\n 'location_map': {\n 'redirect_url': 'query',\n 'from_authorize': 'query',\n 'cli_token': 'query',\n 'clever_flavor': 'query',\n 'oauth_token': 'query',\n 'invitation_key': 'query',\n 'subscription_source': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_github_login\n )\n\n def __get_github_username(\n self,\n **kwargs\n ):\n \"\"\"get_github_username # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_github_username(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. 
Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_github_username = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/github/username',\n 'operation_id': 'get_github_username',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 
'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_github_username\n )\n\n def __get_login_form(\n self,\n **kwargs\n ):\n \"\"\"get_login_form # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_login_form(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n secondary_email_key (str): [optional]\n deletion_key (str): [optional]\n from_authorize (str): [optional]\n cli_token (str): [optional]\n clever_flavor (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = 
kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_login_form = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/session/login',\n 'operation_id': 'get_login_form',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'secondary_email_key',\n 'deletion_key',\n 'from_authorize',\n 'cli_token',\n 'clever_flavor',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'secondary_email_key':\n (str,),\n 'deletion_key':\n (str,),\n 'from_authorize':\n (str,),\n 'cli_token':\n (str,),\n 'clever_flavor':\n (str,),\n },\n 'attribute_map': {\n 'secondary_email_key': 'secondaryEmailKey',\n 'deletion_key': 'deletionKey',\n 'from_authorize': 'fromAuthorize',\n 'cli_token': 'cli_token',\n 'clever_flavor': 'clever_flavor',\n },\n 'location_map': {\n 'secondary_email_key': 'query',\n 'deletion_key': 'query',\n 'from_authorize': 'query',\n 'cli_token': 'query',\n 'clever_flavor': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_login_form\n )\n\n def __get_login_form1(\n self,\n **kwargs\n ):\n \"\"\"get_login_form1 # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_login_form1(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n secondary_email_key (str): [optional]\n deletion_key (str): [optional]\n from_authorize (str): [optional]\n cli_token (str): [optional]\n clever_flavor (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. 
Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_login_form1 = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/sessions/login',\n 'operation_id': 'get_login_form1',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'secondary_email_key',\n 'deletion_key',\n 'from_authorize',\n 'cli_token',\n 'clever_flavor',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'secondary_email_key':\n (str,),\n 
'deletion_key':\n (str,),\n 'from_authorize':\n (str,),\n 'cli_token':\n (str,),\n 'clever_flavor':\n (str,),\n },\n 'attribute_map': {\n 'secondary_email_key': 'secondaryEmailKey',\n 'deletion_key': 'deletionKey',\n 'from_authorize': 'fromAuthorize',\n 'cli_token': 'cli_token',\n 'clever_flavor': 'clever_flavor',\n },\n 'location_map': {\n 'secondary_email_key': 'query',\n 'deletion_key': 'query',\n 'from_authorize': 'query',\n 'cli_token': 'query',\n 'clever_flavor': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_login_form1\n )\n\n def __get_password_forgotten_form(\n self,\n **kwargs\n ):\n \"\"\"get_password_forgotten_form # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_password_forgotten_form(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n clever_flavor (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_password_forgotten_form = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/password_forgotten',\n 'operation_id': 'get_password_forgotten_form',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'clever_flavor',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'clever_flavor':\n (str,),\n },\n 'attribute_map': {\n 'clever_flavor': 'clever_flavor',\n },\n 'location_map': {\n 'clever_flavor': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_password_forgotten_form\n )\n\n def __get_signup_form(\n self,\n 
**kwargs\n ):\n \"\"\"get_signup_form # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_signup_form(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n invitation_key (str): [optional]\n url_next (str): [optional]\n clever_flavor (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_signup_form = _Endpoint(\n settings={\n 'response_type': (str,),\n 
'auth': [],\n 'endpoint_path': '/session/signup',\n 'operation_id': 'get_signup_form',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'invitation_key',\n 'url_next',\n 'clever_flavor',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'invitation_key':\n (str,),\n 'url_next':\n (str,),\n 'clever_flavor':\n (str,),\n },\n 'attribute_map': {\n 'invitation_key': 'invitationKey',\n 'url_next': 'url_next',\n 'clever_flavor': 'clever_flavor',\n },\n 'location_map': {\n 'invitation_key': 'query',\n 'url_next': 'query',\n 'clever_flavor': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_signup_form\n )\n\n def __get_signup_form1(\n self,\n **kwargs\n ):\n \"\"\"get_signup_form1 # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_signup_form1(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n invitation_key (str): [optional]\n url_next (str): [optional]\n clever_flavor (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_signup_form1 = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/sessions/signup',\n 'operation_id': 'get_signup_form1',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'invitation_key',\n 'url_next',\n 'clever_flavor',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'invitation_key':\n (str,),\n 'url_next':\n (str,),\n 'clever_flavor':\n (str,),\n },\n 'attribute_map': {\n 'invitation_key': 'invitationKey',\n 'url_next': 'url_next',\n 'clever_flavor': 'clever_flavor',\n },\n 'location_map': {\n 'invitation_key': 'query',\n 'url_next': 'query',\n 'clever_flavor': 'query',\n },\n 'collection_format_map': {\n }\n },\n 
headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_signup_form1\n )\n\n def __get_user_by_id(\n self,\n id,\n **kwargs\n ):\n \"\"\"get_user_by_id # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_user_by_id(id, async_req=True)\n >>> result = thread.get()\n\n Args:\n id (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n UserView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['id'] = \\\n 
id\n return self.call_with_http_info(**kwargs)\n\n self.get_user_by_id = _Endpoint(\n settings={\n 'response_type': (UserView,),\n 'auth': [],\n 'endpoint_path': '/users/{id}',\n 'operation_id': 'get_user_by_id',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'id',\n ],\n 'required': [\n 'id',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'id':\n (str,),\n },\n 'attribute_map': {\n 'id': 'id',\n },\n 'location_map': {\n 'id': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_user_by_id\n )\n\n def __github_signup(\n self,\n **kwargs\n ):\n \"\"\"github_signup # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.github_signup(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n redirect_url (str): [optional]\n from_authorize (str): [optional]\n cli_token (str): [optional]\n clever_flavor (str): [optional]\n oauth_token (str): [optional]\n invitation_key (str): [optional]\n subscription_source (str): [optional]\n terms (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.github_signup = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/github/signup',\n 'operation_id': 'github_signup',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'redirect_url',\n 'from_authorize',\n 'cli_token',\n 'clever_flavor',\n 'oauth_token',\n 'invitation_key',\n 'subscription_source',\n 'terms',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'redirect_url':\n (str,),\n 'from_authorize':\n (str,),\n 'cli_token':\n (str,),\n 'clever_flavor':\n (str,),\n 'oauth_token':\n (str,),\n 'invitation_key':\n (str,),\n 'subscription_source':\n (str,),\n 'terms':\n (str,),\n },\n 'attribute_map': {\n 'redirect_url': 'redirectUrl',\n 
'from_authorize': 'fromAuthorize',\n 'cli_token': 'cli_token',\n 'clever_flavor': 'clever_flavor',\n 'oauth_token': 'oauth_token',\n 'invitation_key': 'invitationKey',\n 'subscription_source': 'subscriptionSource',\n 'terms': 'terms',\n },\n 'location_map': {\n 'redirect_url': 'query',\n 'from_authorize': 'query',\n 'cli_token': 'query',\n 'clever_flavor': 'query',\n 'oauth_token': 'query',\n 'invitation_key': 'query',\n 'subscription_source': 'query',\n 'terms': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__github_signup\n )\n\n def __login(\n self,\n **kwargs\n ):\n \"\"\"login # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.login(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n email (str): [optional]\n _pass (str): [optional]\n from_authorize (str): [optional]\n cli_token (str): [optional]\n clever_flavor (str): [optional]\n oauth_token (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.login = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/session/login',\n 'operation_id': 'login',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'email',\n '_pass',\n 'from_authorize',\n 'cli_token',\n 'clever_flavor',\n 'oauth_token',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'email':\n (str,),\n '_pass':\n (str,),\n 'from_authorize':\n (str,),\n 'cli_token':\n (str,),\n 'clever_flavor':\n (str,),\n 'oauth_token':\n (str,),\n },\n 'attribute_map': {\n 'email': 'email',\n '_pass': 'pass',\n 'from_authorize': 'from_authorize',\n 'cli_token': 'cli_token',\n 'clever_flavor': 'clever_flavor',\n 'oauth_token': 'oauth_token',\n 
},\n 'location_map': {\n 'email': 'form',\n '_pass': 'form',\n 'from_authorize': 'form',\n 'cli_token': 'form',\n 'clever_flavor': 'form',\n 'oauth_token': 'form',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [\n 'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__login\n )\n\n def __login1(\n self,\n **kwargs\n ):\n \"\"\"login1 # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.login1(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n email (str): [optional]\n _pass (str): [optional]\n from_authorize (str): [optional]\n cli_token (str): [optional]\n clever_flavor (str): [optional]\n oauth_token (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.login1 = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/sessions/login',\n 'operation_id': 'login1',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'email',\n '_pass',\n 'from_authorize',\n 'cli_token',\n 'clever_flavor',\n 'oauth_token',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'email':\n (str,),\n '_pass':\n (str,),\n 'from_authorize':\n (str,),\n 'cli_token':\n (str,),\n 'clever_flavor':\n (str,),\n 'oauth_token':\n (str,),\n },\n 'attribute_map': {\n 'email': 'email',\n '_pass': 'pass',\n 'from_authorize': 'from_authorize',\n 'cli_token': 'cli_token',\n 'clever_flavor': 'clever_flavor',\n 'oauth_token': 
'oauth_token',\n },\n 'location_map': {\n 'email': 'form',\n '_pass': 'form',\n 'from_authorize': 'form',\n 'cli_token': 'form',\n 'clever_flavor': 'form',\n 'oauth_token': 'form',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [\n 'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__login1\n )\n\n def __mfa_login(\n self,\n **kwargs\n ):\n \"\"\"mfa_login # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.mfa_login(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n mfa_kind (str): [optional]\n mfa_attempt (str): [optional]\n email (str): [optional]\n auth_id (str): [optional]\n from_authorize (str): [optional]\n cli_token (str): [optional]\n clever_flavor (str): [optional]\n oauth_token (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [OAuthApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.mfa_login = _Endpoint(\n settings={\n 'response_type': ([OAuthApplicationView],),\n 'auth': [],\n 'endpoint_path': '/session/mfa_login',\n 'operation_id': 'mfa_login',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'mfa_kind',\n 'mfa_attempt',\n 'email',\n 'auth_id',\n 'from_authorize',\n 'cli_token',\n 'clever_flavor',\n 'oauth_token',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'mfa_kind':\n (str,),\n 'mfa_attempt':\n (str,),\n 'email':\n (str,),\n 'auth_id':\n (str,),\n 'from_authorize':\n (str,),\n 'cli_token':\n (str,),\n 'clever_flavor':\n (str,),\n 'oauth_token':\n (str,),\n },\n 'attribute_map': {\n 'mfa_kind': 'mfa_kind',\n 
'mfa_attempt': 'mfa_attempt',\n 'email': 'email',\n 'auth_id': 'auth_id',\n 'from_authorize': 'from_authorize',\n 'cli_token': 'cli_token',\n 'clever_flavor': 'clever_flavor',\n 'oauth_token': 'oauth_token',\n },\n 'location_map': {\n 'mfa_kind': 'form',\n 'mfa_attempt': 'form',\n 'email': 'form',\n 'auth_id': 'form',\n 'from_authorize': 'form',\n 'cli_token': 'form',\n 'clever_flavor': 'form',\n 'oauth_token': 'form',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [\n 'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__mfa_login\n )\n\n def __mfa_login1(\n self,\n **kwargs\n ):\n \"\"\"mfa_login1 # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.mfa_login1(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n mfa_kind (str): [optional]\n mfa_attempt (str): [optional]\n email (str): [optional]\n auth_id (str): [optional]\n from_authorize (str): [optional]\n cli_token (str): [optional]\n clever_flavor (str): [optional]\n oauth_token (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [OAuthApplicationView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.mfa_login1 = _Endpoint(\n settings={\n 'response_type': ([OAuthApplicationView],),\n 'auth': [],\n 'endpoint_path': '/sessions/mfa_login',\n 'operation_id': 'mfa_login1',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'mfa_kind',\n 'mfa_attempt',\n 'email',\n 'auth_id',\n 'from_authorize',\n 'cli_token',\n 'clever_flavor',\n 'oauth_token',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'mfa_kind':\n (str,),\n 'mfa_attempt':\n (str,),\n 'email':\n (str,),\n 'auth_id':\n (str,),\n 'from_authorize':\n (str,),\n 'cli_token':\n (str,),\n 'clever_flavor':\n (str,),\n 'oauth_token':\n (str,),\n },\n 'attribute_map': {\n 'mfa_kind': 'mfa_kind',\n 
'mfa_attempt': 'mfa_attempt',\n 'email': 'email',\n 'auth_id': 'auth_id',\n 'from_authorize': 'from_authorize',\n 'cli_token': 'cli_token',\n 'clever_flavor': 'clever_flavor',\n 'oauth_token': 'oauth_token',\n },\n 'location_map': {\n 'mfa_kind': 'form',\n 'mfa_attempt': 'form',\n 'email': 'form',\n 'auth_id': 'form',\n 'from_authorize': 'form',\n 'cli_token': 'form',\n 'clever_flavor': 'form',\n 'oauth_token': 'form',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html'\n ],\n 'content_type': [\n 'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__mfa_login1\n )\n\n def __post_github_redeploy(\n self,\n github_webhook_payload,\n **kwargs\n ):\n \"\"\"post_github_redeploy # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.post_github_redeploy(github_webhook_payload, async_req=True)\n >>> result = thread.get()\n\n Args:\n github_webhook_payload (GithubWebhookPayload):\n\n Keyword Args:\n user_agent (str): [optional]\n x_github_event (str): [optional]\n x_hub_signature (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['github_webhook_payload'] = \\\n github_webhook_payload\n return self.call_with_http_info(**kwargs)\n\n self.post_github_redeploy = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/github/redeploy',\n 'operation_id': 'post_github_redeploy',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'github_webhook_payload',\n 'user_agent',\n 'x_github_event',\n 'x_hub_signature',\n ],\n 'required': [\n 'github_webhook_payload',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'github_webhook_payload':\n (GithubWebhookPayload,),\n 'user_agent':\n (str,),\n 'x_github_event':\n (str,),\n 'x_hub_signature':\n (str,),\n },\n 'attribute_map': {\n 'user_agent': 'User-Agent',\n 'x_github_event': 
'X-Github-Event',\n 'x_hub_signature': 'X-Hub-Signature',\n },\n 'location_map': {\n 'github_webhook_payload': 'body',\n 'user_agent': 'header',\n 'x_github_event': 'header',\n 'x_hub_signature': 'header',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__post_github_redeploy\n )\n\n def __reset_password_forgotten(\n self,\n key,\n **kwargs\n ):\n \"\"\"reset_password_forgotten # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.reset_password_forgotten(key, async_req=True)\n >>> result = thread.get()\n\n Args:\n key (str):\n\n Keyword Args:\n _pass (str): [optional]\n pass2 (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n str\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['key'] = \\\n key\n return self.call_with_http_info(**kwargs)\n\n self.reset_password_forgotten = _Endpoint(\n settings={\n 'response_type': (str,),\n 'auth': [],\n 'endpoint_path': '/password_forgotten/{key}',\n 'operation_id': 'reset_password_forgotten',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'key',\n '_pass',\n 'pass2',\n ],\n 'required': [\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'key':\n (str,),\n '_pass':\n (str,),\n 'pass2':\n (str,),\n },\n 'attribute_map': {\n 'key': 'key',\n '_pass': 'pass',\n 'pass2': 'pass2',\n },\n 'location_map': {\n 'key': 'path',\n '_pass': 'form',\n 'pass2': 'form',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'text/html'\n ],\n 
'content_type': [\n 'application/x-www-form-urlencoded'\n ]\n },\n api_client=api_client,\n callable=__reset_password_forgotten\n )\n\n def __update_env(\n self,\n app_id,\n body,\n **kwargs\n ):\n \"\"\"update_env # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_env(app_id, body, async_req=True)\n >>> result = thread.get()\n\n Args:\n app_id (str):\n body (str):\n\n Keyword Args:\n token (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] 
= kwargs.get('_host_index')\n kwargs['app_id'] = \\\n app_id\n kwargs['body'] = \\\n body\n return self.call_with_http_info(**kwargs)\n\n self.update_env = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/application/{appId}/environment',\n 'operation_id': 'update_env',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'app_id',\n 'body',\n 'token',\n ],\n 'required': [\n 'app_id',\n 'body',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'app_id':\n (str,),\n 'body':\n (str,),\n 'token':\n (str,),\n },\n 'attribute_map': {\n 'app_id': 'appId',\n 'token': 'token',\n },\n 'location_map': {\n 'app_id': 'path',\n 'body': 'body',\n 'token': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__update_env\n )\n\n def __update_invoice(\n self,\n bid,\n end_of_invoice_response,\n **kwargs\n ):\n \"\"\"update_invoice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_invoice(bid, end_of_invoice_response, async_req=True)\n >>> result = thread.get()\n\n Args:\n bid (str):\n end_of_invoice_response (EndOfInvoiceResponse):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['bid'] = \\\n bid\n kwargs['end_of_invoice_response'] = \\\n end_of_invoice_response\n return self.call_with_http_info(**kwargs)\n\n self.update_invoice = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/invoice/external/{bid}',\n 'operation_id': 'update_invoice',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'bid',\n 'end_of_invoice_response',\n ],\n 'required': [\n 'bid',\n 'end_of_invoice_response',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'bid':\n (str,),\n 'end_of_invoice_response':\n (EndOfInvoiceResponse,),\n },\n 'attribute_map': {\n 'bid': 'bid',\n },\n 'location_map': {\n 'bid': 'path',\n 'end_of_invoice_response': 'body',\n },\n 'collection_format_map': {\n }\n },\n 
headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__update_invoice\n )\n" }, { "alpha_fraction": 0.6667568683624268, "alphanum_fraction": 0.6683028936386108, "avg_line_length": 36.38728332519531, "blob_id": "32825b567307d581ac58653e91ce2b28f7db1053", "content_id": "0abf6b15538a75dfad7a7bb042238e1cfcddff90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25873, "license_type": "no_license", "max_line_length": 712, "num_lines": 692, "path": "/docs/AuthApi.md", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "# openapi_client.AuthApi\n\nAll URIs are relative to *https://api.clever-cloud.com/v2*\n\nMethod | HTTP request | Description\n------------- | ------------- | -------------\n[**authorize_form**](AuthApi.md#authorize_form) | **GET** /oauth/authorize | \n[**authorize_token**](AuthApi.md#authorize_token) | **POST** /oauth/authorize | \n[**get_available_rights**](AuthApi.md#get_available_rights) | **GET** /oauth/rights | \n[**get_login_data**](AuthApi.md#get_login_data) | **GET** /oauth/login_data | \n[**post_access_token_request**](AuthApi.md#post_access_token_request) | **POST** /oauth/access_token | \n[**post_access_token_request_query**](AuthApi.md#post_access_token_request_query) | **POST** /oauth/access_token_query | \n[**post_authorize**](AuthApi.md#post_authorize) | **POST** /authorize | \n[**post_req_token_request**](AuthApi.md#post_req_token_request) | **POST** /oauth/request_token | \n[**post_req_token_request_query_string**](AuthApi.md#post_req_token_request_query_string) | **POST** /oauth/request_token_query | \n\n\n# **authorize_form**\n> str authorize_form()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import auth_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See 
configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = auth_api.AuthApi(api_client)\n ccid = \"ccid_example\" # str | (optional)\n cctk = \"cctk_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n ccid2 = \"ccid_example\" # str | (optional)\n cli_token = \"cli_token_example\" # str | (optional)\n from_oauth = \"from_oauth_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.authorize_form(ccid=ccid, cctk=cctk, oauth_token=oauth_token, ccid2=ccid2, cli_token=cli_token, from_oauth=from_oauth)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling AuthApi->authorize_form: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **ccid** | **str**| | [optional]\n **cctk** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n **ccid2** | **str**| | [optional]\n **cli_token** | **str**| | [optional]\n **from_oauth** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **authorize_token**\n> authorize_token()\n\n\n\n### 
Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import auth_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = auth_api.AuthApi(api_client)\n ccid = \"ccid_example\" # str | (optional)\n cctk = \"cctk_example\" # str | (optional)\n almighty = \"almighty_example\" # str | (optional)\n access_organisations = \"access_organisations_example\" # str | (optional)\n manage_organisations = \"manage_organisations_example\" # str | (optional)\n manage_organisations_services = \"manage_organisations_services_example\" # str | (optional)\n manage_organisations_applications = \"manage_organisations_applications_example\" # str | (optional)\n manage_organisations_members = \"manage_organisations_members_example\" # str | (optional)\n access_organisations_bills = \"access_organisations_bills_example\" # str | (optional)\n access_organisations_credit_count = \"access_organisations_credit_count_example\" # str | (optional)\n access_organisations_consumption_statistics = \"access_organisations_consumption_statistics_example\" # str | (optional)\n access_personal_information = \"access_personal_information_example\" # str | (optional)\n manage_personal_information = \"manage_personal_information_example\" # str | (optional)\n manage_ssh_keys = \"manage_ssh_keys_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.authorize_token(ccid=ccid, cctk=cctk, almighty=almighty, access_organisations=access_organisations, manage_organisations=manage_organisations, 
manage_organisations_services=manage_organisations_services, manage_organisations_applications=manage_organisations_applications, manage_organisations_members=manage_organisations_members, access_organisations_bills=access_organisations_bills, access_organisations_credit_count=access_organisations_credit_count, access_organisations_consumption_statistics=access_organisations_consumption_statistics, access_personal_information=access_personal_information, manage_personal_information=manage_personal_information, manage_ssh_keys=manage_ssh_keys)\n except openapi_client.ApiException as e:\n print(\"Exception when calling AuthApi->authorize_token: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **ccid** | **str**| | [optional]\n **cctk** | **str**| | [optional]\n **almighty** | **str**| | [optional]\n **access_organisations** | **str**| | [optional]\n **manage_organisations** | **str**| | [optional]\n **manage_organisations_services** | **str**| | [optional]\n **manage_organisations_applications** | **str**| | [optional]\n **manage_organisations_members** | **str**| | [optional]\n **access_organisations_bills** | **str**| | [optional]\n **access_organisations_credit_count** | **str**| | [optional]\n **access_organisations_consumption_statistics** | **str**| | [optional]\n **access_personal_information** | **str**| | [optional]\n **manage_personal_information** | **str**| | [optional]\n **manage_ssh_keys** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - **Accept**: text/html, application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_available_rights**\n> get_available_rights()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import auth_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = auth_api.AuthApi(api_client)\n\n # example, this endpoint has no required or optional parameters\n try:\n api_instance.get_available_rights()\n except openapi_client.ApiException as e:\n print(\"Exception when calling AuthApi->get_available_rights: %s\\n\" % e)\n```\n\n\n### Parameters\nThis endpoint does not need any parameter.\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **get_login_data**\n> get_login_data()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import auth_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = 
openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = auth_api.AuthApi(api_client)\n oauth_key = \"oauth_key_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.get_login_data(oauth_key=oauth_key)\n except openapi_client.ApiException as e:\n print(\"Exception when calling AuthApi->get_login_data: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **oauth_key** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **post_access_token_request**\n> post_access_token_request()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import auth_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = auth_api.AuthApi(api_client)\n oauth_consumer_key = 
\"oauth_consumer_key_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n oauth_signature_method = \"oauth_signature_method_example\" # str | (optional)\n oauth_signature = \"oauth_signature_example\" # str | (optional)\n oauth_timestamp = \"oauth_timestamp_example\" # str | (optional)\n oauth_nonce = \"oauth_nonce_example\" # str | (optional)\n oauth_version = \"oauth_version_example\" # str | (optional)\n oauth_verifier = \"oauth_verifier_example\" # str | (optional)\n oauth_callback = \"oauth_callback_example\" # str | (optional)\n oauth_token_secret = \"oauth_token_secret_example\" # str | (optional)\n oauth_callback_confirmed = \"oauth_callback_confirmed_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.post_access_token_request(oauth_consumer_key=oauth_consumer_key, oauth_token=oauth_token, oauth_signature_method=oauth_signature_method, oauth_signature=oauth_signature, oauth_timestamp=oauth_timestamp, oauth_nonce=oauth_nonce, oauth_version=oauth_version, oauth_verifier=oauth_verifier, oauth_callback=oauth_callback, oauth_token_secret=oauth_token_secret, oauth_callback_confirmed=oauth_callback_confirmed)\n except openapi_client.ApiException as e:\n print(\"Exception when calling AuthApi->post_access_token_request: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **oauth_consumer_key** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n **oauth_signature_method** | **str**| | [optional]\n **oauth_signature** | **str**| | [optional]\n **oauth_timestamp** | **str**| | [optional]\n **oauth_nonce** | **str**| | [optional]\n **oauth_version** | **str**| | [optional]\n **oauth_verifier** | **str**| | [optional]\n **oauth_callback** | **str**| | [optional]\n **oauth_token_secret** | **str**| | [optional]\n 
**oauth_callback_confirmed** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - **Accept**: application/x-www-form-urlencoded\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **post_access_token_request_query**\n> post_access_token_request_query()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import auth_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = auth_api.AuthApi(api_client)\n oauth_consumer_key = \"oauth_consumer_key_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n oauth_signature_method = \"oauth_signature_method_example\" # str | (optional)\n oauth_signature = \"oauth_signature_example\" # str | (optional)\n oauth_timestamp = \"oauth_timestamp_example\" # str | (optional)\n oauth_nonce = \"oauth_nonce_example\" # str | (optional)\n oauth_version = \"oauth_version_example\" # str | (optional)\n oauth_verifier = \"oauth_verifier_example\" # str | (optional)\n oauth_callback = \"oauth_callback_example\" # str | (optional)\n oauth_token_secret = \"oauth_token_secret_example\" # str | (optional)\n oauth_callback_confirmed = 
\"oauth_callback_confirmed_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_instance.post_access_token_request_query(oauth_consumer_key=oauth_consumer_key, oauth_token=oauth_token, oauth_signature_method=oauth_signature_method, oauth_signature=oauth_signature, oauth_timestamp=oauth_timestamp, oauth_nonce=oauth_nonce, oauth_version=oauth_version, oauth_verifier=oauth_verifier, oauth_callback=oauth_callback, oauth_token_secret=oauth_token_secret, oauth_callback_confirmed=oauth_callback_confirmed)\n except openapi_client.ApiException as e:\n print(\"Exception when calling AuthApi->post_access_token_request_query: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **oauth_consumer_key** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n **oauth_signature_method** | **str**| | [optional]\n **oauth_signature** | **str**| | [optional]\n **oauth_timestamp** | **str**| | [optional]\n **oauth_nonce** | **str**| | [optional]\n **oauth_version** | **str**| | [optional]\n **oauth_verifier** | **str**| | [optional]\n **oauth_callback** | **str**| | [optional]\n **oauth_token_secret** | **str**| | [optional]\n **oauth_callback_confirmed** | **str**| | [optional]\n\n### Return type\n\nvoid (empty response body)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/x-www-form-urlencoded\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**0** | default response | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **post_authorize**\n> Message 
post_authorize(wannabe_authorization)\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import auth_api\nfrom openapi_client.model.wannabe_authorization import WannabeAuthorization\nfrom openapi_client.model.message import Message\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = auth_api.AuthApi(api_client)\n wannabe_authorization = WannabeAuthorization(\n verb=\"verb_example\",\n uri=\"uri_example\",\n authorization=\"authorization_example\",\n payload=\"payload_example\",\n nonce=\"nonce_example\",\n mac=\"mac_example\",\n ) # WannabeAuthorization | \n\n # example passing only required values which don't have defaults set\n try:\n api_response = api_instance.post_authorize(wannabe_authorization)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling AuthApi->post_authorize: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **wannabe_authorization** | [**WannabeAuthorization**](WannabeAuthorization.md)| |\n\n### Return type\n\n[**Message**](Message.md)\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/json\n - **Accept**: application/json\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back 
to README]](../README.md)\n\n# **post_req_token_request**\n> str post_req_token_request()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import auth_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = auth_api.AuthApi(api_client)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n oauth_consumer_key = \"oauth_consumer_key_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n oauth_signature_method = \"oauth_signature_method_example\" # str | (optional)\n oauth_signature = \"oauth_signature_example\" # str | (optional)\n oauth_timestamp = \"oauth_timestamp_example\" # str | (optional)\n oauth_nonce = \"oauth_nonce_example\" # str | (optional)\n oauth_version = \"oauth_version_example\" # str | (optional)\n oauth_verifier = \"oauth_verifier_example\" # str | (optional)\n oauth_callback = \"oauth_callback_example\" # str | (optional)\n oauth_token_secret = \"oauth_token_secret_example\" # str | (optional)\n oauth_callback_confirmed = \"oauth_callback_confirmed_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.post_req_token_request(clever_flavor=clever_flavor, oauth_consumer_key=oauth_consumer_key, oauth_token=oauth_token, oauth_signature_method=oauth_signature_method, oauth_signature=oauth_signature, oauth_timestamp=oauth_timestamp, oauth_nonce=oauth_nonce, oauth_version=oauth_version, oauth_verifier=oauth_verifier, oauth_callback=oauth_callback, 
oauth_token_secret=oauth_token_secret, oauth_callback_confirmed=oauth_callback_confirmed)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling AuthApi->post_req_token_request: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **clever_flavor** | **str**| | [optional]\n **oauth_consumer_key** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n **oauth_signature_method** | **str**| | [optional]\n **oauth_signature** | **str**| | [optional]\n **oauth_timestamp** | **str**| | [optional]\n **oauth_nonce** | **str**| | [optional]\n **oauth_version** | **str**| | [optional]\n **oauth_verifier** | **str**| | [optional]\n **oauth_callback** | **str**| | [optional]\n **oauth_token_secret** | **str**| | [optional]\n **oauth_callback_confirmed** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: application/x-www-form-urlencoded\n - **Accept**: application/x-www-form-urlencoded\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n# **post_req_token_request_query_string**\n> str post_req_token_request_query_string()\n\n\n\n### Example\n\n```python\nimport time\nimport openapi_client\nfrom openapi_client.api import auth_api\nfrom pprint import pprint\n# Defining the host is optional and defaults to https://api.clever-cloud.com/v2\n# See configuration.py for a list of all supported configuration parameters.\nconfiguration = openapi_client.Configuration(\n host = \"https://api.clever-cloud.com/v2\"\n)\n\n\n# Enter a context with an instance of the API 
client\nwith openapi_client.ApiClient() as api_client:\n # Create an instance of the API class\n api_instance = auth_api.AuthApi(api_client)\n clever_flavor = \"clever_flavor_example\" # str | (optional)\n oauth_consumer_key = \"oauth_consumer_key_example\" # str | (optional)\n oauth_token = \"oauth_token_example\" # str | (optional)\n oauth_signature_method = \"oauth_signature_method_example\" # str | (optional)\n oauth_signature = \"oauth_signature_example\" # str | (optional)\n oauth_timestamp = \"oauth_timestamp_example\" # str | (optional)\n oauth_nonce = \"oauth_nonce_example\" # str | (optional)\n oauth_version = \"oauth_version_example\" # str | (optional)\n oauth_verifier = \"oauth_verifier_example\" # str | (optional)\n oauth_callback = \"oauth_callback_example\" # str | (optional)\n oauth_token_secret = \"oauth_token_secret_example\" # str | (optional)\n oauth_callback_confirmed = \"oauth_callback_confirmed_example\" # str | (optional)\n\n # example passing only required values which don't have defaults set\n # and optional values\n try:\n api_response = api_instance.post_req_token_request_query_string(clever_flavor=clever_flavor, oauth_consumer_key=oauth_consumer_key, oauth_token=oauth_token, oauth_signature_method=oauth_signature_method, oauth_signature=oauth_signature, oauth_timestamp=oauth_timestamp, oauth_nonce=oauth_nonce, oauth_version=oauth_version, oauth_verifier=oauth_verifier, oauth_callback=oauth_callback, oauth_token_secret=oauth_token_secret, oauth_callback_confirmed=oauth_callback_confirmed)\n pprint(api_response)\n except openapi_client.ApiException as e:\n print(\"Exception when calling AuthApi->post_req_token_request_query_string: %s\\n\" % e)\n```\n\n\n### Parameters\n\nName | Type | Description | Notes\n------------- | ------------- | ------------- | -------------\n **clever_flavor** | **str**| | [optional]\n **oauth_consumer_key** | **str**| | [optional]\n **oauth_token** | **str**| | [optional]\n **oauth_signature_method** | 
**str**| | [optional]\n **oauth_signature** | **str**| | [optional]\n **oauth_timestamp** | **str**| | [optional]\n **oauth_nonce** | **str**| | [optional]\n **oauth_version** | **str**| | [optional]\n **oauth_verifier** | **str**| | [optional]\n **oauth_callback** | **str**| | [optional]\n **oauth_token_secret** | **str**| | [optional]\n **oauth_callback_confirmed** | **str**| | [optional]\n\n### Return type\n\n**str**\n\n### Authorization\n\nNo authorization required\n\n### HTTP request headers\n\n - **Content-Type**: Not defined\n - **Accept**: application/x-www-form-urlencoded\n\n\n### HTTP response details\n| Status code | Description | Response headers |\n|-------------|-------------|------------------|\n**200** | | - |\n\n[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)\n\n" }, { "alpha_fraction": 0.5684887170791626, "alphanum_fraction": 0.5762057900428772, "avg_line_length": 17.734939575195312, "blob_id": "ebef1c06b7c95c7385ef029ed1af837c1d34a9a7", "content_id": "c952d4a0c6bb8b831e6cf46d32c3356b71b62a38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1555, "license_type": "no_license", "max_line_length": 72, "num_lines": 83, "path": "/test/test_payment_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport unittest\n\nimport openapi_client\nfrom openapi_client.api.payment_api import PaymentApi # noqa: E501\n\n\nclass TestPaymentApi(unittest.TestCase):\n \"\"\"PaymentApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = PaymentApi() # noqa: E501\n\n def tearDown(self):\n pass\n\n def 
test_check_vat(self):\n \"\"\"Test case for check_vat\n\n \"\"\"\n pass\n\n def test_end_payment_with_stripe(self):\n \"\"\"Test case for end_payment_with_stripe\n\n \"\"\"\n pass\n\n def test_get_available_payment_providers(self):\n \"\"\"Test case for get_available_payment_providers\n\n \"\"\"\n pass\n\n def test_get_coupon(self):\n \"\"\"Test case for get_coupon\n\n \"\"\"\n pass\n\n def test_get_invoice_status_button(self):\n \"\"\"Test case for get_invoice_status_button\n\n \"\"\"\n pass\n\n def test_get_stripe_token(self):\n \"\"\"Test case for get_stripe_token\n\n \"\"\"\n pass\n\n def test_stripe_sepa_webhook(self):\n \"\"\"Test case for stripe_sepa_webhook\n\n \"\"\"\n pass\n\n def test_update_stripe_payment(self):\n \"\"\"Test case for update_stripe_payment\n\n \"\"\"\n pass\n\n def test_validate(self):\n \"\"\"Test case for validate\n\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.43330007791519165, "alphanum_fraction": 0.434604674577713, "avg_line_length": 34.70137023925781, "blob_id": "d2aa6232f2caf1ebc0c81af2e13b22eb1210e3a4", "content_id": "285c8fd5e002a16e294e3dfb8580fe198c2e5a8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39093, "license_type": "no_license", "max_line_length": 97, "num_lines": 1095, "path": "/openapi_client/api/payment_api.py", "repo_name": "krezreb/openapi-client-clevercloud", "src_encoding": "UTF-8", "text": "\"\"\"\n Clever-Cloud API\n\n Public API for managing Clever-Cloud data and products # noqa: E501\n\n The version of the OpenAPI document: 1.0.1\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom openapi_client.api_client import ApiClient, Endpoint as _Endpoint\nfrom openapi_client.model_utils import ( # noqa: F401\n check_allowed_values,\n check_validations,\n date,\n datetime,\n file_type,\n none_type,\n 
validate_and_convert_types\n)\nfrom openapi_client.model.braintree_token import BraintreeToken\nfrom openapi_client.model.coupon_view import CouponView\nfrom openapi_client.model.invoice_rendering import InvoiceRendering\nfrom openapi_client.model.message import Message\nfrom openapi_client.model.payment_data import PaymentData\nfrom openapi_client.model.payment_provider_view import PaymentProviderView\nfrom openapi_client.model.setup_intent_view import SetupIntentView\nfrom openapi_client.model.stripe_confirmation_error_message import StripeConfirmationErrorMessage\nfrom openapi_client.model.vat_result import VatResult\n\n\nclass PaymentApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def __check_vat(\n self,\n **kwargs\n ):\n \"\"\"check_vat # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.check_vat(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n country (str): [optional]\n vat (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n VatResult\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.check_vat = _Endpoint(\n settings={\n 'response_type': (VatResult,),\n 'auth': [],\n 'endpoint_path': '/vat_check',\n 'operation_id': 'check_vat',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'country',\n 'vat',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'country':\n (str,),\n 'vat':\n (str,),\n },\n 'attribute_map': {\n 'country': 'country',\n 'vat': 'vat',\n },\n 'location_map': {\n 'country': 'query',\n 'vat': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__check_vat\n )\n\n def __end_payment_with_stripe(\n self,\n bid,\n 
payment_data,\n **kwargs\n ):\n \"\"\"end_payment_with_stripe # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.end_payment_with_stripe(bid, payment_data, async_req=True)\n >>> result = thread.get()\n\n Args:\n bid (str):\n payment_data (PaymentData):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n InvoiceRendering\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['bid'] = \\\n bid\n kwargs['payment_data'] = \\\n payment_data\n return 
self.call_with_http_info(**kwargs)\n\n self.end_payment_with_stripe = _Endpoint(\n settings={\n 'response_type': (InvoiceRendering,),\n 'auth': [],\n 'endpoint_path': '/payments/{bid}/end/stripe',\n 'operation_id': 'end_payment_with_stripe',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'bid',\n 'payment_data',\n ],\n 'required': [\n 'bid',\n 'payment_data',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'bid':\n (str,),\n 'payment_data':\n (PaymentData,),\n },\n 'attribute_map': {\n 'bid': 'bid',\n },\n 'location_map': {\n 'bid': 'path',\n 'payment_data': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__end_payment_with_stripe\n )\n\n def __get_available_payment_providers(\n self,\n **kwargs\n ):\n \"\"\"get_available_payment_providers # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_available_payment_providers(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n [PaymentProviderView]\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_available_payment_providers = _Endpoint(\n settings={\n 'response_type': ([PaymentProviderView],),\n 'auth': [],\n 'endpoint_path': '/payments/providers',\n 'operation_id': 'get_available_payment_providers',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_available_payment_providers\n )\n\n def __get_coupon(\n self,\n name,\n **kwargs\n ):\n \"\"\"get_coupon # noqa: E501\n\n This method 
makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_coupon(name, async_req=True)\n >>> result = thread.get()\n\n Args:\n name (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n CouponView\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['name'] = \\\n name\n return self.call_with_http_info(**kwargs)\n\n self.get_coupon = _Endpoint(\n settings={\n 'response_type': (CouponView,),\n 'auth': [],\n 'endpoint_path': '/payments/coupons/{name}',\n 'operation_id': 'get_coupon',\n 'http_method': 
'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'name',\n ],\n 'required': [\n 'name',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'name':\n (str,),\n },\n 'attribute_map': {\n 'name': 'name',\n },\n 'location_map': {\n 'name': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_coupon\n )\n\n def __get_invoice_status_button(\n self,\n token,\n **kwargs\n ):\n \"\"\"get_invoice_status_button # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_invoice_status_button(token, async_req=True)\n >>> result = thread.get()\n\n Args:\n token (str):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['token'] = \\\n token\n return self.call_with_http_info(**kwargs)\n\n self.get_invoice_status_button = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/payments/assets/pay_button/{token}/button.png',\n 'operation_id': 'get_invoice_status_button',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'token',\n ],\n 'required': [\n 'token',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'token':\n (str,),\n },\n 'attribute_map': {\n 'token': 'token',\n },\n 'location_map': {\n 'token': 'path',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'image/png'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_invoice_status_button\n )\n\n def 
__get_stripe_token(\n self,\n **kwargs\n ):\n \"\"\"get_stripe_token # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.get_stripe_token(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n BraintreeToken\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.get_stripe_token = _Endpoint(\n settings={\n 'response_type': (BraintreeToken,),\n 'auth': [],\n 'endpoint_path': 
'/payments/tokens/stripe',\n 'operation_id': 'get_stripe_token',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n },\n 'attribute_map': {\n },\n 'location_map': {\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__get_stripe_token\n )\n\n def __stripe_sepa_webhook(\n self,\n **kwargs\n ):\n \"\"\"stripe_sepa_webhook # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.stripe_sepa_webhook(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n stripe_signature (str): [optional]\n body (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n None\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n return self.call_with_http_info(**kwargs)\n\n self.stripe_sepa_webhook = _Endpoint(\n settings={\n 'response_type': None,\n 'auth': [],\n 'endpoint_path': '/payments/webhooks/stripe/sepa',\n 'operation_id': 'stripe_sepa_webhook',\n 'http_method': 'POST',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'stripe_signature',\n 'body',\n ],\n 'required': [],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'stripe_signature':\n (str,),\n 'body':\n (str,),\n },\n 'attribute_map': {\n 'stripe_signature': 'Stripe-Signature',\n },\n 'location_map': {\n 'stripe_signature': 'header',\n 'body': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n 
callable=__stripe_sepa_webhook\n )\n\n def __update_stripe_payment(\n self,\n bid,\n setup_intent_view,\n **kwargs\n ):\n \"\"\"update_stripe_payment # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.update_stripe_payment(bid, setup_intent_view, async_req=True)\n >>> result = thread.get()\n\n Args:\n bid (str):\n setup_intent_view (SetupIntentView):\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n InvoiceRendering\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n 
kwargs['bid'] = \\\n bid\n kwargs['setup_intent_view'] = \\\n setup_intent_view\n return self.call_with_http_info(**kwargs)\n\n self.update_stripe_payment = _Endpoint(\n settings={\n 'response_type': (InvoiceRendering,),\n 'auth': [],\n 'endpoint_path': '/payments/{bid}/end/stripe',\n 'operation_id': 'update_stripe_payment',\n 'http_method': 'PUT',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'bid',\n 'setup_intent_view',\n ],\n 'required': [\n 'bid',\n 'setup_intent_view',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'bid':\n (str,),\n 'setup_intent_view':\n (SetupIntentView,),\n },\n 'attribute_map': {\n 'bid': 'bid',\n },\n 'location_map': {\n 'bid': 'path',\n 'setup_intent_view': 'body',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [\n 'application/json'\n ]\n },\n api_client=api_client,\n callable=__update_stripe_payment\n )\n\n def __validate(\n self,\n key,\n **kwargs\n ):\n \"\"\"validate # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.validate(key, async_req=True)\n >>> result = thread.get()\n\n Args:\n key (str):\n\n Keyword Args:\n action (str): [optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. 
It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n Message\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs['async_req'] = kwargs.get(\n 'async_req', False\n )\n kwargs['_return_http_data_only'] = kwargs.get(\n '_return_http_data_only', True\n )\n kwargs['_preload_content'] = kwargs.get(\n '_preload_content', True\n )\n kwargs['_request_timeout'] = kwargs.get(\n '_request_timeout', None\n )\n kwargs['_check_input_type'] = kwargs.get(\n '_check_input_type', True\n )\n kwargs['_check_return_type'] = kwargs.get(\n '_check_return_type', True\n )\n kwargs['_host_index'] = kwargs.get('_host_index')\n kwargs['key'] = \\\n key\n return self.call_with_http_info(**kwargs)\n\n self.validate = _Endpoint(\n settings={\n 'response_type': (Message,),\n 'auth': [],\n 'endpoint_path': '/validation/vat/{key}',\n 'operation_id': 'validate',\n 'http_method': 'GET',\n 'servers': None,\n },\n params_map={\n 'all': [\n 'key',\n 'action',\n ],\n 'required': [\n 'key',\n ],\n 'nullable': [\n ],\n 'enum': [\n ],\n 'validation': [\n ]\n },\n root_map={\n 'validations': {\n },\n 'allowed_values': {\n },\n 'openapi_types': {\n 'key':\n (str,),\n 'action':\n (str,),\n },\n 'attribute_map': {\n 'key': 'key',\n 'action': 'action',\n },\n 'location_map': {\n 'key': 'path',\n 'action': 'query',\n },\n 'collection_format_map': {\n }\n },\n headers_map={\n 'accept': [\n 'application/json'\n ],\n 'content_type': [],\n },\n api_client=api_client,\n callable=__validate\n )\n" } ]
42
lopamd/FAQ-semantic-matching
https://github.com/lopamd/FAQ-semantic-matching
5f9cfcacffea5e4378880cffda9dd6e0494013c7
31fe5e550077e8b92c65a51b4ecd1c6492961e34
99057e4416088e9ba8a342dd1c9d772b2d27facb
refs/heads/master
2020-08-08T11:43:19.700111
2019-10-09T04:54:12
2019-10-09T05:23:34
213,824,202
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6381818056106567, "avg_line_length": 28.756755828857422, "blob_id": "c905ff0253c0b258d22ee1170062fa31bd21cf8e", "content_id": "f00e6148d431bc942c3d59dcba025a189efe850a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1100, "license_type": "no_license", "max_line_length": 99, "num_lines": 37, "path": "/faq_config.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import csv\nimport base_objects\nimport nlp_config\nimport sys\n\nclass FAQReader( object ):\n \"\"\"Abstract class. Please implement fetch to return a list of QAPairs.\"\"\"\n def fetch( self ):\n raise NotImplementedError(\"Class %s doesn't implement fetch()\" % (self.__class__.__name__))\n \nclass CSVFAQReader( FAQReader ):\n def __init__(self, csvfilename):\n self.csvfilename = csvfilename\n\n def fetch( self ):\n faqs = []\n encoding = 'mac_roman' if sys.platform == 'darwin' else None\n with open(self.csvfilename, encoding=encoding) as csvfile:\n areader = csv.reader(csvfile)\n for row in areader:\n faqs.append(base_objects.QAPair(row[0].strip(), row[1].strip()))\n return faqs\n \n \ndef getFAQs():\n faqreader = CSVFAQReader(nlp_config.faq_input_file)\n return faqreader.fetch()\n\ndef getEvaluationQns():\n faqreader = CSVFAQReader(nlp_config.evaluation_input_file)\n return faqreader.fetch()\n \n#Usage:\n#import faq_config\n#for y in faq_config.getFAQs():\n# print(y.question)\n# print(y.answer)" }, { "alpha_fraction": 0.6862623691558838, "alphanum_fraction": 0.6887376308441162, "avg_line_length": 31.020408630371094, "blob_id": "3f795ce05c113f981387cfb6ef48762c9951672a", "content_id": "18a2bc4ba5761e0ccae9c98db619161a817becdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1616, "license_type": "no_license", "max_line_length": 79, "num_lines": 49, "path": "/lesk.py", "repo_name": 
"lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import nltk\r\nfrom nltk.corpus import wordnet as wn\r\nnltk.download('wordnet')\r\nnltk.download('stopwords')\r\n\r\nstops = set(nltk.corpus.stopwords.words('english'))\r\nlemmatizer = nltk.stem.wordnet.WordNetLemmatizer()\r\n\r\ndef remove_lemma_and_stops(lemmas, lemma):\r\n return {x for x in lemmas if x != lemma and x not in stops and x is not None}\r\n\r\ndef compute_overlap(synset, lemma_set, lemma):\r\n def get_lemma_set(text):\r\n #tokenize, lemmatize, and remove stops\r\n ret = [lemmatizer.lemmatize(x) for x in nltk.word_tokenize(text)]\r\n return remove_lemma_and_stops(ret, lemma)\r\n \r\n def get_lemma_set_from_synset():\r\n ret = get_lemma_set(synset.definition())\r\n for example in synset.examples():\r\n ret |= get_lemma_set(example)\r\n return ret\r\n \r\n synsets_lemmas = get_lemma_set_from_synset()\r\n return len(synsets_lemmas & lemma_set)\r\n \r\n#get the synset that most closely matches a lemma\r\n#need to modify to allow restriction to a specific part of speech\r\ndef get_lemma_synset(lemma, lemma_neighbors):\r\n lemma_set = remove_lemma_and_stops(lemma_neighbors, lemma)\r\n \r\n synsets = wn.synsets(lemma)\r\n \r\n if not synsets:\r\n return None\r\n \r\n best_option = (synsets[0], 0) #best synset, best overlap\r\n \r\n for candidate_syn in synsets:\r\n overlap_value = compute_overlap(candidate_syn, lemma_set, lemma)\r\n \r\n if overlap_value > best_option[1]:\r\n best_option = (candidate_syn, overlap_value)\r\n \r\n return best_option[0]\r\n \r\n#takes a textfeatureextraction object\r\ndef get_synsets_from_features(tfe):\r\n return [get_lemma_synset(lemma, tfe.lemmas) for lemma in tfe.lemmas]" }, { "alpha_fraction": 0.5862192511558533, "alphanum_fraction": 0.6531543731689453, "avg_line_length": 40.23684310913086, "blob_id": "f64398f65bd3b78d9b3f7aff03fba606c4e9ebac", "content_id": "1ae536c41485876d926d1800e4d201c14e44d817", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 11175, "license_type": "no_license", "max_line_length": 436, "num_lines": 266, "path": "/part4tester.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import better_objects as b\r\nimport faq_config\r\nimport random\r\nimport sys\r\nimport lesk\r\nimport nlp_config\r\nimport base_objects\nfrom nltk.parse.stanford import StanfordDependencyParser\r\nfrom nltk.corpus import wordnet_ic\r\nimport numpy as np\r\n\r\nbrown_ic = wordnet_ic.ic('ic-brown.dat')\r\n\r\n'''test_questions = [\r\n \"When is hummingbird season?\",\r\n \"Where do hummingbirds go in the winter?\",\r\n \"Where do hummingbirds live?\",\r\n \"What is the reproduction process like for hummingbirds?\",\r\n \"How fast does a hummingbird fly?\",\r\n \"Do hummingbirds damage flowers?\",\r\n \"Do hummingbirds reuse their nest?\",\r\n \"How much nectar does a hummingbird consume in a day?\",\r\n \"Do hummingbirds eat termites?\",\r\n \"What is a hummingbird's lifecycle?\"]\r\n\"What do hummingbirds eat?\",\r\n \"When do hummingbirds nest?\",\r\n \"Does a hummingbird find flowers by smell?\",\r\n \"How fast do hummingbird wings beat? 
How do they move?\",\r\n \"How fast do hummingbird hearts beat?\",\r\n \"How long does a hummingbird live?\",\r\n \"How many species are there?\",\r\n \"What makes a hummingbird's throat bright and colorful?\",\r\n \"Can hummingbirds walk?\",\r\n \"What is the smallest hummingbird?\",\r\n \"How many feathers do hummingbirds have?\",\r\n \"What part of a hummingbird weighs the most?\",\r\n \"How big are a hummingbird's eggs?\",\r\n \"How many breaths does a hummingbird take per minute?\",\r\n \"How far can a hummingbird fly during migration?\",\r\n \"Do hummingbirds sit on the backs of other birds during migration?\",\r\n \"Can hummingbirds smell?\",\r\n \"How do humminbirds eat nectar?\",\r\n \"How fast can a hummingbird lick?\",\r\n \"How quickly do humminbirds digest nectar?\",\r\n \"Is there cross-breeding between species?\",\r\n \"Are hummingbirds aggressive?\",\r\n \"What is the longest bill a hummingbird can have?\",\r\n \"Are hummingbirds found in the Eastern Hemisphere?\",\r\n \"What threats are posed to hummingbirds today?\"]\r\n\r\nbest_answers = [ix + 1 for ix in range(len(test_questions))]'''\r\n\r\ntest_questions = [\r\n \"What is the length of time that hummingbirds will be alive?\",\r\n \"How many genera or sorts of hummingbirds can one find?\",\r\n \"Is the hue of the larynx influenced by external factors?\",\r\n \"What do they use their appendiges for and how do they move around?\",\r\n \"How little can hummingbirds get and what is the length?\",\r\n \"How much plumage do these birds have and is that amount high?\",\r\n \"What which muscle has the greatest percentage of weight?\",\r\n \"Is it true that a hummingbird lays the tiniest of ova?\",\r\n \"What is the rate of breath intake for hummingbirds?\",\r\n \"What is the farthest a hummingbird flies when it migrates?\"\r\n]\r\n\r\nbest_answers = [16 + ix for ix in range(10)]\r\n \r\n#final_weights = [0.952926181485045, 0.9840099977685615, 1.0525210561258025, 1.051562827464642, 0.9532682412234448, 
0.9520219911127934, \r\n# 0.969117385075304, 0.9546066017400465, 1.013167700035129, 0.961371876331083, 0.9305470016082897, 0.9575960964407408, \r\n# 1.0226004255054897, 0.9374376883134267, 1.0016046379331374, 1.0733357426136956, 0.9578154508191105, 0.9684130290554245, \r\n# 0.9229061653172881]\r\nfinal_weights = [0.7490120039192232, 0.5473259606903345, 0.7201503022341168, 1.1039197157559848, 0.5381058876841468, 0.543439633895209, 0.6933861804185498, 0.32936031784864706, 0.5162382838863742, 0.619586214260726, 1.1842453368513972, 0.40318774451844286, 1.531198544790128, 0.44753838998751644, 0.5750546156748142, 1.529495292317564, 0.5975599793129092, 0.5244313444345968, 0.5864861033072415, 0.4288517647857363, 0.45599724465641506]\r\n\n#TODO: this should be in a central place\r\ndependency_parser = StanfordDependencyParser(path_to_jar=nlp_config.path_to_stanford_jar, path_to_models_jar=nlp_config.path_to_stanford_models_jar)\r\n\r\nclass Annealer(object):\r\n #all of the parameters are functions. e = energy, lower is better. 
p = probability.\r\n def __init__(self, neighbor, e, p, temperature):\r\n self.neighbor = neighbor\r\n self.e = e\r\n self.p = p\r\n self.temperature = temperature\r\n\r\nmax_temp = 100 \r\n\r\ndef neighbor(s, adj_min = 0.001, adj_max = 0.01):\r\n #random index, random minor adjustment\r\n idx = random.randrange(len(s.weights))\r\n adj = random.uniform(adj_min, adj_max)\r\n if random.randrange(2) == 0:\r\n adj *= -1\r\n new_weights = [w for w in s.weights]\r\n new_weights[idx] += adj\r\n return new_weights\r\n\r\n#larger e is bad\r\n#larger change in e is bad\r\n#smaller temperature is bad (in terms of accepting the change)\r\ndef probability(e, e_prime, temperature):\r\n if e_prime < e:\r\n return 1\r\n return (1 - (e_prime - e)) * temperature / max_temp\r\n \r\n#between 0 and max_temp\r\ndef temperature(time): #time is k / kmax\r\n return (1 - time) * max_temp\r\n\r\n#needs to be between 0 and 1\r\ndef energy(state, weights):\r\n all_scores = state.get_scores(weights)\r\n \r\n total = 0 \r\n for ix, q_score_set in enumerate(all_scores):\r\n total += q_score_set[state.best_choices[ix]]\r\n \r\n return 1 - (total / len(all_scores))\r\n \r\n #slot = 0\r\n #x = sorted([(val, key) for key, val in scores.items()], reverse=True)\r\n \r\n #for xx in x:\r\n # if xx[1] == state.best_answer:\r\n # break\r\n # slot += 1\r\n \r\n #super simple energy function. 
invert the score\r\n #this is probably too naive, but we will try it.\r\n #return (1 - scores[state.best_answer])# * slot / len(x)\r\n\r\ndef get_jcn_similarity(feat_a, feat_b, pos):\r\n jcn_feature = []\r\n for qsyn in feat_a.synsets:\r\n if qsyn.pos() == pos:\r\n for asyn in feat_b.synsets:\r\n if asyn.pos() == pos:\r\n similarity = qsyn.jcn_similarity(asyn, brown_ic)\r\n if similarity > 1: #identical words are 1e+300 and it's causing infiniti errors\r\n similarity = 1 #1 is the greatest a similarity can be because of the normalization\r\n jcn_feature.append(similarity)\r\n jcn_normalize = [1] * len(jcn_feature)\r\n \r\n jcn_result = 0\r\n if len(jcn_feature) > 0:\r\n jcn_result = np.linalg.norm(jcn_feature) / np.linalg.norm(jcn_normalize)\r\n \r\n return jcn_result\r\n \r\nclass State(object):\r\n def __init__(self, qs_features, as_features, weights, best_choices):\r\n self.weights = weights #never used\r\n self.best_choices = best_choices\r\n self.score_vectors = []\r\n \r\n self.faq_feat = as_features\n for qf in qs_features:\r\n list_of_score_vectors = []\r\n for af in as_features:\r\n qa_score_vector = [get_score_simple(qf.tokens, af.tokens),\r\n get_score_simple(qf.tokens_no_stops, af.tokens_no_stops),\r\n get_score_simple(qf.lemmas, af.lemmas),\r\n get_score_simple(qf.stems, af.stems),\r\n get_score_simple(qf.pos_tags, af.pos_tags),\r\n get_score_simple(qf.synset_lemmas, af.synset_lemmas),\r\n get_score_simple(qf.antonym_lemmas, af.antonym_lemmas),\r\n get_score_simple(qf.hyponym_lemmas, af.hyponym_lemmas),\r\n get_score_simple(qf.hypernym_lemmas, af.hypernym_lemmas),\r\n get_score_simple(qf.part_meronym_lemmas, af.part_meronym_lemmas),\r\n get_score_simple(qf.part_holonym_lemmas, af.part_holonym_lemmas),\r\n get_score_simple(qf.member_meronym_lemmas, af.member_meronym_lemmas),\r\n get_score_simple(qf.member_holonym_lemmas, af.member_holonym_lemmas),\r\n get_score_simple(qf.substance_meronym_lemmas, af.substance_meronym_lemmas),\r\n 
get_score_simple(qf.substance_holonym_lemmas, af.substance_holonym_lemmas),\r\n get_score_simple(qf.wn_definitions, af.wn_definitions),\r\n get_score_simple(qf.wn_examples, af.wn_examples),\r\n get_score_simple(qf.depgraph_deps, af.depgraph_deps),\r\n get_score_simple(qf.depgraph_rels, af.depgraph_rels),\r\n get_jcn_similarity(qf, af, 'v'),\r\n get_jcn_similarity(qf, af, 'n')]\r\n list_of_score_vectors.append(qa_score_vector)\r\n self.score_vectors.append(list_of_score_vectors)\r\n \r\n def get_scores(self, used_weights = None):\r\n scores = []\r\n for sv in self.score_vectors:\r\n specific_q_scores = dict()\r\n for jx, subv in enumerate(sv):\r\n effective_score = b.score_features(subv, used_weights)\r\n specific_q_scores[jx + 1] = effective_score\r\n scores.append(specific_q_scores)\r\n return scores\r\n \n def get_final_scores(self, used_weights = None):\r\n scores = []\r\n for sv in self.score_vectors:\r\n specific_q_scores = dict()\r\n for jx, subv in enumerate(sv):\r\n effective_score = b.score_features(subv, used_weights)\r\n specific_q_scores[self.faq_feat[jx].qapair] = effective_score\n scores.append(specific_q_scores)\r\n return scores\r\n \ndef lt_default(a, b): return a < b\r\n\r\ndef get_score_simple(arr1, arr2):\r\n if len(arr1) == 0 or len(arr2) == 0:\r\n return 0\r\n math_vecs = b.get_math_vectors(arr1, arr2, lt_default)\r\n return b.cosine_similarity(math_vecs[0], math_vecs[1])\r\n\r\nquestion = \"What do hummingbirds eat?\"#\"What is the lifecycle of a hummingbird like as it grows from birth as a child to death?\"#\"Describe the hummingbird's lifecycle.\"#\"What do hummingbirds eat?\"#\"At what speed do hummingbirds fly in the air?\"\n\ndef get_question_features(qfeat):\n for qf in qfeat:\n qf.synsets = lesk.get_synsets_from_features(qf)\n for qf in qfeat:\n qf.depgraphs = [dg for dg in dependency_parser.raw_parse(question)] #TODO: not the best way to do this. 
also iter to make an array\n\n for qf in qfeat:\n qf.add_wordnet_features()\n qf.add_depgraph_features()\n\n\ndef get_faq_features(faqs):\n as_features = b.get_answers_features(faqs)\n\n #this should set up the synsets\n b.load_all_synsets(as_features)\n\n #this should set up dependency graphs\n b.load_all_depgraphs(as_features)\n\n for f in as_features:\n f.add_wordnet_features()\n f.add_depgraph_features()\n\n return as_features\n\ndef train_model(faqs):\n feature_count = 21 #TODO: hardcoded\n learned_weights = [1] * feature_count\n qs_features = [b.TextFeatureExtraction(q, base_objects.QAPair(q,\"\")) for q in test_questions]\n get_question_features(qs_features)\n faq_features = get_faq_features(faqs)\n '''\n We train only once and save the weights\n Uncomment this if you want to change algo and train again.\n '''\n\n max_steps = 7500#25000\n state = State(qs_features, faq_features, learned_weights, best_answers)#10)#11) #5)\n anneal = Annealer(neighbor, energy, probability, temperature)\n for k in range(max_steps):\n t = anneal.temperature(k / max_steps)\n new_weights = anneal.neighbor(state)\n e_old = anneal.e(state, state.weights)\n e_new = anneal.e(state, new_weights)\n if anneal.p(e_old, e_new, t) >= random.random():\n state.weights = new_weights\n if k % 20 == 0:\n print(\"k: %5d, last energy: %f. 
weights = %s\" % (k, e_old, state.weights)) #TODO: might not be e_old\n\n learned_weights = state.weights\n print(state.weights)\r\n \r\n return state\n" }, { "alpha_fraction": 0.5978052020072937, "alphanum_fraction": 0.5978052020072937, "avg_line_length": 35.82828140258789, "blob_id": "05bc494af0999e3171e13bd70d38efe4e8014425", "content_id": "34b6ff4d4ee3f77042435477eaa6a3efb2035e78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3645, "license_type": "no_license", "max_line_length": 113, "num_lines": 99, "path": "/base_objects.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "class QAPair:\n def __init__(self, question, answer):\n self.question = question\n self.answer = answer\n def __str__(self):\n return \"Question: [%s] Answer: [%s]\" % (self.question, self.answer)\n \nclass QAFeatureExtraction( object ):\n '''qa_pairs is a list of QAPair objects'''\n def __init__( self, qa_pairs ):\n self.qa_pairs = qa_pairs\n self._tokens = None\n self._sentence_tokens = None\n self._lemmas = None\n self._stems = None\n self._pos_tags = None\n self._dependency_graphs = None\n self._synsets = None\n self._bow = None #This will hold a List of Counter object of each FAQ\n\n '''Private abstract function to tokenize the questions and answers on word boundaries'''\n def _tokenize( self ):\n raise NotImplementedError(\"Class %s doesn't implement _tokenize()\" % (self.__class__.__name__))\n\n '''Private abstract function to tokenize the questions and answers on sentence boundaries'''\n def _tokenize_sentences( self ):\n raise NotImplementedError(\"Class %s doesn't implement _tokenize_sentences()\" % (self.__class__.__name__))\n\n '''Private abstract function to lemmatize the questions and answers'''\n def _lemmatize( self ):\n raise NotImplementedError(\"Class %s doesn't implement _lemmatize()\" % (self.__class__.__name__))\n\n '''Private abstract function to stem the questions and 
answers'''\n def _stem( self ):\n raise NotImplementedError(\"Class %s doesn't implement _stem()\" % (self.__class__.__name__))\n \n '''Private abstract function to pos tag the questions and answers''' \n def _pos_tag( self ):\n raise NotImplementedError(\"Class %s doesn't implement _pos_tag()\" % (self.__class__.__name__))\n \n '''Private abstract function to graph the dependencies for the questions and answers''' \n def _graph_dependencies( self ):\n raise NotImplementedError(\"Class %s doesn't implement _graph_dependencies()\" % (self.__class__.__name__))\n \n '''Private abstract function to get wordnet synsets for the lemmas in the questions and answers''' \n def _get_synsets( self ):\n raise NotImplementedError(\"Class %s doesn't implement _get_synsets()\" % (self.__class__.__name__))\n \n '''Private abstract function to get bag of words the questions and answers''' \n def _get_bow( self ):\n raise NotImplementedError(\"Class %s doesn't implement _get_synsets()\" % (self.__class__.__name__))\n \n @property\n def tokens(self):\n if self._tokens is None:\n self._tokenize()\n return self._tokens\n \n @property\n def sentence_tokens(self):\n if self._sentence_tokens is None:\n self._tokenize_sentences()\n return self._sentence_tokens\n \n @property\n def lemmas(self):\n if self._lemmas is None:\n self._lemmatize()\n return self._lemmas\n \n @property\n def stems(self):\n if self._stems is None:\n self._stem()\n return self._stems\n \n @property\n def pos_tags(self):\n if self._pos_tags is None:\n self._pos_tag()\n return self._pos_tags\n \n @property\n def dependency_graphs(self):\n if self._dependency_graphs is None:\n self._graph_dependencies()\n return self._dependency_graphs\n \n @property\n def synsets(self):\n if self._synsets is None:\n self._get_synsets()\n return self._synsets\n\n @property\n def bow(self):\n if self._bow is None:\n self._get_bow()\n return self._bow" }, { "alpha_fraction": 0.5594202876091003, "alphanum_fraction": 0.5634336471557617, 
"avg_line_length": 31.985294342041016, "blob_id": "435f824a5fe7eed5b06e75d848996052d95ed65c", "content_id": "a70c0171d847909ceed344d2baab1eeea442d6e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4485, "license_type": "no_license", "max_line_length": 102, "num_lines": 136, "path": "/faq_nlp.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import nltk_objects\nimport faq_config\nimport base_objects\nimport operator\n\nfrom nlp_algo import BOWAlgorithm\nfrom nlp_eval import MRREvaluation\nfrom nlp_config import *\nimport better_objects as b\nimport part4tester as model\n\nRESULTS_TOPN = 10\n\nmaster_training = False\n\ndo_training = master_training\nreport_training = master_training\ndo_main = not master_training\n\ndef print_results(user_q, resultDict, algoType):\n sortedResults = sorted(resultDict.items(), key=lambda x:x[1], reverse=True)\n count = 0\n print(\"***********************************************************************\")\n print(\"Given user question: \", user_q)\n print(\"***********************************************************************\")\n if (algoType == CONFIG_ALGO_BOW):\n print(\"Top 10 results from Bag of words algorithm are:\")\n else:\n print(\"Top 10 results NLP Pipeline algorithm are:\")\n for qa_pair,score in sortedResults:\n if count < RESULTS_TOPN:\n print(qa_pair.answer,score)\n count = count + 1\ndef print_eval_result(evalObj, algoType):\n\n if algoType == CONFIG_ALGO_BOW:\n algoName = \"BagOfWords\"\n else:\n algoName = \"NLP Pipeline\"\n print(\"***********************************************************************\")\n print(\"MRR EVALUATION for algorithm: \", algoName)\n print(\"***********************************************************************\")\n print (evalObj.get_rr())\n print ('------------------------------------------------------------')\n print (\"Total MRR of the QA Set: \",evalObj.get_mrr())\n\ndef 
run_mrr(faq_feat,algoType):\n evaluation = MRREvaluation(algoType, faq_feat)\n evaluation.computeResult()\n print_eval_result(evaluation, algoType)\n\ndef run_userq(user_qa, faq_feat, algoType):\n\n #FIXME: It has to be added to the empty list because nltk_object operates on the list\n #Alt: Alternate approach. Only call __tokenize(). But move stops to a class variable.\n user_q = user_qa[0].question\n if (algoType == CONFIG_ALGO_BOW):\n #BOW specific implementation.\n uq_bow_feat = nltk_objects.NLTKFeatureExtraction(user_qa)\n bow_algo = BOWAlgorithm(user_q, uq_bow_feat, faq_feat)\n resultDict = bow_algo._compute()\n else:\n #NLP Pipeline specific\n uq_nlp_feat = [b.TextFeatureExtraction(user_q, user_qa)]\n\n '''\n Testing code\n '''\n\n tstate = model.State(uq_nlp_feat, faq_feat, model.final_weights, None)\n nlp_rdict = tstate.get_final_scores(model.final_weights)\n resultDict = nlp_rdict[0]\n\n print_results(user_q, resultDict, algoType)\n\ndef space_out():\n print()\n print()\n print()\n \ndef main():\n\n print(\"****** Hummingbird FAQ engine powered by NLTK *********\")\n\n faqs = faq_config.getFAQs()\n\n '''\n TRAINING Code\n '''\n if do_training:\n state = model.train_model(faqs)\n model.final_weights = state.weights\n \n if report_training:\n all_scores = state.get_scores(state.weights)\n for ix, q_score_set in enumerate(all_scores):\n dict_scores = sorted([(ascore, qnum) for qnum, ascore in q_score_set.items()], reverse=True)\n print(state.best_choices[ix])\n for pair in dict_scores:\n print(\"%2d: %f\" % (pair[1], pair[0]))\n print()\n \n if do_main: \n faq_bow_feat = nltk_objects.NLTKFeatureExtraction(faqs)\n faq_nlp_feat = model.get_faq_features(faqs)\n\n run_mrr(faq_bow_feat, CONFIG_ALGO_BOW)\n \n space_out()\n \n run_mrr(faq_nlp_feat, CONFIG_ALGO_NLP)\n\n print(\"You can enter question multiple times. 
Enter quit or Ctrl+c to quit\")\n while 1: \n #'''\n\n space_out()\n user_q = input(\"Enter your question or 'quit' to Exit : \")\n #user_q = \"when is hummingbird season\"\n #user_q = \"Do hummingbirds migrate in winter?\"\n #user_q = \"How fast do hummingbirds' wings beat per second?\"\n\n if user_q == \"\" or user_q == None:\n raise ValueError(\"Invalid question given. Exiting\")\n exit(1)\n elif user_q == \"quit\":\n print(\"Thank you for trying out our FAQ Engine..Exiting\")\n exit(1) \n user_qa = [base_objects.QAPair(user_q, \"\")]\n space_out()\n run_userq(user_qa, faq_bow_feat, CONFIG_ALGO_BOW)\n space_out()\n run_userq(user_qa, faq_nlp_feat, CONFIG_ALGO_NLP)\n \nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5993843674659729, "alphanum_fraction": 0.6037818789482117, "avg_line_length": 37.54237365722656, "blob_id": "631cd95dd335bcc54f48b2549c4a1877c20abf15", "content_id": "7c14387209b260e7bf4b3b45bfddd75dc4f147a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2274, "license_type": "no_license", "max_line_length": 103, "num_lines": 59, "path": "/nlp_algo.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "from sklearn.metrics.pairwise import cosine_similarity\nimport numpy as np\nfrom collections import defaultdict\n\nclass NLPAlgorithm:\n def __init__(self, uquestion, qfeat, docfeat):\n self.user_question = uquestion\n #This will contain the qapair:score\n self.scoreDict = defaultdict()\n self.qafeat = qfeat\n self.docs_feature = docfeat\n #Final results\n self.results = list()\n '''Private abstract function to implement the actual algorithm'''\n def _compute(self):\n raise NotImplementedError(\"Class %s doesn't implement _compute()\" % (self.__class__.__name__))\n\n '''Private abstract function to evaluate the output'''\n def _evaluate(self):\n raise NotImplementedError(\"Class %s doesn't implement _evaluate()\" % (self.__class__.__name__))\n\n 
'''Function to print the output'''\n def _print(self):\n print(\"Final Score is \" + self.score)\n\nclass BOWAlgorithm(NLPAlgorithm):\n def __init__(self, uquestion, qfeat, docfeat):\n super().__init__(uquestion, qfeat, docfeat)\n\n def __compute_cosine(self, query, doc):\n query = np.array(query).reshape(1,-1)\n doc = np.array(doc).reshape(1,-1)\n return cosine_similarity(query, doc)\n def _compute(self):\n\n '''\n TODO: QAFeatureExtraxction object has qa_pairs and _bow in the same order.\n This works as both are sequentially accessed. So _bow index can be used to\n access corresponding qa_pair\n '''\n query_vec = list(self.qafeat.bow[0].values())\n for index, faq_bow in enumerate(self.docs_feature.bow):\n faq_vec = []\n for word in self.qafeat.bow[0]:\n if word in faq_bow:\n faq_vec.append(faq_bow[word])\n else:\n faq_vec.append(0)\n if len(faq_vec) != 0:\n #cosine similarity returns in numpy array. Convert it into regular val\n simScore = self.__compute_cosine(query_vec, faq_vec).tolist()[0][0]\n self.scoreDict[self.docs_feature.qa_pairs[index]] = simScore\n else:\n print(\"No matching words found\")\n return self.scoreDict\n def _evaluate(self):\n #Use the Evaluation objects here\n \n pass\n" }, { "alpha_fraction": 0.6506370306015015, "alphanum_fraction": 0.6607544422149658, "avg_line_length": 38.449493408203125, "blob_id": "232c7519ad85d3901466f1f120449fb34c7c643e", "content_id": "2cc0e9ecf20660ac9456d4376b664062393b5bf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8006, "license_type": "no_license", "max_line_length": 111, "num_lines": 198, "path": "/better_objects.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import nltk\r\nimport base_objects\r\nimport nltk_objects\r\nfrom collections import Counter\r\nimport sklearn.metrics\r\nimport numpy as np\r\nfrom nltk.corpus import wordnet as wn\r\nfrom nltk.parse.dependencygraph import 
DependencyGraph\r\n\r\nsynset_folder = 'data/synsets/'\r\nsynset_filename_format = \"%d_%s.txt\" #%d is from 1 to 50, %s is question or answer\r\n\r\ndepgraph_folder = 'data/depgraphs/'\r\ndepgraph_filename_format = \"%d_%s.conll\" #%d is from 1 to 50, %s is question or answer\r\n\r\n#TODO: normalize words??\r\n#TODO: do lemmatize and stem need context?? tokens were already sorted\r\n\r\nstops = set(nltk.corpus.stopwords.words('english'))\r\nlemmatizer = nltk.stem.wordnet.WordNetLemmatizer()\r\nstemmer = nltk.stem.PorterStemmer()\r\n\r\nflatten = lambda l: [item for sublist in l for item in sublist]\r\n\r\n#array of array of sorted answer tokens.\r\ndef do_tokenize(text):\r\n return sorted(nltk.word_tokenize(text))\r\n \r\n#array of array of sorted answer tokens, not including stop words.\r\ndef do_tokenize_no_stops(tokens):\r\n return [w for w in tokens if w not in stops]\r\n \r\n#array of array of sorted lemmas including stop words\r\ndef do_lemmatize(tokens):\r\n return [lemmatizer.lemmatize(w) for w in tokens]\r\n \r\n#array of array of sorted stems including stop words\r\ndef do_stem(tokens):\r\n return [stemmer.stem(w) for w in tokens]\r\n \r\n#array of array of tuples of the form ('word', 'pos')\r\ndef do_pos_tag(text):\r\n return sorted(nltk.pos_tag(nltk.word_tokenize(text)))\r\n \r\nclass TextFeatureExtraction(object):\r\n def __init__(self, text,qapair):\r\n self.tokens = do_tokenize(text)\r\n self.tokens_no_stops = do_tokenize_no_stops(self.tokens)\r\n self.lemmas = do_lemmatize(self.tokens)\r\n self.stems = do_stem(self.tokens)\r\n self.pos_tags = do_pos_tag(text)\r\n self.synsets = []\r\n self.depgraphs = []\r\n \r\n self.depgraph_deps = []\r\n self.depgraph_rels = []\r\n \r\n self.synset_lemmas = []\r\n self.antonym_lemmas = []\r\n self.hyponym_lemmas = []\r\n self.hypernym_lemmas = []\r\n self.part_meronym_lemmas = []\r\n self.part_holonym_lemmas = []\r\n self.member_meronym_lemmas = []\r\n self.member_holonym_lemmas = []\r\n 
self.substance_meronym_lemmas = []\r\n self.substance_holonym_lemmas = []\r\n \r\n self.wn_definitions = [] #just going to be a list of words\r\n self.wn_examples = [] #just going to be a list of words\r\n self.qapair = qapair\n def add_wordnet_features(self):\r\n #TODO: hack. do this better\r\n self.synsets = [s for s in self.synsets if s is not None]\r\n \r\n self.load_all_wordnet_lemmas()\r\n self.load_all_wordnet_definitions()\r\n self.load_all_wordnet_examples()\r\n \r\n def load_all_wordnet_definitions(self):\r\n self.wn_definitions = flatten([s.definition().split() for s in self.synsets])\r\n \r\n def load_all_wordnet_examples(self):\r\n for s in self.synsets:\r\n self.wn_definitions.extend(flatten([e.split() for e in s.examples()]))\r\n \r\n #grab all lemmas from wordnet possible\r\n def load_all_wordnet_lemmas(self):\r\n def internal_synset_lemmas(syns):\r\n return flatten([s.lemma_names() for s in syns])\r\n \r\n for s in self.synsets:\r\n self.synset_lemmas.extend(s.lemma_names())\r\n for lemma in s.lemmas():\r\n self.antonym_lemmas.extend([a.name() for a in lemma.antonyms()])\r\n self.hyponym_lemmas.extend(internal_synset_lemmas(s.hyponyms()))\r\n self.hypernym_lemmas.extend(internal_synset_lemmas(s.hypernyms()))\r\n self.part_meronym_lemmas.extend(internal_synset_lemmas(s.part_meronyms()))\r\n self.part_holonym_lemmas.extend(internal_synset_lemmas(s.part_holonyms()))\r\n self.member_meronym_lemmas.extend(internal_synset_lemmas(s.member_meronyms()))\r\n self.member_holonym_lemmas.extend(internal_synset_lemmas(s.member_holonyms()))\r\n self.substance_meronym_lemmas.extend(internal_synset_lemmas(s.substance_meronyms()))\r\n self.substance_holonym_lemmas.extend(internal_synset_lemmas(s.substance_holonyms()))\r\n \r\n def add_depgraph_features(self):\r\n #('firstword', 'secondword', 'dependency')\r\n for dg in self.depgraphs:\r\n for addr, item in dg.nodes.items():\r\n for dep, depaddr in item['deps'].items():\r\n if len(depaddr) > 0:\r\n item_lemma = 
item['lemma']\r\n if item_lemma is None:\r\n item_lemma = \"\"\r\n self.depgraph_deps.append((item_lemma, dep, dg.nodes[depaddr[0]]['lemma']))\r\n \r\n #('word', 'relation')\r\n for dg in self.depgraphs:\r\n for item in dg.nodes.values():\r\n if item['lemma'] != \"\" and item['lemma'] is not None and item['rel'] != \"\" and item['rel'] is not None:\r\n self.depgraph_rels.append((item['lemma'], item['rel']))\r\n \r\n#takes a text feature extraction and a filename and hooks you up with the synsets\r\ndef add_synsets(tfe, filename):\r\n lines = [line.rstrip('\\n') for line in open(filename)]\r\n synset_names = [line.split()[1] for line in lines] #grab the synset names\r\n tfe.synsets.extend([wn.synset(synset_name) for synset_name in synset_names])\r\n \r\n#TODO: consolidate load_all_synsets and load_all_depgraphs\r\ndef load_all_synsets(tfes):\r\n current = 1\r\n for tfe in tfes:\r\n filename_question = synset_folder + (synset_filename_format % (current, \"question\"))\r\n filename_answer = synset_folder + (synset_filename_format % (current, \"answer\"))\r\n add_synsets(tfe, filename_question)\r\n add_synsets(tfe, filename_answer)\r\n current += 1\r\n \r\ndef load_all_depgraphs(tfes):\r\n current = 1\r\n for tfe in tfes:\r\n filename_question = depgraph_folder + (depgraph_filename_format % (current, \"question\"))\r\n filename_answer = depgraph_folder + (depgraph_filename_format % (current, \"answer\"))\r\n graphs_question = DependencyGraph.load(filename_question)\r\n graphs_answer = DependencyGraph.load(filename_answer)\r\n tfe.depgraphs = graphs_question + graphs_answer\r\n \r\ndef get_answers_features(qapairs):\r\n ret = []\r\n for qa in qapairs:\r\n ret.append(TextFeatureExtraction(\"%s %s\" % (qa.question, qa.answer), qa))\r\n return ret\r\n \r\ndef get_math_vectors(items_one, items_two, lt):\r\n counters = (Counter(items_one), Counter(items_two))\r\n \r\n #sort because we're going to be walking the lists\r\n items = (sorted(counters[0].items()), 
sorted(counters[1].items()))\r\n \r\n vectors = ([], [])\r\n \r\n key_indices = (0, 0)\r\n \r\n while key_indices[0] < len(items[0]) and key_indices[1] < len(items[1]):\r\n itempair = (items[0][key_indices[0]], items[1][key_indices[1]])\r\n if lt(itempair[0][0], itempair[1][0]): #comparing the keys\r\n vectors[0].append(itempair[0][1]) #add the count to the math vector\r\n vectors[1].append(0)\r\n key_indices = (key_indices[0] + 1, key_indices[1])\r\n elif lt(itempair[1][0], itempair[0][0]):\r\n vectors[0].append(0)\r\n vectors[1].append(itempair[1][1]) #add the count to the math vector\r\n key_indices = (key_indices[0], key_indices[1] + 1)\r\n else:\r\n vectors[0].append(itempair[0][1]) #add the count to the math vector\r\n vectors[1].append(itempair[1][1]) #add the count to the math vector\r\n key_indices = (key_indices[0], key_indices[1] + 1)\r\n\r\n while key_indices[0] < len(items[0]):\r\n vectors[0].append(items[0][key_indices[0]][1]) #add the count to the math vector\r\n vectors[1].append(0)\r\n key_indices = (key_indices[0] + 1, key_indices[1])\r\n\r\n while key_indices[1] < len(items[1]):\r\n vectors[0].append(0)\r\n vectors[1].append(items[1][key_indices[1]][1]) #add the count to the math vector\r\n key_indices = (key_indices[0], key_indices[1] + 1)\r\n\r\n return vectors\r\n \r\ndef cosine_similarity(a, b):\r\n #the semantics of cosine_similarity are annoying.\r\n #it must make sense in general because it's really annoying.\r\n return sklearn.metrics.pairwise.cosine_similarity([a], [b])[0][0] #seriously, it's a number in a nested array\r\n \r\n#a and b are already scored vectors\r\ndef score_features(scores, weights):\r\n weighted_sims = [c * d for c, d in zip(scores, weights)]\r\n return np.linalg.norm(weighted_sims) / np.linalg.norm(weights)" }, { "alpha_fraction": 0.7443286180496216, "alphanum_fraction": 0.7559779286384583, "avg_line_length": 30.346153259277344, "blob_id": "56b15716d54bfac1615a6f5f46fcdd14daefbe1d", "content_id": 
"d7b225ccff1109af82caa125144186cd97ce6259", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1631, "license_type": "no_license", "max_line_length": 353, "num_lines": 52, "path": "/INSTALL.md", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "\n## Steps to Install\n\n\nThe following dependencies need to be installed:\n```\n pip install nltk\n```\n* If you are on python2, you need to switch to python 3.\nby doing this on mac:\n```\nbrew install python3\npip3 install nltk\n```\nYou need to use python3 to execute any python script.\nThis is convenient if you do not want to change your default python environment to python 3.\n\nIf nltk gives an error about 'charmap' encoding or decoding not working and you're on Windows, run the following command in your terminal:\n```\n set PYTHONIOENCODING=\"UTF-8\"\n```\nJava is needed for the Stanford Parser, used for dependency parsing. At least version 1.8 is needed. Use a similar command to this on Windows to set JAVAHOME:\n```\n set JAVAHOME=C:\\Program Files\\Java\\jre-10\n```\n* If you are on Mac:\n\n1. Install Java using\n```\nbrew cask install java\nif homebrew is not configured\n/usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"\nbrew cask install java\n```\nThe Stanford files are not included in this release. They need to be manually downloaded from https://stanfordnlp.github.io/CoreNLP/#programming-languages-and-operating-systems and added to the root of the repository. Look in nlp_config.py for hints about where to place the downloaded artifacts (put it in the deps folder in the root of the directory).\n\n2. Install python libraries\n```\npip3 install scipy\npip3 install sklearn\npip3 install numpy\n```\n3. 
Install wordnet_ic\n```\npython3\n>>> import nltk\n>>> nltk.download('wordnet_ic')\n```\n\n## Steps to Run\n\nGo to the project folder through command line and run below command.\npython3 faq_nlp.py\n" }, { "alpha_fraction": 0.6139023900032043, "alphanum_fraction": 0.6182243824005127, "avg_line_length": 45.28333282470703, "blob_id": "6e3368a87a4dc1825d70a79d9b866c31c389ae79", "content_id": "e2548c3e65a80b7d0b3dca547e1ad7ae3aa8c212", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5553, "license_type": "no_license", "max_line_length": 148, "num_lines": 120, "path": "/nltk_objects.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import base_objects\nimport nlp_config\nimport nltk\nfrom collections import Counter\nfrom nltk.parse.stanford import StanfordDependencyParser\nfrom nltk.corpus import wordnet as wn\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('stopwords')\nnltk.download('averaged_perceptron_tagger')\n\ndependency_parser = StanfordDependencyParser(path_to_jar=nlp_config.path_to_stanford_jar, path_to_models_jar=nlp_config.path_to_stanford_models_jar)\n\ndef penn2morphy(penntag, returnNone=False):\n morphy_tag = {'NN':wn.NOUN, 'JJ':wn.ADJ,\n 'VB':wn.VERB, 'RB':wn.ADV}\n try:\n return morphy_tag[penntag[:2]]\n except:\n return None if returnNone else ''\n \ndef get_synset_name(lemma, pos, num=1):\n return \"%s.%s.%02d\" % (lemma, pos, num)\n\nclass NLTKFeatureExtraction( base_objects.QAFeatureExtraction ):\n def __init__( self, qa_pairs ):\n super().__init__(qa_pairs)\n \n '''\n TODO: We may need to refactor rest of the functions as well if we have extract the features for given quesiton\n The other approach is to not make answer mandatory so that we can extract features only for questions!\n '''\n def __tokenize_text( self, text, stops ):\n return [w for w in nltk.word_tokenize(text) if w not in stops]\n \n def _tokenize( self ):\n self._tokens = 
[]\n stops = set(nltk.corpus.stopwords.words(nlp_config.default_locale))\n for qa in self.qa_pairs:\n question_tokens = self.__tokenize_text(qa.question, stops)\n answer_tokens = self.__tokenize_text(qa.answer, stops)\n self._tokens.append((question_tokens, answer_tokens))\n\n def _get_bow( self ):\n self._bow = []\n for tokenpair in self.tokens:\n #FIXME: We need to create one bow for both q & a ??\n self._bow.append(Counter(tokenpair[0] + tokenpair[1]))\n\n def _tokenize_sentences( self ):\n self._sentence_tokens = []\n for qa in self.qa_pairs:\n question_sentences = nltk.sent_tokenize(qa.question)\n answer_sentences = nltk.sent_tokenize(qa.answer)\n self._sentence_tokens.append((question_sentences, answer_sentences))\n \n def _lemmatize( self ):\n self._lemmas = []\n lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()\n for tokenpair in self.tokens:\n question_lemmas = [lemmatizer.lemmatize(x) for x in tokenpair[0]]\n answer_lemmas = [lemmatizer.lemmatize(x) for x in tokenpair[1]]\n self._lemmas.append((question_lemmas, answer_lemmas))\n \n def _stem( self ):\n self._stems = []\n stemmer = nltk.stem.PorterStemmer()\n for tokenpair in self.tokens:\n question_stems = [stemmer.stem(x) for x in tokenpair[0]]\n answer_stems = [stemmer.stem(x) for x in tokenpair[1]]\n self._stems.append((question_stems, answer_stems))\n \n def _pos_tag( self ):\n self._pos_tags = []\n for tokenpair in self.sentence_tokens:\n question_word_tokens = [nltk.word_tokenize(sentence) for sentence in tokenpair[0]]\n answer_word_tokens = [nltk.word_tokenize(sentence) for sentence in tokenpair[1]]\n question_pos_tags = [nltk.pos_tag(sentence) for sentence in question_word_tokens]\n answer_pos_tags = [nltk.pos_tag(sentence) for sentence in answer_word_tokens]\n self._pos_tags.append((question_pos_tags, answer_pos_tags))\n \n def _graph_dependencies( self ):\n self._dependency_graphs = []\n for tokenpair in self.sentence_tokens:\n question_graph = [dependency_parser.raw_parse(sentence) for 
sentence in tokenpair[0]]\n answer_graph = [dependency_parser.raw_parse(sentence) for sentence in tokenpair[1]]\n self._dependency_graphs.append((question_graph, answer_graph))\n \n #the result of this function is 2-tuples of arrays of arrays of synsets. each inner array is one sentence.\n #the first outer array in a 2-tuple is for the questions. the second outer array is for the answers.\n #\n #to get the rest of the relations, you can use:\n # syn.hypernyms()\n # .hyponyms()\n # .part_meronyms()\n # .substance_meronyms()\n # .part_holonyms()\n # .substance_holonyms()\n def _get_synsets( self ):\n self._synsets = []\n lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()\n for qa_pos_tags in self.pos_tags:\n #qa_pos_tags[0] is an array of arrays of pos tags for the question sentences. ('constructor', 'NN')\n #qa_pos_tags[1] is an array of arrays of pos tags for the answers sentences.\n \n def get_synsets_for_pos_tags(a_pos_tags):\n ret_synsets = []\n for word_pos_pair in a_pos_tags:\n wordnet_pos = penn2morphy(word_pos_pair[1])\n if wordnet_pos:\n try:\n ret_synsets.append(wn.synset(get_synset_name(lemmatizer.lemmatize(word_pos_pair[0]), wordnet_pos)))\n except:\n ret_synsets.append(None) #not sure if we should append none or just pass\n return ret_synsets\n \n q_sentence_synsets = [get_synsets_for_pos_tags(sentence_tags) for sentence_tags in qa_pos_tags[0]] \n a_sentence_synsets = [get_synsets_for_pos_tags(sentence_tags) for sentence_tags in qa_pos_tags[1]]\n \n self._synsets.append((q_sentence_synsets, a_sentence_synsets))" }, { "alpha_fraction": 0.6176669001579285, "alphanum_fraction": 0.6217127442359924, "avg_line_length": 30.282608032226562, "blob_id": "2f89a6f2c01edb3a02ee6caf643f6e9f8eab143a", "content_id": "fdfe3fbf3ccd705b4d54e69ec7f2655447a0295f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1483, "license_type": "no_license", "max_line_length": 91, "num_lines": 46, "path": 
"/serialize_synsets.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import nltk_objects as no\r\nimport faq_config\r\nimport lesk\r\n\r\n#shoutout to Avicii\r\n\r\n#TODO: not sure if i need to remove stopwords before lemmatizing (ok, tokenizing does that)\r\n# but i'm not sure if tokenizing should do that........\r\n\r\n#TODO: some words (like In) may need to be lowercased\r\n\r\n#TODO: maybe we should leave stopwords. like \"to\" should be there for verbs i feel...\r\n\r\n#TODO: words like \"United States\" are being tagged with synsets separately\r\n\r\n#TODO: need to add in parts of speech. look at question 50. \"build\" should not be a noun\r\n\r\nsub_folder = 'data/synsets'\r\n\r\nfaqs = faq_config.getFAQs()\r\nfeature_extractor = no.NLTKFeatureExtraction(faqs)\r\n\r\n#flatten = lambda l: [item for sublist in l for item in sublist]\r\n\r\ndef save_synsets(lemmas, filename):\r\n with open(filename, \"w+\") as outfile:\r\n first = True\r\n for lemma in lemmas:\r\n lemma_synset = lesk.get_lemma_synset(lemma, lemmas)\r\n \r\n if lemma_synset is not None:\r\n if not first:\r\n outfile.write('\\n')\r\n outfile.write(\"%s %s\" % (lemma, lemma_synset.name()))\r\n first = False\r\n\r\nfaq_number = 1\r\n \r\nfor faq_lemmas in feature_extractor.lemmas:\r\n q_lemmas = faq_lemmas[0]\r\n a_lemmas = faq_lemmas[1]\r\n \r\n save_synsets(q_lemmas, \"%s/%d_question.txt\" % (sub_folder, faq_number))\r\n save_synsets(a_lemmas, \"%s/%d_answer.txt\" % (sub_folder, faq_number))\r\n \r\n faq_number += 1" }, { "alpha_fraction": 0.5476394891738892, "alphanum_fraction": 0.5553647875785828, "avg_line_length": 32.29999923706055, "blob_id": "1b5e80b4feea564d8ef7c6ac9d1f3f436c652615", "content_id": "a8571aaddc59528473591119f76f849d7f2aec72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2330, "license_type": "no_license", "max_line_length": 87, "num_lines": 70, "path": "/nlp_eval.py", "repo_name": 
"lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import nltk_objects\nimport faq_config\nimport base_objects\nimport operator\n\nfrom nlp_algo import BOWAlgorithm\nfrom collections import defaultdict\nimport better_objects as b\nimport part4tester as model\n\nTOPTEN_ANS = []\n\nclass Evaluation(object):\n\n def __init__(self, algoType,fext):\n self.scores = 0\n self.count = 0\n self.aType = algoType\n self.rdict = defaultdict()\n self.fext = fext\n\n def get_topNResults(self, resultDict, n):\n sortedResults = sorted(resultDict.items(), key=lambda x:x[1], reverse=True)\n count = 0\n for qa_pair,score in sortedResults:\n if count < n:\n TOPTEN_ANS.append(qa_pair.answer)\n count = count + 1\n\n def computeResult(self):\n #For evaluation of BOW\n eval_qns = faq_config.getEvaluationQns()\n for qns in eval_qns:\n TOPTEN_ANS.clear()\n user_qa = [base_objects.QAPair(qns.question, \"\")]\n\n if self.aType == 1:\n #BOW Type\n user_feat_extractor = nltk_objects.NLTKFeatureExtraction(user_qa)\n bow_algo = BOWAlgorithm(user_qa, user_feat_extractor, self.fext)\n resultDict = bow_algo._compute()\n else:\n uq_nlp_feat = [b.TextFeatureExtraction(qns.question, qns)]\n tstate = model.State(uq_nlp_feat, self.fext, model.final_weights, None)\n resultDict = tstate.get_final_scores(model.final_weights)[0]\n self.get_topNResults(resultDict, 10)\n index_ = TOPTEN_ANS.index(qns.answer) if qns.answer in TOPTEN_ANS else -1\n print (\"Question is: \",qns.question)\n print (\"Correct answer at index: \", index_)\n print (\"--------------------------------------------\")\n self.rdict.update({qns.question : index_+1})\n\nclass MRREvaluation(Evaluation):\n\n def __init__(self, algoType, fext):\n super().__init__(algoType, fext)\n\n def get_rr(self):\n i = 0\n for key, value in self.rdict.items():\n if value != 0:\n rr = 1.0 / float(value)\n print (key, rr)\n self.scores += rr\n else:\n print (key, 0)\n self.count += 1\n # mrr\n def get_mrr(self):\n return self.scores / self.count" 
}, { "alpha_fraction": 0.7529183030128479, "alphanum_fraction": 0.7597275972366333, "avg_line_length": 23.5, "blob_id": "b909cc9765ec302b79f9433899a4ae4cf770eb28", "content_id": "93a1ed8f295c29a7c4e7a99c153adeb17acc18f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1028, "license_type": "no_license", "max_line_length": 79, "num_lines": 42, "path": "/tester.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import nltk_objects\nimport faq_config\n\nfaqs = faq_config.getFAQs()\nfeature_extractor = nltk_objects.NLTKFeatureExtraction(faqs)\n\nfor qatoken in feature_extractor.tokens:\n print(qatoken)\n\nfor qatoken in feature_extractor.sentence_tokens:\n print(qatoken)\n\nfor qabow in feature_extractor.bow:\n print(qabow)\n\nfor qalemma in feature_extractor.lemmas:\n print(qalemma)\n \nfor qastem in feature_extractor.stems:\n print(qastem)\n \nfor postag in feature_extractor.pos_tags:\n print(postag)\n \nfor graphs in feature_extractor.dependency_graphs:\n print(graphs)\n\nfor syns in feature_extractor.synsets:\n print(syns)\n\n'''\nTest cases:\n\nMandatory for Q2:\n1. Exact same faq question in the input: It should return the same answer\n2. Couple of words missing: It should return the same answer\n3. Words jumbled or transposed: It should return the same answer\n4. 
Synonyms or similar semantic meaing: Doesn't expect to return correct answer\n\nQ3: Should show imporovements over Q2\nTODO: Write the updated test cases here\n'''" }, { "alpha_fraction": 0.7971970438957214, "alphanum_fraction": 0.7988458275794983, "avg_line_length": 48.46938705444336, "blob_id": "9d5f7271792fa2fd21e2b9220d24d1243ba9dbdf", "content_id": "964d8ffca6e99d56496a031475ed1b43a3a7f954", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2438, "license_type": "no_license", "max_line_length": 691, "num_lines": 49, "path": "/readme.md", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "\n\n## Semantic Matching FAQ Application\n\n### Problem Description\n\nGiven 50 frequently asked questions (FAQs) about hummingbirds, the task is to implement a naïve approach and a more sophisticated approach using Natural Language Processing techniques to retrieve the most related FAQs when presented with a user’s question about hummingbirds. For instance, when the user asks “How many years will a hummingbird live?”, the system must attempt to provide a ranked list of the most relevant FAQs. The naïve approach should treat the questions and answers as bags of words and match the word tokens against a user’s question. 
The other approach must use tokens, lemmas, stems, parts of speech, parse trees, and Wordnet to find matches in a more intelligent way.\n\n\n### Various modules in solution:\n\nCollection of features\nProcessing of features\nCalculation of Weights\nLearning weights\nFinal score calculation\nRanking of answers\n\nA details description of each module can be found in [final report]( project_report.pdf)\n\n### Programming Tools:\n* Python 3\n\n\tPython 3 is the primary programming language used for this project.\n* Java\n\n\tWhile no Java code was written by the team members, jar libraries from the Stanford Parser were used for dependency parsing.\n* NLTK\n\n\tNatural Language Toolkit. This is a library for Python that provides a host of natural language processing tools. These include access to wordnet, various corpora, and dependency parsers.\n* Stanford Parser\n\n\tStanford’s dependency parser was used to get dependency trees from questions and answers. We used a python library that wrapped the java libraries.\n* Wordnet\n\n\tThe wordnet corpus was used via NLTK. We collected synsets via the Lesk algorithm and used their similarities and definitions and examples. 
We also used the Brown corpus for information content that was exposed via Wordnet.\n* Brown Corpus\n\n\tWe used the Brown corpus that was provided with Wordnet for the information content when we took the JCN similarity of various synsets.\n* Numpy\n\n\tNumpy is a Python library that we used for linear algebra operations such as taking the norm of a vector.\n* Sklearn\n\n\tSklearn is a Python library that we used for cosine similarity calculations.\n* Brown Corpus\n\n\tWe used the Brown corpus that was provided with Wordnet for the information content when we took the JCN similarity of various synsets.\n\n### Output:\nPlease refer [final report]( project_report.pdf).\n" }, { "alpha_fraction": 0.7086419463157654, "alphanum_fraction": 0.7481481432914734, "avg_line_length": 32.83333206176758, "blob_id": "ecdb98376fd16ec6fcec8a1f8b23cfa18781a9e9", "content_id": "c573129a1245a2b7f18a0dd459a0d65fa563b6d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 90, "num_lines": 12, "path": "/nlp_config.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import os\n\nCONFIG_ALGO_BOW = 1\nCONFIG_ALGO_NLP = 2\ndefault_locale = 'english'\n\nfaq_input_file = 'input/Hummingbirds.csv'\nevaluation_input_file = 'input/evaluationInput.csv'\n\npath_to_stanford_lib = r'deps/stanford-corenlp-full-2018-02-27'\npath_to_stanford_jar = path_to_stanford_lib + r'/stanford-corenlp-3.9.1.jar'\npath_to_stanford_models_jar = path_to_stanford_lib + r'/stanford-corenlp-3.9.1-models.jar'" }, { "alpha_fraction": 0.6111645698547363, "alphanum_fraction": 0.6159769296646118, "avg_line_length": 29.545454025268555, "blob_id": "66fd95cc4fe4bc9518cfb596219ef8c23749a904", "content_id": "3c26d6df3fd1041ea52153b9e8b3a920f7fffb3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1039, "license_type": "no_license", 
"max_line_length": 103, "num_lines": 33, "path": "/serialize_tests.py", "repo_name": "lopamd/FAQ-semantic-matching", "src_encoding": "UTF-8", "text": "import nltk_objects as no\r\nimport faq_config\r\n\r\nsub_folder = 'data/depgraphs'\r\n\r\nfaqs = faq_config.getFAQs()\r\nfeature_extractor = no.NLTKFeatureExtraction(faqs)\r\n\r\ndef save_dependency_graphs(graphs, filename):\r\n with open(filename, \"w+\") as outfile:\r\n first = True\r\n for graph in graphs:\r\n if not first:\r\n outfile.write('\\n')\r\n outfile.write(graph.to_conll(4))\r\n first = False\r\n \r\ndef extract_graphs(alist):\r\n ret = []\r\n for iter in alist:\r\n ret.extend([x for x in iter])\r\n return ret\r\n\r\nfaq_number = 1\r\n \r\nfor faq_graphs in feature_extractor.dependency_graphs:\r\n q_graphs = faq_graphs[0] #these are lists of list iterators\r\n a_graphs = faq_graphs[1] # because parsing returns a list iterator\r\n \r\n save_dependency_graphs(extract_graphs(q_graphs), \"%s/%d_question.conll\" % (sub_folder, faq_number))\r\n save_dependency_graphs(extract_graphs(a_graphs), \"%s/%d_answer.conll\" % (sub_folder, faq_number))\r\n \r\n faq_number += 1" } ]
15
Nikil9/Truth-Table-of-proportional-formula-generetor
https://github.com/Nikil9/Truth-Table-of-proportional-formula-generetor
47acf7b395ed0c766347f3605ef2004df7da0e45
151637f012f611e78c3b20efc8b37bb69ded91c3
7cf7c91877464422ae29003a2d19556f316321c2
refs/heads/master
2022-10-31T09:51:47.375040
2020-06-14T16:32:55
2020-06-14T16:32:55
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8389830589294434, "alphanum_fraction": 0.8389830589294434, "avg_line_length": 58, "blob_id": "f7e70e2d06e7620aea43138c3821d852e40acc91", "content_id": "16ad1c5532b7a3bb8dc5cac0b947b94b02528048", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 118, "license_type": "no_license", "max_line_length": 69, "num_lines": 2, "path": "/README.md", "repo_name": "Nikil9/Truth-Table-of-proportional-formula-generetor", "src_encoding": "UTF-8", "text": "# Truth-Table-of-proportional-formula-generetor\nThis repo contains truth table generator of any preportional formula.\n" }, { "alpha_fraction": 0.5229013562202454, "alphanum_fraction": 0.5321244597434998, "avg_line_length": 36.3801155090332, "blob_id": "9ab9c5d9a58520862c16a43a1d947866f2cd031b", "content_id": "a150025ac706f05826121c42342c649bf513ddce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6397, "license_type": "no_license", "max_line_length": 143, "num_lines": 171, "path": "/TruthTableGenerator.py", "repo_name": "Nikil9/Truth-Table-of-proportional-formula-generetor", "src_encoding": "UTF-8", "text": "\n\n\ndef replace_operator(operand1,operand2,operator): # changing the operator\n if operator =='.':\n return ' ( '+operand1 +' and '+operand2+' ) '\n elif operator =='+':\n return ' ( '+operand1 +' or '+operand2+' ) '\n elif operator =='*':\n return ' ( '+' not '+operand1 +' or '+operand2+' ) '\n elif operator =='==':\n return ' ( ( '+' not '+operand1 +' or '+operand2+' ) '+' and '+' ( '+' not '+operand2 +' or '+operand1+' ) ) '\ndef convert_dnf_natural(operand_Stack): #to convert eval() known operators\n dnf_formula_chars=[]\n for ch in (operand_Stack[0].split()):\n # print(ch)\n if ch=='not':\n dnf_formula_chars.append(' ~ ')\n elif ch=='or':\n dnf_formula_chars.append(' + ')\n elif ch=='and':\n dnf_formula_chars.append(' . 
')\n else:\n dnf_formula_chars.append(ch)\n str=\"\"\n for x in dnf_formula_chars:\n str=str+x\n return str\ndef convert_dnf_valued(operand_Stack,list_operands,bits): #converting to eval known form and replacing all operands to their bits values\n dnf_formula_chars=[]\n for ch in (operand_Stack[0].split()):\n # print(ch)\n if ch=='not':\n dnf_formula_chars.append(' not ')\n elif ch=='or':\n dnf_formula_chars.append(' or ')\n elif ch=='and':\n dnf_formula_chars.append(' and ')\n elif ch=='(':\n dnf_formula_chars.append(' ( ')\n elif ch==')':\n dnf_formula_chars.append(' ) ')\n else:\n ch_index=list_operands.index(ch)\n if(bits[ch_index]=='0'):\n dnf_formula_chars.append(\" False \")\n else:\n dnf_formula_chars.append(\" True \")\n\n str=\"\"\n for x in dnf_formula_chars:\n str=str+x\n return str\ndef find_product(bits,list_operands): #finding single product\n product=[]\n product.append( ' ( ')\n for i in range(len(bits)):\n if bits[i] == '0':\n product.append('~'+list_operands[i])\n else:\n product.append(list_operands[i])\n if(i<len(bits)-1):\n product.append(' . 
')\n product.append(' ) ')\n return product\ndef convertToDnf(formula):\n formula_chars = [char for char in formula] #changing formula characters to a list\n l=len(formula_chars)\n if(formula_chars[0]!='('):\n formula_chars.append(')')\n formula_chars.insert(0,'(')\n\n k=0\n for i in range(l-1):\n if(formula_chars[i]=='~' and (formula_chars[i+1]!='(')): #handling negetion\n formula_chars[i+1]='~'+formula_chars[i+1]\n del formula_chars[i]\n k=k+1\n operator_list=['~','.','+','*','==']\n operator_Stack = [] #defining stacks for operator and operands\n operand_Stack = []\n flag = 0\n temp_list=[]\n flag=0\n for i in range(len(formula_chars)):\n if formula_chars[i]=='=':\n flag=flag+1\n if flag==2:\n temp_list.append(formula_chars[i]+formula_chars[i])\n flag=0\n continue\n temp_list.append(formula_chars[i])\n formula_chars=temp_list\n for i in range(len(formula_chars)):\n c = formula_chars[i]\n if (formula_chars[i] == '('): #pushing '(' simply\n operator_Stack.append('(')\n continue\n elif(formula_chars[i]==')'): #poping whwn ')' encounters\n if(operator_Stack[len(operator_Stack)-1]=='~'):\n operator_Stack.pop()\n operator_Stack.pop()\n val=operand_Stack.pop()\n operand_Stack.append('('+' not '+val+' ) ')\n continue\n operand2=operand_Stack.pop()\n operand1=operand_Stack.pop()\n operator=operator_Stack.pop()\n operator_Stack.pop()\n operand_Stack.append(replace_operator(operand1,operand2,operator)) #evaluating with operands with operator and push to the stack\n continue\n if c in operator_list:\n operator_Stack.append(c)\n else:\n operand_Stack.append(c)\n while(len(operator_Stack)!=0):\n opr=operator_Stack.pop()\n if(opr=='~'):\n operand_Stack.append('('+' not '+operand_Stack.pop())\n operator_Stack.pop()\n else:\n operand2=operand_Stack.pop()\n operand1=operand_Stack.pop()\n operator_Stack.pop()\n operand_Stack.append(replace_operator(operand1,operand2,opr))\n\n #############################################################################\n\n 
dnf_formula_chars=convert_dnf_natural(operand_Stack) #converting to reduced formula\n reduced_formula=dnf_formula_chars\n list_operands=[]\n for ch in formula_chars:\n if ch not in operator_list+['(',')']:\n list_operands.append(ch)\n list_operands=list(set(list_operands))\n list_operands.sort() #all operand list\n products=[]\n j=0\n for i in range (pow(2,len(list_operands))): #running all bits combinations to evaluate dnf using eval function\n bits=\"{0:b}\".format(i)\n bits = [char for char in bits]\n l = len(list_operands) - len(bits)\n temp = ['0'] * l\n bits = temp + bits\n dnf_formula_chars1 = convert_dnf_valued(operand_Stack,list_operands,bits)\n evaluation=str(eval(dnf_formula_chars1))\n if j==0:\n print(\"###########Truth Table##########\") #Generating Truth table with eval function\n print(list_operands,\" Formula value \")\n j=j+1\n print(bits,\" \",evaluation)\n if evaluation=='True':\n products =products+ find_product(bits,list_operands)\n products=products+[' + ']\n temp_dnf=\"\"\n for t in products:\n temp_dnf=temp_dnf+t\n temp_dnf=temp_dnf[0:-2]\n temp_dnf_2 = [char for char in temp_dnf]\n for i in range (len(temp_dnf_2)):\n if(temp_dnf_2[i]=='~' and temp_dnf_2[i+1]=='~'):\n temp_dnf_2[i]=\"\"\n temp_dnf_2[i+1]=\"\"\n final_dnf=\"\"\n for ch in temp_dnf_2:\n final_dnf=final_dnf+ch\n print(\"DNF formula: \", final_dnf)\n print(\"Reduced Formula: \", reduced_formula)\n\nprint(\"Please Use optimal parentheses otherwise error may occur\")\nprint (\"Enter your formula:\")\nformula=input()\n\nconvertToDnf(formula)\n\n\n" } ]
2
phihhim/acu-sdk
https://github.com/phihhim/acu-sdk
ada34e041f4b478a5227f84a7de7fb3c5c4fe890
1a92205388376eddb2fd997c2854ca22a3cbaabd
f710c51860835f5636a23ca258e71687f5d33bd7
refs/heads/main
2023-08-14T10:16:08.258224
2021-10-01T03:32:43
2021-10-01T03:32:43
412,303,106
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5585079789161682, "alphanum_fraction": 0.6504373550415039, "avg_line_length": 31.047618865966797, "blob_id": "74811b87086effda7d9d4761f6219b17658d430f", "content_id": "1c0e246c653b3ce03ffdc17738a22d973fc8de5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6361, "license_type": "no_license", "max_line_length": 131, "num_lines": 189, "path": "/README.md", "repo_name": "phihhim/acu-sdk", "src_encoding": "UTF-8", "text": "# acu-sdk\n\n\n## Core:\n### Class Acunetix:\n- ** Gọi 1 service:\n - Yêu cầu url tới server Acunetix + Token\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n # service = <class Acunetix>\n ```\n- **create_target(url, description=\"\")** \n - tạo 1 target \n - input: url, có thể có description hoặc không\n - output: json | `None`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n target = service.create_target('http://google.com','this is a description')\n ```\n\n- **create_targets(list_target)**\n - tạo nhiều target \n - input: danh sách target `list_target`\n - output: json | `[]`\n ```python\n list_target = [{\"address\": 'http://google.com',\"description\": \"ndqk\"}, {\"address\": 'http://google.com',\"description\": \"ndqk2\"}]\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n targets = service.create_targets(list_target)\n ```\n\n- **get_target_by_id(target_id)** \n - lấy 1 target theo id. 
\n - input: `target_id`\n - ouput: json | `None`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n target = service.get_target_by_id('123pl15ni3h45b7g8v9')\n\n ```\n\n- **get_targets_by_ids(list_id)**\n - lấy danh sách target theo danh sách id\n - input: danh sách id `list_id`\n - output: json | `[]`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n targets = service.get_targets_by_ids(['nj124jni5j4b3ugvyut346k', '456jk3bn7hjv1u236b5jk7i548u'])\n\n ```\n\n- **get_all_targets()**\n - lấy danh sách tất cả các target có trong cơ sở dữ liệu của acunetix.\n - input: \n - output: set | `[]`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n targets = service.get_all_targets()\n\n ```\n\n- **delete_targets(list_id)**\n - xóa các target nằm trong danh sách id.\n - input: danh sách id `list_id`\n - output: đối tượng `Response`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n del = service.delete_targets(['356kjnbgfc54h8tf42kl4556'])\n ```\n\n- **create_scan_from_target(target, profile_id=,schedule=)**\n - tạo scan từ target đã tạo trước. \n - input: đối tượng `Target`, `profile_id` và `schedule` (`profile_id`, `schedule` có thể có hoặc không)\n - output: scanid | `None`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n target = service.create_target('http://google.com')\n new_scan = service.create_scan_from_target(target)\n ```\n\n- **get_all_scans()**\n - lấy tất cả các scans có trong cơ sở dữ liệu Acunetix. \n - input: \n - output: danh sách đối tượng `Scan` | `[]`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n all_scan = service.get_all_scans()\n ```\n \n- **get_scan_by_id(id)**\n - lấy scan theo id cho trước. 
\n - input: id của scan\n - output: đối tượng `Scan` | `None`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n scan = service.get_scan_by_id('93428622uhjnv5h354')\n\n ```\n \n- **get_scans_by_ids(list_id)**\n - lấy danh sách scan theo danh sách id cho trước. \n - input: danh sách scan id\n - output: danh sách đối tượng `Scan` | `[]` \n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n scans = service.get_scans_by_ids(['456mlnkj3bn83hjk557c', '0tg8r5k34liuy96787df5667ef'])\n ```\n\n- **pause_scan(scan)**\n - tạm dừng 1 scan. \n - input: là 1 đối tượng `Scan`. \n - input: đối tượng `Response`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n scan = service.get_scan_by_id('abc')\n service.pause_scan(scan)\n ```\n \n- **resume_scan(scan)**\n - khởi động lại 1 scan đang tạm dừng. \n - input: là 1 đối tượng `Scan`. \n - input: đối tượng `Response`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n scan = service.get_scan_by_id('abc')\n service.pause_scan(scan)\n service.resume_scan(scan)\n ```\n \n- **stop_scan(scan)**\n - kết thúc 1 scan. \n - input: là 1 đối tượng `Scan`. \n - input: đối tượng `Response`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n scan = service.get_scan_by_id('abc')\n service.stop_scan(scan)\n ```\n \n- **delete_scan(scan)**\n - xóa 1 scan. \n - input: là 1 đối tượng `Scan`. 
\n - input: đối tượng `Response`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n scan = service.get_scan_by_id('abc')\n service.delete_scan(scan)\n ```\n\n- **get_results_of_scan(scan)**\n - lấy danh sách các reulst của 1 scan\n - input: đối tượng `Scan` \n - output: danh sách các đối tượng `Result` |`[]`\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n scan = service.get_scan_by_id('abc')\n results = service.get_results_of_scan(scan)\n ```\n\n- **get_vulns_of_result(result)**\n - lấy danh sách các lỗ hổng của 1 kết quả scan\n - input: đối tượng `Result`\n - output: danh sách các đối tượng `Vulnerability` | []\n ```python\n service = Acunetix('127.0.0.1','9238BN45BOKJ12B36H45755B4J13587DFBF')\n scan = service.get_scan_by_id('abc')\n results = service.get_results_of_scan(scan)\n result = results[0]\n vulns = service.get_vulns_of_result(result)\n ```\n- **get_result_statistic(result)**\n - lấy dữ liệu thống kê của result (dùng cho biểu diễn quá trình scan)\n - input: đối tượng `Result`\n - output: json ([result](https://github.com/ngdquockhanh/acunetix-sdk/blob/main/statistic.json))\n ```python\n results = Acunetix.get_results_of_scan(scan)\n result = results[0]\n statistic = Acunetix.get_result_statistic(result)\n ```\n \n- **get_root_location(result)**\n - lấy thư mục gốc của trang web được scan\n - input: đối tượng `Result`\n - output: đối tượng `Location` | `None`\n ```python\n scan = Acunetix.get_scan_by_id('abc')\n results = Acunetix.get_results_of_scan(scan)\n result = results[0]\n root = Acunetix.get_root_location(result)\n ```\n\n\n" }, { "alpha_fraction": 0.5641025900840759, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 15, "blob_id": "522bf1059d61b669e9abb93699b326266c599518", "content_id": "6f0c7a2cb6a6bee07894158870f96d38dc343f01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": 
"no_license", "max_line_length": 28, "num_lines": 7, "path": "/setup.py", "repo_name": "phihhim/acu-sdk", "src_encoding": "UTF-8", "text": "import setuptools\r\n\r\nsetuptools.setup(\r\n name=\"acunetix\",\r\n version = \"0.0.1\",\r\n packages = [\"acunetix\"],\r\n)" }, { "alpha_fraction": 0.5575734972953796, "alphanum_fraction": 0.5603819489479065, "avg_line_length": 33.15131759643555, "blob_id": "e3c91e942ac366e1b040d4cb53f49495c5337795", "content_id": "6a8ede86e04fed8cb9dec7255a00a1fa9b221795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5341, "license_type": "no_license", "max_line_length": 124, "num_lines": 152, "path": "/acunetix/model.py", "repo_name": "phihhim/acu-sdk", "src_encoding": "UTF-8", "text": "from .api_call import APICall\r\n\r\nclass Target:\r\n def __init__(self, id, address, description=\"\", criticality=10, continuous_mode=False,\r\n manual_intervention=None, type=None,verification=None, status=None, scans=[]):\r\n self.id = id\r\n self.address = address\r\n self.description = description\r\n self.criticality = criticality\r\n self.continuous_mode = continuous_mode\r\n self.manual_intervention = manual_intervention\r\n self.type = type\r\n self.verification = verification\r\n self.scans = scans\r\n self.status = status\r\n\r\n def __repr__(self):\r\n rep = self.id\r\n return str(rep)\r\n\r\n\r\nclass Scan:\r\n def __init__(self, id, profile, incremental=False,\r\n max_scan_time=0, next_run=None, report=None, schedule=None, target=None, results=None):\r\n self.id = id\r\n self.profile = profile\r\n self.incremental = incremental\r\n self.max_scan_time = max_scan_time\r\n self.next_run = next_run\r\n self.report = report\r\n self.schedule = schedule\r\n self.target = target\r\n if results is None:\r\n results = []\r\n\r\n def __repr__(self):\r\n rep = self.id\r\n return str(rep)\r\n\r\nclass Result:\r\n def __init__(self, id, start_date, scan, end_date=None, status=\"\"):\r\n self.id 
= id\r\n self.start_date = start_date\r\n self.end_date = end_date\r\n self.status = status\r\n self.scan = scan\r\n\r\n def __repr__(self):\r\n rep = self.id\r\n return str(rep)\r\n\r\nclass VulnDesciption:\r\n def __init__(self, id, name, cvss2, cvss3, cvss_score, description, details,\r\n highlights, impact, long_description, recommendation, references, request, response_info, source, tags):\r\n self.id = id\r\n self.name = name\r\n self.cvss2 = cvss2\r\n self.cvss3 = cvss3\r\n self.cvss_score = cvss_score\r\n self.description = description\r\n self.details = details\r\n self.highlights = highlights\r\n self.impact = impact\r\n self.long_description = long_description\r\n self.recommendation = recommendation\r\n self.references = references\r\n self.request = request\r\n self.response_info = response_info\r\n self.source = source\r\n self.tags = tags\r\n\r\n def __repr__(self):\r\n rep = self.id\r\n return str(rep)\r\n\r\nclass Vulnerability:\r\n def __init__(self, id, name, affects_url, affects_detail, confidence, criticality, last_seen, severity, status, result):\r\n self.id = id\r\n self.name = name\r\n self.affects_url = affects_url\r\n self.affects_detail = affects_detail\r\n self.confidence = confidence\r\n self.criticality = criticality\r\n self.last_seen = last_seen\r\n self.severity = severity\r\n self.status = status\r\n self.result = result\r\n\r\n def __repr__(self):\r\n rep = self.id\r\n return str(rep)\r\n\r\n def detail(self, api, token):\r\n endpoint = '/scans/{}/results/{}/vulnerabilities/{}'.format(\r\n self.result.scan.id, self.result.id, self.id)\r\n new_call = APICall(api, token)\r\n response = new_call.get(endpoint)\r\n id = response['vt_id']\r\n name = response['vt_name']\r\n cvss2 = response['cvss2']\r\n cvss3 = response['cvss3']\r\n cvss_score = response['cvss_score']\r\n description = response['description']\r\n details = response['details']\r\n highlights = response['highlights']\r\n impact = response['impact']\r\n long_description = 
response['long_description']\r\n recommendation = response['recommendation']\r\n references = response['references']\r\n request = response['request']\r\n response_info = response['response_info']\r\n source = response['source']\r\n tags = response['tags']\r\n\r\n return VulnDesciption(id, name, cvss2, cvss3, cvss_score, description, details, highlights,\r\n impact, long_description, recommendation, references, request, response_info, source, tags)\r\n\r\n\r\nclass Location:\r\n def __init__(self, loc_id, loc_type, name, parent, path, source, tags, result):\r\n self.loc_id = loc_id\r\n self.loc_type = loc_type\r\n self.name = name\r\n self.parent = parent\r\n self.path = path\r\n self.source = source\r\n self.tags = tags\r\n self.result = result\r\n\r\n def childrens(self, api, token):\r\n try:\r\n new_call = APICall(api, token)\r\n response = new_call.get('/scans/{}/results/{}/crawldata/{}/children'.format(self.result.scan.id, self.result\r\n .id, self.loc_id))\r\n raw_locations = response['locations']\r\n\r\n locations = []\r\n\r\n for location in raw_locations:\r\n loc_id = location['loc_id']\r\n loc_type = location['loc_type']\r\n name = location['name']\r\n parent = None\r\n path = location['path']\r\n source = None\r\n tags = location['tags']\r\n\r\n locations.append(Location(loc_id, loc_type, name, parent, path, source, tags, self.result))\r\n\r\n return locations\r\n except:\r\n return []" }, { "alpha_fraction": 0.5622676610946655, "alphanum_fraction": 0.5636616945266724, "avg_line_length": 30.188405990600586, "blob_id": "8929987972b4781f252b86282e18f1d9eac47761", "content_id": "025e59c8b7fe680d22bafa111fc53292082c4508", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2152, "license_type": "no_license", "max_line_length": 113, "num_lines": 69, "path": "/acunetix/api_call.py", "repo_name": "phihhim/acu-sdk", "src_encoding": "UTF-8", "text": "import requests\nimport json\n\nimport 
urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nclass APICall:\n\n def __init__(self, api, token):\n self.apibase = api\n self.apikey = token\n self.headers = {\n \"X-Auth\": self.apikey,\n \"content-type\": \"application/json\",\n }\n\n def __send_request(self, method='get', endpoint='', data=None):\n request_call = getattr(requests, method)\n url = str(\"{}{}\".format(self.apibase, endpoint if endpoint else \"/\"))\n\n response = request_call(\n url,\n headers = self.headers,\n data = json.dumps(data),\n verify = False\n )\n return json.loads(response.text)\n\n def get_raw(self, endpoint=\"\"):\n url = str(\"{}{}\".format(self.apibase, endpoint if endpoint else \"/\"))\n try:\n response = requests.get(url, headers=self.headers, verify=False)\n return response\n except:\n return None\n\n def post_raw(self, endpoint, data=None):\n if data is None:\n data = {}\n url = str(\"{}{}\".format(self.apibase, endpoint if endpoint else \"/\"))\n try:\n response = requests.post(url, headers=self.headers, json=data, allow_redirects=False, verify=False)\n return response\n except:\n return None\n\n def delete_raw(self, endpoint, data=None):\n if data is None:\n data = {}\n url = str(\"{}{}\".format(self.apibase, endpoint if endpoint else \"/\"))\n try:\n response = requests.delete(url, headers=self.headers, json=data, allow_redirects=False, verify=False)\n return response\n except:\n return None\n\n def get(self, endpoint=\"\"):\n return self.__send_request(\"get\", endpoint)\n\n def post(self, endpoint, data=None):\n if data is None:\n data = {}\n request = self.__send_request(\"post\", endpoint, data)\n return request\n\n def delete(self, endpoint, data=None):\n if data is None:\n data = {}\n return self.__send_request(\"delete\", endpoint, data)\n" }, { "alpha_fraction": 0.5129658579826355, "alphanum_fraction": 0.5174803137779236, "avg_line_length": 32.65724563598633, "blob_id": "1578cb45926adc797b1d86460106c4223995a0d0", "content_id": 
"13465f594e49e281a44cc72c78d02c51262fe13f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9525, "license_type": "no_license", "max_line_length": 129, "num_lines": 283, "path": "/acunetix/acunetix.py", "repo_name": "phihhim/acu-sdk", "src_encoding": "UTF-8", "text": "from .api_call import APICall\nfrom .model import Target, Scan, Result, Vulnerability, Location\nimport re\nimport json\nfrom pprint import pprint\nclass Acunetix:\n def __init__(self, api: str, token: str):\n self.api = api\n self.token = token\n\n def __str__(self):\n return f'Acunetix: {self.api} token {self.token}'\n\n def __repr__(self):\n return f'Acunetix: {self.api} token {self.token}'\n\n def create_target(self, url, description=\"\"):\n if not re.fullmatch(\n r\"^(http://www\\.|https://www\\.|http://|https://)?[a-z0-9]+([\\-.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(/.*)?$\",\n url, re.IGNORECASE):\n return None\n\n data = {\n \"targets\": [\n {\n \"address\": url,\n \"description\": description\n }\n ],\n \"groups\": []\n }\n new_call = APICall(self.api, self.token)\n respose = new_call.post('/targets/add', data)\n target = respose['targets'][0]\n id = target['target_id']\n address = target['address']\n criticality = target['criticality']\n description = target['description']\n type = target['type']\n\n return Target(id, address, description, criticality, type=type)\n\n\n def create_targets(self, list_target):\n r = re.compile(\n r\"^(http://www\\.|https://www\\.|http://|https://)?[a-z0-9]+([\\-.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(/.*)?$\",\n re.IGNORECASE)\n tmp_targets = []\n\n for i in list_target:\n url = str(i['address'])\n if r.match(url):\n tmp_targets.append(i)\n\n data = {\n \"targets\": tmp_targets,\n \"groups\": []\n }\n try:\n new_call = APICall(self.api, self.token)\n respose = new_call.post('/targets/add', data)\n raw_targets = respose['targets']\n\n targets = []\n\n for target in raw_targets:\n id = 
target['target_id']\n address = target['address']\n criticality = target['criticality']\n description = target['description']\n type = target['type']\n\n targets.append(\n Target(id, address, description, criticality, type=type))\n\n return targets\n\n except:\n return []\n\n def get_all_targets(self):\n try:\n new_call = APICall(self.api, self.token)\n response = new_call.get('/targets')\n raw_targets = response['targets']\n targets = []\n\n for target in raw_targets:\n id = target['target_id']\n address = target['address']\n description = target['description']\n criticality = target['criticality']\n continuous_mode = target['continuous_mode']\n manual_intervention = target['manual_intervention']\n type = target['type']\n verification = target['verification']\n status = target['last_scan_session_status']\n\n new_target = Target(id, address, description, criticality, continuous_mode,\n manual_intervention, type, verification, status)\n\n targets.append(new_target)\n\n return targets\n\n except:\n return None\n\n def get_target_by_id(self, id):\n try:\n id = id.strip()\n id = id.lower()\n if len(id) > 255:\n return None\n new_call = APICall(self.api, self.token)\n target = new_call.get('/targets/{}'.format(id))\n id = target['target_id']\n address = target['address']\n description = target['description']\n criticality = target['criticality']\n continuous_mode = target['continuous_mode']\n manual_intervention = target['manual_intervention']\n type = target['type']\n verification = target['verification']\n\n new_target = Target(id, address, description, criticality,\n continuous_mode, manual_intervention, type, verification)\n return new_target\n\n except:\n return None\n\n\n def get_targets_by_ids(self, list_id):\n all_target = self.get_all_targets()\n for i in range(len(list_id)):\n list_id[i] = list_id[i].strip()\n list_id[i] = list_id[i].lower()\n targets = [x for x in all_target if x.id in list_id]\n return targets\n\n def delete_targets(self, ids):\n ids = [x 
for x in ids if len(x) <= 255]\n data = {\n \"target_id_list\": ids\n }\n new_call = APICall(self.api, self.token)\n return new_call.post_raw('/targets/delete', data)\n # scan\n\n def create_scan(self, target, profile_id,\n schedule=None):\n if schedule is None:\n schedule = {\"disable\": False, \"start_date\": None, \"time_sensitive\": False}\n if len(profile_id) > 255:\n return None\n data = {\n \"profile_id\": profile_id,\n \"incremental\": False,\n \"schedule\": schedule,\n \"target_id\": target.id\n }\n try:\n new_call = APICall(self.api, self.token)\n res = new_call.post_raw('/scans', data)\n #response = json.loads(res.text)\n scan_id = res.headers['Location'].split('/')[-1]\n '''\n scan_id = res.headers['Location'].split('/')[-1]\n incremental = response['incremental']\n max_scan_time = response['max_scan_time']\n\n new_scan = Scan(id=scan_id, profile=profile_id, incremental=incremental,\n max_scan_time=max_scan_time, schedule=schedule, target=target)\n '''\n return scan_id\n except:\n return None\n\n def get_all_scans(self):\n try:\n new_call = APICall(self.api, self.token)\n response = new_call.get('/scans')\n raw_scans = response['scans']\n return raw_scans\n\n except:\n return []\n\n def get_scan_by_id(self, scan_id):\n try:\n scan_id = scan_id.strip()\n scan_id = scan_id.lower()\n if len(scan_id) > 255:\n return None\n new_call = APICall(self.api, self.token)\n scan = new_call.get('/scans/{}'.format(scan_id))\n id = scan['scan_id']\n profile = scan['profile_id']\n incremental = scan['incremental']\n max_scan_time = scan['max_scan_time']\n next_run = scan['next_run']\n report = scan['report_template_id']\n schedule = scan['schedule']\n\n new_scan = Scan(id, profile, incremental=incremental,\n max_scan_time=max_scan_time, next_run=next_run, report=report, schedule=schedule)\n\n return new_scan\n except:\n return None\n\n def get_scans_by_ids(self, list_id):\n all_scans = self.get_all_scans()\n for i in range(len(list_id)):\n list_id[i] = 
list_id[i].strip()\n list_id[i] = list_id[i].lower()\n scans = [x for x in all_scans if x.id in list_id]\n return scans\n\n def pause_scan(self, scan):\n new_call = APICall(self.api, self.token)\n return new_call.post_raw('/scans/{}/pause'.format(scan.id))\n\n def resume_scan(self, scan):\n new_call = APICall(self.api, self.token)\n return new_call.post_raw('/scans/{}/resume'.format(scan.id))\n\n def stop_scan(self, scan):\n new_call = APICall(self.api, self.token)\n return new_call.post_raw('/scans/{}/abort'.format(scan.id))\n\n def delete_scan(self, scan):\n id = scan.id\n if len(id) > 255:\n return None\n new_call = APICall(self.api, self.token)\n return new_call.delete_raw('/scans/{}'.format(id))\n\n # result\n def get_results_of_scan(self, scan_id):\n new_call = APICall(self.api, self.token)\n response = new_call.get('/scans/{}/results'.format(scan_id))\n\n return response['results'][0]['result_id']\n\n\n # vulnerability\n def get_vulns_of_result(self, result_id, scan_id):\n try:\n new_call = APICall(self.api, self.token)\n response = new_call.get('/scans/{}/results/{}/vulnerabilities'.format(result_id, scan_id))\n raw_vulns = response['vulnerabilities']\n\n \n\n return response\n\n except:\n return []\n\n def get_result_statistic(self, scan_id, result_id):\n new_call = APICall(self.api, self.token)\n return new_call.get('/scans/{}/results/{}/statistics'.format(scan_id, result_id))\n\n # location\n def get_root_location(self, result):\n try:\n new_call = APICall(self.api, self.token)\n response = new_call.get('/scans/{}/results/{}/crawldata/0/children'.format(result.scan.id, result.id))\n raw_location = response['locations'][0]\n loc_id = raw_location['loc_id']\n loc_type = raw_location['loc_type']\n name = raw_location['name']\n parent = None\n path = raw_location['path']\n source = None\n tags = raw_location['tags']\n\n return Location(loc_id, loc_type, name, parent, path, source, tags, result)\n\n except:\n return None\n" } ]
5
formyfamily/Guo-s-Motions-and-Emotions
https://github.com/formyfamily/Guo-s-Motions-and-Emotions
78d063840e1e49e36860a3c95a82727852183e68
51612c8d1e5e16ef8feac005797ab32267ad5f41
dd7668a391e05104fd492c30202a4b6e6f8e8d7e
refs/heads/master
2021-09-03T06:32:51.651317
2017-12-13T10:26:02
2017-12-13T10:26:02
114,106,490
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 12.333333015441895, "blob_id": "5fa8f8fef6847f3fb1e9f70f1ba9a2e8a51338b4", "content_id": "cb0bc18e2a1281b797fb4d7e4386555f76353ecd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 43, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/README.md", "repo_name": "formyfamily/Guo-s-Motions-and-Emotions", "src_encoding": "UTF-8", "text": "## Guo‘s motions and emotions\n\n​\tMo Guo" }, { "alpha_fraction": 0.7095709443092346, "alphanum_fraction": 0.7282728552818298, "avg_line_length": 29.266666412353516, "blob_id": "91be6843576563393d048225b5aa618c9e80e03f", "content_id": "3f696e78835b61b2dd610a8d8c3a2996c37fab18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1101, "license_type": "no_license", "max_line_length": 103, "num_lines": 30, "path": "/utils/ScreenPrinter.py", "repo_name": "formyfamily/Guo-s-Motions-and-Emotions", "src_encoding": "UTF-8", "text": "\n\nimport cv2\nimport os\n\n# moviePath: 保存电影的路径\n# dataPath: 存储截图的文件夹路径\n# frameMode: 是否通过统计帧数来截图\n# perSec: 多少秒一张截图(frameMode=1时无效)\n# perSec: 多少帧一张截图(frameMode=0时无效)\n\ndef getScreenShotData(moviePath, dataPath=\"data/screenshots\", frameMode=True, perSec=1.0, perFrame=23):\n\tif(not os.path.exists(dataPath)):\n\t\tos.makedirs(dataPath) \n\tif(not os.path.isdir(dataPath)):\n\t\treturn -1 \n\tvideo = cv2.VideoCapture(moviePath) #读入视频文件\n\tif (not video.isOpened()): #判断是否正常打开\n\t\tvideo.release()\t\t\n\t\treturn -1 \n\tcurrentFrame = 1\n\tfps = video.get(cv2.CAP_PROP_FPS)\n\ttimeF = perFrame if frameMode else int(perSec*fps) ; #视频帧计数间隔频率\n\trval = True \n\twhile rval: #循环读取视频帧\n\t\trval, frame = video.read()\n\t\tif(currentFrame%timeF == 0): #每隔timeF帧进行存储操作\n\t\t\tscreenShotPath = os.path.join(dataPath, \"screenshot%d.jpg\"%currentFrame)\n\t\t\tcv2.imwrite(screenShotPath, 
frame) #存储为图像\n\t\tcurrentFrame = currentFrame + 1\n\t\tcv2.waitKey(1)\n\tvideo.release()" }, { "alpha_fraction": 0.8947368264198303, "alphanum_fraction": 0.8947368264198303, "avg_line_length": 8.75, "blob_id": "40ee1cf6db22af62aef3816d6b6efd9d15d9e4fc", "content_id": "3ab8a4b026f89d77a9b95a7b373991a1f2361197", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 38, "license_type": "no_license", "max_line_length": 14, "num_lines": 4, "path": "/requirements.txt", "repo_name": "formyfamily/Guo-s-Motions-and-Emotions", "src_encoding": "UTF-8", "text": "numpy\ntensorflow\ntensorflow-gpu\nopencv" }, { "alpha_fraction": 0.7166666388511658, "alphanum_fraction": 0.7190476059913635, "avg_line_length": 20.049999237060547, "blob_id": "b175afd9e28f117cffbe8576d1a90c32e735eb8a", "content_id": "6c455bd6346ea8219e35572fcdb405ff49eb4624", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "no_license", "max_line_length": 80, "num_lines": 20, "path": "/main.py", "repo_name": "formyfamily/Guo-s-Motions-and-Emotions", "src_encoding": "UTF-8", "text": "import os\nimport pdb\nimport array\nimport json\nimport random\nimport zipfile\nfrom argparse import ArgumentParser\nfrom utils import ScreenPrinter\n\ndef get_args():\n parser = ArgumentParser(description=\"Guo's Emotions!\")\n args = parser.parse_args()\n return args\n\ndef main():\n args = get_args()\n ScreenPrinter.getScreenShotData(\"data/continuous-movies/After_The_Rain.mp4\")\n\nif __name__ == \"__main__\":\n main()" } ]
4
1mSAD/Discord-Mediabot
https://github.com/1mSAD/Discord-Mediabot
181118cf534da112eb6c9ee1132ec2e2d2876683
a8769c6092136caf094aec97c1bd906d9475a65a
2a14ab2765397c9600f1b176c053f9c869b5adf7
refs/heads/main
2023-05-31T04:59:28.275406
2021-06-11T01:22:38
2021-06-11T01:22:38
370,991,825
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6450344324111938, "alphanum_fraction": 0.6627335548400879, "avg_line_length": 30.8125, "blob_id": "26aac75667057caa5c51c681629ad3568685750b", "content_id": "42296c047e8205d3f3fbdb28865b74832d34dc7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 112, "num_lines": 32, "path": "/main.py", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom discord import file\nfrom discord_slash import SlashCommand\n\n#Setting Values\nfrom config import *\nclient = commands.Bot(command_prefix=config[\"Prefix\"])\nslash = SlashCommand(client, sync_commands=True, sync_on_cog_reload=True)\nTOKEN = config[\"TOKEN\"]\n\nevents_extensions = ['cogs.events.tiktok',\n 'cogs.events.instagram',\n 'cogs.commands.sendtodm',\n 'cogs.commands.slash-sendtodm',\n 'cogs.commands.help']\n\[email protected]\nasync def on_ready():\n await client.change_presence(status=discord.Status.online, activity=discord.Game(f'{config[\"Prefix\"]}help'))\n print(\"\\u001b[32mMediabot is Ready to go. 
\\u001b[0m\")\n\nif __name__ == \"__main__\":\n # Loads Extentions (Cogs)\n for extension in events_extensions:\n print(f\"Loaded \\u001b[32m{extension}\\u001b[0m\")\n client.load_extension(extension)\n\n from api.flaskapi import run_api\n run_api()\n\n client.run(TOKEN)" }, { "alpha_fraction": 0.628000020980835, "alphanum_fraction": 0.628000020980835, "avg_line_length": 26.799999237060547, "blob_id": "d7deb3a3d37d702c04c5b42ee72229445a236294", "content_id": "8193dccf991949b9b12d182628ae3e6b56ee791b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1250, "license_type": "no_license", "max_line_length": 106, "num_lines": 45, "path": "/cogs/commands/help.py", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom discord_slash import cog_ext, SlashContext\n\nimport json\n\n#Setting Values\nimport sys\nsys.path.append(\"./\")\nfrom config import *\nprefix = config[\"Prefix\"]\n\ndef gembed(ctx):\n embed=discord.Embed(title='help', description='available commands.', color = discord.Colour.random(),)\n embed.add_field(\n name=f\"{prefix}send - {prefix}s - /send\", \n value=f\"```{prefix}s <mention||userid> <link> <number-optional>```\", \n inline=False\n )\n embed.add_field(\n name=f\"{prefix}clear - ``only works in dm``\"\n , value=f\"```{prefix}clear <amount>```\", \n inline=True\n )\n embed.set_footer(\n text=f\"Requested by {ctx.author}\", \n icon_url=ctx.author.avatar_url\n )\n return embed\n\nclass HelpCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.bot.remove_command('help')\n\n @commands.command(name='help')\n async def Help_cmd(self, ctx):\n await ctx.send(embed=gembed(ctx))\n\n @cog_ext.cog_slash(name=\"help\",description=\"view available commands.\")\n async def slashHelp_cmd(self, ctx:SlashContext):\n await ctx.send(embed=gembed(ctx))\n\ndef setup(bot):\n bot.add_cog(HelpCog(bot))" }, { "alpha_fraction": 
0.5676735639572144, "alphanum_fraction": 0.5849549770355225, "avg_line_length": 35.24736785888672, "blob_id": "5d531f0769e236f654e10317521459361a427e44", "content_id": "15282d06fc70b4ee35827d6204676a8826fd5028", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6886, "license_type": "no_license", "max_line_length": 174, "num_lines": 190, "path": "/cogs/functions/tik_fn.py", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "import requests\nimport json\n\nimport math\nimport re\nfrom decimal import Decimal\n\nimport os\nfrom urllib.parse import parse_qsl, urlparse\nimport random\n\nimport time\nfrom pystreamable import StreamableApi\n\nimport sys\nsys.path.append(\"./\")\nfrom config import *\nstream_email = config[\"stream_email\"]\nstream_pass = config[\"stream_pass\"]\n\nimport discord\n\n#For Below Function\ndef remove_exponent(d):\n \"\"\"Remove exponent.\"\"\"\n return d.quantize(Decimal(1)) if d == d.to_integral() else d.normalize()\n\n#To Make Numbers Readable , 1k 1m...\ndef millify(n, precision=0, drop_nulls=True, prefixes=[]):\n \"\"\"Humanize number.\"\"\"\n millnames = ['', 'k', 'M', 'B', 'T', 'P', 'E', 'Z', 'Y']\n if prefixes:\n millnames = ['']\n millnames.extend(prefixes)\n n = float(n)\n millidx = max(0, min(len(millnames) - 1,\n int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3))))\n result = '{:.{precision}f}'.format(n / 10**(3 * millidx), precision=precision)\n if drop_nulls:\n result = remove_exponent(Decimal(result))\n return '{0}{dx}'.format(result, dx=millnames[millidx])\n\nclass Tiktok_fn:\n def __init__(self, url):\n self.url = url\n # Convert https://vm.tiktok.com to https://www.tiktok.com\n header = {\n 'Host': 't.tiktok.com',\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0',\n 'Referer': 'https://www.tiktok.com/',\n}\n tikfull_url = url\n if url.startswith('https://vm.tiktok.com'):\n res = requests.get(url)\n 
url = (res.url)\n if url.startswith('https://m.tiktok.com'):\n req = requests.get(f\"https://www.tiktok.com/oembed?url={url}\")\n extr_url = json.loads(req.text)\n extr_url = extr_url[\"html\"]\n m = re.search(\" +cite=\\\"(.*?)\\\"\", extr_url)\n url = m.group(1)\n tikfull_url = url\n self.url = tikfull_url\n video_id = (('{}'.format(*tikfull_url.split('/')[-1:]))).split(\"?\")[0]\n video_username = (('{}'.format(*tikfull_url.split('/')[-3:]))).split(\"?\")[0]\n # Send to api\n api_url = f\"http://localhost:8080/api/tiktok/{video_username}/{video_id}\"\n # Read json array via api\n response = requests.request(\"get\", api_url)\n self.datameta = json.loads(response.text)\n self.video_url = self.datameta[\"video\"][\"playAddr\"]\n self.header = header\n self.fll_url = tikfull_url\n\n def likes_number(self):\n likes_number = (millify(self.datameta[\"stats\"][\"diggCount\"], precision=1))\n return likes_number\n\n def comments_number(self):\n comments_number = (millify(self.datameta[\"stats\"][\"commentCount\"], precision=1))\n return comments_number\n\n def share_number(self):\n share_number = (millify(self.datameta[\"stats\"][\"shareCount\"], precision=1))\n return share_number\n\n def play_number(self):\n play_number = (millify(self.datameta[\"stats\"][\"playCount\"], precision=1))\n return play_number\n\n def user_name(self):\n user_name = self.datameta[\"author\"][\"uniqueId\"]\n return user_name\n \n def author_avatar(self):\n author_avatar = self.datameta[\"author\"][\"avatarLarger\"]\n return author_avatar\n \n def sound_des(self):\n sound_des = self.datameta[\"music\"][\"title\"]\n return sound_des\n\n def caption(self):\n caption = self.datameta[\"desc\"]\n return caption\n\n def video_id(self):\n video_id = self.datameta[\"video\"][\"id\"]\n return video_id\n \n def video_url(self):\n video_url = self.datameta[\"video\"][\"playAddr\"]\n return video_url\n \n def default_url(self):\n return self.fll_url\n\n # Upload To streamable\n def 
upload_to_streamable(self, path, title):\n streamable_username = stream_email\n streamable_password = stream_pass\n api = StreamableApi(streamable_username, streamable_password)\n deets = api.upload_video(path, title)\n count = 0\n while True:\n count+=1\n test = api.get_info(deets['shortcode'])\n if test['percent'] == 100:\n break\n elif count == 6:\n exit()\n else:\n time.sleep(10)\n global streamable_link\n streamable_link = (\"https://streamable.com/\" +deets['shortcode'])\n return streamable_link\n\n def embedgen(self, url, author, author_avatar):\n e=discord.Embed(title=\"Tiktok\", description=self.caption())\n e.set_author(name=(f'@{self.user_name()}'), url=url , icon_url=self.author_avatar())\n e.set_thumbnail(url=\"https://i.imgur.com/rMollzc.png\")\n e.add_field(name=\"Likes\", value=self.likes_number(), inline=True)\n e.add_field(name=\"Comments\", value=self.comments_number(), inline=True)\n e.add_field(name=\"Sound\", value=self.sound_des(), inline=False)\n e.add_field(name=\"Shares\", value=self.share_number(), inline=True)\n e.add_field(name=\"Views\", value=self.play_number(), inline=True)\n e.set_footer(text=(f'Shared by {author}'), icon_url=author_avatar)\n return e\n\n\nclass TikTokDownloader:\n HEADERS = {\n 'Connection': 'keep-alive',\n 'Pragma': 'no-cache',\n 'Cache-Control': 'no-cache',\n 'DNT': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36',\n 'Accept': '*/*',\n 'Sec-Fetch-Site': 'same-site',\n 'Sec-Fetch-Mode': 'no-cors',\n 'Sec-Fetch-Dest': 'video',\n 'Referer': 'https://www.tiktok.com/',\n 'Accept-Language': 'en-US,en;q=0.9,bs;q=0.8,sr;q=0.7,hr;q=0.6',\n 'sec-gpc': '1',\n 'Range': 'bytes=0-',\n }\n\n def __init__(self, url: str):\n web_id = str(random.randint(10000, 999999999))\n self.__url = url\n self.__cookies = {\n 'tt_webid': web_id,\n 'tt_webid_v2': web_id\n }\n\n def __get_video_url(self) -> str:\n response = requests.get(self.__url, 
cookies=self.__cookies, headers=TikTokDownloader.HEADERS)\n return response.text.split('\"playAddr\":\"')[1].split('\"')[0].replace(r'\\u0026', '&')\n\n def download(self, file_path: str):\n video_url = self.__get_video_url()\n url = urlparse(video_url)\n params = tuple(parse_qsl(url.query))\n request = requests.Request(method='GET',url='{}://{}{}'.format(url.scheme,url.netloc, url.path),cookies=self.__cookies,headers=TikTokDownloader.HEADERS,params=params)\n prepared_request = request.prepare()\n session = requests.Session()\n response = session.send(request=prepared_request)\n response.raise_for_status()\n with open(os.path.abspath(file_path), 'wb') as output_file:\n output_file.write(response.content)" }, { "alpha_fraction": 0.5704103112220764, "alphanum_fraction": 0.5759735703468323, "avg_line_length": 37.604026794433594, "blob_id": "794222ceabb90846d4c3e36d7ec6c671cfb87be9", "content_id": "db697adf3515713d0a5f6f5e0c89a9111693c143", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5755, "license_type": "no_license", "max_line_length": 213, "num_lines": 149, "path": "/cogs/functions/insta_fn.py", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport time\nfrom pystreamable import StreamableApi\n\n#Setting Values\nimport sys\nsys.path.append(\"./\")\nfrom config import *\nstream_email = config[\"stream_email\"]\nstream_pass = config[\"stream_pass\"]\n\nimport discord\n\nclass Insta_fn:\n def __init__(self, url, multipost_num=0):\n shortcode = url.split('/')[-2].replace('/', '')\n api_url = f\"http://localhost:8080/api/instagram/{shortcode}\"\n # Read json array via api\n response = requests.request(\"get\", api_url)\n \n self.datameta = json.loads(response.text)\n self.typeofmedia = self.datameta[\"__typename\"]\n self.multipost_num = multipost_num\n self.multipost_num_chosen = False\n if multipost_num > 0:\n self.multipost_num_chosen = True\n elif 
self.typeofmedia == \"GraphSidecar\":\n try:\n self.datameta[\"edge_sidecar_to_children\"][\"edges\"][self.multipost_num][\"node\"][\"__typename\"]\n except:\n print(f\"Number {multipost_num} is out of index, ``Setting number back to 1.``\")\n self.multipost_num = 0\n\n def type_media(self):\n if self.typeofmedia != 'GraphSidecar':\n return self.typeofmedia\n elif self.typeofmedia == 'GraphSidecar':\n if self.datameta[\"edge_sidecar_to_children\"][\"edges\"][self.multipost_num][\"node\"][\"__typename\"] == 'GraphVideo':\n return 'GraphVideo'\n else:\n return 'GraphImage'\n elif self.datameta[\"statusCode\"] == 404:\n return '**Error StatusCode 404 \\n Account Maybe Private.**'\n\n def play_number(self):\n if self.type_media() == \"GraphVideo\":\n try:\n play_number = self.datameta[\"video_view_count\"]\n play_number = (\"\" + \"{:,}\".format(play_number))\n except:\n play_number = self.datameta[\"edge_sidecar_to_children\"][\"edges\"][self.multipost_num][\"node\"][\"video_view_count\"] or self.datameta[\"edge_sidecar_to_children\"][\"edges\"][0][\"node\"][\"video_view_count\"]\n play_number = (\"\" + \"{:,}\".format(play_number))\n return play_number\n \n def likes_number(self):\n likes_number = self.datameta[\"edge_media_preview_like\"][\"count\"]\n likes_number = (\"\" + \"{:,}\".format(likes_number))\n return likes_number\n \n def comments_number(self):\n comments_number = self.datameta[\"edge_media_to_comment\"][\"count\"]\n comments_number = (\"\" + \"{:,}\".format(comments_number))\n return comments_number\n \n def caption(self):\n try:\n caption = self.datameta[\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"]\n except:\n caption = ' '\n return caption\n \n def user_name(self):\n user_name = self.datameta[\"owner\"][\"username\"]\n return user_name\n \n def user_pfp(self):\n user_pfp = self.datameta[\"owner\"][\"profile_pic_url\"]\n return user_pfp \n \n def display_url(self):\n if self.typeofmedia == \"GraphVideo\":\n display_url = 
self.datameta[\"video_url\"]\n return display_url\n \n elif self.typeofmedia == \"GraphImage\":\n display_url = self.datameta[\"display_url\"]\n return display_url\n \n elif self.typeofmedia == \"GraphSidecar\":\n if self.type_media() == \"GraphVideo\":\n display_url = self.datameta[\"edge_sidecar_to_children\"][\"edges\"][self.multipost_num][\"node\"][\"video_url\"]\n return display_url\n elif self.type_media() == \"GraphImage\":\n display_url = self.datameta[\"edge_sidecar_to_children\"][\"edges\"][self.multipost_num][\"node\"][\"display_url\"]\n return display_url\n\n def video_duration(self):\n video_duration = self.datameta[\"video_duration\"]\n limit_duration = float('600')\n return video_duration\n\n def video_id(self):\n video_id = self.datameta[\"id\"]\n return video_id\n\n def video_download(self, path):\n data = requests.get(self.display_url())\n idd = self.datameta[\"id\"]\n with open(path+'/{}.mp4'.format(idd), 'wb') as fb:\n fb.write(data.content)\n \n # Upload To streamable\n def upload_to_streamable(self, path, title):\n streamable_username = stream_email\n streamable_password = stream_pass\n api = StreamableApi(streamable_username, streamable_password)\n deets = api.upload_video(path, title)\n count = 0\n while True:\n count+=1\n test = api.get_info(deets['shortcode'])\n if test['percent'] == 100:\n break\n elif count == 6:\n exit()\n else:\n time.sleep(10)\n global streamable_link\n streamable_link = (\"https://streamable.com/\" +deets['shortcode'])\n return streamable_link\n\n def embedgen(self, url, author, author_avatar):\n\n embed=discord.Embed(title=\"Instagram\", description=self.caption())\n embed.set_author(name=(f'@{self.user_name()}'), url=url, icon_url=self.user_pfp())\n embed.set_thumbnail(url=\"https://i.imgur.com/9S6AZz8.png\")\n embed.add_field(name=\"Likes\", value=self.likes_number(), inline=True)\n embed.add_field(name=\"Comments\", value=self.comments_number(), inline=True)\n embed.set_footer(text=(f'Shared by • @{author}'), 
icon_url=author_avatar)\n \n # Embed for video\n if self.type_media() == \"GraphVideo\":\n embed.add_field(name=\"Views\", value=self.play_number(), inline=True)\n #Embed for pic\n if self.type_media() == \"GraphImage\": \n embed.set_image(url=self.display_url())\n \n return embed\n" }, { "alpha_fraction": 0.5269917845726013, "alphanum_fraction": 0.5318081378936768, "avg_line_length": 40.19008255004883, "blob_id": "c015b5fc2043100c8c99ce0b93675c0266abbd65", "content_id": "e70b11945d5c18b52a0da4b56d39cb59e9b9ffa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4989, "license_type": "no_license", "max_line_length": 169, "num_lines": 121, "path": "/cogs/commands/sendtodm.py", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom discord import file\n\nimport os\nimport random\n\nimport sys\nsys.path.append(\"./cogs/functions\")\nimport tik_fn\nimport insta_fn\n\n# Setting Values\nsys.path.append(\"./\")\nfrom config import *\npath_down = config[\"path\"]\nlimitsize = config[\"limitsize\"] # <-- 8 mb for file size limit set by discord\nsEmoji = config[\"sEmoji\"]\n\nclass SendtodmCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n # Delete dms\n @commands.dm_only()\n @commands.command(name='clear',pass_context=True)\n async def clear(self, ctx, limit: int=None):\n passed = 0\n failed = 0\n async for msg in ctx.message.channel.history(limit=limit+1):\n if msg.author.id == self.bot.user.id:\n try:\n await msg.delete()\n passed += 1\n except:\n failed += 1\n else:\n pass\n #ctx.send(f\"[Complete] Removed {passed} messages with {failed} fails\", delete_after=10)\n \n # send video to dm\n @commands.command(name='send', aliases=['s'],pass_context=True)\n async def sendtodm_cmd(self, ctx, member: discord.Member, link_url, multipost_num=1):\n channel = await member.create_dm()\n \n # Tiktok send to dm\n if 
link_url.startswith('https://www.tiktok.com') or link_url.startswith('https://vm.tiktok.com'):\n try:\n # Sends url to tik_fn\n t = tik_fn.Tiktok_fn(link_url) \n \n # Download video\n downloader = tik_fn.TikTokDownloader(t.default_url())\n downloader.download(path_down+'/{}.mp4'.format(t.video_id()))\n\n mp4_file = (f\"{path_down}/{t.video_id()}.mp4\")\n file_size = os.path.getsize(mp4_file) \n \n # Embed\n e = t.embedgen(link_url, ctx.author, ctx.author.avatar_url)\n\n # Upload to discord\n if file_size <= limitsize:\n await channel.send(embed=e)\n await channel.send(file=discord.File(mp4_file))\n\n # Upload to Streamable\n else:\n await channel.send(embed=e)\n mssg = await channel.send(f'Wait Uploading...🔃 {ctx.author}')\n streamable_link=t.upload_to_streamable(mp4_file, t.video_id())\n await mssg.edit(content=streamable_link)\n #Delete the file\n os.remove(mp4_file) \n await ctx.message.add_reaction(sEmoji)\n except:\n embed=discord.Embed(title=\"Error\", description='The video is private, or the api is broken \\n make sure to use a proxy.', icon_url=ctx.author.avatar_url)\n embed.set_thumbnail(url=\"https://i.imgur.com/j3wGKKr.png\")\n await ctx.channel.send(embed=embed, delete_after=10)\n\n # Instagram send to dm\n elif link_url.startswith('https://www.instagram.com/'):\n url = link_url\n multipost_num = (int(multipost_num) - 1)\n try:\n i = insta_fn.Insta_fn(url , multipost_num)\n embed = i.embedgen(link_url, ctx.author, ctx.author.avatar_url)\n # For Videos\n if i.type_media() == \"GraphVideo\":\n i.video_download(path_down)\n file_tosend = (f\"{path_down}/{i.video_id()}.mp4\")\n file_size = os.path.getsize(file_tosend)\n \n if file_size <= limitsize:\n await channel.send(embed=embed)\n await channel.send(file=discord.File(file_tosend))\n os.remove(file_tosend) # Deletes downloaded file\n await ctx.message.add_reaction(sEmoji)\n \n else: # Upload to streamable if file over size limit\n await channel.send(embed=embed)\n msg = await 
channel.send(f'{message.author} 🔃 Wait Uploading...')\n streamable_link=i.upload_to_streamable(file_tosend, i.user_name())\n await msg.edit(content=streamable_link)\n os.remove(file_tosend) # Deletes downloaded file\n await ctx.message.add_reaction(sEmoji)\n\n # For Pictures\n elif i.type_media() == \"GraphImage\":\n await channel.send(embed=embed)\n await ctx.message.add_reaction(sEmoji)\n \n except:\n embed=discord.Embed(title=\"Error\", description='Account Maybe Private.', icon_url=ctx.author.avatar_url)\n embed.set_thumbnail(url=\"https://i.imgur.com/j3wGKKr.png\")\n await ctx.channel.send(embed=embed, delete_after=10)\n else:\n pass\n\ndef setup(bot):\n bot.add_cog(SendtodmCog(bot))" }, { "alpha_fraction": 0.6813559532165527, "alphanum_fraction": 0.7310734391212463, "avg_line_length": 22.263158798217773, "blob_id": "052d433c9256433fd8fafe351ad032439f1af2f6", "content_id": "e8f04156471636c6d5bb9cb261e284bb5e1994c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 885, "license_type": "no_license", "max_line_length": 140, "num_lines": 38, "path": "/README.md", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "\n<p align=\"center\">\n <img width=\"128\" height=\"128\" src=\"https://user-images.githubusercontent.com/67737881/119644616-1d0b8780-be2e-11eb-82c5-bcf9ed17a730.png\">\n</p>\n\n## Discord-Mediabot\nA Discord bot that allows you to share Tiktoks, Instagram public/private posts, reels to discord.\n## Run on replit\nClick : [![Run on Repl.it](https://repl.it/badge/github/1mSAD/Discord-Mediabot)](https://repl.it/github/1mSAD/Discord-Mediabot)\n## Installation\n```bash\ngit clone https://github.com/1mSAD/Discord-Mediabot.git\n```\n```bash\npip install -r requirements.txt \n```\n## Usage\n\n```python\npython main.py\n```\n\n## Config\nCheck config.py\n\nTo get the instagram session file use\n```bash\ninstaloader -l USERNAME\n```\nGo to the specified path copy the 
session-username file to api directory. <- Not required to do this step\n\n.env example\n```\nTOKEN=\nstream_email=\nstream_pass=\nIG_USERNAME=\nproxyip=\n```\n" }, { "alpha_fraction": 0.876288652420044, "alphanum_fraction": 0.876288652420044, "avg_line_length": 11.25, "blob_id": "9c3b9fed31f1456587fa9a598ecb1c90e81f61f3", "content_id": "aed17e798d5148a369349b6c6e1318a4830ebd02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 97, "license_type": "no_license", "max_line_length": 24, "num_lines": 8, "path": "/requirements.txt", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "flask\nrequests\npython_dotenv\ndiscord\ndiscord.py\ndiscord-py-slash-command\ninstaloader\npystreamable" }, { "alpha_fraction": 0.5507857203483582, "alphanum_fraction": 0.5553852319717407, "avg_line_length": 40.42856979370117, "blob_id": "ab2903cec4b27bca40cdc051db985425c01d1bc2", "content_id": "cf60d1ce1dadc0a9d8de16abd25806fa31032e13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2612, "license_type": "no_license", "max_line_length": 124, "num_lines": 63, "path": "/cogs/events/instagram.py", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom discord import file\n\nimport os\n\nimport sys\nsys.path.append(\"./cogs/functions\")\nimport insta_fn\n\n#Setting Values\nsys.path.append(\"./\")\nfrom config import *\ninstapath = config[\"path\"]\nlimitsize = config[\"limitsize\"] # <-- 8 mb for file size limit set by discord\n\nclass InstagramCog(commands.Cog):\n \n def __init__(self, bot):\n self.bot = bot\n @commands.Cog.listener()\n async def on_message(self, message, multipost_num=1):\n if message.content.startswith('https://www.instagram.com/'):\n split_url = message.content.split(' ') #Seperates url and num if there is\n url = split_url[0] \n # Check if the user entered a number 
after the url\n try:\n split_url[1]\n except IndexError:\n multipost_num = 0\n else:\n multipost_num = (int(split_url[1]) - 1) # -1 from number \n try:\n i = insta_fn.Insta_fn(url , multipost_num)\n embed = i.embedgen(message.content, message.author, message.author.avatar_url)\n # For Videos\n if i.type_media() == \"GraphVideo\":\n i.video_download(instapath)\n file_tosend = (f\"{instapath}/{i.video_id()}.mp4\")\n file_size = os.path.getsize(file_tosend)\n\n if file_size <= limitsize:\n await message.channel.send(embed=embed)\n await message.channel.send(file=discord.File(file_tosend))\n os.remove(file_tosend) # Delete the file\n \n else: # Upload to streamable if file over size limit\n await message.channel.send(embed=embed)\n msg = await message.channel.send(f'{message.author} 🔃 Wait Uploading...')\n streamable_link=i.upload_to_streamable(file_tosend, i.user_name())\n await msg.edit(content=streamable_link)\n os.remove(file_tosend) # Delete the file\n\n # For Pictures\n elif i.type_media() == \"GraphImage\":\n await message.channel.send(embed=embed)\n except:\n embed=discord.Embed(title=\"Error\", description='Account Maybe Private.', icon_url=message.author.avatar_url)\n embed.set_thumbnail(url=\"https://i.imgur.com/j3wGKKr.png\")\n await message.channel.send(embed=embed, delete_after=10)\n \ndef setup(bot):\n bot.add_cog(InstagramCog(bot))" }, { "alpha_fraction": 0.6186224222183228, "alphanum_fraction": 0.644132673740387, "avg_line_length": 38.25, "blob_id": "826cffabe97fcfcf3810eba084c0f066c2150c5d", "content_id": "b815878bf07c24028bddc9180ac4af5693171dd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 786, "license_type": "no_license", "max_line_length": 134, "num_lines": 20, "path": "/config.py", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "import os\nfrom dotenv import load_dotenv\nload_dotenv()\n\nconfig = {\n \"TOKEN\": '' or os.getenv(\"TOKEN\"), #Discord Bot Token.\n 
'Prefix': '.',\n\n \"INSTA_USER\": '' or os.getenv(\"IG_USERNAME\"), # Instagram Username\n \"SESSION-Path\": './api', # get session with instaloader -l USERNAME, then copy the session file to this directory.\n\n \"stream_email\": '' or os.getenv(\"stream_email\"), # Streamable email https://streamable.com/.\n \"stream_pass\": '' or os.getenv(\"stream_pass\"), # Streamable pass.\n\n \"proxyip\": '' or os.getenv(\"proxyip\"), # Required for tiktok to work, use http proxy, example 0.0.0.0:80 or user:[email protected]:80. \n\n \"path\": './api/downloads-cache', # download path.\n \"limitsize\": 8388608, # 8 mb for file size limit set by discord.\n \"sEmoji\": '☑',\n}" }, { "alpha_fraction": 0.5145173668861389, "alphanum_fraction": 0.5197963714599609, "avg_line_length": 38.007354736328125, "blob_id": "9171e53c8f1134d7e3f6bd4d3db687bdf50fabea", "content_id": "59fb972b4bd67d29930a0e4d5c84be9f7017e838", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5310, "license_type": "no_license", "max_line_length": 169, "num_lines": 136, "path": "/cogs/commands/slash-sendtodm.py", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom discord import file\nfrom discord_slash import cog_ext, SlashContext\nfrom discord_slash.utils.manage_commands import create_choice, create_option\n\nimport os\nimport random\n\nimport sys\nsys.path.append(\"./cogs/functions\")\nimport tik_fn\nimport insta_fn\n\n# Setting Values\nsys.path.append(\"./\")\nfrom config import *\npath_down = config[\"path\"]\nlimitsize = config[\"limitsize\"] # <-- 8 mb for file size limit set by discord\nsEmoji = config[\"sEmoji\"]\n\nclass SlashSendtodmCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n # send video to dm\n\n @cog_ext.cog_slash(\n name=\"send\",\n description=\"Send post to dm.\",\n options=[\n\n create_option(\n \n name=\"user\",\n description=\"Select a user to 
send to.\",\n required=True,\n option_type=6,\n ),\n create_option(\n \n name=\"url\",\n description=\"Link of Post.\",\n required=True,\n option_type=3,\n ),\n create_option(\n \n name=\"number\",\n description=\"If there is multiple posts choose the number of post (instagram).\",\n required=False,\n option_type=4,\n ),\n ]\n )\n async def sendtodm_cmd(self, ctx:SlashContext, user: discord.Member, url, number=1):\n member = user\n link_url = url\n multipost_num = number\n await ctx.defer()\n channel = await member.create_dm()\n # Tiktok send to dm\n if link_url.startswith('https://www.tiktok.com') or link_url.startswith('https://vm.tiktok.com'):\n try:\n # Sends url to tik_fn\n t = tik_fn.Tiktok_fn(link_url) \n \n # Download video\n downloader = tik_fn.TikTokDownloader(t.default_url())\n downloader.download(path_down+'/{}.mp4'.format(t.video_id()))\n\n mp4_file = (f\"{path_down}/{t.video_id()}.mp4\")\n file_size = os.path.getsize(mp4_file) \n \n # Embed\n e = t.embedgen(link_url, ctx.author, ctx.author.avatar_url)\n\n # Upload to discord\n if file_size <= limitsize:\n await channel.send(embed=e)\n await channel.send(file=discord.File(mp4_file))\n\n # Upload to Streamable\n else:\n await channel.send(embed=e)\n mssg = await channel.send(f'Wait Uploading...🔃 {ctx.author}')\n streamable_link=t.upload_to_streamable(mp4_file, t.video_id())\n await mssg.edit(content=streamable_link)\n #Delete the file\n os.remove(mp4_file) \n await ctx.send(sEmoji, delete_after=15)\n except:\n embed=discord.Embed(title=\"Error\", description='The video is private, or the api is broken \\n make sure to use a proxy.', icon_url=ctx.author.avatar_url)\n embed.set_thumbnail(url=\"https://i.imgur.com/j3wGKKr.png\")\n await ctx.send(embed=embed, delete_after=10)\n\n # Instagram send to dm\n elif link_url.startswith('https://www.instagram.com/'):\n url = link_url\n multipost_num = (int(multipost_num) - 1)\n try:\n i = insta_fn.Insta_fn(url , multipost_num)\n embed = i.embedgen(link_url, 
ctx.author, ctx.author.avatar_url)\n # For Videos\n if i.type_media() == \"GraphVideo\":\n i.video_download(path_down)\n file_tosend = (f\"{path_down}/{i.video_id()}.mp4\")\n file_size = os.path.getsize(file_tosend)\n \n if file_size <= limitsize:\n await channel.send(embed=embed)\n await channel.send(file=discord.File(file_tosend))\n os.remove(file_tosend) # Deletes downloaded file\n await ctx.send(sEmoji, delete_after=15)\n \n else: # Upload to streamable if file over size limit\n await channel.send(embed=embed)\n msg = await channel.send(f'{message.author} 🔃 Wait Uploading...')\n streamable_link=i.upload_to_streamable(file_tosend, i.user_name())\n await msg.edit(content=streamable_link)\n os.remove(file_tosend) # Deletes downloaded file\n await ctx.send(sEmoji, delete_after=15)\n\n # For Pictures\n elif i.type_media() == \"GraphImage\":\n await channel.send(embed=embed)\n await ctx.send(sEmoji, delete_after=15)\n \n except:\n embed=discord.Embed(title=\"Error\", description='Account Maybe Private.', icon_url=ctx.author.avatar_url)\n embed.set_thumbnail(url=\"https://i.imgur.com/j3wGKKr.png\")\n await ctx.send(embed=embed, delete_after=10)\n else:\n pass\n\ndef setup(bot):\n bot.add_cog(SlashSendtodmCog(bot))" }, { "alpha_fraction": 0.631697416305542, "alphanum_fraction": 0.6618640422821045, "avg_line_length": 31.202898025512695, "blob_id": "7a00dafbb546844f85f9d55651fbe06d9efbeaba", "content_id": "c8d7a4397e2758a16e101fc87184354afcd97f42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2221, "license_type": "no_license", "max_line_length": 162, "num_lines": 69, "path": "/api/flaskapi.py", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "from flask import Flask, json, request\nimport json\nimport requests\nimport random\nimport instaloader\n\nimport sys\nsys.path.append(\"./\")\n#Setting Values\nfrom config import *\nUSER = config[\"INSTA_USER\"]\nsession_path = 
config[\"SESSION-Path\"]\nproxyip = config[\"proxyip\"]\n\napp = Flask(__name__)\n#app.config.from_mapping(config)\n\[email protected]('/', methods=['GET'])\ndef home(): \n return {'status': 'Online'}\n\n# TikTok\n# Get metadata\[email protected]('/api/tiktok/<video_username>/<video_id>', methods=['GET'])\ndef data(video_username, video_id):\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36 Edg/91.0.864.37'}\n web_id = str(random.randint(10000, 999999999))\n cookie = { 'tt_webid': web_id, 'tt_webid_v2': web_id }\n api_url = ('https://www.tiktok.com/node/share/video/@' + video_username + '/' + video_id)\n response = requests.request(\"get\", api_url, headers=headers, cookies=cookie)\n data = json.loads(response.text)\n if data[\"statusCode\"] == 0:\n return data['itemInfo']['itemStruct']\n else:\n proxies = dict(https=f'http://{proxyip}')\n response = requests.request(\"get\", api_url, headers=headers, proxies=proxies, cookies=cookie)\n data = json.loads(response.text)\n if data[\"statusCode\"] == 0:\n return data['itemInfo']['itemStruct']\n else:\n return {\"statusCode\": 404}\n\n\n# Instagram\nL = instaloader.Instaloader()\n# login credentials\ntry:\n try:\n L.load_session_from_file(USER)\n except:\n L.load_session_from_file(USER, f'{session_path}/session-{USER}')\nexcept:\n print('Instagram Session File Not Found Please Add it, otherwise youll get blocked by instagram.')\n# Get metadata\[email protected]('/api/instagram/<shortcode>', methods=['GET'])\ndef gp(shortcode):\n try:\n post = instaloader.Post.from_shortcode(L.context, shortcode)\n return json.dumps(post._full_metadata_dict, ensure_ascii=False)\n except instaloader.exceptions.BadResponseException:\n return {\"statusCode\": 404}\n\nfrom threading import Thread\ndef run():\n app.run(host='0.0.0.0',port=8080)\n\ndef run_api(): \n t = Thread(target=run)\n t.start()" }, { "alpha_fraction": 0.5670289993286133, 
"alphanum_fraction": 0.57201087474823, "avg_line_length": 38.44643020629883, "blob_id": "d441f6cce36d3938fd94aab46cd7c48f3805ad6d", "content_id": "aadf4609e06d9d58fcf30d77a5e95ba9009f3771", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2211, "license_type": "no_license", "max_line_length": 173, "num_lines": 56, "path": "/cogs/events/tiktok.py", "repo_name": "1mSAD/Discord-Mediabot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nfrom discord import file\n\nimport os\n\nimport sys\nsys.path.append(\"./cogs/functions\")\nimport tik_fn\n\n# Setting Values\nsys.path.append(\"./\")\nfrom config import *\ntik_down = config[\"path\"]\nlimitsize = config[\"limitsize\"] # <-- 8 mb for file size limit set by discord\n\nclass TiktokCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n @commands.Cog.listener()\n async def on_message(self, message):\n # Listen for Tiktok links\n if message.content.startswith('https://www.tiktok.com') or message.content.startswith('https://vm.tiktok.com'):\n try:\n # Sends url to tik_fn\n t = tik_fn.Tiktok_fn(message.content) \n \n # Download video\n downloader = tik_fn.TikTokDownloader(t.default_url())\n downloader.download(tik_down+'/{}.mp4'.format(t.video_id()))\n\n mp4_file = (f\"{tik_down}/{t.video_id()}.mp4\")\n file_size = os.path.getsize(mp4_file) \n \n # Embed\n e = t.embedgen(message.content, message.author, message.author.avatar_url)\n\n # Upload to discord\n if file_size <= limitsize:\n await message.channel.send(embed=e)\n await message.channel.send(file=discord.File(mp4_file))\n\n # Upload to Streamable\n else:\n await message.channel.send(embed=e)\n mssg = await message.channel.send(f'Wait Uploading...🔃 {message.author}')\n streamable_link=t.upload_to_streamable(mp4_file, t.video_id())\n await mssg.edit(content=streamable_link)\n #Delete the file\n os.remove(mp4_file) \n except:\n embed=discord.Embed(title=\"Error\", 
description='The video is private, or the api is broken \\n make sure to use a proxy.', icon_url=message.author.avatar_url)\n embed.set_thumbnail(url=\"https://i.imgur.com/j3wGKKr.png\")\n await message.channel.send(embed=embed, delete_after=10)\ndef setup(bot):\n bot.add_cog(TiktokCog(bot))" } ]
12
boubakersalmi/projet6
https://github.com/boubakersalmi/projet6
08d2639633c6b7e1359a3c14ed82d8244cd72a1f
08ec0490c4768e46960db67c76fe00137e89400c
f95ce6b0f2cc1091d763c0706aa70141c9a47720
refs/heads/master
2020-06-01T04:16:16.297557
2019-07-10T11:51:14
2019-07-10T11:51:14
190,631,840
0
0
null
2019-06-06T18:39:48
2019-07-10T11:47:47
2019-07-10T11:51:15
Python
[ { "alpha_fraction": 0.7346647381782532, "alphanum_fraction": 0.7346647381782532, "avg_line_length": 32.380950927734375, "blob_id": "cd138e8c0c770cc8c94007b0dbf06b4f2470b5ff", "content_id": "22f5551749dab447c35fca0486f5239fb46dc2f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 710, "license_type": "permissive", "max_line_length": 79, "num_lines": 21, "path": "/main.py", "repo_name": "boubakersalmi/projet6", "src_encoding": "UTF-8", "text": "## debut du code partie principale intégrant la POO\n## les fonctions appelés ci-dessous sont importées du fichier main.py\n## cette partie du programme n'intègre pas de fonctionnalités graphique\n\n## Dans un premier temps, nous importons les fonctions programmées dans main.py\n## afin de les intégrer dans la classe Action\n## puis nous importons le module logging afin de générer des fichiers de logs\n\nfrom setting import *\n\n## definition de la classe action\nclass Action:\n\n def __init__(self):\n self.nettoyagebureau = DesktopCleaner()\n self.corporatebg = wallpaper_update()\n\nif __name__ == '__main__':\n logger.info(chemindetoilestart)\n Action()\n logger.info(chemindetoileend)\n" }, { "alpha_fraction": 0.778413712978363, "alphanum_fraction": 0.7788225412368774, "avg_line_length": 47.900001525878906, "blob_id": "8d59b91d598d4459436f5f790832bb1a0dc0c350", "content_id": "e0e9680647ac714a28dab99ba2fa51afea09b6e6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2484, "license_type": "permissive", "max_line_length": 333, "num_lines": 50, "path": "/README.md", "repo_name": "boubakersalmi/projet6", "src_encoding": "ISO-8859-1", "text": "Utilisation du programme\n\nCe programme permet d'effectuer 3 taches :\n\n modifier le fond d'écran de façon automatique avec un fond adaptatif se changeant quotidiennement.\n ranger le bureau afin que les fichiers du bureau soient systématiquement dans des 
dossiers.\n journaliser toutes les modifications afin de permettre une tracabilité des changements.\n\nLes fonds d'écran sont définis depuis des liens présents dans le dossier S:\\Booba\\configfiles. Les changements se font en prenant en compte les services dans lesquels travaillent les utilisateurs. La liste des utilisateurs ainsi que les services sont disponibles dans le fichier usersandservices dans le dossier S:\\Booba\\configfiles.\n\nles fonds d'écran sont disponibles dans le réseau :\n\n pour les commerciaux : S:\\Booba\\commercial\n pour la compta : S:\\Booba\\compta\n pour le service technique : S:\\Booba\\Technique\n pour le service RH : S:\\Booba\\rh\n=======\n - modifier le fond d'écran de façon automatique avec un fond adaptatif se changeant quotidiennement.\n - ranger le bureau afin que les fichiers du bureau soient systématiquement dans des dossiers.\n - journaliser toutes les modifications afin de permettre une tracabilité des changements.\n\nLes fonds d'écran sont définis depuis des liens présents dans le dossier S:\\Booba\\configfiles.\nLes changements se font en prenant en compte les services dans lesquels travaillent les utilisateurs.\nLa liste des utilisateurs ainsi que les services sont disponibles dans le fichier usersandservices dans le dossier S:\\Booba\\configfiles.\n\nles fonds d'écran sont disponibles dans le réseau :\n - pour les commerciaux : S:\\Booba\\commercial\n - pour la compta : S:\\Booba\\compta\n - pour le service technique : S:\\Booba\\Technique\n - pour le service RH : S:\\Booba\\rh\n\n\nLe programme a été organisé de telle façon a ce que pour modifier les fichiers sources, il suffit de le faire depuis le réseau et non depuis le programme.\n\nChacune des fonctions a été détaillée afin d'apporter des explications sur son utilisation.\n\n\nCe programme a été créé dans le cadre d'un projet d'étude Openclassroom.\n\nLicense : MIT\n=======\nCe programme a été créé dans le cadre d'un projet d'étude Openclassroom.\n\nPour contribuer 
/ Contributing : \n\n Fork it\n Create your feature branch (git checkout -b example)\n Commit your changes (git commit -am 'vos changements / idées')\n Push to the branch (git push origin example)\n Create new Pull Request\n\n" }, { "alpha_fraction": 0.6718353033065796, "alphanum_fraction": 0.6746314167976379, "avg_line_length": 38.73737335205078, "blob_id": "805b50bbc06eb951b231f639e48785b07e740977", "content_id": "9ffe9ffb7471face1c680b2fda38e63290d4a094", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7953, "license_type": "permissive", "max_line_length": 144, "num_lines": 198, "path": "/setting.py", "repo_name": "boubakersalmi/projet6", "src_encoding": "UTF-8", "text": "## ce programme est réalisé sur python 3.7, certains points doivent être adaptés aux versions antérieures de pytho##\n\n##\n\n## DEBUT DU PROGRAMME\n\n\n## Import des modules nécessaires à l'execution du programme\nimport os\nimport json\nimport glob\nimport ctypes\nimport time\nimport logging\nimport sys\nimport csv\nimport socket\n\n\n\n############ Premièren partie du code : la journalisation ########################\n\n## lors de la génération du log, l'heure et la date d'éxecution apparaitront dans le modèle jj/mm/aaaa hh:mm:ss\n## le logger a été définit afin de pouvoir faire apparaitre les éléments voulu dans le fichier de log. 
celui-ci peut etre adapté\nlogging.basicConfig(\n filename=r'S:\\Booba\\configfiles\\logfile.log',\n format=\"%(asctime)s - %(message)s\",\n datefmt=\"%d/%m/%Y %H:%M:%S\",\n level=logging.INFO\n)\nlogger = logging.getLogger()\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\n##configuration des messages de début et de fin de session\nchemindetoileend = \"**************** FIN DE SESSION *******************\"\nchemindetoilestart = \"**************** DEBUT DE SESSION *******************\"\n\n\n\n############ Troisieme partie du code : Définition du fond d'écran adaptatif ########################\n\n## le fichier config_data définit l'emplacement des liens des fonds d'écran en cas d'utilisation d'un fond d'écran non adaptatif.\n## celui-ci peut etre modifié pour pointer sur une autre source\n## config_data = r'C:\\Users\\booba\\Desktop\\filerepo\\fond_ecran.txt'\n## définition d'une fonction permettant de définir le service dans lequel une personne travaille\n## pour cela, la fonction fait appel auu fichier usersandservices dans lequel nous retrouvons la relation entre utilisateur et service concerné.\n## le nom de l'utilisateur est celui du hostname windows afin de permettre a la fonction gethostname de la récupérer.\n## un logger.info a été rajouté afin de faire apparaitre la relation entre utilisateur et service dans le fichier de log\n\nfichierutilisateurservices = r'S:\\Booba\\configfiles\\usersandservices.csv'\n\ndef get_hostname_service():\n # Traitement du csv\n with open(fichierutilisateurservices, \"r\") as f:\n csvreader = csv.reader(f, delimiter=',')\n next(csvreader) # skip header\n us_data = [row for row in csvreader]\n\n current_hostname = socket.gethostname()\n # On cherche a quel service il appartient\n for hostname, service in us_data:\n if hostname == current_hostname:\n break\n else:\n raise Exception(\n f\"Impossible de trouver le service auquel '{hostname}' appartient.\")\n\n return service\n\n\n## par l'intermédiaire de if, nous avons la 
possbilité de conditionner le chemin de la bibliothèque de fond d'écra\n## a utiliser.\n## selon la valeur de service nous pourrons définir l'emplacement du dossier dans lequel rechercher les liens de BG\n\nservicesconcerne = get_hostname_service()\n\nif servicesconcerne == \"Technique\":\n config_data = r'S:\\Booba\\configfiles\\fond_ecran_technique.txt'\nelif servicesconcerne == \"RH\":\n config_data = r'S:\\Booba\\configfiles\\fond_ecran_rh.txt'\nelif servicesconcerne == \"Commercial\":\n config_data = r'S:\\Booba\\configfiles\\fond_ecran_commerciaux.txt'\nelse:\n print(\"Impossible de definir le service de l'utilisateur\")\n\n\n## definition de la fonction change_wallpaper\n## les prints ci-dessous permettent de définir les messages à afficher. si l'etape ctypes... se déroule bien, nous aurons les deux messages\n## suivants qui s'afficheront\ndef change_wallpaper(wallpaper_path):\n \"\"\"On change le fond d'ecran\"\"\"\n print(\"Actualisation du fond d'écran\")\n ctypes.windll.user32.SystemParametersInfoA(20, 0, wallpaper_path.encode(\"us-ascii\"), 3)\n logger.info(\"Actualisation du fond d'écran réalisée\")\n\n## on lit le fichiers fond_ecran contenant les 7 liens pour chacune des images disponibles sur le réseaux interne disque B\nwith open(config_data, \"r\") as f:\n mesfonddecrans = f.readlines()\n # On retire le '\\n' (retour à la ligne)\n mesfonddecrans = [p[:-1] for p in mesfonddecrans]\n\n## definition des parametres de temps permettant de modifier chaque fond d'écran par rapport au jour d'apparition\nlocaltime = time.localtime(time.time())\njdls = localtime[6]\nimage_du_jour = mesfonddecrans[jdls]\n\n## si ecran noir apparait en fond d'écran, vérifier les liens\ndef wallpaper_update():\n change_wallpaper(image_du_jour)\n\n\n\n\n############ Troisieme partie du code : Nettoyage du bureau ########################\n\n\n\n\n## definition de l'adresse du desktop intégrant la variable nomutilisateur\nCHEMIN_BUREAU = r'C:\\Users\\booba\\Desktop'\n\n## 
définition du droit donné sur les dossiers contenant les fichiers nettoyés\npermission_octal = 777\n\n## fichier dans lequel nous retrouverons les éléments concernés par le tri\ntypeelementsconfig = r'S:\\Booba\\configfiles\\type_fichier.json'\n\n## creation du dossier si non existant\ndef creer_dossier(chemin_dossier):\n # Si le dossier n'existe pas déjà, on le créer\n if not os.path.exists(chemin_dossier):\n os.makedirs(chemin_dossier, permission_octal)\n\n## définition de la règle de gestion de doublon\ndef creer_version(nouveau_chemin):\n ## Si le fichier dans le dossier de destination existe déjà, on rajoute une version\n ## example test.txt existe, on renomme en test-v(1, 2, 3, ...).txt\n ## cette partie permet de ne jamais écraser un fichier si deux fichiers ont le même nom\n version = 0\n while os.path.isfile(nouveau_chemin):\n version += 1\n nom_fichier_liste = nom_fichier_liste.split(\".\")\n nom_fichier_avec_version = \"{}-v{}.{}\".format(\n nom_fichier_liste[0],\n version,\n nom_fichier_liste[1]\n )\n nouveau_chemin = os.path.join(\n CHEMIN_BUREAU,\n chemin_dossier,\n nom_fichier_avec_version\n )\n return nouveau_chemin\n\n## definition de la fonction de nettoyage du bureau\ndef DesktopCleaner ():\n\n with open(typeelementsconfig, \"r\") as f:\n ## recherche dans le dictionnaire\n dossier_et_extensions = json.load(f)\n\n for dossier in dossier_et_extensions.keys():\n ## Liste des fichiers qui vont dans le dossier 'dossier'\n ## Si dossier = 'TEXTE'\n ## 'fichiers_dossier' ressemble à ça ['monfichiertxt.txt', 'blabla.txt', ...])\n fichiers_dossier = []\n for extension in dossier_et_extensions[dossier]:\n for fichier in glob.glob(os.path.join(CHEMIN_BUREAU, \"*%s\" % extension)):\n fichiers_dossier.append(fichier)\n\n ## Si on a trouvé un fichier alors on le met dans le dossier\n if len(fichiers_dossier) > 0:\n\n ## Si le dossier n'existe pas déjà, on le créer\n creer_dossier(os.path.join(CHEMIN_BUREAU, dossier))\n\n ## On met chaque fichier dans le 
(nouveau) dossier\n for chemin_original in fichiers_dossier:\n nom_fichier = os.path.basename(chemin_original)\n ## message de confirmation\n print(\"On met le fichier '%s' dans le dossier '%s'\" % (nom_fichier, dossier))\n logger.info(\"Le fichier nommé '%s' a été déplacé dans le dossier '%s'\" % (nom_fichier, dossier))\n\n nouveau_chemin = os.path.join(\n CHEMIN_BUREAU,\n dossier,\n nom_fichier\n )\n ## On ajoute une version -v* si un fichier avec le même nom existe déjà\n nouveau_chemin = creer_version(nouveau_chemin)\n\n ## on déplace effectivement le fichier dans le dossier\n os.rename(chemin_original, nouveau_chemin)\n\n ## definition d'un else permettant d'informer du non déplacement de fichier\n else:\n print(\"Pas de fichiers a ranger pour le dossier %s.\" % dossier)\n logger.info(\"Aucune modification n'a été apportée au dossier %s\" % dossier)\n" } ]
3
novapost/django-ticketoffice
https://github.com/novapost/django-ticketoffice
482101ddbafd2c9cb5721aa4006e781c2e2f56c4
f9c252c05664efd9a5919a4f2921805f535cfbd7
6fa945d405649c72eddec0a6f7c140998e9849d2
refs/heads/master
2021-01-01T06:39:45.722140
2019-04-23T16:09:09
2019-04-23T16:09:09
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.64702308177948, "alphanum_fraction": 0.6488456726074219, "avg_line_length": 22.18309783935547, "blob_id": "7de921a95943c53280e99d970062ba10bcbdf6eb", "content_id": "d3e9ff2d8af6812f3a92d42db03d78f214d6edfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1646, "license_type": "no_license", "max_line_length": 72, "num_lines": 71, "path": "/demo/demoproject/settings.py", "repo_name": "novapost/django-ticketoffice", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"Django settings.\n\n.. warning::\n\n These settings are made for development environment.\n They are not safe in production!\n\n\"\"\"\nimport os\n\n\nhere = os.path.dirname(os.path.abspath(__file__))\nroot_dir = os.path.dirname(os.path.dirname(here))\ndata_dir = os.path.join(root_dir, 'var')\ncfg_dir = os.path.join(root_dir, 'etc')\n\n\n# Applications, dependencies.\nINSTALLED_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n # Third-parties.\n 'django_nose',\n 'floppyforms',\n # Project's.\n 'django_ticketoffice',\n]\n\n\n# Databases.\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ.get('PGNAME', 'ticketoffice'),\n 'USER': os.environ.get('PGUSER', 'ticketoffice'),\n 'PASSWORD': os.environ.get('PGPASS', 'ticketoffice'),\n 'HOST': os.environ.get('PGHOST', 'localhost'),\n }\n}\n\n\n# URL configuration.\nROOT_URLCONF = '{package}.urls'.format(package=__package__)\n\n\n# Fake secret key.\nSECRET_KEY = 'Fake secret.'\n\n\n# Use django-nose.\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\nnose_cfg_dir = os.path.join(cfg_dir, 'nose')\nNOSE_ARGS = [\n '--verbosity=2',\n '--nocapture',\n '--rednose',\n '--no-path-adjustment',\n '--all-modules',\n '--cover-inclusive',\n '--cover-tests',\n]\n\n# Disable password hashing for better performances.\n# Enable this feature on demand with 
@django.test.override_settings() or\n# @django.test.TestCase.settings() decorators in tests.\nPASSWORD_HASHERS = (\n 'django_ticketoffice.utils.PlainPasswordHasher',\n)\n" }, { "alpha_fraction": 0.6566416025161743, "alphanum_fraction": 0.6908938884735107, "avg_line_length": 18, "blob_id": "b17aa7a2fc2cfc9f12e76803c733c4bfd089c14b", "content_id": "997c159677c9c187b6bbe331904511d941c71fe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 1197, "license_type": "no_license", "max_line_length": 119, "num_lines": 63, "path": "/tox.ini", "repo_name": "novapost/django-ticketoffice", "src_encoding": "UTF-8", "text": "[tox]\nenvlist = py{27,35,36}-dj{111},py{35,36}-dj{111,20}, flake8, readme\n\n[testenv]\ncommands =\n demo test --nose-verbosity=2 --with-doctest --with-coverage --cover-package=django_ticketoffice django_ticketoffice\n coverage erase\ndeps =\n coverage\n nose\n rednose\n -e.\n -edemo/\n dj111: Django>=1.11,<2\n dj20: Django>=2.0,<2.1\npassenv =\n PYTHONPATH\n PGHOST\n PGNAME\n PGPASS\n PGUSER\nskip_install = True\nusedevelop = True\n\n[testenv:flake8]\ndeps =\n flake8\ncommands =\n flake8 --exclude=migrations django_ticketoffice/\n flake8 demo\nskip_install = True\n\n[testenv:sphinx]\ndeps =\n sphinx\ncommands =\n make --directory=docs SPHINXBUILD=\"sphinx-build -W\" clean html doctest\nskip_install = True\nusedevelop = True\nwhitelist_externals =\n make\n\n[testenv:readme]\ndeps =\n docutils\n pygments\ncommands =\n mkdir -p var/docs\n rst2html.py --exit-status=2 README.rst var/docs/README.html\n rst2html.py --exit-status=2 CONTRIBUTING.rst var/docs/CONTRIBUTING.html\nskip_install = True\nusedevelop = True\nwhitelist_externals =\n mkdir\n\n[testenv:release]\ndeps =\n wheel\n zest.releaser\nskip_install = True\nusedevelop = True\ncommands =\n fullrelease\n" }, { "alpha_fraction": 0.5968007445335388, "alphanum_fraction": 0.6025640964508057, "avg_line_length": 31.5747127532959, "blob_id": 
"77e8febedd5a86e953df56e1969f536991109963", "content_id": "39bfdea1b6f8246bd76459ced9a1625a72bacd6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8502, "license_type": "no_license", "max_line_length": 79, "num_lines": 261, "path": "/django_ticketoffice/utils.py", "repo_name": "novapost/django-ticketoffice", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"Utilities that may be packaged in external libraries.\"\"\"\nfrom random import SystemRandom\nfrom collections import OrderedDict\nfrom importlib import import_module\n\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.hashers import BasePasswordHasher, mask_hash\n\n\ndef random_unicode(min_length=None,\n max_length=None,\n alphabet=u'abcdefghijklmnopqrstuvwxyz'\n u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n u'0123456789'):\n \"\"\"Return random unicode.\n\n .. note:: Uses :py:func:`os.urandom`.\n\n \"\"\"\n if min_length is None:\n if max_length is None:\n raise ValueError(\"Provide min_length or max_length.\")\n else:\n min_length = 1\n\n if max_length is None:\n max_length = min_length\n\n if min_length < 1:\n raise ValueError(\"Minimum length is 1.\")\n\n if max_length < min_length:\n raise ValueError(\"Maximum length must be greater than minimum length.\")\n\n random = SystemRandom()\n length = random.randint(min_length, max_length)\n return u''.join(random.choice(alphabet) for i in range(length))\n\n\ndef random_password(min_length=16, max_length=32,\n alphabet='abcdefghjkmnpqrstuvwxyz'\n 'ABCDEFGHJKLMNPQRSTUVWXYZ'\n '23456789'):\n \"\"\"Return random password of random length with limited ASCII alphabet.\n\n .. 
note::\n\n The default value of allowed chars does not have \"I\" or \"O\" or\n letters and digits that look similar -- just to avoid confusion.\n\n \"\"\"\n return random_unicode(min_length, max_length, alphabet)\n\n\nclass PlainPasswordHasher(BasePasswordHasher):\n \"Plain password hashing algorithm for test (DO NOT USE in production).\"\n algorithm = \"plain\"\n\n def salt(self):\n return ''\n\n def encode(self, password, salt):\n return '%s$$%s' % (self.algorithm, password)\n\n def verify(self, password, encoded):\n algorithm, hash = encoded.split('$$', 1)\n assert algorithm == self.algorithm\n return password == hash\n\n def safe_summary(self, encoded):\n return OrderedDict([\n ('algorithm', self.algorithm),\n ('hash', mask_hash(encoded, show=3)),\n ])\n\n\nclass UnauthorizedView(TemplateView):\n template_name = '401.html'\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\"Render response with status code 401.\"\"\"\n response_kwargs.setdefault('status', 401)\n return TemplateView.render_to_response(self, context,\n **response_kwargs)\n\n\nclass ForbiddenView(TemplateView):\n template_name = '403.html'\n\n def render_to_response(self, context, **response_kwargs):\n \"\"\"Render response with status code 401.\"\"\"\n response_kwargs.setdefault('status', 403)\n return TemplateView.render_to_response(self, context,\n **response_kwargs)\n\n\ndef import_member(import_string):\n \"\"\"Import one member of Python module by path.\n\n >>> import os.path\n >>> imported = import_member('os.path.supports_unicode_filenames')\n >>> os.path.supports_unicode_filenames is imported\n True\n\n \"\"\"\n module_name, factory_name = import_string.rsplit('.', 1)\n module = import_module(module_name)\n return getattr(module, factory_name)\n\n\n#: Sentinel to detect undefined function argument.\nUNDEFINED_FUNCTION = object()\n\n\nclass NotCallableError(TypeError):\n \"\"\"Raised when operation requires a callable.\"\"\"\n\n\nclass Decorator(object):\n \"\"\"Base 
class to create class-based decorators.\n\n See: https://tech.people-doc.com/python-class-based-decorators.html\n\n Override :meth:`setup`, :meth:`run` or :meth:`decorate` to create custom\n decorators:\n\n * :meth:`setup` is dedicated to setup, i.e. setting decorator's internal\n options.\n :meth:`__init__` calls :py:meth:`setup`.\n\n * :meth:`decorate` is dedicated to wrapping function, i.e. remember the\n function to decorate.\n :meth:`__init__` or :meth:`__call__` may call :meth:`decorate`,\n depending on the usage.\n\n * :meth:`run` is dedicated to execution, i.e. running the decorated\n function.\n :meth:`__call__` calls :meth:`run` if a function has already been\n decorated.\n\n Decorator instances are callables. The :meth:`__call__` method has a\n special implementation in Decorator. Generally, consider overriding\n :meth:`run` instead of :meth:`__call__`.\n\n \"\"\"\n #: Sentinel to detect undefined function argument.\n UNDEFINED_FUNCTION = UNDEFINED_FUNCTION\n\n #: Shortcut to exception:\n NotCallableError = NotCallableError\n\n def __init__(self, func=UNDEFINED_FUNCTION):\n \"\"\"Constructor.\n\n Accepts one optional positional argument: the function to decorate.\n\n Other arguments **must** be keyword arguments.\n\n And beware passing ``func`` as keyword argument: it would be used as\n the function to decorate.\n\n Handle decorator's options; return decorator instance (``self``).\n\n Default implementation decorates ``func``.\n\n Override this method and adapt its signature depending on your needs.\n\n If the decorator has mandatory options, they should be positional\n arguments in :meth:`setup` (or an exception should be raised inside\n :meth:`setup`).\n\n If the decorator accepts optional configuration, there should be\n keyword arguments in :meth:`setup`.\n\n \"\"\"\n #: Decorated function.\n self.decorated = self.UNDEFINED_FUNCTION\n # Decorate function, if it has been passed to :meth:`__init__`, i.e.\n # if decorator has been used with ``@`` and 
without parentheses:\n #\n # .. code:: python\n #\n # @Decorator\n # def some_function():\n # pass\n #\n # Which is an equivalent to:\n #\n # .. code:: python\n #\n # def some_function():\n # pass\n # some_function = Decorator(some_function)\n if func is not self.UNDEFINED_FUNCTION:\n self.decorate(func)\n return self\n\n def decorate(self, func):\n \"\"\"Set :attr:`decorated`; return decorator instance (``self``).\n\n Raises :class:`NotCallableError` (inherits from :class:`TypeError` if\n ``func`` is not a callable.\n\n \"\"\"\n if not callable(func):\n raise NotCallableError(\n 'Cannot decorate non callable object \"{func}\"'\n .format(func=func))\n self.decorated = func\n return self\n\n def __call__(self, *args, **kwargs):\n \"\"\"Run decorated function if available, else decorate first arg.\n\n First use case of :meth:`__call__` is: decorator instance has already\n been initialized with function to decorate, and the decorated function\n is called:\n\n .. code:: python\n\n @Decorator # No parentheses => __init__() will be called with\n # some_function as first (and only) argument.\n def some_function():\n pass\n\n some_function() # Decorator.__call__()\n\n Second use case is: decorator instance has been initialized with\n configuration, but without function to decorate. Then the decorator\n instance is used to decorate a function:\n\n .. code:: python\n\n @Decorator() # Parentheses => ``some_function`` will be decorated\n # via ``Decorator.__call__(some_function)``.\n def some_function():\n pass\n\n \"\"\"\n if self.decorated is self.UNDEFINED_FUNCTION:\n func = args[0]\n if args[1:] or kwargs:\n raise ValueError('Cannot decorate and setup simultaneously '\n 'with __call__(). Use __init__() or '\n 'setup() for setup. 
Use __call__() or '\n 'decorate() to decorate.')\n self.decorate(func)\n return self\n else:\n return self.run(*args, **kwargs)\n\n def run(self, *args, **kwargs):\n \"\"\"Actually run the decorator.\n\n This base implementation is a transparent proxy to the decorated\n function: it passes positional and keyword arguments as is, and returns\n result.\n\n \"\"\"\n return self.decorated(*args, **kwargs)\n" }, { "alpha_fraction": 0.6795096397399902, "alphanum_fraction": 0.6812609434127808, "avg_line_length": 20.148147583007812, "blob_id": "129b0b20da2e7d002d767899f9337a47654d5753", "content_id": "f31bd7e7dfb27d7f929edb969ffab6e50cab65c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "no_license", "max_line_length": 51, "num_lines": 27, "path": "/django_ticketoffice/api.py", "repo_name": "novapost/django-ticketoffice", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"API shortcuts for use in client applications.\"\"\"\nfrom django_ticketoffice.exceptions import (\n CredentialsError,\n NoTicketError,\n TicketExpiredError,\n TicketUsedError,\n)\nfrom django_ticketoffice.decorators import (\n invitation_required,\n stamp_invitation,\n)\nfrom django_ticketoffice.models import Ticket\n\n\n__all__ = [\n # exceptions\n 'CredentialsError',\n 'NoTicketError',\n 'TicketExpiredError',\n 'TicketUsedError',\n # decorators\n 'invitation_required',\n 'stamp_invitation',\n # models\n 'Ticket',\n]\n" }, { "alpha_fraction": 0.6073486804962158, "alphanum_fraction": 0.6253602504730225, "avg_line_length": 39.82352828979492, "blob_id": "f2b74e0386edfb2ac6a3f58785fe977b1a081143", "content_id": "b3123890d9020380c6c7a87c4e038e5209887282", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1388, "license_type": "no_license", "max_line_length": 149, "num_lines": 34, "path": "/django_ticketoffice/migrations/0001_initial.py", "repo_name": 
"novapost/django-ticketoffice", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.13 on 2017-04-25 14:33\nfrom __future__ import unicode_literals\n\nimport django.contrib.auth.hashers\nfrom django.contrib.postgres.fields import JSONField\nfrom django.db import migrations, models\nimport functools\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Ticket',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('uuid', models.UUIDField(default=uuid.uuid4)),\n ('password', models.CharField(default=functools.partial(django.contrib.auth.hashers.make_password, *(None,), **{}), max_length=255)),\n ('place', models.CharField(blank=True, db_index=True, max_length=50)),\n ('purpose', models.CharField(blank=True, db_index=True, max_length=50)),\n ('data', JSONField(blank=True, default=dict, null=True)),\n ('creation_datetime', models.DateTimeField(auto_now_add=True, db_index=True)),\n ('expiry_datetime', models.DateTimeField(blank=True, db_index=True, default=None, null=True)),\n ('usage_datetime', models.DateTimeField(blank=True, db_index=True, default=None, null=True)),\n ],\n ),\n ]\n" } ]
5
mspagon/Uni
https://github.com/mspagon/Uni
ab2b8f83c5ff139ca42e1d8ee30da7ec75df4b4d
602a1739d759957901195caf9cb5ebf6a5633823
00c1583b264ce285961854c1eef06f257ad4e204
refs/heads/master
2020-05-21T07:25:20.505441
2019-05-11T11:20:53
2019-05-11T11:20:53
185,960,239
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.7642857432365417, "alphanum_fraction": 0.7642857432365417, "avg_line_length": 27.200000762939453, "blob_id": "f0b23dc92dc285fba6fe8523d48a6dcccea7f4c0", "content_id": "2ba44e4258e281272b3fb0d74e4f45a3e2c0f158", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 94, "num_lines": 5, "path": "/install/postgres/populate.py", "repo_name": "mspagon/Uni", "src_encoding": "UTF-8", "text": "from database import db\n\nconn = db.connect()\n\nid, username, name, start_semester, start_year, end_semester, end_year, total_credits_enrolled" }, { "alpha_fraction": 0.5954825282096863, "alphanum_fraction": 0.6160164475440979, "avg_line_length": 24.6842098236084, "blob_id": "33e8a9f92dc74c916f55492101c282f599b165e8", "content_id": "247ceee368d28b1c92b650998367753f52c9270c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 487, "license_type": "no_license", "max_line_length": 109, "num_lines": 19, "path": "/database/db.py", "repo_name": "mspagon/Uni", "src_encoding": "UTF-8", "text": "import psycopg2\nimport os\n\nfrom psycopg2 import connect\n\n\ndef connect():\n secret = os.getenv(\"POSTGRES_SECRET\")\n if secret:\n conn = psycopg2.connect(host='uni.c47pehqpskfz.us-east-1.rds.amazonaws.com', port=5432, dbname='uni',\n user='spagon', password=secret)\n return conn\n else:\n raise OSError(\"Environment variable for POSTGRES_SECRET is not set.\")\n\n\nif __name__ == '__main__':\n conn = connect()\n print(type(conn))" }, { "alpha_fraction": 0.586150050163269, "alphanum_fraction": 0.6009892821311951, "avg_line_length": 30.102563858032227, "blob_id": "b856b92f6717c2895adde46b265c0824716139e7", "content_id": "a54e717b32191f47047f15beca682ae529440344", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1213, "license_type": "no_license", 
"max_line_length": 111, "num_lines": 39, "path": "/database/students.py", "repo_name": "mspagon/Uni", "src_encoding": "UTF-8", "text": "import psycopg2\nimport collections\n\nfrom psycopg2 import sql\n\nfrom database import db\n\nStudent = collections.namedtuple('Student', 'id username name start_semester start_year end_semester \\\n end_year total_credits enrolled')\n\n\ndef insert_student(conn: psycopg2.extensions.connection, student: dict):\n query = sql.SQL(\"INSERT INTO students ({columns}) VALUES ({values});\").format(\n columns=sql.SQL(', ').join(\n sql.Identifier(n) for n in student.keys()\n ),\n values=sql.SQL(', ').join(\n sql.Placeholder() * len(student)\n )\n )\n\n with conn, conn.cursor() as curs:\n curs.execute(query, list(student.values()))\n\n# TODO make a test case for the database inserts... Do I do this using mocks?\n# because I can't insert data thats already there. Also... modifies production database, so testing database?\nif __name__ == '__main__':\n bob = {\n 'username': 'bs1025',\n 'name': 'Bob Sagat',\n 'start_semester': 'spring',\n 'start_year': 2001,\n 'end_semester': 'fall',\n 'end_year': 2008,\n 'total_credits': 120,\n }\n\n with db.connect() as conn:\n insert_student(conn, bob)\n" }, { "alpha_fraction": 0.42635658383369446, "alphanum_fraction": 0.5348837375640869, "avg_line_length": 31.5, "blob_id": "230603d4f46717c9f1b232a7e4d1b3d3532ed8ea", "content_id": "ab6726c2c5eba1d0db1735c39fece956a01065a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 129, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/install/postgres/populate.SQL", "repo_name": "mspagon/Uni", "src_encoding": "UTF-8", "text": "INSERT INTO students(username, name)\nVALUES ('mrs2555', 'Michael Spagon'),\n ('ag12345', 'Arnold Garrett'),\n ('br16853', 'Brittany Reynolds');" } ]
4
Raihan-jamil/Django
https://github.com/Raihan-jamil/Django
504b5a379675dc560cb8376300a40eb6cdfa7ef3
c15bbd6c90ea9b3a395edf2a5e5b8808a047f1e9
48b9fd7a48d5cf322869473905ea71b5d85a280b
refs/heads/master
2023-03-12T02:23:20.797995
2021-03-01T10:28:49
2021-03-01T10:28:49
294,062,960
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6996337175369263, "alphanum_fraction": 0.6996337175369263, "avg_line_length": 29.33333396911621, "blob_id": "9367cfbf4f8c84b5ad58191049a0d245c5b92fef", "content_id": "ee8a5b9e5fef50bc7c54c712b8540effd405503a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 60, "num_lines": 9, "path": "/website/urls.py", "repo_name": "Raihan-jamil/Django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.urls import include, path\nfrom users import views as user_views\n\nurlpatterns = [\n path(r'^admin/', admin.site.urls),\n path('register/', user_views.register, name='register'),\n path(r'^music/', include('music.urls')),\n]\n" } ]
1
ShadowFient/dds-modified
https://github.com/ShadowFient/dds-modified
3a0d44439f6f6081f4fcd70a4b2b96eda113b8f4
9de810cd6398ec3e7e34f735eacf1cfda8e8b76e
729448fd35c378a4613de73787d9705d4992772a
refs/heads/main
2023-07-01T09:52:36.444245
2021-08-10T14:25:57
2021-08-10T14:25:57
394,683,120
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5390804409980774, "alphanum_fraction": 0.5637931227684021, "avg_line_length": 27.04838752746582, "blob_id": "5eaa020b4465b372289e9619aec594a1bbc98f92", "content_id": "85938362c9d5cda1a633c81f96f9df53258df355", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3480, "license_type": "no_license", "max_line_length": 111, "num_lines": 124, "path": "/workspace/visualize_object_detection.py", "repo_name": "ShadowFient/dds-modified", "src_encoding": "UTF-8", "text": "\nimport argparse\n\nfrom PIL import Image, ImageDraw\nfrom torchvision import io\nimport sys\nfrom pathlib import Path\nimport yaml\nimport logging\n\nsys.path.append('../')\nfrom dds_utils import read_results_dict\n\nrelevant_classes = 'vehicle'\nconfidence_threshold = 0.5\nmax_area_threshold = 0.04\niou_threshold = 0.8\n\ndef iou(b1, b2):\n # calculate the iou of two bounding boxes\n\t(x1,y1,w1,h1) = b1\n\t(x2,y2,w2,h2) = b2\n\tx3 = max(x1,x2)\n\ty3 = max(y1,y2)\n\tx4 = min(x1+w1,x2+w2)\n\ty4 = min(y1+h1,y2+h2)\n\tif x3>x4 or y3>y4:\n\t\treturn 0\n\telse:\n\t\toverlap = (x4-x3)*(y4-y3)\n\t\treturn overlap/(w1*h1+w2*h2-overlap)\n\ndef load_image(config, results_file, fid):\n video_name = '_'.join(Path(results_file).name.split('_')[:2])\n image_path = Path(config['data_dir']) / video_name / 'src' / ('%010d.png' % fid)\n return Image.open(image_path)\n\ndef main(argv):\n\n # get logger\n logging.basicConfig(\n format=\"%(name)s -- %(levelname)s -- %(lineno)s -- %(message)s\",\n level='INFO')\n\n logger = logging.getLogger(\"visualize\")\n logger.addHandler(logging.NullHandler())\n\n if len(argv) > 3:\n logger.error('Too many arguments')\n exit()\n \n # load configuration file to get the directory of dataset\n with open('configuration.yml', 'r') as f:\n config = yaml.load(f.read())\n\n # load regions from result_file\n results_file = argv[1]\n results = read_results_dict(results_file)\n\n has_baseline = False\n if len(argv) == 3:\n 
has_baseline = True\n baseline_file = argv[2]\n baseline_results = read_results_dict(baseline_file)\n\n # folder to save image\n save_folder = Path('visualize') / Path(results_file).name\n save_folder.mkdir(parents=True, exist_ok=True)\n \n for fid in range(max(results.keys())):\n\n if fid % 10 == 0:\n logger.info(f'Visualizing image with frame id {fid}')\n \n # load image\n image = load_image(config, results_file, fid)\n # drawer for this image\n draw = ImageDraw.Draw(image)\n\n width, height = image.size\n\n for region in results[fid]:\n x, y, w, h = region.x, region.y, region.w, region.h\n x1 = int(x * width + 0.5)\n x2 = int((x + w) * width + 0.5)\n y1 = int(y * height + 0.5)\n y2 = int((y + h) * height + 0.5)\n\n # filter out large regions, they are not true objects\n if w * h > max_area_threshold:\n continue\n\n # filter out irrelevant regions\n if region.label not in relevant_classes:\n continue\n\n # filter out low confidence regions\n if region.conf < confidence_threshold:\n continue\n\n # default color\n color = '#318fb5'\n\n # if this bounding box is not covered by baseline, mark it with other color\n if has_baseline:\n overlap = False\n for baseline_region in baseline_results[fid]:\n x_, y_, w_, h_ = baseline_region.x, baseline_region.y, baseline_region.w, baseline_region.h\n if iou((x,y,w,h), (x_,y_,w_,h_)) > iou_threshold:\n overlap = True\n break\n if not overlap:\n color = '#febf63'\n\n\n draw.rectangle([x1,y1,x2,y2], outline = color, width=10)\n \n image.save(save_folder / ('%010d.png' % fid))\n\n\n\n\nif __name__ == '__main__':\n\n main(sys.argv)\n\n" }, { "alpha_fraction": 0.7284623980522156, "alphanum_fraction": 0.7448200583457947, "avg_line_length": 31.785715103149414, "blob_id": "ea28a67f9bee42b4bcbaa6e4e9e29edb378a6f9b", "content_id": "bcf5c350e23da870a7b9fe9ccc4ad0dffeb3a647", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 917, "license_type": "no_license", "max_line_length": 85, 
"num_lines": 28, "path": "/Dockerfile", "repo_name": "ShadowFient/dds-modified", "src_encoding": "UTF-8", "text": "FROM ubuntu\n# copy source code to container\nCOPY . /app\n\n# noninteractive only when build\nARG DEBIAN_FRONTEND=noninteractive\n\n# Ali apt-get source.list\nRUN sed -i s@/ports.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \\\n\t&& rm -Rf /var/lib/apt/lists/\n\n# install dependencies\nRUN apt update\nRUN apt install openssh-server python3 python3-pip python-is-python3 ffmpeg -y\nRUN python -m pip install flask munch networkx pyyaml requests \\\n\topencv-contrib-python\n\n# allow ssh to the container as root and add local public key to the authorized keys\nRUN echo \"PermitRootLogin without-password\" > /etc/ssh/sshd_config\nRUN --mount=type=secret,id=my_secret mkdir -p -m 0600 /root/.ssh \\\n\t&& echo $(cat /run/secrets/my_secret) > /root/.ssh/authorized_keyss\n\n# listening on port 22\nEXPOSE 22\n\nCMD echo \"export LD_LIBRARY_PATH=/opt/openblas/0.3.10/lib:\" >> /home/ubuntu/.bashrc \\\n\t&& sudo service ssh restart \\\n\t&& bash" }, { "alpha_fraction": 0.6025058627128601, "alphanum_fraction": 0.6045418977737427, "avg_line_length": 37.93902587890625, "blob_id": "0c38b819d0a4b076f84d715762b7efc122de957f", "content_id": "0d314d9b43b84b06f5ca8a080abbddc78fd340aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6385, "license_type": "no_license", "max_line_length": 118, "num_lines": 164, "path": "/workspace/entrance.py", "repo_name": "ShadowFient/dds-modified", "src_encoding": "UTF-8", "text": "\"\"\"\n entrance.py - user entrance for the platform\n author: Qizheng Zhang ([email protected])\n Kuntai Du ([email protected])\n\"\"\"\n\nimport os\nimport subprocess\nimport yaml\nimport sys\n\n# dirty fix\nsys.path.append('../')\n\ndef load_configuration():\n \"\"\"read configuration information from yaml file\n\n Returns:\n dict: information of the yaml file\n \"\"\"\n with 
open('configuration.yml', 'r') as config:\n config_info = yaml.load(config, Loader=yaml.FullLoader)\n return config_info\n\n\ndef execute_single(single_instance):\n \"\"\"execute an atomic instance\n\n Args:\n single_instance (dict): the instance to be executed\n \"\"\"\n # unpacking\n baseline = single_instance['method']\n\n # to be fixed:\n # gt and mpeg must be run for dds regardless of whether they are in config\n\n # branching based on baselines\n if baseline == 'gt':\n # unpacking\n video_name = single_instance['video_name']\n original_images_dir = os.path.join(data_dir, video_name, 'src')\n\n # skip if result file already exists\n result_file_name = f\"{video_name}_gt\"\n if single_instance['overwrite'] == False and os.path.exists(os.path.join(\"results\", result_file_name)):\n print(f\"Skipping {result_file_name}\")\n # otherwise, start execution\n else:\n single_instance['video_name'] = f'results/{result_file_name}'\n single_instance['high_images_path'] = f'{original_images_dir}'\n single_instance['outfile'] = 'stats'\n\n subprocess.run(['python', '../play_video.py', \n yaml.dump(single_instance)])\n\n # assume we are running emulation\n elif baseline == 'mpeg':\n # unpacking\n video_name = single_instance['video_name']\n mpeg_qp = single_instance['low_qp']\n mpeg_resolution = single_instance['low_resolution']\n original_images_dir = os.path.join(data_dir, video_name, 'src')\n\n # skip if result file already exists\n result_file_name = f\"{video_name}_mpeg_{mpeg_resolution}_{mpeg_qp}\"\n if single_instance['overwrite'] == False and os.path.exists(os.path.join(\"results\", result_file_name)):\n print(f\"Skipping {result_file_name}\")\n else:\n single_instance['video_name'] = f'results/{result_file_name}'\n single_instance['high_images_path'] = f'{original_images_dir}'\n single_instance['outfile'] = 'stats'\n single_instance['ground_truth'] = f'results/{video_name}_gt'\n\n subprocess.run(['python', '../play_video.py',\n yaml.dump(single_instance)])\n\n elif 
baseline == 'dds':\n # unpacking\n video_name = single_instance['video_name']\n original_images_dir = os.path.join(data_dir, video_name, 'src')\n low_qp = single_instance['low_qp']\n high_qp = single_instance['high_qp']\n low_res = single_instance['low_resolution']\n high_res = single_instance['high_resolution']\n rpn_enlarge_ratio = single_instance['rpn_enlarge_ratio']\n batch_size = single_instance['batch_size']\n prune_score = single_instance['prune_score']\n objfilter_iou = single_instance['objfilter_iou']\n size_obj = single_instance['size_obj']\n\n # skip if result file already exists\n # You could customize the way to serialize the parameters into filename by yourself\n result_file_name = (f\"{video_name}_dds_{low_res}_{high_res}_{low_qp}_{high_qp}_\"\n f\"{rpn_enlarge_ratio}_twosides_batch_{batch_size}_\"\n f\"{prune_score}_{objfilter_iou}_{size_obj}\")\n if single_instance['overwrite'] == False and os.path.exists(os.path.join(\"results\", result_file_name)):\n print(f\"Skipping {result_file_name}\")\n else:\n single_instance['video_name'] = f'results/{result_file_name}'\n single_instance['high_images_path'] = f'{original_images_dir}'\n single_instance['outfile'] = 'stats'\n single_instance['ground_truth'] = f'results/{video_name}_gt'\n single_instance['low_results_path'] = f'results/{video_name}_mpeg_{low_res}_{low_qp}'\n\n if single_instance[\"mode\"] == 'implementation':\n assert single_instance['hname'] != False, \"Must provide the server address for implementation, abort.\"\n # single_instance['hname'] = '127.0.0.1:5000'\n \n subprocess.run(['python', '../play_video.py',\n yaml.dump(single_instance)])\n \n\ndef parameter_sweeping(instances, new_instance, keys):\n \"\"\"recursive function for parameter sweeping\n\n Args:\n instances (dict): the instance in process\n new_instance (dict): recursive parameter\n keys (list): keys of the instance in process\n \"\"\"\n if keys == []: # base case\n execute_single(new_instance)\n else: # recursive step\n 
curr_key = keys[0]\n if (isinstance(instances[curr_key], list)): \n # need parameter sweeping\n for each_parameter in instances[curr_key]:\n # replace the list with a single value\n new_instance[curr_key] = each_parameter\n # proceed with the other parameters in keys\n parameter_sweeping(instances, new_instance, keys[1:])\n else: # no need for parameter sweeping\n new_instance[curr_key] = instances[curr_key]\n parameter_sweeping(instances, new_instance, keys[1:])\n\n\ndef execute_all(config_info):\n \"\"\"execute all instances based on user's config info and default config info\n\n Args:\n config_info (dict): configuration information from the yaml file\n \"\"\"\n all_instances = config_info['instances']\n default = config_info['default']\n\n for single_instance in all_instances:\n\n # propagate default config to current instance\n for key in default.keys():\n if key not in single_instance.keys():\n single_instance[key] = default[key]\n\n keys = list(single_instance.keys())\n new_instance = {} # initially empty\n parameter_sweeping(single_instance, new_instance, keys)\n\n\nif __name__ == \"__main__\":\n # load configuration information (only once)\n config_info = load_configuration()\n data_dir = config_info['data_dir']\n \n execute_all(config_info)" }, { "alpha_fraction": 0.6114385724067688, "alphanum_fraction": 0.6182056069374084, "avg_line_length": 36.85950469970703, "blob_id": "3a5eafba1cce89817857862daae87d4187f25ddd", "content_id": "d8c9b90a31034e8a2f27f530dbd9214c3e900d4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4581, "license_type": "no_license", "max_line_length": 83, "num_lines": 121, "path": "/play_video.py", "repo_name": "ShadowFient/dds-modified", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport logging\nimport sys\n\nfrom frontend.client import Client\nfrom dds_utils import (read_results_dict, evaluate, write_stats)\n\nfrom munch import *\nimport yaml\n\n\ndef 
main(args):\n logging.basicConfig(\n format=\"%(name)s -- %(levelname)s -- %(lineno)s -- %(message)s\",\n level=args.verbosity.upper())\n\n logger = logging.getLogger(\"dds\")\n logger.addHandler(logging.NullHandler())\n\n # Make simulation objects\n logger.info(f\"Starting server with high threshold of \"\n f\"{args.high_threshold} low threshold of \"\n f\"{args.low_threshold} tracker length of \"\n f\"{args.tracker_length}\")\n\n config = args\n\n # config = ServerConfig(\n # args.resolutions[0], args.resolutions[1], args.qp[0], args.qp[1],\n # args.batch_size, args.high_threshold, args.low_threshold,\n # args.max_object_size, args.min_object_size, args.tracker_length,\n # args.boundary, args.intersection_threshold, args.tracking_threshold,\n # args.suppression_threshold, args.simulate, args.rpn_enlarge_ratio,\n # args.prune_score, args.objfilter_iou, args.size_obj)\n\n server = None\n mode = None\n results, bw = None, None\n mode = \"implementation\"\n logger.warning(\n f\"Running DDS using a server client implementation with \"\n f\"server running on {args.hname} using video {args.hname}\")\n logger.info(\"Starting client\")\n client = Client(args.hname, config, server)\n results, bw = client.analyze_video(\n args.video_name, args.high_images_path, config,\n args.enforce_iframes)\n\n # Evaluation and writing results\n # Read Groundtruth results\n low, high = bw\n f1 = 0\n stats = (0, 0, 0)\n number_of_frames = len(\n [x for x in os.listdir(args.high_images_path) if \"png\" in x])\n if args.ground_truth:\n ground_truth_dict = read_results_dict(args.ground_truth)\n logger.info(\"Reading ground truth results complete\")\n tp, fp, fn, _, _, _, f1 = evaluate(\n number_of_frames - 1, results.regions_dict, ground_truth_dict,\n args.low_threshold, 0.5, 0.4, 0.4)\n stats = (tp, fp, fn)\n logger.info(f\"Got an f1 score of {f1} \"\n f\"for this experiment {mode} with \"\n f\"tp {stats[0]} fp {stats[1]} fn {stats[2]} \"\n f\"with total bandwidth {sum(bw)}\")\n else:\n 
logger.info(\"No groundtruth given skipping evalution\")\n\n # Write evaluation results to file\n write_stats(args.outfile, f\"{args.video_name}\", config, f1,\n stats, bw, number_of_frames, mode)\n\n\nif __name__ == \"__main__\":\n\n # load configuration dictonary from command line\n # use munch to provide class-like accessment to python dictionary\n args = munchify(yaml.load(sys.argv[1], Loader=yaml.SafeLoader))\n\n if not args.simulate and not args.hname and args.high_resolution != -1:\n if not args.high_images_path:\n print(\"Running DDS in emulation mode requires raw/high \"\n \"resolution images\")\n exit()\n\n if not re.match(\"DEBUG|INFO|WARNING|CRITICAL\", args.verbosity.upper()):\n print(\"Incorrect argument for verbosity.\"\n \"Verbosity can only be one of the following:\\n\"\n \"\\tdebug\\n\\tinfo\\n\\twarning\\n\\terror\")\n exit()\n\n if args.estimate_banwidth and not args.high_images_path:\n print(\"DDS needs location of high resolution images to \"\n \"calculate true bandwidth estimate\")\n exit()\n\n if not args.simulate and args.high_resolution != -1:\n if args.low_images_path:\n print(\"Discarding low images path\")\n args.low_images_path = None\n args.intersection_threshold = 1.0\n\n if args.method != \"dds\":\n assert args.high_resolution == -1, \"Only dds support two quality levels\"\n \n\n if args.high_resolution == -1:\n print(\"Only one resolution given, running MPEG emulation\")\n assert args.high_qp == -1, \"MPEG emulation only support one QP\"\n else:\n assert args.low_resolution <= args.high_resolution, \\\n f\"The resolution of low quality({args.low_resolution})\"\\\n f\"can't be larger than high quality({args.high_resolution})\"\n assert not(args.low_resolution == args.high_resolution and \n args.low_qp < args.high_qp),\\\n f\"Under the same resolution, the QP of low quality({args.low_qp})\"\\\n f\"can't be higher than the QP of high quality({args.high_qp})\"\n\n main(args)\n" }, { "alpha_fraction": 0.6430279016494751, 
"alphanum_fraction": 0.6430279016494751, "avg_line_length": 23.134614944458008, "blob_id": "72685f9c60166b798f42b55b32c7f62b3d7ad594", "content_id": "bed78b6b8ddb8fe4bfb97da74a66fccd16be7613", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1255, "license_type": "no_license", "max_line_length": 76, "num_lines": 52, "path": "/backend/backend.py", "repo_name": "ShadowFient/dds-modified", "src_encoding": "UTF-8", "text": "import os\nimport logging\nfrom flask import Flask, request, jsonify\nfrom dds_utils import ServerConfig\nimport json\nimport yaml\nfrom .server import Server\n\napp = Flask(__name__)\nserver = None\n\nfrom munch import *\n\n\[email protected](\"/\")\[email protected](\"/index\")\ndef index():\n # TODO: Add debugging information to the page if needed\n return \"Much to do!\"\n\n\[email protected](\"/init\", methods=[\"POST\"])\ndef initialize_server():\n args = yaml.load(request.data, Loader=yaml.SafeLoader)\n global server\n if not server:\n logging.basicConfig(\n format=\"%(name)s -- %(levelname)s -- %(lineno)s -- %(message)s\",\n level=\"INFO\")\n server = Server(args, args[\"nframes\"])\n os.makedirs(\"server_temp\", exist_ok=True)\n os.makedirs(\"server_temp-cropped\", exist_ok=True)\n return \"New Init\"\n else:\n server.reset_state(int(args[\"nframes\"]))\n return \"Reset\"\n\n\[email protected](\"/low\", methods=[\"POST\"])\ndef low_query():\n file_data = request.files[\"media\"]\n results = server.perform_low_query(file_data)\n\n return jsonify(results)\n\n\[email protected](\"/high\", methods=[\"POST\"])\ndef high_query():\n file_data = request.files[\"media\"]\n results = server.perform_high_query(file_data)\n\n return jsonify(results)\n" }, { "alpha_fraction": 0.6326419115066528, "alphanum_fraction": 0.6532023549079895, "avg_line_length": 28.23404312133789, "blob_id": "20398bb83444fb1b2b9bb3b0b977e486f777aa0f", "content_id": "9601cceacd18962b523b43fceeb08b1cafaaa727", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5496, "license_type": "no_license", "max_line_length": 204, "num_lines": 188, "path": "/workspace/examine.py", "repo_name": "ShadowFient/dds-modified", "src_encoding": "UTF-8", "text": "# USAGE\n# python examine.py video_name results_fold stats_file gt\nimport sys\nimport os\nimport yaml\n\nsys.path.append('../')\n\nvideo_name = sys.argv[1]\nresults_direc = sys.argv[2]\nstats_file = sys.argv[3]\ngt_mode = 'gt'\n# video_name = results_direc[11:]\n# print(video_name)\ndirs = os.listdir(results_direc)\nfrom dds_utils import *\n\n\ngt_confid_thresh_list = [0.3]\nmpeg_confid_thresh_list = [0.5]\nmax_area_thresh_gt_list = [0.04]\nmax_area_thresh_mpeg_list = max_area_thresh_gt_list\n\niou_thresh = 0.3\nrelevant_classes = 'vehicle'\n\ndef parse_stats(stats_path):\n\tfname_to_size = {}\n\t# return total size\n\twith open(stats_path) as f:\n\t\tfor cnt, line in enumerate(f):\n\t\t\tif cnt == 0:\n\t\t\t\tcontinue\n\t\t\tfields = line.split(',')\n\t\t\tfname = fields[0].split('/')[-1]\n\t\t\ttotal_size = int(float(fields[15])/1e3)\n\t\t\tfname_to_size[fname] = total_size\n\treturn fname_to_size\n\ndef parse(file_path, gt_flag):\n\tfid_to_bboxes = {}\n\tmax_fid = -1\n\tf = open(file_path)\n\tline = f.readline()\n\twhile line:\n\t\tfields = line.split(',')\n\t\tfid = int(fields[0])\n\t\tif fid > max_fid:\n\t\t\tmax_fid = fid\n\t\tx = float(fields[1])\n\t\ty = float(fields[2])\n\t\tw = float(fields[3])\n\t\th = float(fields[4])\n\t\tlabel = fields[5]\n\t\tconfid = float(fields[6])\n\t\tbbox = (x,y,w,h,label,confid)\n\t\tif fid not in fid_to_bboxes:\n\t\t\tfid_to_bboxes[fid] = []\n\t\tbboxes = fid_to_bboxes[fid]\n\t\tbboxes.append(bbox)\n\t\tfid_to_bboxes[fid] = bboxes\n\t\tline = f.readline()\n\tfor fid in range(max_fid+1):\n\t\tif fid not in fid_to_bboxes:\n\t\t\t\t\t\tfid_to_bboxes[fid] = []\n\treturn max_fid,fid_to_bboxes\n\ndef vote(max_fid, map_list, gt_confid_thresh, 
mpeg_confid_thresh, max_area_thresh_gt, max_area_thresh_mpeg):\n\tresult = {}\n\tfor fid in range(max_fid+1):\n\t\tbboxes_list = []\n\t\tfor i in range(len(map_list)):\n\t\t\tmap = map_list[i]\n\t\t\tbboxes = map[fid]\n\t\t\tbboxes = filter(bboxes, gt_flag = True, gt_confid_thresh=gt_confid_thresh, mpeg_confid_thresh=mpeg_confid_thresh, max_area_thresh_gt=max_area_thresh_gt, max_area_thresh_mpeg=max_area_thresh_mpeg)\n\t\t\tbboxes_list.append(bboxes)\n\t\tnew_boxes = []\n\t\tfor b1 in bboxes_list[0]:\n\t\t\tcount = 1\n\t\t\tfor i in range(len(map_list)-1):\n\t\t\t\tbboxes2 = bboxes_list[i+1]\n\t\t\t\tfor b2 in bboxes2:\n\t\t\t\t\tif iou(b1, b2) >= 0.5:\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\tbreak\n\t\t\tif count >= 2: new_boxes.append(b1)\n\t\tresult[fid] = new_boxes\n\treturn result\n\ndef filter(bboxes, gt_flag, gt_confid_thresh, mpeg_confid_thresh, max_area_thresh_gt, max_area_thresh_mpeg):\n\tif gt_flag:\n\t\tconfid_thresh = gt_confid_thresh\n\t\tmax_area_thresh = max_area_thresh_gt\n\n\telse:\n\t\tconfid_thresh = mpeg_confid_thresh\n\t\tmax_area_thresh = max_area_thresh_mpeg\n\n\tresult = []\n\tfor b in bboxes:\n\t\t(x,y,w,h,label,confid) = b\n\t\tif confid >= confid_thresh and w*h <= max_area_thresh and label in relevant_classes:\n\t\t\tresult.append(b)\n\treturn result\n\ndef iou(b1, b2):\n\t(x1,y1,w1,h1,label1,confid1) = b1\n\t(x2,y2,w2,h2,label2,confid2) = b2\n\tx3 = max(x1,x2)\n\ty3 = max(y1,y2)\n\tx4 = min(x1+w1,x2+w2)\n\ty4 = min(y1+h1,y2+h2)\n\tif x3>x4 or y3>y4:\n\t\treturn 0\n\telse:\n\t\toverlap = (x4-x3)*(y4-y3)\n\t\treturn overlap/(w1*h1+w2*h2-overlap)\n\ndef eval(max_fid, map_dd, map_gt, gt_confid_thresh, mpeg_confid_thresh, max_area_thresh_gt, max_area_thresh_mpeg):\n\ttp_list = []\n\tfp_list = []\n\tfn_list = []\n\tcount_list = []\n\tfor fid in range(max_fid+1):\n\t\tbboxes_dd = map_dd[fid]\n\t\tbboxes_gt = map_gt[fid]\n\t\tbboxes_dd = filter(bboxes_dd, gt_flag = False, gt_confid_thresh=gt_confid_thresh, mpeg_confid_thresh=mpeg_confid_thresh, 
max_area_thresh_gt=max_area_thresh_gt, max_area_thresh_mpeg=max_area_thresh_mpeg)\n\t\tbboxes_gt = filter(bboxes_gt, gt_flag = True, gt_confid_thresh=gt_confid_thresh, mpeg_confid_thresh=mpeg_confid_thresh, max_area_thresh_gt=max_area_thresh_gt, max_area_thresh_mpeg=max_area_thresh_mpeg)\n\t\ttp = 0\n\t\tfp = 0\n\t\tfn = 0\n\t\tcount = 0\n\t\tfor b_dd in bboxes_dd:\n\t\t\tfound = False\n\t\t\tfor b_gt in bboxes_gt:\n\t\t\t\tif iou(b_dd,b_gt) >= iou_thresh:\n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\t\t\tif found: tp += 1\n\t\t\telse: fp += 1\n\t\tfor b_gt in bboxes_gt:\n\t\t\tfound = False\n\t\t\tfor b_dd in bboxes_dd:\n\t\t\t\tif iou(b_dd,b_gt) >= iou_thresh:\n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\t\t\tif not found: fn += 1\n\t\t\telse: count += 1\n\t\ttp_list.append(tp)\n\t\tfp_list.append(fp)\n\t\tfn_list.append(fn)\n\t\tcount_list.append(count)\n\ttp = sum(tp_list)\n\tfp = sum(fp_list)\n\tfn = sum(fn_list)\n\tcount = sum(count_list)\n\treturn tp, fp, fn, count, round(tp/(tp+fp),3), round(tp/(tp+fn),3), round((2.0*tp/(2.0*tp+fp+fn)),3)\n\nfname_to_size = parse_stats(stats_file)\nMAX_FID = -1\nfid_to_bboxes_dict = {}\nfor file in dirs:\n\t# Dont parse req regions\n\tif \"req_regions\" in file or \"jpg\" in file or \"segment_size\" in file or os.path.isdir(os.path.join(results_direc,file)): continue\n\tmax_fid, fid_to_bboxes = parse(os.path.join(results_direc,file), gt_flag = (\"gt\" in file))\n\tif max_fid > MAX_FID:\n\t\tMAX_FID = max_fid\n\tfid_to_bboxes_dict[file] = fid_to_bboxes\n\nif gt_mode == 'gt':\n\tgt_key = video_name + \"_gt\"\n\tgt = fid_to_bboxes_dict[gt_key]\n\tmax_f1_distance = -1\n\tfor max_area_thresh_gt in max_area_thresh_gt_list:\n\t\tfor max_area_thresh_mpeg in max_area_thresh_mpeg_list:\n\t\t\tfor gt_confid_thresh in gt_confid_thresh_list:\n\t\t\t\tfor mpeg_confid_thresh in mpeg_confid_thresh_list:\n\t\t\t\t\tfor key in sorted(fid_to_bboxes_dict):\n\t\t\t\t\t\tif key not in fname_to_size:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif 'gt' in 
key:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tprint(key, fname_to_size[key], end = 'KB ')\n\n\t\t\t\t\t\ttp, fp, fn, count, pr, recall, f1 = eval(MAX_FID, fid_to_bboxes_dict[key], gt, gt_confid_thresh, mpeg_confid_thresh, max_area_thresh_gt, max_area_thresh_mpeg)\n\n\t\t\t\t\t\t# print(f1, tp, fp, fn, count)\n\t\t\t\t\t\tprint(f1)\n" }, { "alpha_fraction": 0.5790228247642517, "alphanum_fraction": 0.6002606153488159, "avg_line_length": 58.496124267578125, "blob_id": "4140ffd108f98a5087b7e2e004197ee36f8afad8", "content_id": "fa64dc182936b754ec3f650f81fc014c13b27b1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7675, "license_type": "no_license", "max_line_length": 424, "num_lines": 129, "path": "/README.md", "repo_name": "ShadowFient/dds-modified", "src_encoding": "UTF-8", "text": "# DDS\n\n## 1. Related resources\n\nPlease check [Kuntai Du's home page](https://kuntaidu.github.io/aboutme.html) for more DDS-related resources.\n\n\n## 2. Install Instructions\n\nTo run our code, please make sure that conda is installed. Then, under dds repo, run\n\n```conda env create -f conda_environment_configuration.yml```\n\nto install dds environment. Note that this installation assumes that you have GPU resources on your machine. If not, please edit ```tensorflow-gpu=1.14``` to ```tensorflow=1.14``` in ```conda_environment_configuration.yml```.\n\nNow run\n\n```conda activate dds```\n\nto activate dds environment, and \n\n```cd workspace```\n\nand run \n\n```wget people.cs.uchicago.edu/~kuntai/frozen_inference_graph.pb```\n\nto download the object detection model (FasterRCNN-ResNet101).\n\n## 3. Run our code\n\nUnder ```DDSrepo/workspace```, run\n\n```python entrance.py```\n\nto run DDS!\n\n## 4. 
Get performance numbers\n\nUnder ```DDSrepo/workspace```, run\n\n```python examine.py trafficcam_1 results stats```\n\nyou should see something like\n\n```\ntrafficcam_1_dds_0.8_0.8_36_26_0.0_twosides_batch_15_0.5_0.3_0.01 3474KB 0.901\ntrafficcam_1_mpeg_0.8_26 8652KB 0.904\ntrafficcam_1_mpeg_0.8_36 2369KB 0.876\n```\n\nThe number might vary by platform.\n\n## 5. Some details\n\nIf you are considering building your projects based on our codebase, here are some details.\n\n### 5.1 Run in implementation mode\n\nImplementation means we run DDS under real network environment through HTTP post. To do so, in ```DDSrepo```, run\n\n```FLASK_APP=backend/backend.py flask run --port=5001```\n\nand copy the ```frozen_inference_graph.pb``` to ```DDSrepo``` to help the server find the model.\n\nThen use another terminal, cd to ```DDSrepo/workspace```, and edit the mode to ```implementation``` and edit the hname to ```ip:5001``` (ip should be 127.0.0.1 if you run the server locally) to run DDS on implementation mode. You can also run other methods in implementation mode by changing the default value of mode to ```implementation```. \n\n\n### 5.2 Inside workspace folder\n\nInside workspace folder, we use a configuration file ```configuration.yml``` to control the configuration for both the client and the server. This file will be only loaded **once** inside the whole ```python entrance.py``` process. You can add new keys and values in this file. We even support caching, parameter sweeping, and some fancy functionalities in this file. Please read the comments inside this file to utilize it.\n\n\n## 6. Dataset\n\n### 6.1 Detection dataset\n\n#### 6.1.1 Traffic camera videos and dash camera videos.\n\nWe search some keywords through youtube in the anonymous mode of Chrome. The top-ranked search results, corresponding URLS are listed below. 
We filter out some of these videos.\n\n| Keyword | Source | Type | URL | Why we filter it out |\n| ---------------------- | ------- | ---------- | --------------------------------------------- | -------------------- |\n| | | | | |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=7HaJArMDKgI> | |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=kOMWAnxKq58> | |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=RTLwaQFtXbE> | night |\n| city drive around | youtube | trafficcam | <https://www.youtube.com/watch?v=g_4RT0We1F8> | nearly no object |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=Cw0d-nqSNE8> | |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=fkps18H3SXY> | |\n| city drive around | youtube | trafficcam | <https://www.youtube.com/watch?v=Ujyu8foke60> | night |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=7o5PYCeEo2I> | |\n| city drive around | youtube | | <https://www.youtube.com/watch?v=lTvYjERVAnY> | night |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=6tyFAtgy4JA> | |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=n1xkO0_lSU0> | night |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=LF22Ybb_pyQ> | |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=y1OCipyZefA> | |\n| city drive around | youtube | dashcam | <https://www.youtube.com/watch?v=2LXwr2bRNic> | |\n| | | | | |\n| | | | | |\n| highway traffic camera | youtube | trafficcam | <https://www.youtube.com/watch?v=MNn9qKG2UFI> | |\n| highway traffic camera | youtube | trafficcam | <https://www.youtube.com/watch?v=PJ5xXXcfuTc> | |\n| highway traffic camera | youtube | trafficcam | <https://www.youtube.com/watch?v=y3NOhpkoR-w> | |\n| highway traffic camera | youtube | dashcam | <https://www.youtube.com/watch?v=hxyhulJYz5I> | 
|\n| highway traffic camera | youtube | trafficcam | <https://www.youtube.com/watch?v=5_XSYlAfJZM> | |\n| highway traffic camera | youtube | trafficcam | <https://www.youtube.com/watch?v=b46xvHwxpcY> | low resolution |\n| highway traffic camera | youtube | | <https://www.youtube.com/watch?v=4koxy_7uqcg> | not a real video |\n| highway traffic camera | youtube | | <https://www.youtube.com/watch?v=jjlBnrzSGjc> | |\n| highway traffic camera | youtube | trafficcam | <https://www.youtube.com/watch?v=fxec0tHMkk4> | |\n| highway traffic camera | youtube | dashcam | <https://www.youtube.com/watch?v=jQcuhLqebPk> | |\n| highway traffic camera | youtube | | <https://www.youtube.com/watch?v=8XoTvbqsT68> | not a real video |\n| highway traffic camera | youtube | | <https://www.youtube.com/watch?v=6VsYw7NcQLo> | not a real video |\n| highway traffic camera | youtube | | <https://www.youtube.com/watch?v=HLiAJ0gW0kk> | need 18+ |\n| highway traffic camera | youtube | trafficcam | <https://www.youtube.com/watch?v=1EiC9bvVGnk> | |\n| highway traffic camera | youtube | trafficcam | <https://www.youtube.com/watch?v=WxgtahHmhiw> | |\n| highway traffic camera | youtube | trafficcam | <https://www.youtube.com/watch?v=PmrSOPMkfAo> | night |\n| highway traffic camera | youtube | | <https://www.youtube.com/watch?v=X7qGtl9lW2A> | not a real video |\n| highway traffic camera | youtube | | <https://www.youtube.com/watch?v=pcpL9rAhRVA> | night |\n| highway traffic camera | youtube | | <https://www.youtube.com/watch?v=w6gs10P2e1k> | not a real video |\n\nAfter downloading these videos, we get 7 traffic camera videos and 9 dash camera videos.\n\n#### 6.1.2 Drone videos\n\nWe obtain drone videos through a public dataset called [VisDrone](https://www.aiskyeye.com). We only use 13 videos in this dataset (there are too many videos inside...).\n\n#### 6.1.3 Face videos\n\nWe use clips from TBBT and Friends.\n" } ]
7
morenocl/esp-files
https://github.com/morenocl/esp-files
61567427d2497bb777f7c96b0fcdb2d3206c44c6
527ef5a1b5708faed4f9b3fe73673acd63350b48
a57f7a7e2e42206a5b85c864f1006c1d8df66e89
refs/heads/master
2022-11-21T08:33:53.562256
2020-07-10T20:10:15
2020-07-10T20:10:15
281,532,931
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.55759596824646, "alphanum_fraction": 0.5676127076148987, "avg_line_length": 25.04347801208496, "blob_id": "8cad17a3dadf3a8c9b6f5198541555b871be9993", "content_id": "fb455adc7fee690a27de890adc3ad1b17f4d1272", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 599, "license_type": "no_license", "max_line_length": 69, "num_lines": 23, "path": "/ftp-server/ftpserver.py", "repo_name": "morenocl/esp-files", "src_encoding": "UTF-8", "text": "import usocket\nfrom connection import Connection\nimport network\nimport gc\n\n\nclass Server:\n\n def __init__(self):\n wlan = network.WLAN()\n self.ip = wlan.ifconfig()[0]\n self.s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)\n self.s.bind((self.ip, 21))\n\n def serve(self):\n self.s.listen(1)\n print('Running: %s:%d' % (self.ip, 21))\n while True:\n cliente, addCliente = self.s.accept()\n conexion = Connection(cliente)\n while conexion.is_connected:\n conexion.handle()\n gc.collect()\n" }, { "alpha_fraction": 0.5361105799674988, "alphanum_fraction": 0.5464798808097839, "avg_line_length": 32.932098388671875, "blob_id": "c5250fc7d75a35b4468cb0323304fc628df008f7", "content_id": "56d4c6fcdb1a7b81639f5d93c15f18bd31d0cf1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5500, "license_type": "no_license", "max_line_length": 76, "num_lines": 162, "path": "/ftp-server/connection.py", "repo_name": "morenocl/esp-files", "src_encoding": "UTF-8", "text": "import usocket\nimport uos\nfrom ubinascii import b2a_base64, a2b_base64\n\n\n# Constantes:\nDELIM = b'/'\nEOL = b'\\r\\n'\nCODE_OK = 0\nBAD_EOL = 100\nBAD_REQUEST = 101\nINTERNAL_ERROR = 199\nINVALID_COMMAND = 200\nINVALID_ARGUMENTS = 201\nFILE_NOT_FOUND = 202\nBAD_OFFSET = 203\n\nerror_messages = {\n CODE_OK: b'OK',\n # 1xx: Errores fatales (no se pueden atender más pedidos)\n BAD_EOL: b'BAD EOL',\n BAD_REQUEST: b'BAD REQUEST',\n 
INTERNAL_ERROR: b'INTERNAL SERVER ERROR',\n # 2xx: Errores no fatales (no se pudo atender este pedido)\n INVALID_COMMAND: b'NO SUCH COMMAND',\n INVALID_ARGUMENTS: b'INVALID ARGUMENTS FOR COMMAND',\n FILE_NOT_FOUND: b'FILE NOT FOUND',\n BAD_OFFSET: b'OFFSET EXCEEDS FILE SIZE',\n}\n\nvalid_commands = [\n b'get_metadata',\n b'get_slice',\n b'get_file_listing',\n b'quit'\n]\n\n\nclass Connection(object):\n \"\"\"\n Conexión punto a punto entre el servidor y un cliente.\n Se encarga de satisfacer los pedidos del cliente hasta\n que termina la conexión.\n \"\"\"\n\n def __init__(self, socket):\n self.socket = socket\n self.directory = b'/data'\n self.package = b''\n self.is_connected = True\n\n def handle(self):\n self.package = self.package + self.socket.recv(1024)\n print('Request: %s' % str(self.package))\n while EOL in self.package:\n # En un mismo paquete puede recibir multiples commandos.\n request, self.package = self.package.split(EOL, 1)\n self.process(request)\n\n def process(self, request):\n if b'\\n' in request:\n response = self.make_response(BAD_EOL)\n self.socket.send(response)\n else:\n s = request.split(b' ')\n n = len(s)\n if s[0] not in valid_commands:\n response = self.make_response(INVALID_COMMAND)\n self.socket.send(response)\n elif n == 1:\n if s[0] == b'get_file_listing':\n print('get_file_listing')\n self.get_file_listing()\n elif s[0] == b'quit':\n print('quit')\n self.quit()\n else:\n response = self.make_response(INVALID_ARGUMENTS)\n self.socket.send(response)\n elif n == 2:\n if s[0] == b'get_metadata':\n print('get_metadata')\n self.get_metadata(s[1])\n else:\n response = self.make_response(INVALID_ARGUMENTS)\n self.socket.send(response)\n elif n == 4:\n if s[0] == b'get_slice':\n print('get_slice')\n self.get_slice(s[1], s[2], s[3])\n else:\n response = self.make_response(INVALID_ARGUMENTS)\n self.socket.send(response)\n\n def get_file_listing(self):\n try:\n list = b''\n items = uos.listdir(self.directory)\n for nombre in items:\n # 46 is 
'.' in ASCII, ignore hidden files.\n if nombre[0] != 46:\n list = list + nombre + EOL\n list = self.make_response(CODE_OK, list + EOL)\n except:\n print('Ocurrio una excepcion en get_file_listing')\n list = self.make_response(INTERNAL_ERROR)\n self.socket.send(list)\n\n def get_metadata(self, filename):\n try:\n path = DELIM.join([self.directory, filename])\n sizefile = uos.stat(path)[6]\n list = self.make_response(CODE_OK, str(sizefile).encode() + EOL)\n except ValueError:\n print('Ocurrio una excepcion en get_slice: ValueError')\n list = self.make_response(INVALID_ARGUMENTS)\n except OSError:\n print('Ocurrio una excepcion en get_slice: OSError')\n list = self.make_response(FILE_NOT_FOUND)\n except:\n print('Ocurrio una excepcion en get_slice')\n list = self.make_response(INTERNAL_ERROR)\n finally:\n self.socket.send(list)\n\n def get_slice(self, filename, OFFSET, SIZE):\n try:\n path = DELIM.join([self.directory, filename])\n offset, size = int(OFFSET), int(SIZE)\n size_file = uos.stat(path)[6]\n if(size_file < size + offset):\n response = self.make_response(BAD_OFFSET)\n else:\n file = open(path, 'rb')\n file.seek(offset)\n data = file.read(size)\n encoded = b2a_base64(data) + EOL\n response = self.make_response(CODE_OK, encoded)\n except ValueError:\n print('Ocurrio una excepcion en get_slice: ValueError')\n response = self.make_response(INVALID_ARGUMENTS)\n except OSError:\n print('Ocurrio una excepcion en get_slice: OSError')\n response = self.make_response(FILE_NOT_FOUND)\n except:\n print('Ocurrio una excepcion en get_slice')\n response = self.make_response(INTERNAL_ERROR)\n finally:\n self.socket.sendall(response)\n\n def quit(self):\n response = self.make_response(CODE_OK)\n self.socket.send(response)\n self.socket.close()\n self.is_connected = False\n\n def exit(self):\n self.socket.close()\n self.is_connected = False\n\n def make_response(self, code, msg=\"\"):\n return str(code).encode() + b' ' + error_messages[code] + EOL + msg\n" }, { 
"alpha_fraction": 0.5625531673431396, "alphanum_fraction": 0.5761702060699463, "avg_line_length": 22.989795684814453, "blob_id": "a8c943a4b48505f45dfce0d14057e7fec9566f13", "content_id": "82f5ad657e571104771e4c52ebc7062e58cf9b3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2350, "license_type": "no_license", "max_line_length": 72, "num_lines": 98, "path": "/myhttp.py", "repo_name": "morenocl/esp-files", "src_encoding": "UTF-8", "text": "from machine import Pin\nimport ujson\nimport usocket as socket\n\n\nOK = b\"\"\"\\\nHTTP/1.0 200 OK\n\n\"\"\"\n\nCONTENT = b\"\"\"\\\nHello #%d from MicroPython!\n\"\"\"\n\nSTATUS_LED = b\"\"\"\\\nStatus led: #%d\n\"\"\"\n\nNOT_METHOD = b\"\"\"\\\nMethod not valid. Use GET and POST\n\"\"\"\n\nNO_PATH = b\"\"\"\\\nPath not valid. Use a valid path.\n\"\"\"\n\ndef parse_header(header):\n d = {}\n header = header.split('\\r\\n')\n for line in header:\n if ':' in line:\n key, value = line.split(':',1)\n d[key] = value\n return d\n\ndef get_request(client_stream):\n req = client_stream.recv(1024)\n req = req.decode('ascii')\n return req\n\ndef get(path, header):\n if path == '/':\n return OK + CONTENT\n elif path == '/led':\n led = Pin(2, Pin.OUT)\n return OK + STATUS_LED % led.value()\n else:\n return OK + NO_PATH\n\ndef post(path, header, client_stream):\n if header['Content-Length'] and int(header['Content-Length']):\n body = ujson.loads(client_stream.recv(1024))\n print(body)\n return OK + CONTENT\n\ndef main(micropython_optimize=False):\n s = socket.socket()\n\n ai = socket.getaddrinfo(\"0.0.0.0\", 8080)\n print(\"Bind address info:\", ai)\n addr = ai[0][-1]\n\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(addr)\n s.listen(5)\n print(\"Listening, connect your browser to http://<this_host>:8080/\")\n\n while True:\n client_stream, client_addr = s.accept()\n print(\"Client address:\", client_addr)\n print(\"Client socket:\", client_stream)\n\n 
print(\"Request:\")\n # get the firsth line and header of the request\n request = get_request(client_stream)\n initial_line, header = request.split('\\r\\n', 1)\n method, path, http_version = initial_line.split(' ')\n print(method, path, http_version)\n\n header = parse_header(header)\n print(header)\n # Select the method to exec\n if method == 'GET':\n print('Used GET method')\n msj = get(path, header)\n elif method == 'POST':\n print('Used POST method')\n msj = post(path, header, client_stream)\n else:\n print('Used another method: %s' % method)\n msj = OK + NOT_METHOD\n\n # Send the response and close the socket.\n client_stream.write(msj)\n client_stream.close()\n\n\nmain()" }, { "alpha_fraction": 0.7259461879730225, "alphanum_fraction": 0.7569539546966553, "avg_line_length": 48.84090805053711, "blob_id": "a2faa07861d6fa0ff24374fe0961bdfdad24e500", "content_id": "3abd8f2c3b078d0065a861a56299aa17bfd3da5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2198, "license_type": "no_license", "max_line_length": 254, "num_lines": 44, "path": "/README.md", "repo_name": "morenocl/esp-files", "src_encoding": "UTF-8", "text": "## Modulos y scripts para ejecutar en esp8266 con Micropython\n\n\n[Micropython](https://micropython.org/) es una eficiente implementacion de Python3 que incluye uno pequeño subconjunto de la libreria estandar de Python y esta optimizado para ejecutarse en microcontroladores.\n\n\nEl [ESP8266](https://es.wikipedia.org/wiki/ESP8266) es un chip de bajo costo Wi-Fi con un stack TCP/IP completo y un microcontrolador, fabricado por Espressif. \nPosee un Xtensa LX106 a 80 MHz, RAM de instrucción de 64 KiB, RAM de datos de 96 KiB, flash de 4MiB, IEEE 802.11 b/g/n Wi-Fi, y mas.\n\n\nContiene:\n+ myhttp: \n - Implementacion ultrapequeña del protocolo http. Este unicamnete implementa metodos GET y POST, con los codigos de respuesta 200 y 404. 
\n - Ejecutar `import myhttp` y ya esta corriendo un servidor!\n+ ftp-server:\n - Pequeño servidor ftp. Permite listar archivos, obtener su tamaño y descarga total o parcial.\n - Primero `from ftpserver import Server` luego `ftp = Server()` y finalmente `ftp.serve()` y ya tiene su server ftp ejecutando!\n + wlan:\n - Una simple funcion para conectar al wifi.\n\n\n\n-------------\n\n\n## Modules and scripts to run on esp8266 with Micropython\n\n\n[Micropython](https://micropython.org/) is a lean and efficient implementation of the Python 3 programming language that includes a small subset of the Python standard library and is optimised to run on microcontrollers and in constrained environments. \n\n\nThe [ESP8266](https://es.wikipedia.org/wiki/ESP8266) is a low-cost Wi-Fi microchip, with a full TCP/IP stack and microcontroller capability, produced by Espressif. \nProcessor Xtensa LX106 @ 80 MHz, 64 KiB instruction RAM, 96 KiB data RAM, 4MiB flash memory, IEEE 802.11 b/g/n Wi-Fi, and more.\n\n\nContent:\n+ myhttp: \n - Ultra small implementation of http protocol. This only implement GET and POST methods, with 200 and 404 responde codes. \n - Run `import myhttp` and a server is already running!\n+ ftp-server:\n - Little server ftp. Allows listing file, obtain size and full or partial download.\n - First `from ftpserver import Server` then `ftp = Server()` finally `ftp.serve()` and a ftp server is already running!\n+ wlan:\n - A simple function to connect to wifi.\n" } ]
4
LoginovaEl/HelloWorld
https://github.com/LoginovaEl/HelloWorld
b4090e57dd7537b423a09b515e2d877b470fbbc0
44bd8a14b231f1eef99607b399e4259cf8157374
290c07b6fa35637c24ee100cc538a5045999234c
refs/heads/main
2023-08-22T18:34:57.572215
2021-09-20T11:55:35
2021-09-20T11:55:35
398,516,192
0
0
null
2021-08-21T09:21:10
2021-08-21T09:21:13
2021-08-21T09:37:52
null
[ { "alpha_fraction": 0.695652186870575, "alphanum_fraction": 0.695652186870575, "avg_line_length": 16.5, "blob_id": "d5dd9c2f66949e2a7a223dc204fbb5f19bf274ee", "content_id": "0a0aa86be6bc8433e309e735f06f91f57e01c751", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 75, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": "/file1.py", "repo_name": "LoginovaEl/HelloWorld", "src_encoding": "UTF-8", "text": "print('Первый')\nprint('hi people')\nprint('free commit')\nprint('four')" }, { "alpha_fraction": 0.8372092843055725, "alphanum_fraction": 0.8372092843055725, "avg_line_length": 27.66666603088379, "blob_id": "15e1900d44c63e24ed18749a74c075e3340ffd62", "content_id": "09e03dbb515d9207a6214447c3c162d0ac576db0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 142, "license_type": "no_license", "max_line_length": 56, "num_lines": 3, "path": "/README.md", "repo_name": "LoginovaEl/HelloWorld", "src_encoding": "UTF-8", "text": "# HelloWorld\nПервый, пробный\nЭтот файл создан для ознакомления с возможностями githab\n" } ]
2
Gussiny/Practicas_BDAvanzadas
https://github.com/Gussiny/Practicas_BDAvanzadas
2ca1b8e2efc22642ea59b0333de78b410fd491fc
5b803960bde9370300545bc469013297ff4114dc
ad570bc81505b57995a6750214ff364bcc89381b
refs/heads/master
2022-04-25T10:00:17.710192
2020-04-26T07:08:31
2020-04-26T07:08:31
245,281,151
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8169013857841492, "alphanum_fraction": 0.8169013857841492, "avg_line_length": 34.5, "blob_id": "ac08b54638e8a4edb579ea39c622cf7e201241bb", "content_id": "6e2c3ca9d18466ffa2ec690861e429d16cdfdd45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "no_license", "max_line_length": 46, "num_lines": 2, "path": "/README.md", "repo_name": "Gussiny/Practicas_BDAvanzadas", "src_encoding": "UTF-8", "text": "# Practicas_BDAvanzadas\nAqui van todas las practicas de bases de datos\n" }, { "alpha_fraction": 0.572360634803772, "alphanum_fraction": 0.5800711512565613, "avg_line_length": 20.291139602661133, "blob_id": "d896f17acdc8784c0d6f0599d032db380ef88e89", "content_id": "f5db803c7502d388c068fe3c7e788ea344c0f9be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1686, "license_type": "no_license", "max_line_length": 227, "num_lines": 79, "path": "/sriptsPythonMongo/readSQLcreateMongo.py", "repo_name": "Gussiny/Practicas_BDAvanzadas", "src_encoding": "UTF-8", "text": "import sqlite3 \nimport json\nfrom datetime import datetime\nfrom pymongo import MongoClient\n\n\nclient = MongoClient('mongodb+srv://ShyGuy118:[email protected]/test?retryWrites=true&w=majority')\ndb = client\n\nconnection = sqlite3.connect(\"reta60Tournaments.db\") \ncrsr = connection.cursor() \n\ndef printTables(documents):\n for row in documents:\n for field in row:\n print(field)\n \ndef insertUser(db, _id, fname, lname, gender, userType, birthday, joiningDate):\n db.users.insert({ \"_id\": _id,\"fname\": fname, \"lname\": lname, \"gender\": gender, \"userType\": userType, \"birthdate\": datetime.strptime(birthday, '%Y-%m-%d'), \"joiningDate\": datetime.strptime(joiningDate, '%Y-%m-%d %H:%M:%S')})\n\n\n\ndef readAndInsertUsers(db):\n sql_command=\"SELECT * FROM USER\"\n crsr.execute(sql_command)\n documents = crsr.fetchall() \n #print(documents)\n 
fields = list(map(lambda x: x[0], crsr.description))\n for row in documents:\n document={}\n i=0\n for field in row:\n if field=='birthDate':\n field=datetime.strptime(field, '%Y-%m-%d')\n elif field=='joiningDate':\n field=datetime.strptime(field, '%Y-%m-%d %H:%M:%S')\n document[fields[i]]=field\n i+=1\n db.users.insert(document)\n print(document)\n print(\"1 new user inserted\")\n \n\n \n \n\n \n\n \n \n \n \n\n \n \n \n\n \n\n \n\n\n\n\ndef main():\n db = client.reta60Mongo\n readAndInsertUsers(db)\n\n \n \n \n #cierra cluster mongo\n client.close() \n #cierra y guarda base de datos local\n connection.commit() \n connection.close() \n\n\nmain()\n\n\n\n\n" }, { "alpha_fraction": 0.6511365175247192, "alphanum_fraction": 0.6759847402572632, "avg_line_length": 32.54976272583008, "blob_id": "9897680b5b01f18bf15627fd3b751678c618df85", "content_id": "188905f890d2c78b35ef4ee1153a98401f546135", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7083, "license_type": "no_license", "max_line_length": 150, "num_lines": 211, "path": "/sriptsPythonMongo/createSQL.py", "repo_name": "Gussiny/Practicas_BDAvanzadas", "src_encoding": "UTF-8", "text": "import sqlite3 \nconnection = sqlite3.connect(\"reta60Tournaments.db\") \ncrsr = connection.cursor() \n\n\ncrsr.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='USER' ''')\n\n\ndef printTables(docums):\n for row in docums:\n print(row)\n\nif crsr.fetchone()[0]==1 : \n\tprint('Table already exists.')\nelse :\n sql_command = \"\"\"CREATE TABLE USER( \n userId INTEGER PRIMARY KEY AUTOINCREMENT, \n fname VARCHAR(20), \n lname VARCHAR(30), \n gender CHAR(1), \n userType VARCHAR(30),\n birthDate DATE,\n joiningDate DATE DEFAULT(datetime('now','localtime')) );\"\"\"\n\n crsr.execute(sql_command)\n\n print(\"Table created\")\n \n sql_command = \"\"\"INSERT INTO user(fname, lname, gender, userType, birthDate) VALUES (\"Fabian\", \"Ramirez\", \"M\", 
\"player\", \"1999-10-04\");\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO user(fname, lname, gender, userType, birthDate) VALUES (\"Gustavo\", \"Flores\", \"M\", \"player\", \"1998-01-14\");\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO user(fname, lname, gender, userType, birthDate) VALUES (\"Erick\", \"Mendoza\", \"M\", \"player\", \"1999-06-23\");\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO user(fname, lname, gender, userType, birthDate) VALUES (\"Oscar\", \"Del Bull\", \"M\", \"player\", \"1999-07-26\");\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO user(fname, lname, gender, userType, birthDate) VALUES (\"Sergio\", \"Sgioma\", \"M\", \"admin\", \"1987-05-19\");\"\"\"\n crsr.execute(sql_command) \n\n sql_command = \"\"\"INSERT INTO user(fname, lname, gender, userType, birthDate) VALUES (\"Micheal\", \"Douglas\", \"M\", \"player\", \"1992-04-21\");\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO user(fname, lname, gender, userType, birthDate) VALUES (\"Jerry\", \"Seinfield\", \"M\", \"player\", \"1993-01-16\");\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO user(fname, lname, gender, userType, birthDate) VALUES (\"George\", \"Coztanza\", \"M\", \"player\", \"1995-05-11\");\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO user(fname, lname, gender, userType, birthDate) VALUES (\"Lucas\", \"Donis\", \"M\", \"player\", \"1991-05-26\");\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO user(fname, lname, gender, userType, birthDate) VALUES (\"Pete\", \"Sampras\", \"M\", \"player\", \"1996-05-21\");\"\"\"\n crsr.execute(sql_command) \n\n\n\n\nsql_command=\"SELECT * FROM USER\"\ncrsr.execute(sql_command)\nans = crsr.fetchall() \nprintTables(ans)\n\nsql_command=\"PRAGMA foreign_keys = ON;\"\ncrsr.execute(sql_command)\n\n\ncrsr.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' 
AND name='TOURNAMENT' ''')\nif crsr.fetchone()[0]==1 : \n\tprint('Table already exists TOURNAMENT.')\nelse :\n print(\"Table TOURNAMENT MISS\")\n sql_command = \"\"\"CREATE TABLE TOURNAMENT( \n tournamentId INTEGER PRIMARY KEY AUTOINCREMENT, \n tName VARCHAR(20), \n tMode CHAR(1), \n tGender CHAR(1),\n startDate DATE,\n userId INTEGER NOT NULL,\n tCreation DATE DEFAULT(datetime('now','localtime')),\n FOREIGN KEY(userId) REFERENCES user(userId) );\"\"\"\n\n crsr.execute(sql_command)\n\n print(\"Table TOURNAMENT CREATED\")\n sql_command = \"\"\"INSERT INTO TOURNAMENT(tName, tMode, tGender, startDate, userId) VALUES (\"Abierto Tec 2020\", \"S\", \"M\", \"2020-05-22\", 5);\"\"\"\n crsr.execute(sql_command) \n\n\n\n\n\nsql_command=\"SELECT * FROM TOURNAMENT\"\ncrsr.execute(sql_command)\nans = crsr.fetchall() \nprint(ans)\n\n\n\n\n\n\n\nsql_command=\"SELECT * FROM TOURNAMENT NATURAL JOIN USER\"\ncrsr.execute(sql_command)\nans = crsr.fetchall() \nprintTables(ans)\n\n\n\n\n\ncrsr.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='REGISTRATIONS' ''')\nif crsr.fetchone()[0]==1 : \n\tprint('Table already exists REGISTRATIONS.')\nelse :\n print(\"Table REGISTRATIONS MISS\")\n sql_command = \"\"\"CREATE TABLE REGISTRATIONS( \n registrationId INTEGER PRIMARY KEY AUTOINCREMENT, \n userId INTEGER NOT NULL, \n tournamentId INTEGER NOT NULL,\n joiningDate DATE DEFAULT(datetime('now','localtime')),\n status VARCHAR(20) DEFAULT \"regular\",\n FOREIGN KEY(userId) REFERENCES user(userId),\n FOREIGN KEY(tournamentId) REFERENCES TOURNAMENT(tournamentId) );\"\"\"\n crsr.execute(sql_command)\n print(\"Table REGISTRATIONS created\")\n\n sql_command = \"\"\"INSERT INTO REGISTRATIONS(userId, tournamentId) VALUES (1, 1);\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO REGISTRATIONS(userId, tournamentId) VALUES (2, 1);\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO REGISTRATIONS(userId, tournamentId) VALUES (3, 1);\"\"\"\n 
crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO REGISTRATIONS(userId, tournamentId) VALUES (4, 1);\"\"\"\n crsr.execute(sql_command) \n\n\n\nsql_command=\"SELECT tName, REGISTRATIONS.userId, USER.fName, status FROM TOURNAMENT JOIN REGISTRATIONS JOIN USER ON REGISTRATIONS.userId=USER.userId\"\ncrsr.execute(sql_command)\nans = crsr.fetchall() \nprintTables(ans)\n\n\ncrsr.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='MATCH' ''')\nif crsr.fetchone()[0]==1 : \n\tprint('Table already exists MATCH.')\nelse :\n print(\"Table MATCH MISS\")\n sql_command = \"\"\"CREATE TABLE MATCH( \n matchId INTEGER PRIMARY KEY AUTOINCREMENT, \n userId INTEGER NOT NULL, \n user2Id INTEGER NOT NULL,\n tournamentId INTEGER NOT NULL,\n creationDate DATE DEFAULT(datetime('now','localtime')),\n matchDate DATETIME,\n status VARCHAR(20) DEFAULT \"regular\",\n round INTEGER NOT NULL,\n winnerId INTEGER NOT NULL DEFAULT \"\",\n FOREIGN KEY(winnerId) REFERENCES user(userId),\n CONSTRAINT \"player1\" FOREIGN KEY(userId) REFERENCES user(userId),\n CONSTRAINT \"player2\" FOREIGN KEY(user2Id) REFERENCES user(userId),\n FOREIGN KEY(tournamentId) REFERENCES TOURNAMENT(tournamentId) );\"\"\"\n \n crsr.execute(sql_command)\n print(\"Table MATCH created\")\n\n sql_command = \"\"\"INSERT INTO MATCH(matchId, userId, user2Id, tournamentId, matchDate, round) VALUES (101, 1, 2, 1, '2020-05-23 14:00:00', 1);\"\"\"\n crsr.execute(sql_command) \n\n\n\n\n\ncrsr.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='SETS' ''')\nif crsr.fetchone()[0]==1 : \n\tprint('Table already exists SETS.')\nelse :\n print(\"Table SETS MISS\")\n\n sql_command = \"\"\"CREATE TABLE SETS( \n setId INTEGER PRIMARY KEY AUTOINCREMENT, \n user1Points INTEGER NOT NULL DEFAULT 0, \n user2Points INTEGER NOT NULL DEFAULT 0,\n numSet INTEGER NOT NULL,\n matchId INTEGER NOT NULL,\n FOREIGN KEY(matchId) REFERENCES MATCH(matchId));\"\"\"\n \n crsr.execute(sql_command)\n 
print(\"Table SETS created\")\n\n\n sql_command = \"\"\"INSERT INTO SETS(user1Points, user2Points, numSet, matchId) VALUES (6, 4, 1, 101);\"\"\"\n crsr.execute(sql_command) \n sql_command = \"\"\"INSERT INTO SETS(user1Points, user2Points, numSet, matchId) VALUES (6, 0, 2, 101);\"\"\"\n crsr.execute(sql_command) \n\n\n\n \nsql_command=\"SELECT * FROM SETS\"\ncrsr.execute(sql_command)\nans = crsr.fetchall() \nprintTables(ans)\n\n\n\n\n\n\n\n\n\nconnection.commit() \nconnection.close() \n\n\n\n" }, { "alpha_fraction": 0.543138861656189, "alphanum_fraction": 0.5702547430992126, "avg_line_length": 26.636363983154297, "blob_id": "10e9a4e8ade508837058b0e6ee9f8a02830ed8f9", "content_id": "33e149b4f555a8342f3e936f7ff39f0a94247b51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2434, "license_type": "no_license", "max_line_length": 149, "num_lines": 88, "path": "/sriptsPythonMongo/testConnection.py", "repo_name": "Gussiny/Practicas_BDAvanzadas", "src_encoding": "UTF-8", "text": "\nfrom pymongo import MongoClient\n\nclient = MongoClient('mongodb+srv://ShyGuy118:[email protected]/test?retryWrites=true&w=majority')\ndb = client.sample_mflix\n\n\n\n\n\n\n\ndef printDocs(res):\n i=1\n for doc in res:\n print(i,'-> title: '+str(doc['title'])+' year: '+str(doc['year'])+' rating: '+str(doc['imdb']))\n i+=1\n\n\ndef searchBy(yearInp, byRating, top):\n if(byRating):\n print('Best '+str(top)+' movies from '+str(yearInp))\n moviesBy = db.movies.find( {\"year\":yearInp},{ \"id\" : 1, \"title\": 1, 'imdb.rating':1, 'directors':1, 'year':1} ).sort('imdb.rating',-1)[0:top]\n elif(byRating is None):\n print(str(top)+' movies from '+str(yearInp))\n moviesBy = db.movies.find( {\"year\":yearInp},{ \"id\" : 1, \"title\": 1, 'imdb.rating':1, 'directors':1, 'year':1} )[0:top]\n else:\n print('Worst '+str(top)+' movies from '+str(yearInp))\n moviesBy = db.movies.find( {\"year\":yearInp},{ \"id\" : 1, \"title\": 1, 'imdb.rating':1, 
'directors':1, 'year':1} ).sort('imdb.rating',1)[0:top]\n \n printDocs(moviesBy)\n\n\ndef topAll(byRating, top):\n if(byRating):\n print(str(top)+ ' Best movies of all time')\n moviesBy = db.movies.find( { 'year': { '$nin': [''] } ,'imdb.rating': { '$nin': [''] }}).sort('imdb.rating',-1)[0:top]\n elif(byRating is None):\n print(str(top)+ ' movies')\n moviesBy = db.movies.find()[0:top]\n else:\n print(str(top)+ ' Worst movies of all time')\n moviesBy = db.movies.find( { 'year': { '$nin': [''] } ,'imdb.rating': { '$nin': [''] }}).sort('imdb.rating',1)[0:top]\n\n printDocs(moviesBy)\n\ndef searchByTitle(title):\n print('Resulst for: '+str(title))\n\n moviesBy = db.movies.find({'title': {'$regex' : \".*\"+str(title)+\".*\"} }).sort('year',-1)[0:10]\n\n printDocs(moviesBy)\n\ndef searchByDirector(director):\n print('Resulst for: '+str(director))\n\n moviesBy = db.movies.find({'directors': {'$regex' : \".*\"+str(director)+\".*\"} })[0:50]\n\n printDocs(moviesBy)\n\n\ndef searchByDirAndTitle(word):\n print('Resulst for: '+str(word))\n moviesBy = db.movies.find({'directors': {'$regex' : \".*\"+str(word)+\".*\"} , 'title': {'$regex' : \".*\"+str(word)+\".*\"} })[0:50]\n printDocs(moviesBy)\n\n\n\n\ndef main():\n #searchBy(1999,True,10)\n\n #searchBy(2002,False,5)\n\n #searchBy(2002,None,10)\n\n #searchBy(2010,None,100)\n \n\n\n #topAll(True, 20)\n\n searchByTitle('Matrix')\n \n \n client.close()\n \n\nmain()\n\n" }, { "alpha_fraction": 0.6240549683570862, "alphanum_fraction": 0.7120274901390076, "avg_line_length": 39.44444274902344, "blob_id": "4df1677bd9f570b4df13b35e877002a488765fb4", "content_id": "8dea8015c67e2b352044f617db0e57a632ce6ff3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1455, "license_type": "no_license", "max_line_length": 271, "num_lines": 36, "path": "/sriptsPythonMongo/classCommands.py", "repo_name": "Gussiny/Practicas_BDAvanzadas", "src_encoding": "UTF-8", "text": 
"db.listingsAndReviews.find( {name: {$regex : \".*a.*\"}}, { _id : 0, name:1}).explain(\"executionStats\").executionStats}\n\n\ndb.listingsAndReviews.aggregate([\n{ $match: {\"address.country\": \n{ $eq: 'Turkey'}}},\n { $group: {\n_id: null, avgP: { $avg: '$price'}}}\n])\n\n\n\ndb.listingsAndReviews.aggregate([\n { $group: {\n_id: \"$address.country\", sumRooms: { $sum: '$bedrooms'}}}])\n\n\ndb.listingsAndReviews.aggregate([\n { $group: {\n_id: null, avgP: { $avg: '$price'}}}\n])\n\n\ndb.listingsAndReviews.aggregate([\n { $group: {\n_id: \"$address.country\", avgP: { $avg: '$price'}}}])\n\n\nmongodump --host Cluster0-shard-0/cluster0-shard-00-00-lqqq1.mongodb.net:27017,cluster0-shard-00-01-lqqq1.mongodb.net:27017,cluster0-shard-00-02-lqqq1.mongodb.net:27017 --ssl --username ShyGuy118 --password fg1511973 --authenticationDatabase ShyGuy118 --db sample_airbnb\n\n\nmongodump --host Cluster0-shard-0/cluster0-shard-00-00-lqqq1.mongodb.net:27017,cluster0-shard-00-01-lqqq1.mongodb.net:27017,cluster0-shard-00-02-lqqq1.mongodb.net:27017 --ssl --username ShyGuy118 --password fg1511973 --authenticationDatabase admin --db sample_airbnb\n\ndocker run --rm -v $(pwd):/workdir/ -w /workdir/ mongo:4.0 mongodump -h server -d $sample_airbnb --out /workdir/dump/\n\nmongodump --forceTableScan -h Cluster0-shard-0/cluster0-shard-00-00-lqqq1.mongodb.net:27017,cluster0-shard-00-01-lqqq1.mongodb.net:27017,cluster0-shard-00-02-lqqq1.mongodb.net:27017 -u ShyGuy118 -p fg1511973 -d sample_airbnb -o /" } ]
5
ha2398/pln-tps
https://github.com/ha2398/pln-tps
ead2b6857d649aa65c965e5f4f3247a01298a184
700e4f0b706666de6e2bf5dbc96a8a092a87a9fa
a5b9accce9e34ef0ae363275e018b5681241c92a
refs/heads/master
2021-08-22T16:59:00.263311
2017-11-30T18:13:36
2017-11-30T18:13:36
103,792,551
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6478679776191711, "alphanum_fraction": 0.6636863946914673, "avg_line_length": 25.321266174316406, "blob_id": "c5059a30bd6be2f0d5a76850f01f451dd0949864", "content_id": "038fbf9847b79daeed0029f0d905b1de21e18f0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5817, "license_type": "permissive", "max_line_length": 73, "num_lines": 221, "path": "/tp2/src/tp2.py", "repo_name": "ha2398/pln-tps", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n'''\ntp2.py: Trabalho Prático II - Processamento de Linguagem Natural\n@author: Hugo Araujo de Sousa [2013007463]\n@email: [email protected]\n@DCC030 - Processamento de Linguagem Natural - UFMG\n'''\n\n\nimport argparse as ap\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn.feature_extraction import FeatureHasher\nfrom sklearn.naive_bayes import GaussianNB\n\n\n# Add command line arguments to the program.\nparser = ap.ArgumentParser()\nparser.add_argument('train_file', type=str, help='Name of train file')\nparser.add_argument('test_file', type=str, help='Name of test file')\nparser.add_argument('validation_file', type=str,\n\thelp='Name of validation file')\nparser.add_argument('-s', dest='RSEED', default=0, type=int,\n\thelp='Random number generation seed')\n\nargs = parser.parse_args()\n\n\n# Global variables\ntags = {}\nid_tag = {}\n\n\ndef features(sentence, index):\n\t''' Return the features of the word at a given index in the sentence.\n\n\t\t@param \tsentence: \tSentence in which the word is.\n\t\t@type \tsentence: \tList of String.\n\n\t\t@param \tindex:\t\tIndex of word in the sentence.\n\t\t@type \tindex: \t\tInteger.\n\n\t\t@return: \tWord features.\n\t\t@rtype: \tDictionary.\n\t\t'''\n\n\tword = sentence[index].split('_')[0]\n\n\treturn {\n\t\t'word': word.lower(),\n\t\t'is_first': index == 0,\n\t\t'is_last': index == len(sentence) - 1,\n\t\t'is_capitalized': word[0].upper() == word[0],\n\t\t'is_all_caps': 
word.upper() == word,\n\t\t'is_all_lower': word.lower() == word,\n\t\t'prefix-1': word[0].lower(),\n\t\t'prefix-2': word[:2].lower(),\n\t\t'prefix-3': word[:3].lower(),\n\t\t'suffix-1': word[-1].lower(),\n\t\t'suffix-2': word[-2:].lower(),\n\t\t'suffix-3': word[-3:].lower(),\n\t\t'prev_tag': '' if index == 0 else sentence[index - 1].split('_')[1],\n\t\t'next_tag': '' if index == len(sentence) - 1 else \\\n\t\t\tsentence[index + 1].split('_')[1],\n\t\t'has_hyphen': '-' in word,\n\t\t'is_numeric': word.isdigit(),\n\t}\n\ndef build_dataset(file):\n\t''' Read a file with words and their POS tags and create an array\n\t\twith words and their target POS.\n\t\n\t\t@param \tfile: Input file.\n\t\t@type \tfile: File.\n\n\t\t@return: \tData and its targets.\n\t\t@rtype:\t\tNumpy array, Numpy array\n\t\t'''\n\n\tglobal tags\n\n\th = FeatureHasher(n_features=17)\n\n\tdata = []\n\ttarget = []\n\n\tfor line in file:\n\t\twords = line.split()\n\n\t\tfor index in range(len(words)):\n\t\t\tdata.append(features(words, index))\n\t\t\ttag = words[index].split('_')[1]\n\n\t\t\tif tag not in tags:\n\t\t\t\ttag_id = len(tags)\n\t\t\t\ttags[tag] = tag_id\n\t\t\t\tid_tag[tag_id] = tag\n\n\t\t\ttag = tags[tag]\n\t\t\ttarget.append(tag)\n\n\tdata_array = h.transform(data).toarray()\n\ttarget_array = np.array(target)\n\n\treturn data_array, target_array\n\n\ndef read_data(train_filename, test_filename, validation_filename):\n\t''' Read input data from input files.\n\t\t\n\t\t@param \ttrain_filename: Training data file name.\n\t\t@type \ttrain_filename: String.\n\n\t\t@param \ttest_filename: Test data file name.\n\t\t@type \ttest_filename: String.\n\n\t\t@param \tvalidation_filename: Validation data file name.\n\t\t@type \tvalidation_filename: String.\n\n\t\t@return: \tTraining data, test data and validation data.\n\t\t@rtype:\t\tTuple of Tuple of Numpy Array\t\n\t\t'''\n\n\tprint('[+] Reading training file')\n\ttrain_file = open(train_filename, 'r')\n\ttrain_data = 
build_dataset(train_file)\n\n\tprint('[+] Reading validation file')\n\tvalidation_file = open(validation_filename, 'r')\n\tvalidation_data = build_dataset(validation_file)\n\n\tprint('[+] Reading test file')\n\ttest_file = open(test_filename, 'r')\n\ttest_data = build_dataset(test_file)\n\n\ttrain_file.close()\n\ttest_file.close()\n\tvalidation_file.close()\n\n\tprint()\n\n\treturn train_data, test_data, validation_data\n\n\ndef print_most_precise_pos(real_output, model_output):\n\t''' Print the POS tags for which the model was more precise.\n\n\t\t@param \treal_output: Real data outputs.\n\t\t@type \treal_output: Numpy Array.\n\n\t\t@param \tmodel_output: Model outputs.\n\t\t@type \tmodel_output: Numpy Array.\n\t\t'''\n\n\thits = [0] * len(tags)\n\tcounts = [0] * len(tags)\n\n\tfor i in range(len(real_output)):\n\t\ttag_id = real_output[i]\n\t\tpredicted_tag_id = model_output[i]\n\n\t\tcounts[tag_id] += 1\n\n\t\tif tag_id == predicted_tag_id:\n\t\t\thits[tag_id] += 1\n\n\tprecision = [0] * len(tags)\n\tfor tag in tags:\n\t\ttag_id = tags[tag]\n\t\ttag_precision = hits[tag_id] / counts[tag_id]\n\t\tprecision[tag_id] = (tag, tag_precision)\n\n\tprecision = sorted(precision, key=lambda x: x[1], reverse=True)\n\n\tfor i in range(len(precision)):\n\t\ttag_precision = round(precision[i][1] * 100, 2)\n\t\tprint('\\t', precision[i][0], 'precision: {}%'.format(tag_precision))\n\n\tprint()\n\n\ndef main():\n\n\ttrain_data, test_data, validation_data = \\\n\t\tread_data(args.train_file, args.test_file, args.validation_file)\n\n\tprint('\\tNAIVE BAYES\\n')\n\tgnb = GaussianNB()\n\tpredictor = gnb.fit(train_data[0], train_data[1])\n\n\tnb_y_valid = predictor.predict(validation_data[0])\n\tprecision = ((validation_data[1] == nb_y_valid).sum()) \\\n\t\t/ len(validation_data[0])\n\tprint('[+] Validation precision: {}%'.format(round((precision*100), 2)))\n\tprint_most_precise_pos(validation_data[1], nb_y_valid)\n\n\tnb_y_test = predictor.predict(test_data[0])\n\tprecision = 
((test_data[1] == nb_y_test).sum()) / len(test_data[0])\n\tprint('[+] Test precision: {}%'.format(round((precision*100), 2)))\n\tprint_most_precise_pos(test_data[1], nb_y_test)\n\n\tprint(('-' * 80) + '\\n')\n\t\n\tprint('\\tSVM\\n')\n\tsvmc = svm.SVC(random_state=args.RSEED)\n\tpredictor = svmc.fit(train_data[0], train_data[1])\n\n\tsvm_y_valid = predictor.predict(validation_data[0])\n\tprecision = ((validation_data[1] == svm_y_valid).sum()) \\\n\t\t/ len(validation_data[0])\n\tprint('[+] Validation precision: {}%'.format(round((precision*100), 2)))\n\tprint_most_precise_pos(validation_data[1], svm_y_valid)\n\n\tsvm_y_test = predictor.predict(test_data[0])\n\tprecision = ((test_data[1] == svm_y_test).sum()) / len(test_data[0])\n\tprint('[+] Test precision: {}%'.format(round((precision*100), 2)))\n\tprint_most_precise_pos(test_data[1], svm_y_test)\n\t\n\nmain()" }, { "alpha_fraction": 0.6339852213859558, "alphanum_fraction": 0.6580338478088379, "avg_line_length": 19.44864845275879, "blob_id": "e306a5a1f691631a09c9ac3df081a2f8a849f70b", "content_id": "65f61679f904f19d27744a9a64dd0eadad2f3417", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3785, "license_type": "permissive", "max_line_length": 70, "num_lines": 185, "path": "/tp1/src/build_distance_matrices.py", "repo_name": "ha2398/pln-tps", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n'''\nbuild_distance_matrices.py\nTrabalho Prático 1 - Processamento de Linguagem Natural\nUFMG/DCC\n@author: Hugo Araujo de Sousa [2013007463]\n@DCC030\n'''\n\nimport os\n\n# Global variables.\nDATA_FOLDER = '__temp_data__'\nVECTORS_FOLDER = 'vectors'\n\nvocabs = {}\ndmatrices = {}\n\ndef read_vectors():\n\t''' Read word vectors for all books.\n\t\n\t\t@rtype:\t\tDictionary\n\t\t@return:\tDictionary with filename strings as keys and dictionaris \n\t\t\t\t\tas values. 
These values are word string -> float list\n\t\t\t\t\tdictionaries.\n\t\t'''\n\n\tprint('[+] Reading word vectors')\n\tpath = DATA_FOLDER + '/' + VECTORS_FOLDER\n\tvector_names = [f for f in os.listdir(path)]\n\tvector_names.sort()\n\n\tvectors = {}\n\n\tfor vector_name in vector_names:\n\t\tprint('\\t- ' + vector_name)\n\n\t\tvector_file = open(path + '/' + vector_name, 'r')\n\n\t\tindex = vector_name[:-4]\n\n\t\tvectors[index] = {}\n\n\t\t# Ignore header\n\t\tvector_file.readline()\n\n\t\tfor line in vector_file:\n\t\t\tline_list = line.strip().split(' ')\n\t\t\tword = line_list[0]\n\t\t\tvector = [float(v) for v in line_list[1:]]\n\n\t\t\tvectors[index][word] = vector\n\n\treturn vectors\n\n\ndef cosine_similarity(vector1, vector2):\n\t''' Get the cosine similarity of two vectors.\n\n\t\t@type\tvector1:\tfloat list\n\t\t@param\tvector1: \tFirst vector\n\n\t\t@type\tvector2:\tfloat list\n\t\t@param\tvector2: \tSecond vector\n\t\t'''\n\n\tnum = sum([(a*b) for (a,b) in zip(vector1, vector2)])\n\tden1 = sum((a ** 2 for a in vector1)) ** 0.5\n\tden2 = sum((b ** 2 for b in vector2)) ** 0.5\n\n\treturn num/(den1 * den2)\n\n\ndef build_distance_matrices(vectors):\n\t''' Build distance matrices for all books, where each entry in these\n\t\tmatrices represents a pair of words.\n\n\t\t@type\tvectors:\tDictionary\n\t\t@param \tvectors:\tDictionary with filename strings as keys and\n\t\t\t\t\t\t\tdictionaries as values. 
These values are word\n\t\t\t\t\t\t\tstring -> float list dictionaries.\n\t\t'''\n\n\tglobal vocabs\n\tglobal dmatrices\n\n\tprint('[+] Creating distance matrices')\n\n\tfor filename in vectors:\n\t\tprint('\\t- ' + filename)\n\t\tmatrix = {}\n\t\tcur_vocab = set()\n\n\t\tkey = int(filename.split(' ')[0])\n\n\t\tdistances = vectors[filename]\n\t\twords = list(distances)\n\n\t\t# For each pair of words, calculate distance.\n\t\tfor word1 in words:\n\t\t\tcur_vocab.add(word1)\n\t\t\tfor word2 in words:\n\t\t\t\tif word1 <= word2 and (word1, word2) not in matrix:\n\t\t\t\t\tmatrix[word1, word2] = cosine_similarity(distances[word1],\n\t\t\t\t\t\tdistances[word2])\n\n\t\tdmatrices[key] = matrix\n\t\tvocabs[key] = cur_vocab\n\n\treturn\n\n\ndef compare_matrices(book1, book2):\n\t''' Compare two matrices and calculate how similar they are.\n\n\t\t@type\tmatrixA:\tint\n\t\t@param\tmatrixA:\tIndex of first book to compare.\n\n\t\t@type\tmatrixB:\tint\n\t\t@param\tmatrixB:\tIndex of second book to compare.\n\n\t\t@rtype:\t\tfloat\n\t\t@return:\tA number that represents the distance between the two input\n\t\t\t\t\tmatrices.\n\t\t'''\n\n\tmatrixA = dmatrices[book1]\n\tmatrixB = dmatrices[book2]\n\n\tvocab1 = vocabs[book1]\n\tvocab2 = vocabs[book2]\n\t\n\tvocab = vocab1.union(vocab2)\n\n\tdist = 0\n\tfor word1 in vocab:\n\t\tfor word2 in vocab:\n\t\t\tif (word1 > word2):\n\t\t\t\tcontinue\n\n\t\t\tif (word1, word2) in matrixA:\n\t\t\t\taij = matrixA[word1, word2]\n\t\t\telse:\n\t\t\t\taij = 0\n\n\t\t\tif (word1, word2) in matrixB:\n\t\t\t\tbij = matrixB[word1, word2]\n\t\t\telse:\n\t\t\t\tbij = 0\n\n\t\t\tdist += (aij - bij) ** 2\n\n\tdist = dist ** 0.5\n\treturn dist\n\n\ndef get_books_distances():\n\t''' Calculate the distance between all books. 
'''\n\n\tglobal dmatrices\n\n\tpath = DATA_FOLDER + '/' + VECTORS_FOLDER\n\tnames = [f[:-4] for f in os.listdir(path)]\n\n\tfor name1 in names:\n\t\tfor name2 in names:\n\t\t\tprint('Comparing books:')\n\n\t\t\tbook1 = int(name1.split(' ')[0])\n\t\t\tbook2 = int(name2.split(' ')[0])\n\n\t\t\tprint(name1)\n\t\t\tprint(name2)\n\t\t\tdist = compare_matrices(book1, book2)\n\t\t\tprint('Distance:', dist)\n\t\t\tprint()\n\n\ndef main():\n\t''' Main program. '''\n\n\tvectors = read_vectors()\n\tbuild_distance_matrices(vectors)\n\tget_books_distances()\n\t" }, { "alpha_fraction": 0.6729264259338379, "alphanum_fraction": 0.6964006423950195, "avg_line_length": 15, "blob_id": "0534eb5f618ba92aa1e062e9466164157b9848b1", "content_id": "13e1f562ad82a5fc537456e01b98004644a44ba9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "permissive", "max_line_length": 55, "num_lines": 40, "path": "/tp1/src/main.py", "repo_name": "ha2398/pln-tps", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n'''\nmain.py\nTrabalho Prático 1 - Processamento de Linguagem Natural\nUFMG/DCC\n@author: Hugo Araujo de Sousa [2013007463]\n@DCC030\n'''\n\nimport argparse as ap\nimport build_vectors as bv\nimport build_distance_matrices as bdm\n\n\n# Global variables.\nparser = ap.ArgumentParser()\n\n\ndef parse_arguments():\n\t''' Parse the program's arguments. \n\t\t\n\t\t@rtype:\t\targs.Namespace\n\t\t@return:\tProgram's arguments\n\t\t'''\n\n\tparser.add_argument('INPUT_FOLDER', type=str,\n\t\thelp='Name of input folder')\n\n\treturn parser.parse_args()\n\n\ndef main():\n\t''' Main program. 
'''\n\targs = parse_arguments()\n\tvocabs = bv.main(args.INPUT_FOLDER)\n\tbdm.main()\n\t\n\t\nmain()" }, { "alpha_fraction": 0.6259124279022217, "alphanum_fraction": 0.6379562020301819, "avg_line_length": 18.30281639099121, "blob_id": "5020df2bdd91d20d907730251039228fbdc1c870", "content_id": "6220050b1da8120427c7b64f33504efbc7cf88a9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2741, "license_type": "permissive", "max_line_length": 74, "num_lines": 142, "path": "/tp1/src/build_vectors.py", "repo_name": "ha2398/pln-tps", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n'''\nbuild_vectors.py\nTrabalho Prático 1 - Processamento de Linguagem Natural\nUFMG/DCC\n@author: Hugo Araujo de Sousa [2013007463]\n@DCC030\n'''\n\n\nimport argparse as ap\nimport os\nimport re\nimport subprocess as sp\n\n\n# Global variables.\nDATA_FOLDER = '__temp_data__'\nBOOKS_FOLDER = 'books'\nVECTORS_FOLDER = 'vectors'\n\nNULL = None\n\n\ndef setup():\n\t''' Set up current working folder. '''\n\n\tprint('[+] Setting up')\n\n\tglobal NULL\n\n\tNULL = open(os.devnull, 'w')\n\tvocabs = {}\n\tsp.call(['mkdir', DATA_FOLDER])\n\tsp.call(['mkdir', DATA_FOLDER + '/' + BOOKS_FOLDER])\n\tsp.call(['mkdir', DATA_FOLDER + '/' + VECTORS_FOLDER])\n\n\tprint('\\t- Building word2vec')\n\tsp.call(['make', 'all', '-C', 'word2vec'])\n\n\treturn\n\n\ndef trim_file(input_folder, filename):\n\t''' Remove symbols from a particular book file, leaving only alphanumeric\n\t\tcharacters, and dump the result to file. 
\n\n\t\t@type \tinput_folder:\tstr\n\t\t@param \tinput_folder:\tInput folder path\n\n\t\t@type\tfilename:\tstr\n\t\t@param \tfilename:\tName of the file to trim\n\t\t'''\n\n\tglobal vocabs\n\n\tfile = open(input_folder + '/' + filename, 'r')\n\tout_file = open(DATA_FOLDER + '/' + BOOKS_FOLDER + '/' + filename, 'w')\n\n\tprint('\\t- ' + filename)\n\n\tcontent = file.read()\n\n\t# Normalize text.\n\tnew = re.sub('(\\s|\\W)+', ' ', content).lower()\n\tout_file.write(new)\n\n\tfile.close()\n\tout_file.close()\n\n\treturn\n\n\ndef pre_process(input_folder):\n\t''' Process input books and produces new files without unnecessary\n\t\tcharacters.\n\n\t\t@type\tinput_folder:\tstring\n\t\t@param \tinput_folder:\tName of input folder\n\t\t'''\n\n\tprint('[+] Processing input files')\n\n\tfile_names = [f for f in os.listdir(input_folder)]\n\tfile_names.sort()\n\n\tfor filename in file_names:\n\t\ttrim_file(input_folder, filename)\n\n\treturn\n\n\ndef build_vectors():\n\t''' Build word vectors for each book. '''\n\n\tprint('[+] Building word vectors')\n\tbook_names = [f for f in os.listdir(DATA_FOLDER + '/' + BOOKS_FOLDER)]\n\tbook_names.sort()\n\n\tfor book_name in book_names:\n\t\tvector_name = book_name[:-4] + '.vec'\n\t\tprint('\\t- ' + vector_name)\n\t\tsp.call([\n\t\t\t'./word2vec/word2vec',\n\t\t\t'-train',\n\t\t\t'{}/{}/{}'.format(DATA_FOLDER, BOOKS_FOLDER, book_name),\n\t\t\t'-output',\n\t\t\t'{}/{}/{}'.format(DATA_FOLDER, VECTORS_FOLDER, vector_name),\n\t\t\t'-cbow', '1', '-size', '200', '-window', '8', '-threads', '20',\n\t\t\t'-binary', '0', '-iter', '15', 'min-count', '10'])\n\n\treturn\n\n\ndef finish():\n\t''' Clean the directory and perform final operations. '''\n\n\tprint('[+] Finishing...')\n\n\tprint('\\t- Cleaning files')\n\tsp.call(['make', 'clean', '-C', 'word2vec'])\n\n\tNULL.close()\n\tprint('Done.')\n\n\treturn\n\n\ndef main(input_folder):\n\t''' Main program. 
\n\n\t\t@type\tinput_folder:\tstring\n\t\t@param \tinput_folder:\tName of input folder\n\t\t'''\n\n\tsetup()\n\tpre_process(input_folder)\n\tbuild_vectors()\n\tfinish()\n\n\treturn" } ]
4
JustAnotherPythonProgrammer/KahootBot
https://github.com/JustAnotherPythonProgrammer/KahootBot
0b33bee18aab5548e773943856d4191f3ba11f46
82c85b9de905d79c8540cd2e1aa861987b96bdf3
c32d7f105dfbf6de14729d5087e07fb97a738282
refs/heads/master
2022-06-10T12:23:06.686230
2020-05-02T05:16:51
2020-05-02T05:16:51
260,619,227
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6425625681877136, "alphanum_fraction": 0.6541105508804321, "avg_line_length": 38.96703338623047, "blob_id": "64518e943ade07d9a89382372a444ef87f60e7aa", "content_id": "9dbacad90ee1e441d0873d158e10ab3b52f9d789", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3637, "license_type": "no_license", "max_line_length": 230, "num_lines": 91, "path": "/Bot.pyw", "repo_name": "JustAnotherPythonProgrammer/KahootBot", "src_encoding": "UTF-8", "text": "from selenium.webdriver import Chrome\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.common.exceptions import NoSuchElementException\n\n\nclass Question:\n def __init__(self, q, a):\n self.q = q\n self.a = a\n\n\n\ndef getData(URL):\n print(\"\\n\")\n driver = Chrome()\n driver.get(URL)\n\n showAnswers = WebDriverWait(driver, 60).until(expected_conditions.presence_of_element_located((\"css selector\", \"#layout > div.layout__body-wrapper > main > div.question-list-and-resource-credits > section > header > button\")))\n showAnswers.click()\n\n WebDriverWait(driver, 60).until(expected_conditions.presence_of_element_located((\"class name\", \"question-list__item\")))\n quests = driver.find_elements_by_class_name(\"question-list__item\")\n\n arr = []\n for index, q in enumerate(quests):\n\n text = q.find_element_by_class_name(\"question-media__text-inner-wrapper\")\n ans = q.find_elements_by_class_name(\"choices__choice\")\n for a in ans:\n try:\n a.find_element_by_class_name(\"choices__choice--correct\")\n element = a.find_element_by_tag_name(\"span\")\n answer = a.text.split(\"\\n\")[0]\n answerShape = element.get_attribute(\"class\").split(\"--\")[1]\n arr.append(Question(text.text, answerShape))\n break\n except NoSuchElementException as e:\n pass\n\n\n print(\"Retrieved answers.\")\n driver.close()\n return arr\n\n\ndef connectToLobby(pin):\n driver = Chrome()\n 
driver.get(\"https://kahoot.it/\")\n pinBox = WebDriverWait(driver, 60).until(expected_conditions.presence_of_element_located((\"css selector\", \"#game-input\")))\n pinBox.send_keys(pin)\n confirm = driver.find_element_by_css_selector(\"#root > div > div > div > main > div > form > button\")\n confirm.click()\n print(\"Connected to lobby.\")\n return driver\n\n\ndef enterName(driver, name):\n nameBox = WebDriverWait(driver, 60).until(expected_conditions.presence_of_element_located((\"css selector\", \"#nickname\")))\n nameBox.send_keys(name)\n confirm = driver.find_element_by_css_selector(\"#root > div > div > div > main > div > form > button\")\n confirm.click()\n print(\"Joined game.\")\n\n\ndef playGame(driver, questions):\n\n selectors = {\"triangle\": \"#root > div > main > div.question__PageMainContent-sc-12j7dwx-0.dhkrXm > div > button.card-button__CardButton-vbewcy-1.eRSCLD.flat-button__FlatButton-sc-6uljam-0.bbSHdR\",\n \"diamond\": \"#root > div > main > div.question__PageMainContent-sc-12j7dwx-0.dhkrXm > div > button.card-button__CardButton-vbewcy-1.fabXZJ.flat-button__FlatButton-sc-6uljam-0.bbSHdR\",\n \"circle\": \"#root > div > main > div.question__PageMainContent-sc-12j7dwx-0.dhkrXm > div > button.card-button__CardButton-vbewcy-1.eYFENK.flat-button__FlatButton-sc-6uljam-0.bbSHdR\",\n \"square\": \"#root > div > main > div.question__PageMainContent-sc-12j7dwx-0.dhkrXm > div > button.card-button__CardButton-vbewcy-1.bDfINc.flat-button__FlatButton-sc-6uljam-0.bbSHdR\"}\n\n for num, q in enumerate(questions, start=1):\n button = WebDriverWait(driver, 600).until(expected_conditions.presence_of_element_located((\"css selector\", selectors[q.a])))\n button.click()\n print(f\"Question {num} answered.\")\n\n\n\n\ndef main():\n URL = input(\"URL: \")\n pin = input(\"PIN: \")\n name = input(\"NAME: \")\n questions = getData(URL)\n game = connectToLobby(pin)\n enterName(game, name)\n playGame(game, questions)\n\nif __name__ == \"__main__\":\n main()\n" }, { 
"alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 17, "blob_id": "f50bea67ef33d2fe680c2dca165dcbca1aa91ac1", "content_id": "7b616ef6258a21166ede3f296a9e807d5250141f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 36, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/README.md", "repo_name": "JustAnotherPythonProgrammer/KahootBot", "src_encoding": "UTF-8", "text": "# KahootBot\n This bot plays Kahoot.\n" } ]
2
JiaXiangYu/AutoTest
https://github.com/JiaXiangYu/AutoTest
2d9734a95dfa18dc59d81f0f4b48bd45b0159a9b
a8e41fe0c490d2f8cbde6e3031f71d08f0e9bf96
77396150807e0d171505e0b5a9ef2390cd24df3c
refs/heads/master
2020-04-06T21:24:20.869599
2018-11-16T02:40:54
2018-11-16T02:40:54
157,802,118
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5525093078613281, "alphanum_fraction": 0.5683085322380066, "avg_line_length": 26.342105865478516, "blob_id": "0b13ab2ad1639a367481753a84a3f6d2f12137ae", "content_id": "e9535aac97357e586d1d6a910ec910a6dc2b8f34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2284, "license_type": "no_license", "max_line_length": 85, "num_lines": 76, "path": "/Voctest/Common/sendMail.py", "repo_name": "JiaXiangYu/AutoTest", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n\r\n'''\r\n@project: Voctest\r\n@author: Jimmy\r\n@file: sendMail.py\r\n@ide: PyCharm Community Edition\r\n@time: 2018-11-15 17:14\r\n@blog: https://www.cnblogs.com/gotesting/\r\n\r\n'''\r\n\r\nimport os\r\nimport smtplib\r\nimport time\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom Common.log import Log\r\nfrom Config.globalConfig import report_path\r\n\r\n\r\nlogger = Log()\r\n# 配置收发件人\r\nrecv_address = ['[email protected]']\r\n# 163的用户名和密码\r\nsend_addr_name = '[email protected]'\r\nsend_addr_pswd = 'jiaxy19920319'\r\n\r\n\r\nclass SendMail:\r\n def __init__(self, recver=None):\r\n \"\"\"接收邮件的人:list or tuple\"\"\"\r\n if recver is None:\r\n self.sendTo = recv_address\r\n else:\r\n self.sendTo = recver\r\n\r\n def get_report(self):\r\n \"\"\"获取最新测试报告\"\"\"\r\n lists = os.listdir(report_path)\r\n lists.sort()\r\n send_report = lists[-1]\r\n print('The send report name: {0}'.format(send_report))\r\n return send_report\r\n\r\n def take_messages(self):\r\n \"\"\"生成邮件的内容,和html报告附件\"\"\"\r\n report = self.get_report()\r\n self.msg = MIMEMultipart()\r\n self.msg['Subject'] = 'VOC自动化测试报告'\r\n self.msg['date'] = time.strftime('%a, %d %b %Y %H:%M:%S %z')\r\n\r\n with open(os.path.join(report_path, report), 'rb') as f:\r\n mailbody = f.read()\r\n html = MIMEText(mailbody, _subtype='html', _charset='utf-8')\r\n self.msg.attach(html)\r\n\r\n # html附件\r\n att1 = MIMEText(mailbody, 
'base64', 'gb2312')\r\n att1[\"Content-Type\"] = 'application/octet-stream'\r\n att1[\"Content-Disposition\"] = 'attachment; filename=\"VocAutoTestReport.html\"'\r\n self.msg.attach(att1)\r\n\r\n def send(self):\r\n \"\"\"发送邮件\"\"\"\r\n self.take_messages()\r\n self.msg['from'] = send_addr_name\r\n try:\r\n smtp = smtplib.SMTP('smtp.163.com', 25)\r\n smtp.login(send_addr_name, send_addr_pswd)\r\n smtp.sendmail(self.msg['from'], self.sendTo, self.msg.as_string())\r\n smtp.close()\r\n logger.log_info(\"发送邮件成功\")\r\n except Exception:\r\n logger.log_error('发送邮件失败')\r\n raise" }, { "alpha_fraction": 0.6302682161331177, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 19.83333396911621, "blob_id": "7d06ce31cfbefb431f94151a5359547481ce8c4d", "content_id": "6955b00c955edaba74339002f4604dedf941a4c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 574, "license_type": "no_license", "max_line_length": 61, "num_lines": 24, "path": "/Voctest/Config/globalConfig.py", "repo_name": "JiaXiangYu/AutoTest", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n\r\n'''\r\n@project: Voctest\r\n@author: Jimmy\r\n@file: globalConfig.py\r\n@ide: PyCharm Community Edition\r\n@time: 2018-11-15 11:39\r\n@blog: https://www.cnblogs.com/gotesting/\r\n\r\n'''\r\nimport os\r\n\r\n# 项目路径\r\nprojectj_path = 'F:\\python\\Voctest'\r\n\r\n# 日志路径\r\nlog_path = os.path.join(projectj_path, 'Report', 'log')\r\n# 截图文件路径\r\nimg_path = os.path.join(projectj_path, 'Report', 'image')\r\n# 测试报告路径\r\nreport_path = os.path.join(projectj_path, 'Report', 'report')\r\n# 测试数据路径\r\ndata_path = os.path.join(projectj_path, 'Data')" }, { "alpha_fraction": 0.5477105975151062, "alphanum_fraction": 0.5704299211502075, "avg_line_length": 32.03571319580078, "blob_id": "300dbe2749a6a1f74df6b4db8fdf1da682091ec4", "content_id": "be0db8b0b40432a93db20e3bc9e605e053777847", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 2935, "license_type": "no_license", "max_line_length": 94, "num_lines": 84, "path": "/Voctest/TestCase/test_login.py", "repo_name": "JiaXiangYu/AutoTest", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n\r\n'''\r\n@project: Voctest\r\n@author: Jimmy\r\n@file: test_login.py\r\n@ide: PyCharm Community Edition\r\n@time: 2018-10-31 16:44\r\n@blog: https://www.cnblogs.com/gotesting/\r\n\r\n'''\r\n\r\nimport unittest\r\nimport time\r\nimport traceback\r\nfrom PageObjects.loginPage import LoginPage\r\nfrom selenium import webdriver\r\nfrom Common.log import Log\r\n\r\nclass TestLogin(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.logger = Log()\r\n self.logger.log_info('「Login Test Start:')\r\n url = 'http://10.10.15.153'\r\n self.driver = webdriver.Chrome()\r\n self.driver.maximize_window()\r\n self.lp = LoginPage(self.driver,url)\r\n time.sleep(3)\r\n\r\n def tearDown(self):\r\n self.driver.quit()\r\n self.logger.log_info('_Login Test End」')\r\n\r\n # 登录成功\r\n def test_1_login_success(self):\r\n self.lp.login('system','123456')\r\n time.sleep(3)\r\n try:\r\n msg = self.driver.find_element_by_xpath('//*[@class=\"gd-topbar-tool-text\"]').text\r\n self.assertEquals('system',msg)\r\n self.logger.log_info('Test Case \"test_1_login_success\" Passed !')\r\n except Exception as e:\r\n self.logger.log_error('Test Case \"test_4_login_fail\" Failed ! \\n{0}'.format(e))\r\n raise e\r\n\r\n # 无用户名登录\r\n def test_2_login_no_username(self):\r\n self.lp.login('','123456')\r\n time.sleep(3)\r\n try:\r\n msg = self.driver.find_element_by_xpath('//*[@class=\"gd-login-submit-text\"]').text\r\n self.assertEquals('登 录',msg)\r\n self.logger.log_info('Test Case \"test_2_login_no_username\" Passed !')\r\n except Exception as e:\r\n self.logger.log_error('Test Case \"test_4_login_fail\" Failed ! 
\\n{0}'.format(e))\r\n raise e\r\n\r\n\r\n # 无密码登录\r\n def test_3_login_no_passwd(self):\r\n self.lp.login('system','')\r\n time.sleep(3)\r\n msg = self.driver.find_element_by_xpath('//*[@class=\"gd-login-submit-text\"]').text\r\n try:\r\n msg = self.driver.find_element_by_xpath('//*[@class=\"gd-login-submit-text\"]').text\r\n self.assertEquals('登 录',msg)\r\n self.logger.log_info('Test Case \"test_3_login_no_passwd\" Passed !')\r\n except Exception as e:\r\n self.logger.log_error('Test Case \"test_4_login_fail\" Failed ! \\n{0}'.format(e))\r\n raise e\r\n\r\n\r\n # 用户名/密码错误\r\n def test_4_login_fail(self):\r\n self.lp.login('system','1234567890')\r\n time.sleep(3)\r\n try:\r\n msg = self.driver.find_element_by_xpath('//*[@class=\"gd-login-msg\"]').text\r\n self.assertEquals('用户名或密码错误',msg)\r\n self.logger.log_info('Test Case \"test_4_login_fail\" Passed !')\r\n except Exception as e:\r\n self.logger.log_error('Test Case \"test_4_login_fail\" Failed ! \\n{0}'.format(e))\r\n raise e\r\n\r\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 20, "blob_id": "9416113e80ea0f42e3697d33fbbe1dd9734a5074", "content_id": "6e907a8bd254b987871f62521405d83e2a893725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/Voctest/Report/__init__.py", "repo_name": "JiaXiangYu/AutoTest", "src_encoding": "UTF-8", "text": "__author__ = 'Jimmy'\r\n" }, { "alpha_fraction": 0.5623167157173157, "alphanum_fraction": 0.5813782811164856, "avg_line_length": 26.375, "blob_id": "fbb5a2bae6ec53aa87e70715877ea02cb91c1d3c", "content_id": "904ca6ff70da9519f30151891769f01824e892bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1386, "license_type": "no_license", "max_line_length": 111, "num_lines": 48, "path": "/Voctest/TestCase/test_delete_mail.py", "repo_name": 
"JiaXiangYu/AutoTest", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n\r\n'''\r\n@project: Voctest\r\n@author: Jimmy\r\n@file: 111test_delete_mail.py\r\n@ide: PyCharm Community Edition\r\n@time: 2018-11-16 09:01\r\n@blog: https://www.cnblogs.com/gotesting/\r\n\r\n'''\r\n\r\nimport unittest\r\nimport time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.action_chains import ActionChains as AC\r\n\r\nclass TestDelMail(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.dr = webdriver.Chrome()\r\n self.dr.maximize_window()\r\n self.dr.get('http://mail.goldencis.com/')\r\n\r\n def test_del_mail(self):\r\n\r\n # login\r\n self.dr.find_element_by_id('account_name').send_keys('[email protected]')\r\n self.dr.find_element_by_id('password').send_keys('147258a?')\r\n self.dr.find_element_by_id('submit-btn').click()\r\n time.sleep(5)\r\n\r\n # 切换到 已发送\r\n self.dr.find_element_by_xpath('//*[@class=\"nui-tree-item-text\"][@title=\"已发送\"]').click()\r\n time.sleep(3)\r\n for i in range(0,1500):\r\n self.dr.find_element_by_xpath('//*[@class=\"js-component-icon nui-ico nui-ico-checkbox \"]').click()\r\n\r\n time.sleep(3)\r\n self.dr.find_element_by_xpath('//span[contains(text(),\"删 除\")]').click()\r\n time.sleep(3)\r\n i += 1\r\n\r\n def tearDown(self):\r\n self.dr.quit()\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n\r\n" }, { "alpha_fraction": 0.5972396731376648, "alphanum_fraction": 0.6135508418083191, "avg_line_length": 24.5, "blob_id": "c5e08af803f555de575c33f9cc5f8614eb291ba6", "content_id": "478489d6d503db6c4b0826c0bd6d1c8e6709be25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 827, "license_type": "no_license", "max_line_length": 82, "num_lines": 30, "path": "/Voctest/PageObjects/loginPage.py", "repo_name": "JiaXiangYu/AutoTest", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n\r\n'''\r\n@project: Voctest\r\n@author: Jimmy\r\n@file: loginPage.py\r\n@ide: 
PyCharm Community Edition\r\n@time: 2018-11-14 13:53\r\n@blog: https://www.cnblogs.com/gotesting/\r\n\r\n'''\r\n\r\n\r\nclass LoginPage:\r\n\r\n # 用户名输入框\r\n login_username = '//*[@class=\"gd-login-user\"]'\r\n # 密码输入框\r\n login_passwd = '//*[@class=\"gd-login-password\"]'\r\n # 登录按钮\r\n login_button = '//*[@class=\"gd-login-submit-bg\"]'\r\n\r\n def __init__(self,driver,url):\r\n self.driver = driver\r\n self.driver.get(url)\r\n\r\n def login(self,username,passwd):\r\n self.driver.find_element_by_xpath(self.login_username).send_keys(username)\r\n self.driver.find_element_by_xpath(self.login_passwd).send_keys(passwd)\r\n self.driver.find_element_by_xpath(self.login_button).click()\r\n\r\n" }, { "alpha_fraction": 0.49694857001304626, "alphanum_fraction": 0.5091543197631836, "avg_line_length": 24.720930099487305, "blob_id": "36c9db3c225b02035fd4d92624845a5a85942ae4", "content_id": "361bbc22579c39c9765a9240a05921aafba3ea01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 130, "num_lines": 43, "path": "/Voctest/TestSuite/testRun.py", "repo_name": "JiaXiangYu/AutoTest", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n\r\n'''\r\n@project: Voctest\r\n@author: Jimmy\r\n@file: testRun.py\r\n@ide: PyCharm Community Edition\r\n@time: 2018-11-14 15:49\r\n@blog: https://www.cnblogs.com/gotesting/\r\n\r\n'''\r\n\r\nimport unittest\r\nimport HTMLTestRunner\r\nimport time\r\n\r\nfrom Config.globalConfig import *\r\nfrom TestSuite import testSuite\r\nfrom Common.log import Log\r\nfrom Common.sendMail import SendMail\r\n\r\ndef run_test():\r\n\r\n\r\n runner = unittest.TextTestRunner()\r\n curTime = time.strftime('%Y-%m-%d_%H_%M_%S')\r\n report_name = report_path + '\\\\' + 'TestResult-' + curTime + '.html'\r\n with open(report_name,'wb') as f:\r\n runner = HTMLTestRunner.HTMLTestRunner(\r\n stream = f,\r\n title = '测试报告'\r\n )\r\n 
runner.run(testSuite.suite)\r\n\r\n time.sleep(3)\r\n mail = SendMail()\r\n mail.send()\r\n\r\nif __name__ == '__main__':\r\n logger = Log()\r\n logger.log_info('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*- Auto Test Comming -*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')\r\n run_test()\r\n logger.log_info('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*- Auto Test Done -*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')" }, { "alpha_fraction": 0.5431472063064575, "alphanum_fraction": 0.6091370582580566, "avg_line_length": 15.909090995788574, "blob_id": "c1f307123e9b9203dfb5f5f08ec5b7acb390516f", "content_id": "868981f1684d544b2de65a602a44c579e99bb581", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 41, "num_lines": 11, "path": "/Voctest/TestSuite/__init__.py", "repo_name": "JiaXiangYu/AutoTest", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n\r\n'''\r\n@project: Voctest\r\n@author: Jimmy\r\n@file: __init__.py.py\r\n@ide: PyCharm Community Edition\r\n@time: 2018-11-14 15:38\r\n@blog: https://www.cnblogs.com/gotesting/\r\n\r\n'''\r\n" }, { "alpha_fraction": 0.6964980363845825, "alphanum_fraction": 0.7217898964881897, "avg_line_length": 18.639999389648438, "blob_id": "949013fa9a12c4ce9aa5e74d512c083a917498cc", "content_id": "bd45b011b3f9ad57ca9035859b9c0ff30efc2220", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "no_license", "max_line_length": 58, "num_lines": 25, "path": "/Voctest/TestSuite/testSuite.py", "repo_name": "JiaXiangYu/AutoTest", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n\r\n'''\r\n@project: Voctest\r\n@author: Jimmy\r\n@file: testSuite.py\r\n@ide: PyCharm Community Edition\r\n@time: 2018-11-14 15:40\r\n@blog: https://www.cnblogs.com/gotesting/\r\n\r\n'''\r\n\r\nimport unittest\r\nfrom TestCase import test_login\r\n# from TestCase.test_login 
import TestLogin\r\n\r\nsuite = unittest.TestSuite()\r\nloader = unittest.TestLoader()\r\n\r\n# 通过加载测试类所在模块加载测试用例\r\nsuite.addTest(loader.loadTestsFromModule(test_login))\r\n\r\n\r\n# 通过加载测试类来加载测试用例\r\n# suite.addTest((loader.loadTestsFromTestCase(TestLogin)))" } ]
9
kalaikannan26s/hunter.p
https://github.com/kalaikannan26s/hunter.p
e0b2e2292dbfa7f92949d422f9db2b508f9545ee
a7e23a6c2212e4e17b16a0c8c56af6b59998344d
144e7bee2720e562a12609f09e4dd3d47c210196
refs/heads/master
2020-06-14T00:52:30.010932
2019-07-02T10:34:23
2019-07-02T10:34:23
194,841,993
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4922480583190918, "alphanum_fraction": 0.5, "avg_line_length": 20.5, "blob_id": "2685fa850a0aeefa63e8a643e1abc81cf600b6ee", "content_id": "eb05bd47e4ba9b0bed68dbf30046e09b420e0712", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 46, "num_lines": 12, "path": "/hun.py", "repo_name": "kalaikannan26s/hunter.p", "src_encoding": "UTF-8", "text": "l = int(input())\ns = list(map(int,input().split()))\nr = []\nfor i in range(len(s)):\n if s.count(s[i]) > 1:\n if s[i] not in r:\n r.append(s[i])\nr.sort()\nif len(r)==0:\n print(\"unique\")\nelse:\n print(\" \".join([str(elem) for elem in r]))\n" } ]
1
dvska/django-admin-ip-whitelist
https://github.com/dvska/django-admin-ip-whitelist
4a0c1a014c9d15d5c86dca1b96f9cb53ab7b2a5c
6692667808d7dd7774a06a9e3cba1bc82cacb32f
3cebc0febeea48f3a5cc33048c8795de041f9f5f
refs/heads/master
2021-12-26T12:52:30.260158
2021-12-20T10:16:55
2021-12-20T10:16:55
31,018,416
12
12
null
2015-02-19T14:19:13
2021-11-29T13:41:04
2021-12-20T10:16:56
Python
[ { "alpha_fraction": 0.557361364364624, "alphanum_fraction": 0.5783938765525818, "avg_line_length": 33.86666488647461, "blob_id": "0dae4be372d41918f59fbbd6b0fcf1e9bcf73b4a", "content_id": "c6527f1e293b161aad1aa460d2881a894c427398", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1046, "license_type": "permissive", "max_line_length": 114, "num_lines": 30, "path": "/migrations/0001_initial.py", "repo_name": "dvska/django-admin-ip-whitelist", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.4 on 2016-07-17 20:53\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='DjangoAdminAccessIPWhitelist',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('whitelist_reason', models.CharField(help_text=b'Reason for the whitelist?', max_length=255)),\n ('ip', models.CharField(help_text=b'Enter an IP to whitelist', max_length=255)),\n ],\n options={\n 'db_table': 'django_admin_access_ip_whitelist',\n 'verbose_name': 'Django /admin access IP whitelist',\n 'verbose_name_plural': 'Django /admin access allowed IPs',\n 'permissions': (('can_whitelist_user', 'Can Whitelist User'),),\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6402943730354309, "alphanum_fraction": 0.6430543065071106, "avg_line_length": 37.82143020629883, "blob_id": "cc93c382b9bd1d1dd063a5a391b03f14022ee544", "content_id": "777166ec302718bb4579ceff6370be55c9844e07", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1087, "license_type": "permissive", "max_line_length": 120, "num_lines": 28, "path": "/setup.py", "repo_name": "dvska/django-admin-ip-whitelist", "src_encoding": "UTF-8", "text": "#!/usr/bin/env 
python\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nVersion = '0.1.1'\nsetup(name='django-admin-ip-whitelist',\n version=Version,\n # install_requires='redis',\n description=\"Django middleware to allow access to /admin only for users, whose IPs are in the white list\",\n long_description=\"django-admin-ip-whitelist is a django middleware app to allow access to /admin by IP addresses\",\n author=\"dvska\",\n url=\"http://github.com/dvska/django-admin-ip-whitelist\",\n packages=['admin_ip_whitelist'],\n license='Apache',\n platforms='Posix; MacOS X;',\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n )\n" }, { "alpha_fraction": 0.5821428298950195, "alphanum_fraction": 0.6095714569091797, "avg_line_length": 39.69767379760742, "blob_id": "b979a0f9af8164a3652a9e962652eea7c143f2e8", "content_id": "5368a5fbbb41a43ea371f5a09a044a825fcc9c43", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7000, "license_type": "permissive", "max_line_length": 94, "num_lines": 172, "path": "/admin_ip_whitelist/tests.py", "repo_name": "dvska/django-admin-ip-whitelist", "src_encoding": "UTF-8", "text": "from django.core.cache import cache\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase, override_settings\nfrom testfixtures import LogCapture, log_capture\n\nfrom .models import ADMIN_ACCESS_WHITELIST_PREFIX, DjangoAdminAccessIPWhitelist\n\n\nclass MiddlewareTests(TestCase):\n def tearDown(self):\n cache.clear()\n\n def test_other_view(self):\n other_url = reverse('test')\n response = self.client.get(other_url, 
REMOTE_ADDR=\"5.5.5.5\")\n self.assertEquals(response.status_code, 200)\n self.assertEquals(response.content, 'Hello, World!')\n\n def test_denied(self):\n admin_url = reverse('admin:index')\n\n with LogCapture() as l:\n response = self.client.get(admin_url, REMOTE_ADDR=\"5.5.5.5\")\n\n expected_response = \"You are banned.\\n<!-- 5.5.5.5 -->\"\n self.assertEquals(response.status_code, 403) # forbidden\n self.assertEquals(response.content, expected_response)\n self.assertEquals(response['content-type'], 'text/html')\n\n module_name = 'admin_ip_whitelist.middleware'\n l.check(\n (module_name, \"DEBUG\", \"[django-admin-ip-whitelist] status = enabled\"),\n (module_name, \"DEBUG\", \"GOT IP FROM Request: 5.5.5.5 and User Agent None\"),\n )\n\n @override_settings(ADMIN_ACCESS_WHITELIST_MESSAGE='Leave, now.')\n def test_denied_custom_message(self):\n admin_url = reverse('admin:index')\n\n with LogCapture() as l:\n response = self.client.get(admin_url, REMOTE_ADDR=\"5.5.5.5\")\n expected_response = \"Leave, now.\\n<!-- 5.5.5.5 -->\"\n self.assertEquals(response.status_code, 403) # forbidden\n self.assertEquals(response.content, expected_response)\n self.assertEquals(response['content-type'], 'text/html')\n\n module_name = 'admin_ip_whitelist.middleware'\n l.check(\n (module_name, \"DEBUG\", \"[django-admin-ip-whitelist] status = enabled\"),\n (module_name, \"DEBUG\", \"GOT IP FROM Request: 5.5.5.5 and User Agent None\"),\n )\n\n @override_settings(ADMIN_ACCESS_WHITELIST_USE_HTTP_X_FORWARDED_FOR=True)\n @log_capture()\n def test_http_x_forward_for(self, l):\n DjangoAdminAccessIPWhitelist.objects.create(\n whitelist_reason='You are special',\n ip='1.2.3.4',\n )\n admin_url = reverse('admin:index')\n\n # Allowed, the FORWARDED address is being considered.\n response = self.client.get(\n admin_url, REMOTE_ADDR=\"5.5.5.5\",\n HTTP_X_FORWARDED_FOR=\"1.2.3.4, 4.4.4.4, 3.3.3.3\")\n self.assertEquals(response.status_code, 302) # redirect\n expected_url = 
\"{}?next={}\".format(reverse('admin:login'), admin_url)\n self.assertEquals(response.url, expected_url)\n\n # Allowed, If no forwarded address is given, it falls back\n # to REMOTE_ADDR.\n response = self.client.get(\n admin_url, REMOTE_ADDR=\"1.2.3.4\")\n self.assertEquals(response.status_code, 302) # redirect\n expected_url = \"{}?next={}\".format(reverse('admin:login'), admin_url)\n self.assertEquals(response.url, expected_url)\n\n module_name = 'admin_ip_whitelist.middleware'\n l.check(\n (module_name, \"DEBUG\", \"[django-admin-ip-whitelist] status = enabled\"),\n (module_name, \"DEBUG\", \"GOT IP FROM Request: 1.2.3.4 and User Agent None\"),\n (module_name, \"DEBUG\", \"/Admin access IP: DJANGO_ADMIN_ACCESS_WHITELIST:1.2.3.4\"),\n (module_name, \"DEBUG\", \"GOT IP FROM Request: 1.2.3.4 and User Agent None\"),\n (module_name, \"DEBUG\", \"/Admin access IP: DJANGO_ADMIN_ACCESS_WHITELIST:1.2.3.4\"),\n )\n\n @log_capture()\n def test_allowed(self, l):\n DjangoAdminAccessIPWhitelist.objects.create(\n whitelist_reason='You are special',\n ip='1.2.3.4',\n )\n admin_url = reverse('admin:index')\n\n # This user is not allowed.\n response = self.client.get(admin_url, REMOTE_ADDR=\"5.5.5.5\")\n expected_response = \"You are banned.\\n<!-- 5.5.5.5 -->\"\n self.assertEquals(response.status_code, 403) # forbidden\n self.assertEquals(response.content, expected_response)\n self.assertEquals(response['content-type'], 'text/html')\n\n # This user is special.\n response = self.client.get(admin_url, REMOTE_ADDR=\"1.2.3.4\")\n self.assertEquals(response.status_code, 302) # redirect\n expected_url = \"{}?next={}\".format(reverse('admin:login'), admin_url)\n self.assertEquals(response.url, expected_url)\n\n module_name = 'admin_ip_whitelist.middleware'\n l.check(\n (module_name, \"DEBUG\", \"[django-admin-ip-whitelist] status = enabled\"),\n (module_name, \"DEBUG\", \"GOT IP FROM Request: 5.5.5.5 and User Agent None\"),\n (module_name, \"DEBUG\", \"GOT IP FROM Request: 1.2.3.4 and 
User Agent None\"),\n (module_name, \"DEBUG\", \"/Admin access IP: DJANGO_ADMIN_ACCESS_WHITELIST:1.2.3.4\"),\n )\n\n\nclass ModelTests(TestCase):\n def tearDown(self):\n cache.clear()\n\n def test_instance_create_and_update(self):\n self.assertEquals(len(cache._cache.keys()), 0)\n cache_key = ADMIN_ACCESS_WHITELIST_PREFIX + '1.2.3.4'\n self.assertEquals(cache.get(cache_key), None)\n obj = DjangoAdminAccessIPWhitelist.objects.create(\n whitelist_reason='You are special',\n ip='1.2.3.4',\n )\n self.assertEquals(len(cache._cache.keys()), 1)\n self.assertEquals(cache.get(cache_key), '1')\n\n obj.ip = '5.5.5.5'\n obj.save()\n\n self.assertEquals(cache.get(cache_key), None)\n new_cache_key = ADMIN_ACCESS_WHITELIST_PREFIX + '5.5.5.5'\n self.assertEquals(cache.get(new_cache_key), '1')\n self.assertEquals(len(cache._cache.keys()), 1)\n\n def test_instance_delete(self):\n self.assertEquals(len(cache._cache.keys()), 0)\n obj = DjangoAdminAccessIPWhitelist.objects.create(\n whitelist_reason='You are special',\n ip='1.2.3.4',\n )\n self.assertEquals(len(cache._cache.keys()), 1)\n cache_key = ADMIN_ACCESS_WHITELIST_PREFIX + '1.2.3.4'\n self.assertEquals(cache.get(cache_key), '1')\n obj.delete()\n self.assertEquals(cache.get(cache_key), None)\n\n def test_unicode(self):\n obj = DjangoAdminAccessIPWhitelist.objects.create(\n whitelist_reason=u\"This is what a cat looks like: \\U0001F408\",\n ip='1.2.3.4',\n )\n\n self.assertEquals(\n unicode(obj),\n u\"Whitelisted 1.2.3.4 (This is what a cat looks like: \\U0001F408)\"\n )\n\n def test_str(self):\n obj = DjangoAdminAccessIPWhitelist.objects.create(\n whitelist_reason=u\"This is what a cat looks like: \\U0001F408\",\n ip='1.2.3.4',\n )\n\n self.assertEquals(\n str(obj),\n \"Whitelisted 1.2.3.4 (This is what a cat looks like: \\xF0\\x9F\\x90\\x88)\"\n )\n" }, { "alpha_fraction": 0.6551724076271057, "alphanum_fraction": 0.658965528011322, "avg_line_length": 38.17567443847656, "blob_id": "afc9cb7d771650014e1d0834ca19ad76155d7aa5", 
"content_id": "b434e68ad4c6ad92937781c8ff3a6e395ce27d88", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2900, "license_type": "permissive", "max_line_length": 139, "num_lines": 74, "path": "/admin_ip_whitelist/middleware.py", "repo_name": "dvska/django-admin-ip-whitelist", "src_encoding": "UTF-8", "text": "import logging\n\nimport django\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.exceptions import MiddlewareNotUsed\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.http import HttpResponseForbidden\n\nfrom .models import DjangoAdminAccessIPWhitelist, ADMIN_ACCESS_WHITELIST_PREFIX\n\nlog = logging.getLogger(__name__)\n\n\nclass AdminAccessIPWhiteListMiddleware(MiddlewareMixin):\n def __init__(self):\n \"\"\"\n Middleware init is called once per server on startup - do the heavy\n lifting here.\n \"\"\"\n # If disabled or not enabled raise MiddleWareNotUsed so django\n # processes next middleware.\n self.ENABLED = getattr(settings, 'ADMIN_ACCESS_WHITELIST_ENABLED', False)\n self.USE_HTTP_X_FORWARDED_FOR = getattr(settings, 'ADMIN_ACCESS_WHITELIST_USE_HTTP_X_FORWARDED_FOR', False)\n self.ADMIN_ACCESS_WHITELIST_MESSAGE = getattr(settings, 'ADMIN_ACCESS_WHITELIST_MESSAGE', 'You are banned.')\n\n if not self.ENABLED:\n raise MiddlewareNotUsed(\"django-admin-ip-whitelist is not enabled via settings.py\")\n\n log.debug(\"[django-admin-ip-whitelist] status = enabled\")\n\n # Prefix All keys in cache to avoid key collisions\n self.ABUSE_PREFIX = 'DJANGO_ADMIN_ACCESS_WHITELIST_ABUSE:'\n self.WHITELIST_PREFIX = ADMIN_ACCESS_WHITELIST_PREFIX\n\n for whitelist in DjangoAdminAccessIPWhitelist.objects.all():\n cache_key = self.WHITELIST_PREFIX + whitelist.ip\n cache.set(cache_key, \"1\")\n\n def _get_ip(self, request):\n ip = request.META['REMOTE_ADDR']\n if self.USE_HTTP_X_FORWARDED_FOR or not ip or ip == '127.0.0.1':\n ip = 
request.META.get('HTTP_X_FORWARDED_FOR', ip).split(',')[0].strip()\n return ip\n\n def process_request(self, request):\n if not request.path.startswith('/admin'):\n return None\n\n ip = self._get_ip(request)\n\n user_agent = request.META.get('HTTP_USER_AGENT', None)\n\n log.debug(\"GOT IP FROM Request: %s and User Agent %s\" % (ip, user_agent))\n\n if self.is_whitelisted(ip):\n return None\n else:\n return self.http_response_forbidden(self.ADMIN_ACCESS_WHITELIST_MESSAGE + '\\n<!-- {} -->'.format(ip), content_type=\"text/html\")\n\n @staticmethod\n def http_response_forbidden(message, content_type):\n if django.VERSION[:2] > (1, 3):\n kwargs = {'content_type': content_type}\n else:\n kwargs = {'mimetype': content_type}\n return HttpResponseForbidden(message, **kwargs)\n\n def is_whitelisted(self, ip):\n # If a whitelist key exists, return True to allow the request through\n is_whitelisted = cache.get(self.WHITELIST_PREFIX + ip)\n if is_whitelisted:\n log.debug(\"/Admin access IP: \" + self.WHITELIST_PREFIX + ip)\n return is_whitelisted\n\n" }, { "alpha_fraction": 0.8602941036224365, "alphanum_fraction": 0.8602941036224365, "avg_line_length": 25.799999237060547, "blob_id": "2dcdf96f0cdb7905efdd985c8ed9e910d94b1759", "content_id": "916adc252f9936fe4c933eed4b0804a94ef036b8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "permissive", "max_line_length": 49, "num_lines": 5, "path": "/admin_ip_whitelist/admin.py", "repo_name": "dvska/django-admin-ip-whitelist", "src_encoding": "UTF-8", "text": "\n\nfrom django.contrib import admin\n\nfrom .models import DjangoAdminAccessIPWhitelist\n\nadmin.site.register(DjangoAdminAccessIPWhitelist)\n" }, { "alpha_fraction": 0.6904761791229248, "alphanum_fraction": 0.6904761791229248, "avg_line_length": 22.33333396911621, "blob_id": "19fdaa38de10d3d1a76a4c1788bc9dee8aefe504", "content_id": "8583cccdb61df722478877d0bcd9190158025e87", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "permissive", "max_line_length": 52, "num_lines": 9, "path": "/admin_ip_whitelist/test_urls.py", "repo_name": "dvska/django-admin-ip-whitelist", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .test_views import TestView\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^test/', TestView.as_view(), name='test'),\n]\n" }, { "alpha_fraction": 0.7317517995834351, "alphanum_fraction": 0.7372262477874756, "avg_line_length": 24.292306900024414, "blob_id": "046ab90c9286692866a54a281ce89d45269f459d", "content_id": "9e91312a68b77927a62c3a7888ebafb7e230de87", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1644, "license_type": "permissive", "max_line_length": 148, "num_lines": 65, "path": "/README.md", "repo_name": "dvska/django-admin-ip-whitelist", "src_encoding": "UTF-8", "text": "django-admin-ip-whitelist\n====\ndjango-admin-ip-whitelist is a Django middleware app to ban users whose IPs are not whitelisted.\n\nStores whole 'whitelist' in memory to avoid database lookups on every request. \n\n\nInstallation\n------------\n\nRequirements:\n\n* Python 2.5+\n* Django\n* Memcache/Redis/.. \n\nGet django-admin-ip-whitelist \n--------\n\nGet the source:\n\nBrowse the source on GitHub: <http://github.com/dvska/django-admin-ip-whitelist>\n\nClone with Git:\n\n $ git clone git://github.com/dvska/django-admin-ip-whitelist\n\n\nInstall via easy_install or pip\n\n easy_install django-admin-ip-whitelist\n pip install django-admin-ip-whitelist\n\n\nSetup\n------\nInstall django-admin-ip-whitelist. 
Make sure it is on your PYTHONPATH or in your django project directory.\n\nIn your django project settings.py you must set the following options:\n\n 1) Add 'admin_ip_whitelist.middleware.AdminAccessIPWhiteListMiddleware' to MIDDLEWARE_CLASSES\n\n 2) Add 'admin_ip_whitelist' to INSTALLED_APPS\n\n 3) Add ADMIN_ACCESS_WHITELIST_ENABLED = True to enable django-admin-ip-whitelis (handy if you lock yourself out, you can just set this to False)\n\n 4) Run migrations to create the table for whitelisted IPs:\n\n ./manage.py migrate admin_ip_whitelist\n\n 4) Optionally set ADMIN_ACCESS_WHITELIST_MESSAGE (default is \"You are banned.\") to change default message for banned user.\n\nIssues\n------\nFind a bug? Want a feature? Submit an [issue\nhere](http://github.com/dvska/django-admin-ip-whitelist/issues). Patches welcome!\n\nLicense\n------\ndjango-admin-ip-whitelist is released under the Apache Software License, Version 2.0\n\n\nAuthors\n-------\n * dvska\n" }, { "alpha_fraction": 0.6809908747673035, "alphanum_fraction": 0.6860465407371521, "avg_line_length": 31.42622947692871, "blob_id": "dfb5522f87aefd7ddf20b11a0640e4dc74a99582", "content_id": "7fce545350b02979db41e17dfe60ef38e5aa5af5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1978, "license_type": "permissive", "max_line_length": 94, "num_lines": 61, "path": "/admin_ip_whitelist/models.py", "repo_name": "dvska/django-admin-ip-whitelist", "src_encoding": "UTF-8", "text": "# dvska made\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n\n\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.db.models.signals import post_delete, pre_save\n\nADMIN_ACCESS_WHITELIST_PREFIX = 'DJANGO_ADMIN_ACCESS_WHITELIST:'\nWHITELIST_PREFIX = 'DJANGO_ADMIN_ACCESS_WHITELIST:'\n\n\nclass DjangoAdminAccessIPWhitelist(models.Model):\n whitelist_reason = models.CharField(max_length=255, help_text=\"Reason for the 
whitelist?\")\n ip = models.CharField(max_length=255, help_text='Enter an IP to whitelist')\n\n def __unicode__(self):\n return \"Whitelisted %s (%s)\" % (self.ip, self.whitelist_reason)\n\n def __str__(self):\n return self.__unicode__().encode('utf-8')\n\n class Meta:\n permissions = ((\"can_whitelist_user\", \"Can Whitelist User\"),)\n verbose_name = \"Django /admin access IP whitelist\"\n verbose_name_plural = \"Django /admin access allowed IPs\"\n db_table = 'django_admin_access_ip_whitelist'\n\n\ndef _generate_cache_key(instance):\n return ADMIN_ACCESS_WHITELIST_PREFIX + instance.ip\n\n\ndef _update_cache(sender, **kwargs):\n # add a whitelist entry\n\n new_instance = kwargs.get('instance')\n\n # If the entry has changed, remove the old cache entry and\n # add the new one.\n if new_instance.pk:\n old_instance = DjangoAdminAccessIPWhitelist.objects.get(\n pk=new_instance.pk)\n\n if _generate_cache_key(old_instance) != \\\n _generate_cache_key(new_instance):\n old_cache_key = _generate_cache_key(old_instance)\n cache.delete(old_cache_key)\n\n cache_key = _generate_cache_key(new_instance)\n cache.set(cache_key, \"1\")\n\n\ndef _delete_cache(sender, **kwargs):\n instance = kwargs.get('instance')\n cache_key = _generate_cache_key(instance)\n cache.delete(cache_key)\n\n\npre_save.connect(_update_cache, sender=DjangoAdminAccessIPWhitelist)\npost_delete.connect(_delete_cache, sender=DjangoAdminAccessIPWhitelist)\n" } ]
8
BrianIshii/StockAnalyzer
https://github.com/BrianIshii/StockAnalyzer
117e8a02ace3b885b99f50768e04e609e02728d0
6154b24d3cab9265fdbc94961c4542affe344c55
701ec38e3de42e18ee2fb0ee55f0ec101bd057b6
refs/heads/master
2021-07-07T05:46:30.099263
2017-10-01T18:23:22
2017-10-01T18:23:22
79,314,231
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6049864888191223, "alphanum_fraction": 0.6230099201202393, "avg_line_length": 32.96938705444336, "blob_id": "cdc5b0287c4ca5b2e03aeb3d81cbab2fd85a17f1", "content_id": "9dfd7083119a3f1d2b782ba8878aeac176b62ae7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3329, "license_type": "no_license", "max_line_length": 98, "num_lines": 98, "path": "/AAPL.py", "repo_name": "BrianIshii/StockAnalyzer", "src_encoding": "UTF-8", "text": "import datetime\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport Analysis.trade as trade\n\nglobal dates\n\ndef main():\n global dates\n start_date = \"2015-01-02\"\n symbols = ['SPY']\n\n dates = pd.date_range(\"2015-01-02\", \"2015-02-02\")\n symbols.append(\"AAPL\") \n df = trade.get_data(symbols,dates)\n print(str(df)) \n print(df)\n graph_close(df, \"AAPL\") \n\n# \ndef graph_close(df,symbol):\n #past_year = df[symbol][252:]\n past_year = df[symbol][5:]\n past_year\n print(type(past_year[0]))\n #need to change it to float\n #df[symbol].hist(bins=2,label = symbol)\n #past_year.hist(bins=2,label = \"Past Year\")\n #plt.legend(loc=\"upper right\")\n #past_year_mean = past_year.mean()\n #past_year_std = past_year.std()\n #mean = df[symbol].mean()\n #print(\"mean = \" + str(mean))\n #std = df[symbol].std()\n #print(\"std = \" + str(std))\n #simplify with func\n #plt.axvline(mean,color='w',linestyle=\"dashed\",linewidth=2)\n #plt.axvline(std + mean,color='r',linestyle=\"dashed\",linewidth=2)\n #plt.axvline(-std + mean,color='r',linestyle=\"dashed\",linewidth=2)\n #plt.axvline(past_year_mean,color='w',linewidth=2)\n #plt.axvline(past_year_std + past_year_mean,color='r',linewidth=2)\n #plt.axvline(-past_year_std + past_year_mean,color='r',linewidth=2)\n #plt.axvline(df[symbol][-1],color='black',linewidth=2)\n #print(df.kurtosis())\n\ndef buy_sell(df,symbol):\n rm,upper_band,lower_band = 
trade.get_bollinger_bands(symbol,df[symbol],20,False)\n rm = pd.DataFrame(rm)\n rm = rm.rename(columns={symbol:\"rm\"})\n df = df.join(rm)\n upper_band = pd.DataFrame(upper_band)\n upper_band = upper_band.rename(columns={symbol:\"upper_band\"})\n df = df.join(upper_band)\n lower_band = pd.DataFrame(lower_band)\n lower_band = lower_band.rename(columns={symbol:\"lower_band\"})\n df = df.join(lower_band)\n df = df.dropna(subset=[\"rm\"])\n add = float(df[symbol][0]/1000)\n df['sell_points'] = (df[symbol] >= df[\"upper_band\"]-add).astype(float)\n df['buy_points'] = (df[symbol] <= df[\"lower_band\"]+add).astype(float)\n df['sell'] = 0\n df['buy'] = 0\n df['sell'][df['sell_points'] == 1] = df[symbol]\n df['sell'][df['sell_points'] == 0] = \"NaN\"\n df['buy'][df['buy_points'] == 1] = df[symbol]\n df['buy'][df['buy_points'] == 0] = \"NaN\"\n plt.plot(df['sell'],'go')\n plt.plot(df[\"buy\"],'ro')\n plt.plot(df[symbol])\n plt.plot(df['lower_band'],'r')\n plt.plot(df['upper_band'],'g')\n\ndef volume(df,symbol):\n global dates\n symbols = [symbol]\n df_vol = trade.get_data(symbols,dates,\"Volume\")\n df_vol = df_vol.rename(columns={symbol:\"Volume\"})\n df_vol = (df_vol/1000000)\n plt.plot(df[symbol])\n plt.plot(df_vol[\"Volume\"])\n print(df_vol)\n\ndef scatter_plot(symbol,symbols):\n global dates\n df= trade.get_data(symbols,dates)\n daily_returns = trade.compute_daily_returns(df)\n daily_returns.plot(kind='scatter',x='SPY',y=symbol)\n beta_symbol,alpha_symbol = np.polyfit(daily_returns['SPY'],daily_returns[symbol],1)\n plt.plot(daily_returns['SPY'], beta_symbol*daily_returns['SPY'] + alpha_symbol, '-',color='r')\n print(\"alpha of symbol: \" , alpha_symbol)\n print(\"beta of symbol: \" , beta_symbol)\n print(daily_returns.corr(method='pearson'))\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5671902298927307, "alphanum_fraction": 0.5710296630859375, "avg_line_length": 31.191011428833008, "blob_id": "e3efa61f2472eb46e5edcd718528586667a6cad5", 
"content_id": "e266b9ea1a7307d9b76a9ac82a421adf8e0cfcf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2865, "license_type": "no_license", "max_line_length": 73, "num_lines": 89, "path": "/Data/data.py", "repo_name": "BrianIshii/StockAnalyzer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"\ndata.py is an object to look at stock data\n\nBrian Ishii 2017\n\"\"\"\n\nimport os\nimport pandas as pd\nimport json\n\nclass Data:\n\n def __init__(self, start_date, end_date, data_type=\"Adj Close\"):\n self.symbols = [\"SPY\"]\n self.dates = self.get_dates(start_date, end_date)\n self.data_type = data_type\n self.df = self.get_data() \n\n def __repr__(self):\n return \"Data({!r}, {!r}, {!r})\".format(\n self.symbols, self.dates, self.data_type)\n\n def __eq__(self):\n pass\n\n def get_data(self):\n \"\"\"Returns a pd.DataFrame with desired data\n \n Keyword arguments:\n symbols: (List) list of symbols i.e. [\"SPY\", \"AAPL\"]\n dates: (DatetimeIndex) range of dates desired\n col: (String) column name of data requested (default \"Adj Close\")\n \"\"\"\n df = pd.DataFrame(index=self.dates)\n\n for symbol in self.symbols:\n temp = pd.read_csv(self.path_to_symbol(symbol),\n index_col=\"Date\", usecols=[\"Date\", self.data_type],\n parse_dates=True, na_values = [\"NaN\"])\n temp = temp.rename(columns={self.data_type:symbol})\n df = df.join(temp)\n if symbol == \"SPY\":\n df = df.dropna(subset=[\"SPY\"])\n return df\n\n def path_to_symbol(self, symbol, base_dir=\"Data\"):\n \"\"\"returns the CSV file path for the given symbol\n\n Keyword arguments:\n symbol: (String) stock name i.e. 
\"AAPL\"\n base_dir: (String) base directory for the file (default \"Data\")\n \"\"\"\n path = os.getcwd()\n return os.path.join(path + \"/{!s}.csv\".format(symbol))\n \n def get_dates(self, start_date, end_date):\n \"\"\"returns a pandas date range indexed for each day\n\n Keyword arguments:\n start_date: (String) YYYY-MM-DD\n end_date: (String) YYYY-MM-DD\n \"\"\"\n return pd.date_range(start_date, end_date)\n\n def get_bollinger_bands(self, symbol, window=20):\n \"\"\"returns a tuple with (rolling mean, upper_band, and\n lower_band)\n\n Keyword arguments:\n symbol: (String) stock symbol i.e. \"AAPL\"\n window: (int) number of days to include in the mean (default 20)\n \"\"\"\n if symbol not in self.symbols:\n raise IndexError()\n values = self.df[symbol]\n rolling_mean = values.rolling(window=window).mean()\n rolling_std = values.rolling(window=window).std()\n upper_band = rolling_mean + rolling_std * 2\n lower_band = rolling_mean - rolling_std * 2\n return rolling_mean, upper_band, lower_band\n\n def add_stock(self, symbol):\n \"\"\"appends the stock symbol to the symbols list\n \n Keyword arguments:\n symbol: (String) stock symbol i.e. 
\"AAPL\" \n \"\"\"\n self.symbols.append(symbol)\n" }, { "alpha_fraction": 0.6020066738128662, "alphanum_fraction": 0.6220735907554626, "avg_line_length": 13.949999809265137, "blob_id": "8c4b7ffc9d3baa2966dcefaee6aaab8b4b0c7f96", "content_id": "9db473183fd91dc27d9859bf65eee1c4f228baf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 40, "num_lines": 20, "path": "/Stock/stock_tests.py", "repo_name": "BrianIshii/StockAnalyzer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"\nstock_tests.py has test for stock.py\n\nBrian Ishii 2017\n\"\"\"\n\nimport unittest\n\nfrom stock import *\nclass StockTestCases(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n pass\n\n def test_1(self):\n pass\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5141509175300598, "alphanum_fraction": 0.525943398475647, "avg_line_length": 16.66666603088379, "blob_id": "bbaa65689047e9e2ae38abbabe0ec2d2184296bb", "content_id": "e46083592252b51d1481d002393bcbf7ef60a06c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 424, "license_type": "no_license", "max_line_length": 45, "num_lines": 24, "path": "/Stock/Stock.py", "repo_name": "BrianIshii/StockAnalyzer", "src_encoding": "UTF-8", "text": "#/usr/bin/env python3\n\"\"\"\ndata.py is an object to look at stock data\n\nBrian Ishii 2017\n\"\"\"\nimport json\n\nclass Stock:\n def __init__(self, symbol):\n self.symbol = symbol \n \n def __repr__(self):\n pass\n\n def __str__(self):\n pass\n\n\n def get_json_data(self, symbol):\n f = open(\"stock.json\", 'r')\n temp = json.load(f)\n f.close()\n return temp[\"Stocks\"][\"Tech\"][symbol]\n" }, { "alpha_fraction": 0.5429936051368713, "alphanum_fraction": 0.5971337556838989, "avg_line_length": 22.296297073364258, "blob_id": "f25ff1819298245bb00fee7bb8786e7fb194028f", "content_id": 
"65d2d386b100cd4abc4ca08cdb5f20029d931bbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "no_license", "max_line_length": 46, "num_lines": 27, "path": "/Analysis/practice_numpy.py", "repo_name": "BrianIshii/StockAnalyzer", "src_encoding": "UTF-8", "text": "import numpy as np\n\ndef OneD_array():\n print np.array([2,3,4])\ndef TwoD_array():\n\tprint np.array([(2,3,4),(5,6,7)])\ndef empty_array():\n #print np.empty(5)\n #print np.empty((4,5))\n print np.empty((5,4,3))\ndef ones_array():\n\tprint np.ones((5,4,3))\ndef random_array():\n\tprint np.random.random((5,4))\ndef randInt_array():\n #print np.random.randint(10)\n #print np.random.randint(0,10)\n #print np.random.randint(0,10,size=5)\n print np.random.randint(0, 10, size=(2,3))\n\nif __name__ == \"__main__\":\n #OneD_array()\n #TwoD_array()\n #empty_array()\n #ones_array()\n #random_array()\n #randInt_array()" }, { "alpha_fraction": 0.5665979385375977, "alphanum_fraction": 0.6131958961486816, "avg_line_length": 28.22891616821289, "blob_id": "b68946ffa95eb0e67649a17f0f029d01a67a7755", "content_id": "fb851b392cd21a16d225e1850bfeff5ab1286bfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2425, "license_type": "no_license", "max_line_length": 146, "num_lines": 83, "path": "/practice.py", "repo_name": "BrianIshii/StockAnalyzer", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport trade\n\ndef first_graph():\n df = pd.read_csv(\"StockData/AAPL_stock_history.csv\")\n plt.plot(df[\"Adj Close\"])\n print df\n plt.show()\n\ndef new_dataFrame():\n\t#define date range\n start_date = \"2016-01-22\"\n end_date = \"2016-12-26\"\n dates = pd.date_range(start_date,end_date)\n\n #create empty data frame\n df1 = pd.DataFrame(index = dates)\n \n #Read AAPL data\n df_AAPL = pd.read_csv(\"StockData/AAPL_stock_history.csv\",index_col = 
\"Date\",parse_dates=True,na_values = ['nan'])\n new_df = df1.join(df_AAPL)\n new_df = new_df.dropna()\n plt.plot(new_df[\"Adj Close\"])\n plt.show()\n print new_df\n\ndef join_dataFrame():\n\t#set range for df1\n start_date = \"1999-01-22\"\n end_date = \"2016-12-26\"\n dates = pd.date_range(start_date,end_date)\n\n #create empty data frame\n df1 = pd.DataFrame(index = dates)\n \n #Read AAPL data\n df_AAPL = pd.read_csv(\"StockData/AAPL_stock_history.csv\",index_col = \"Date\",usecols=[\"Date\",\"Adj Close\"],parse_dates=True,na_values = ['nan'])\n \n #Rename 'Adj Close' to \"AAPL\" \n df_AAPL = df_AAPL.rename(columns={'Adj Close':'AAPL'})\n \n df1 = df1.join(df_AAPL, how = 'inner')\n df1 = df1.dropna()\n\n #read SPY data\n df_SPY = pd.read_csv(\"StockData/SPY_stock_history.csv\",index_col = \"Date\",usecols=[\"Date\",\"Adj Close\"],parse_dates=True,na_values = ['nan'])\n df_SPY = df_SPY.rename(columns={'Adj Close':'SPY'})\n \n #join df_SPY with df1\n df1 = df1.join(df_SPY)\n \n print df1\n plt.plot(df1[[\"AAPL\",\"SPY\"]])\n plt.show()\n\ndef first_time_using_lib():\n # Define a date range\n dates = pd.date_range('2009-01-22', '2010-01-26')\n\n # Choose stock symbols to read\n symbols = ['AAPL','GOOGL','AMZN']\n \n # Get stock data\n df = trade.get_data(symbols, dates)\n df2 = df.ix['2009-01-22':'2010-01-28',[\"AAPL\"]]\n #plt.plot(df2[\"AAPL\"])\n #plt.show()\n trade.plot_data(df2)\n print df2\ndef multiple_stocks_on_a_graph():\n dates = pd.date_range('2016-01-01','2017-01-01')\n symbols = ['AAPL','GOOGL','AMZN']\n df = trade.get_data(symbols, dates)\n print df\n trade.plot_selected(trade.normalize_data(df),symbols,'2016-01-01','2017-01-01')\nif __name__ == \"__main__\":\n #first_graph()\n #new_dataFrame()\n #join_dataFrame()\n #first_time_using_lib()\n multiple_stocks_on_a_graph()\n print trade.symbol_to_path(\"AAPL\")" }, { "alpha_fraction": 0.6061462163925171, "alphanum_fraction": 0.6209819912910461, "avg_line_length": 31.360000610351562, "blob_id": 
"f8a443c3f427e239a1d4c182aceec97dd698a500", "content_id": "0753504951b35029ecb8838c862af631ef19c699", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5662, "license_type": "no_license", "max_line_length": 129, "num_lines": 175, "path": "/Analysis/trade.py", "repo_name": "BrianIshii/StockAnalyzer", "src_encoding": "UTF-8", "text": "'''\nTrade.py is a library to look at stock data\nWritten by Brian Ishii 2017\n\n'''\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\n#import updateData\n\ndef plot_selected(df, columns, start_index, end_index):\n \"\"\"\n plots selected data\n\n Arguments:\n df -- (pd.DataFrame) pandas dataframe\n columns -- (List) list of stock names i.e. \"AAPL\"\n start_index -- (date) starting date of data graphed year-month-day formmat i.e. '2017-01-01'\n end_index -- (date) ending date of data graphed year-month-day formmat i.e. '2017-01-01'\n \"\"\"\n plot_data(df.ix[start_index:end_index,columns],title=\"Selected Data ({})-({})\".format(start_index,end_index))\n\ndef path_to_symbol(symbol, base_dir=\"Data\"):\n \"\"\"\n returns the CSV file path given the ticker symbol\n\n Arguments:\n symbol -- (String) stock name i.e. \"AAPL\"\n \"\"\"\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))\n\ndef get_data(symbols, dates, col=\"Adj Close\"):\n \"\"\"\n Read stock data (adjusted close) for given symbols from CSV files.\n\n Arguments:\n symbols -- (List) list of symbols i.e. [\"AAPL\",\"GOOGL\"]\n dates -- (pd.date_range) range of dates called\n col -- (String) column name of data requested i.e. 
'Volume'\n \"\"\"\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n df_temp = pd.read_csv(path_to_symbol(symbol),index_col=\"Date\",usecols= [\"Date\",col],parse_dates=True,na_values = ['nan'])\n df_temp = df_temp.rename(columns={col:symbol})\n df = df.join(df_temp)\n if symbol == 'SPY':\n df = df.dropna(subset=[\"SPY\"])\n return df\n\ndef plot_data(df,title=\"Stock Prices\",ylabel=\"Prices\"):\n \"\"\"\n plots stock prices with labels\n\n Arguments:\n df -- (pd.DataFrame) dataframe with price and date\n \"\"\"\n ax = df.plot(title=title,fontsize=12)\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(ylabel)\n plt.show()\n\ndef normalize_data(df):\n \"\"\"\n Normalize Data\n\n Arguments:\n df -- (pd.DataFrame) pandas dataframe\n \"\"\"\n return df/ df.ix[0,:]\ndef get_bollinger_bands(symbol,values,window,plot):\n \"\"\"\n Get Upper and lower bands\n\n Arguments:\n symbol -- (String) stock name i.e. \"AAPL\"\n values --(pd.Dataframe) i.e. df['AAPL']\n window -- (int) how many days i.e. 20\n plot -- (Bool) plot True or False\n \"\"\"\n rm = pd.rolling_mean(values,window=window)\n rstd = pd.rolling_std(values,window=window)\n upper_band = rm + rstd * 2\n lower_band = rm - rstd * 2\n if plot is True:\n ax = values.plot(title=\"Bollinger Bands\", label=symbol)\n rm.plot(label=\"Rolling Mean\", ax=ax)\n upper_band.plot(label=\"Upper-Band\", ax=ax)\n lower_band.plot(label=\"Lower-Band\", ax=ax)\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Price\")\n ax.legend(loc='upper left')\n plt.show()\n return rm,upper_band,lower_band\ndef compute_daily_returns(df):\n \"\"\"\n Compute the Daily returns of a stock\n\n Arguments:\n df -- (pd.DataFrame) i.e. 
df['AAPL']\n \"\"\"\n daily_returns = ((df / df.shift(1))-1)\n daily_returns.ix[0,:] = 0\n return daily_returns\ndef daily_returns_hist(symbols,dates):\n \"\"\"\n Read stock data (adjusted close) for given symbols from CSV files.\n\n Arguments:\n symbols -- (List) list of symbols i.e. [\"AAPL\",\"GOOGL\"]\n dates -- (pd.date_range) range of dates called\n \"\"\"\n df = get_data(symbols,dates)\n daily_returns = compute_daily_returns(df)\n for symbol in symbols:\n daily_returns[symbol].hist(bins=20,label = symbol)\n plt.legend(loc=\"upper right\")\n if len(symbols) == 1:\n mean = daily_returns['SPY'].mean()\n print(\"mean = \" + str(mean))\n std = daily_returns['SPY'].std()\n plt.axvline(mean,color='w',linestyle=\"dashed\",linewidth=2)\n plt.axvline(std,color='r',linestyle=\"dashed\",linewidth=2)\n plt.axvline(-std,color='r',linestyle=\"dashed\",linewidth=2)\n print(daily_returns.kurtosis())\n plt.show()\ndef compute_cumulative_returns(df, start_index, end_index):\n \"\"\"\n compute the cumulative returns of a stock in a time period\n\n Arguments:\n df -- (pd.DataFrame) i.e. df['AAPL']\n start_index -- (date) starting date of data graphed year-month-day formmat i.e. '2017-01-01'\n end_index -- (date) ending date of data graphed year-month-day formmat i.e. '2017-01-01'\n \"\"\"\n cumulative_returns = (df.loc[end_index]/df.loc[start_index])-1\n return cumulative_returns\ndef sharpe_ratio(df,symbol,start_index, end_index):\n \"\"\"\n computes the sharpe ratio returns sharpe, c, mean, and std\n\n Arguments:\n df -- (pd.DataFrame) i.e. df['AAPL']\n start_index -- (date) starting date of data graphed year-month-day formmat i.e. '2017-01-01'\n end_index -- (date) ending date of data graphed year-month-day formmat i.e. 
'2017-01-01'\n \"\"\"\n d = compute_daily_returns(df)\n c = compute_cumulative_returns(df,start_index,end_index)\n mean = d[symbol].mean()\n std = d[symbol].std()\n sharpe = math.sqrt(252)*(mean/std)\n return sharpe,c,mean,std\n\ndef check_data(today,now):\n \"\"\"\n Checks Date and updates the CSV data files if necessary\n\n Arguments:\n today -- (String) string in date form i.e. '2017-01-01'\n \"\"\"\n df_check = pd.read_csv(path_to_symbol(\"SPY\"))\n date = df_check[\"Date\"][0]\n if date != today:\n if int(now.strftime('%w')) == 1:\n print(\"Data up to date\")\n return\n else:\n updateData.update_data(\"StockData\")\n return\n else: \n print(\"Data up to date\")\n return" }, { "alpha_fraction": 0.6549707651138306, "alphanum_fraction": 0.6608186960220337, "avg_line_length": 30.18181800842285, "blob_id": "9c0757cfa82cdb629e81300ae181262b70465e83", "content_id": "3025ed9a4cb43bec91cb32322b24c0dcdca0e231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 342, "license_type": "no_license", "max_line_length": 71, "num_lines": 11, "path": "/Analysis/test_trade.py", "repo_name": "BrianIshii/StockAnalyzer", "src_encoding": "UTF-8", "text": "import unittest\nfrom trade import *\n\nclass TimerTests (unittest.TestCase):\n def test_symbol_to_path_1(self):\n self.assertEqual(symbol_to_path(\"AAPL\"),\"StockData/AAPL.csv\")\n \n def test_symbol_to_path_2(self):\n self.assertEqual(symbol_to_path(\"GOOGL\"),\"StockData/GOOGL.csv\")\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.2815625071525574, "alphanum_fraction": 0.48656249046325684, "avg_line_length": 32.68421173095703, "blob_id": "6fe2cad42d9421e690e1aca830cfc1e088bb7582", "content_id": "19c43985c82869362c63b5d61294dccbfd4dd72a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3200, "license_type": "no_license", "max_line_length": 77, "num_lines": 95, "path": "/Data/data_tests.py", 
"repo_name": "BrianIshii/StockAnalyzer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"\ndata_tests.py has tests for data.py\n\nBrian Ishii 2017\n\"\"\"\n\n\nimport unittest\nimport os\n\nfrom data import *\nclass DataTests(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n test = Data(\"2015-01-02\", \"2015-01-06\")\n cls.test = test\n cls.cwd = os.getcwd()\n\n def test_path_to_symbol_aapl(self):\n self.assertEqual(self.test.path_to_symbol(\"AAPL\"),\n self.cwd + \"/AAPL.csv\")\n\n def test_path_to_symbol_spy(self):\n self.assertEqual(self.test.path_to_symbol(\"SPY\"),\n self.cwd + \"/SPY.csv\")\n\n def test_get_data(self):\n output = (\"\" \n + \" SPY\\n\"\n + \"2015-01-02 197.045185\\n\"\n + \"2015-01-05 193.486620\\n\"\n + \"2015-01-06 191.664176\\n\"\n + \"2015-01-07 194.052535\\n\"\n + \"2015-01-08 197.496002\\n\"\n + \"2015-01-09 195.913355\\n\"\n + \"2015-01-12 194.378654\\n\"\n + \"2015-01-13 193.831927\")\n temp = Data(\"2015-01-02\", \"2015-01-13\")\n self.assertEqual(str(temp.get_data()), output)\n\n def test_get_dates(self):\n dates = (\"DatetimeIndex(['2015-01-02',\"\n + \" '2015-01-03', '2015-01-04', '2015-01-05',\\n\" \n + \" '2015-01-06'],\\n\"\n + \" dtype='datetime64[ns]', freq='D')\")\n self.assertEqual(\n str(self.test.get_dates(\"2015-01-02\", \"2015-01-06\")),\n dates)\n\n def test_get_bollinger_bands_error(self):\n self.assertRaises(IndexError, self.test.get_bollinger_bands, \"AAPL\")\n\n def test_get_bollinger_bands(self):\n temp = Data(\"2015-01-01\", \"2015-01-15\")\n rm = (\"2015-01-02 NaN\\n\"\n + \"2015-01-05 NaN\\n\"\n + \"2015-01-06 NaN\\n\"\n + \"2015-01-07 NaN\\n\"\n + \"2015-01-08 194.748904\\n\"\n + \"2015-01-09 194.522538\\n\"\n + \"2015-01-12 194.700944\\n\"\n + \"2015-01-13 195.134495\\n\"\n + \"2015-01-14 194.856332\\n\"\n + \"2015-01-15 193.536497\\n\"\n + \"Name: SPY, dtype: float64\")\n ub = (\"2015-01-02 NaN\\n\"\n + \"2015-01-05 NaN\\n\"\n + \"2015-01-06 NaN\\n\"\n + \"2015-01-07 NaN\\n\"\n + 
\"2015-01-08 199.689884\\n\"\n + \"2015-01-09 199.021440\\n\"\n + \"2015-01-12 199.063118\\n\"\n + \"2015-01-13 198.236422\\n\"\n + \"2015-01-14 198.621840\\n\"\n + \"2015-01-15 197.302006\\n\"\n + \"Name: SPY, dtype: float64\")\n lb = (\"2015-01-02 NaN\\n\"\n + \"2015-01-05 NaN\\n\"\n + \"2015-01-06 NaN\\n\"\n + \"2015-01-07 NaN\\n\"\n + \"2015-01-08 189.807923\\n\"\n + \"2015-01-09 190.023635\\n\"\n + \"2015-01-12 190.338771\\n\"\n + \"2015-01-13 192.032567\\n\"\n + \"2015-01-14 191.090823\\n\"\n + \"2015-01-15 189.770988\\n\"\n + \"Name: SPY, dtype: float64\")\n test_rm, test_ub, test_lb = temp.get_bollinger_bands(\"SPY\", window=5)\n self.assertEqual(str(test_rm), rm)\n self.assertEqual(str(test_ub), ub)\n self.assertEqual(str(test_lb), lb)\n\nif __name__ == '__main__':\n unittest.main()\n" } ]
9